Python dense_design_matrix.DenseDesignMatrix Class Code Examples


This article collects typical usage examples of the Python class pylearn2.datasets.dense_design_matrix.DenseDesignMatrix. If you have been asking yourself how the DenseDesignMatrix class works, how to use it, or what real examples of it look like, the hand-picked class code examples below should help.



The sections below present 20 code examples of the DenseDesignMatrix class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python code examples.
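
Before the collected examples, here is a minimal sketch (not taken from any of the projects below) of the two most common ways to build a DenseDesignMatrix: from a 2-D design matrix X and from a 4-D topological view. It assumes pylearn2 and NumPy are available; the array shapes and variable names are illustrative only.

import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix

rng = np.random.RandomState(0)

# From a design matrix: one example per row, one feature per column.
X = rng.randn(100, 48).astype('float32')
y = rng.randint(low=0, high=10, size=(100, 1))
ds = DenseDesignMatrix(X=X, y=y)

# From a topological view with axes (batch, rows, cols, channels).
topo = rng.randn(100, 4, 4, 3).astype('float32')
ds_topo = DenseDesignMatrix(topo_view=topo)

# Both constructions expose the same interface: the topological view is
# flattened into a design matrix of shape (batch, rows * cols * channels).
assert ds.get_design_matrix().shape == (100, 48)
assert ds_topo.get_design_matrix().shape == (100, 4 * 4 * 3)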

Example 1: get_feats_from_cnn

def get_feats_from_cnn(rows, model=None):
    """
    fprop rows using best trained model and returns activations of the
    penultimate layer
    """
    conf = utils.get_config()
    patch_size = conf['patch_size']
    region_size = conf['region_size']
    batch_size = None
    preds = utils.get_predictor(model=model, return_all=True)
    y = np.zeros(len(rows))
    samples = np.zeros(
        (len(rows), region_size, region_size, 1), dtype=np.float32)
    for i, row in enumerate(rows):
        print 'processing %i-th image: %s' % (i, row['image_filename'])
        try:
            samples[i] = utils.get_samples_from_image(row, False)[0]
        except ValueError as e:
            print '{1} Value error: {0}'.format(str(e), row['image_filename'])
        y[i] = utils.is_positive(row)
    ds = DenseDesignMatrix(topo_view=samples)
    pipeline = utils.get_pipeline(
        ds.X_topo_space.shape, patch_size, batch_size)
    pipeline.apply(ds)
    return preds[-2](ds.get_topological_view()), y
Developer: johnarevalo, Project: cnn-bcdr, Lines: 25, Source: fe_extraction.py


Example 2: test_convert_to_one_hot

def test_convert_to_one_hot():
    rng = np.random.RandomState([2013, 11, 14])
    m = 11
    d = DenseDesignMatrix(
        X=rng.randn(m, 4),
        y=rng.randint(low=0, high=10, size=(m,)))
    d.convert_to_one_hot()
Developer: AlexArgus, Project: pylearn2, Lines: 7, Source: test_dense_design_matrix.py


Example 3: next

    def next(self):
        next_index = self._subset_iterator.next()

        # convert to boolean selection
        sel = np.zeros(self.num_examples, dtype=bool)
        sel[next_index] = True
        next_index = sel

        rval = []
        for data, fn in safe_izip(self._raw_data, self._convert):
            try:
                this_data = data[next_index]
            except TypeError:
                this_data = data[next_index, :]
            if fn:
                this_data = fn(this_data)
            if self._preprocessor is not None:
                d = DenseDesignMatrix(X=this_data)
                self._preprocessor.apply(d)
                this_data = d.get_design_matrix()
            assert not np.any(np.isnan(this_data))
            rval.append(this_data)
        rval = tuple(rval)
        if not self._return_tuple and len(rval) == 1:
            rval, = rval
        return rval    
Developer: everglory99, Project: deepAutoController, Lines: 26, Source: icmc.py


Example 4: apply_ZCA_fast

def apply_ZCA_fast(patches, normalize, zca_preprocessor):
    patches = patches.astype(np.float32)
    if normalize:
        patches /= 255.0
    dataset = DenseDesignMatrix(X=patches.T)
    zca_preprocessor.apply(dataset)
    patches = dataset.get_design_matrix()
    return patches.T
Developer: ttblue, Project: human_demos, Lines: 8, Source: create_leveldb_utils.py


Example 5: test

        def test(store_inverse):
            preprocessed_X = copy.copy(self.X)
            preprocessor = ZCA(store_inverse=store_inverse)

            dataset = DenseDesignMatrix(X=preprocessed_X,
                                        preprocessor=preprocessor,
                                        fit_preprocessor=True)

            preprocessed_X = dataset.get_design_matrix()
            assert_allclose(self.X, preprocessor.inverse(preprocessed_X))
Developer: ASAPPinc, Project: pylearn2, Lines: 10, Source: test_preprocessing.py


Example 6: make_dataset

    def make_dataset(num_batches):
        m = num_batches*batch_size
        X = rng.randn(m, num_features)
        y = rng.randn(m, num_features)

        rval =  DenseDesignMatrix(X=X, y=y)

        rval.yaml_src = "" # suppress no yaml_src warning

        return rval
Developer: 123fengye741, Project: pylearn2, Lines: 10, Source: test_bgd.py


Example 7: __init__

 def __init__(self, which_set, data_path=None, 
              term_range=None, target_type='cluster100'):
     """
     which_set: a string specifying which portion of the dataset
         to load. Valid values are 'train', 'valid' or 'test'
     data_path: a string specifying the directory containing the 
         webcluster data. If None (default), use environment 
         variable WEBCLUSTER_DATA_PATH.
     term_range: a tuple for taking only a slice of the available
         terms. Default is to use all 6275. For example, an input
         range of (10,2000) will truncate the 10 most frequent terms
         and the 6275-2000=4275 less frequent terms, where by frequency
         we mean the number of unique documents each term appears in.
     target_type: the type of targets to use. Valid options are 
         'cluster[10,100,1000]'
     """
     self.__dict__.update(locals())
     del self.self
     
     self.corpus_terms = None
     self.doc_info = None
     
     print "loading WebCluster DDM. which_set =", self.which_set
     
     if self.data_path is None:
         self.data_path \
             = string_utils.preprocess('${WEBCLUSTER_DATA_PATH}')
     
     fname = os.path.join(self.data_path, which_set+'_doc_inputs.npy')
     X = np.load(fname)
     if self.term_range is not None:
         X = X[:,self.term_range[0]:self.term_range[1]]
         X = X/X.sum(1).reshape(X.shape[0],1)
     print X.sum(1).mean()
     
     fname = os.path.join(self.data_path, which_set+'_doc_targets.npy')
     # columns: 0:cluster10s, 1:cluster100s, 2:cluster1000s
     self.cluster_hierarchy = np.load(fname)
     
     y = None
     if self.target_type == 'cluster10':
         y = self.cluster_hierarchy[:,0]
     elif self.target_type == 'cluster100':
         y = self.cluster_hierarchy[:,1]
     elif self.target_type == 'cluster1000':
         y = self.cluster_hierarchy[:,2]
     elif self.target_type is None:
         pass
     else:
         raise NotImplementedError()
     
     DenseDesignMatrix.__init__(self, X=X, y=y)
     
     print "... WebCluster ddm loaded"
Developer: nicholas-leonard, Project: delicious, Lines: 54, Source: webcluster.py


Example 8: test

    def test(store_inverse):
        rng = np.random.RandomState([1, 2, 3])
        X = as_floatX(rng.randn(15, 10))
        preprocessed_X = copy.copy(X)
        preprocessor = ZCA(store_inverse=store_inverse)

        dataset = DenseDesignMatrix(X=preprocessed_X,
                                    preprocessor=preprocessor,
                                    fit_preprocessor=True)

        preprocessed_X = dataset.get_design_matrix()

        assert_allclose(X, preprocessor.inverse(preprocessed_X))
Developer: JesseLivezey, Project: pylearn2, Lines: 13, Source: test_preprocessing.py


Example 9: convert_to_dataset

     def convert_to_dataset(X, y):
         X = np.vstack(X)
         y = np.vstack(y)

         # convert labels
         y = self.label_converter.get_labels(y, self.label_mode)
         y = np.hstack(y)

         one_hot_y = one_hot(y)

         dataset = DenseDesignMatrix(X=X, y=one_hot_y)
         dataset.labels = y  # for confusion matrix

         return dataset
Developer: sarikayamehmet, Project: ismir2014-deepbeat, Lines: 14, Source: EEGDatasetLoader.py


Example 10: make_dataset

        def make_dataset(num_batches):
            disturb_mem.disturb_mem()
            m = num_batches*batch_size
            X = rng.randn(m, num_features)
            y = np.zeros((m,1))
            y[:,0] = np.dot(X, w) > 0.

            rval =  DenseDesignMatrix(X=X, y=y)

            rval.yaml_src = "" # suppress no yaml_src warning

            X = rval.get_batch_design(batch_size)
            assert X.shape == (batch_size, num_features)

            return rval
Developer: mathewsbabu, Project: pylearn, Lines: 15, Source: test_sgd.py


Example 11: test_zero_vector

    def test_zero_vector(self):
        """ Test that passing in the zero vector does not result in
            a divide by 0 """

        dataset = DenseDesignMatrix(X=as_floatX(np.zeros((1, 1))))

        # the settings of subtract_mean and use_std are not relevant to
        # the test
        # sqrt_bias = 0.0 is the only value for which there should be a risk
        # of failure occurring
        preprocessor = GlobalContrastNormalization(subtract_mean=True, sqrt_bias=0.0, use_std=True)

        dataset.apply_preprocessor(preprocessor)

        result = dataset.get_design_matrix()

        assert not np.any(np.isnan(result))
        assert not np.any(np.isinf(result))
Developer: sonu5623, Project: pylearn2, Lines: 18, Source: test_preprocessing.py


Example 12: test_finitedataset_source_check

def test_finitedataset_source_check():
    """
    Check that the FiniteDatasetIterator returns sensible
    errors when there is a missing source in the dataset.
    """
    dataset = DenseDesignMatrix(X=np.random.rand(20,15).astype(theano.config.floatX),
                                y=np.random.rand(20,5).astype(theano.config.floatX))
    assert_raises(ValueError,
                  dataset.iterator,
                  mode='sequential',
                  batch_size=5,
                  data_specs=(VectorSpace(15),'featuresX'))
    try:
        dataset.iterator(mode='sequential',
                         batch_size=5,
                         data_specs=(VectorSpace(15),'featuresX'))
    except ValueError as e:
        assert 'featuresX' in str(e)
Developer: JesseLivezey, Project: pylearn2, Lines: 18, Source: test_iteration.py


Example 13: test_random_image

    def test_random_image(self):
        """
        Test on a random image that the preprocessor loads and works without
        any error and doesn't result in any nan or inf values

        """

        rng = np.random.RandomState([1, 2, 3])
        X = as_floatX(rng.randn(5, 32 * 32 * 3))

        axes = ["b", 0, 1, "c"]
        view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3), axes)
        dataset = DenseDesignMatrix(X=X, view_converter=view_converter)
        dataset.axes = axes
        preprocessor = LeCunLCN(img_shape=[32, 32])
        dataset.apply_preprocessor(preprocessor)
        result = dataset.get_design_matrix()

        assert not np.any(np.isnan(result))
        assert not np.any(np.isinf(result))
Developer: sonu5623, Project: pylearn2, Lines: 20, Source: test_preprocessing.py


Example 14: test_split_nfold_datasets

def test_split_nfold_datasets():
    #Load and create ddm from cifar100
    path = "/data/lisa/data/cifar100/cifar-100-python/train"
    obj = serial.load(path)
    X = obj['data']

    assert X.max() == 255.
    assert X.min() == 0.

    X = np.cast['float32'](X)
    y = None #not implemented yet

    view_converter = DefaultViewConverter((32,32,3))

    ddm = DenseDesignMatrix(X=X, y=y, view_converter=view_converter)

    assert not np.any(np.isnan(ddm.X))
    ddm.y_fine = np.asarray(obj['fine_labels'])
    ddm.y_coarse = np.asarray(obj['coarse_labels'])
    folds = ddm.split_dataset_nfolds(10)
    print folds[0].shape
Developer: caglar, Project: pylearn_old, Lines: 21, Source: test_dense_design_matrix.py


Example 15: __init__

 def __init__(self, filename, X=None, topo_view=None, y=None,
              load_all=False, **kwargs):
     if 'preprocessor' in kwargs:
         if ('fit_preprocessor' in kwargs and 
             kwargs['fit_preprocessor'] is False) or ('fit_preprocessor' 
                                                      not in kwargs):
             self._preprocessor = kwargs['preprocessor']
             kwargs['preprocessor'] = None
     else:
         self._preprocessor = None
     self.load_all = load_all
     if h5py is None:
         raise RuntimeError("Could not import h5py.")
     self._file = h5py.File(filename)
     if X is not None:
         X = self.get_dataset(X, load_all)
     if topo_view is not None:
         topo_view = self.get_dataset(topo_view, load_all)
     if y is not None:
         y = self.get_dataset(y, load_all)
     DenseDesignMatrix.__init__(self, X=X, topo_view=topo_view, y=y,
                                **kwargs)
Developer: everglory99, Project: deepAutoController, Lines: 22, Source: icmc.py


Example 16: test_split_datasets

def test_split_datasets():
    #Load and create ddm from cifar100
    path = "/data/lisa/data/cifar100/cifar-100-python/train"
    obj = serial.load(path)
    X = obj['data']

    assert X.max() == 255.
    assert X.min() == 0.

    X = np.cast['float32'](X)
    y = None #not implemented yet

    view_converter = DefaultViewConverter((32,32,3))

    ddm = DenseDesignMatrix(X=X, y=y, view_converter=view_converter)

    assert not np.any(np.isnan(ddm.X))
    ddm.y_fine = np.asarray(obj['fine_labels'])
    ddm.y_coarse = np.asarray(obj['coarse_labels'])
    (train, valid) = ddm.split_dataset_holdout(train_prop=0.5)
    assert valid.shape[0] == np.ceil(ddm.num_examples * 0.5)
    assert train.shape[0] == (ddm.num_examples - valid.shape[0])
Developer: HaniAlmousli, Project: pylearn, Lines: 22, Source: test_dense_design_matrix.py


Example 17: setup

 def setup(self):
     """
     We use a small predefined 8x5 matrix for
     which we know the ZCA transform.
     """
     self.X = np.array([[-10.0, 3.0, 19.0, 9.0, -15.0],
                       [7.0, 26.0, 26.0, 26.0, -3.0],
                       [17.0, -17.0, -37.0, -36.0, -11.0],
                       [19.0, 15.0, -2.0, 5.0, 9.0],
                       [-3.0, -8.0, -35.0, -25.0, -8.0],
                       [-18.0, 3.0, 4.0, 15.0, 14.0],
                       [5.0, -4.0, -5.0, -7.0, -11.0],
                       [23.0, 22.0, 15.0, 20.0, 12.0]])
     self.dataset = DenseDesignMatrix(X=as_floatX(self.X),
                                      y=as_floatX(np.ones((8, 1))))
     self.num_components = self.dataset.get_design_matrix().shape[1] - 1
Developer: ASAPPinc, Project: pylearn2, Lines: 16, Source: test_preprocessing.py


Example 18: test_init_with_X_or_topo

def test_init_with_X_or_topo():
    # tests that constructing with topo_view works
    # tests that construction with design matrix works
    # tests that conversion from topo_view to design matrix and back works
    # tests that conversion the other way works too
    rng = np.random.RandomState([1, 2, 3])
    topo_view = rng.randn(5, 2, 2, 3)
    d1 = DenseDesignMatrix(topo_view=topo_view)
    X = d1.get_design_matrix()
    d2 = DenseDesignMatrix(X=X, view_converter=d1.view_converter)
    topo_view_2 = d2.get_topological_view()
    assert np.allclose(topo_view, topo_view_2)
    X = rng.randn(*X.shape)
    topo_view_3 = d2.get_topological_view(X)
    X2 = d2.get_design_matrix(topo_view_3)
    assert np.allclose(X, X2)
Developer: AlexArgus, Project: pylearn2, Lines: 16, Source: test_dense_design_matrix.py


Example 19: function

    if feature_type == 'exp_hs':
        feat = H * Mu1
    elif feature_type == 'exp_h':
        feat = H
    elif feature_type == 'map_hs':
        feat = ( H > 0.5) * Mu1
    else:
        assert False

    print 'compiling theano function'
    f = function([V],feat)

    print 'running theano function'
    feat = f(X2)

    feat_dataset = DenseDesignMatrix(X=feat, view_converter=DefaultViewConverter([1, 1, feat.shape[1]]))

    print 'reassembling features'
    ns = 32 - size + 1
    depatchifier = ReassembleGridPatches(orig_shape=(ns, ns), patch_shape=(1, 1))
    feat_dataset.apply_preprocessor(depatchifier)

    print 'making topological view'
    topo_feat = feat_dataset.get_topological_view()
    assert topo_feat.shape[0] == X.shape[0]

    print 'assembling visualizer'

    n = np.ceil(np.sqrt(model.nhid))

    pv3 = PatchViewer(grid_shape=(X.shape[0], num_filters), patch_shape=(ns, ns), is_color=False)
Developer: cc13ny, Project: galatea, Lines: 31, Source: feature_viewer.py


Example 20: __init__


#......... part of the code is omitted here .........
        list_features = tmp_list_features

        print 'List of features:'
        for f in list_features:
            print f['feature'] + '.' + f['param']
        print ''

        EpilepsiaeFeatureLoader.__init__(self,
                                         patient_id=patient_id,
                                         which_set=which_set,
                                         list_features=list_features,
                                         leave_out_seizure_idx_valid=leave_out_seizure_idx_valid,
                                         leave_out_seizure_idx_test=leave_out_seizure_idx_test,
                                         data_dir=data_dir,
                                         preictal_sec=preictal_sec,
                                         use_all_nonictals=use_all_nonictals)
        # Row: samples, Col: features
        raw_X, y = self.load_data()

        if n_selected_features != -1:
            all_rank_df = None
            for f_idx, feature in enumerate(self.list_features):
                rank_df = pd.read_csv(os.path.join(data_dir, patient_id +
                                                 '/rank_feature_idx_' + feature['param'] + '_' +
                                                 'leaveout_' + str(leave_out_seizure_idx_valid) + '_' +
                                                 str(leave_out_seizure_idx_test) + '.txt'))
                if f_idx == 0:
                    all_rank_df = rank_df
                else:
                    offset_f_idx = 0
                    for i in range(f_idx):
                        offset_f_idx = offset_f_idx + self.list_features[i]['n_features']
                    rank_df['feature_idx'] = rank_df['feature_idx'].values + offset_f_idx
                    all_rank_df = pd.concat([all_rank_df, rank_df])

            sorted_feature_df = all_rank_df.sort(['D_ADH'], ascending=[0])
            self.selected_feature_idx = sorted_feature_df['feature_idx'][:n_selected_features]
            raw_X = raw_X[:, self.selected_feature_idx]
        else:
            self.selected_feature_idx = np.arange(raw_X.shape[1])

        # Print shape of input data
        print '------------------------------'
        print 'Dataset: {0}'.format(self.which_set)
        print 'Number of samples: {0}'.format(raw_X.shape[0])
        print ' Preictal samples: {0}'.format(self.preictal_samples)
        print ' Nonictal samples: {0}'.format(self.nonictal_samples)
        print ' NaN samples: {0}'.format(self.nan_non_flat_samples)
        print " Note for 'train' and 'valid_train': number of samples will be equal without removing the nan samples."
        print 'Number of features: {0}'.format(raw_X.shape[1])
        print '------------------------------'

        # Preprocessing
        if which_set == 'train':
            scaler = preprocessing.StandardScaler()
            # scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
            scaler = scaler.fit(raw_X)

            with open(os.path.join(preprocessor_dir, self.patient_id + '_scaler_feature_' +
                                                     str(self.leave_out_seizure_idx_valid) + '_' +
                                                     str(self.leave_out_seizure_idx_test) + '.pkl'), 'wb') as f:
                pickle.dump(scaler, f)

            preprocessed_X = scaler.transform(raw_X)
        else:
            with open(os.path.join(preprocessor_dir, self.patient_id + '_scaler_feature_' +
                                                     str(self.leave_out_seizure_idx_valid) + '_' +
                                                     str(self.leave_out_seizure_idx_test) + '.pkl'), 'rb') as f:
                scaler = pickle.load(f)

            preprocessed_X = scaler.transform(raw_X)

        raw_X = None

        if self.which_set == 'train' or self.which_set == 'valid_train':
            # Shuffle the data
            print ''
            print '*** Shuffle data ***'
            print ''
            permute_idx = np.random.permutation(preprocessed_X.shape[0])
            preprocessed_X = preprocessed_X[permute_idx, :]
            y = y[permute_idx, :]

        if self.balance_class and (self.which_set == 'train' or self.which_set == 'valid_train'):
            self.X_full = preprocessed_X
            self.y_full = y

            (X, y) = self.get_data()
        else:
            # Zero-padding (if necessary)
            if not (self.batch_size is None):
                preprocessed_X, y = self.zero_pad(preprocessed_X, y, self.batch_size)

            X = preprocessed_X

        # Initialize DenseDesignMatrix
        DenseDesignMatrix.__init__(self,
                                   X=X,
                                   y=y,
                                   axes=axes)
Developer: akaraspt, Project: epilepsy-system, Lines: 101, Source: epilepsiae.py



Note: the pylearn2.datasets.dense_design_matrix.DenseDesignMatrix class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before redistributing or using the code; do not reproduce this article without permission.

