
Python decomposition.sparse_encode Function Code Examples


This article collects typical usage examples of the Python function sklearn.decomposition.sparse_encode. If you have been wondering what exactly sparse_encode does, how to call it, or what it looks like in real code, the curated examples below should help.



The sections below present 20 code examples of the sparse_encode function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
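Before diving into the collected examples, here is a minimal, self-contained sketch of a typical sparse_encode call. The array shapes, the alpha value, and the variable names are illustrative assumptions for this sketch only and are not taken from any of the projects below.

import numpy as np
from sklearn.decomposition import sparse_encode

# Toy data: 50 samples with 8 features each (shapes chosen purely for illustration).
rng = np.random.RandomState(0)
X = rng.randn(50, 8)

# A dictionary of 12 atoms (rows), normalized so each atom has unit L2 norm.
dictionary = rng.randn(12, 8)
dictionary /= np.linalg.norm(dictionary, axis=1, keepdims=True)

# Encode each sample as a sparse combination of the dictionary atoms.
code = sparse_encode(X, dictionary, algorithm='lasso_lars', alpha=0.1)
print(code.shape)  # (50, 12): one coefficient per (sample, atom) pair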

Example 1: test_sparse_encode_input

def test_sparse_encode_input():
    n_components = 100
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    Xf = check_array(X, order='F')
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        a = sparse_encode(X, V, algorithm=algo)
        b = sparse_encode(Xf, V, algorithm=algo)
        assert_array_almost_equal(a, b)
Author: Lavanya-Basavaraju | Project: scikit-learn | Lines: 10 | Source: test_dict_learning.py


Example 2: test_sparse_encode_shapes

def test_sparse_encode_shapes():
    n_components = 12
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        code = sparse_encode(X, V, algorithm=algo)
        assert_equal(code.shape, (n_samples, n_components))
Author: Jetafull | Project: scikit-learn | Lines: 7 | Source: test_dict_learning.py


Example 3: test_sparse_encode_error

def test_sparse_encode_error():
    n_components = 12
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    code = sparse_encode(X, V, alpha=0.001)
    assert_true(not np.all(code == 0))
    assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
Author: Jetafull | Project: scikit-learn | Lines: 7 | Source: test_dict_learning.py


Example 4: run

def run(dimension, raw_data_dir, out_dir):
    with open('{}/filename.list'.format(raw_data_dir), 'r') as fp:
        filenames = fp.read().splitlines()
    sensor_data = list()
    for filename in filenames:
        path = '{}/{}'.format(raw_data_dir, filename)
        with Timer('open {} with ALL sensors'.format(filename)):
            #data = np.genfromtxt(path, usecols=range(1,49)
            data = np.genfromtxt(path, usecols=[1, 4, 13, 16, 18, 26, 31, 32, 37, 38, 39, 40, 9, 11, 22, 23, 41, 10, 12, 24, 25, 29, 30, 42, 43, 44],
                                 delimiter=',').tolist()
            print "# of data:", len(data)
            sensor_data.extend(data)
    with Timer('Sparse Coding...'):
        print "# of ALL data as a whole:", len(sensor_data)
        dl = sparse_coding(dimension, sensor_data, out_dir, 1, 10000, 0.00001)
    with open('{}/atoms'.format(out_dir), "w") as op:
        for component in dl.components_:
            line = ', '.join(str(e) for e in component)
            op.write(line + '\n')

    code = sparse_encode(input_x, dl.components_)

    with open('{}/codes'.format(out_dir), "w") as op:
        for coefficient in code:
            line = ', '.join(str(e) for e in coefficient)
            op.write(line + '\n')

    with open('{}/filename.list'.format(raw_data_dir), 'r') as fp:
        filenames = fp.read().splitlines()
Author: paramoecium | Project: r324_sparse_coding | Lines: 29 | Source: learnDic.py


Example 5: test_with_sparse_code

def test_with_sparse_code(components=np.loadtxt('components_of_convfeat.txt')):
    (X_train, y_train), (X_test, y_test) = util.load_feat_vec()
    X_train_codes = np.loadtxt('sparse_codes_of_convfeat.txt')
    clf = LogisticRegression(penalty='l1', multi_class='ovr')
    clf.fit(X_train_codes, y_train)
    X_test_codes = sparse_encode(X_test, components)
    print "mean accuracy", clf.score(X_test_codes, y_test)
Author: HunjaeJung | Project: imagenet2014-modified | Lines: 7 | Source: model_sparse.py


Example 6: test_sparse_encode_positivity

def test_sparse_encode_positivity(positive):
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'threshold'):
        code = sparse_encode(X, V, algorithm=algo, positive=positive)
        if positive:
            assert_true((code >= 0).all())
        else:
            assert_true((code < 0).any())

    try:
        sparse_encode(X, V, algorithm='omp', positive=positive)
    except ValueError:
        if not positive:
            raise
Author: MartinThoma | Project: scikit-learn | Lines: 17 | Source: test_dict_learning.py


Example 7: to_sparse

def to_sparse(X,dim):

	sparse_dict = MiniBatchDictionaryLearning(dim)
	sparse_dict.fit(X)
	sparse_vectors = sparse_encode(X, sparse_dict.components_)

	for i in sparse_vectors:
		print i

	return sparse_vectors
Author: tarekmehrez | Project: extended-word2vec | Lines: 10 | Source: sparse_and_test.py


Example 8: test_sparse_encode_shapes_omp

def test_sparse_encode_shapes_omp():
    rng = np.random.RandomState(0)
    algorithms = ['omp', 'lasso_lars', 'lasso_cd', 'lars', 'threshold']
    for n_components, n_samples in itertools.product([1, 5], [1, 9]):
        X_ = rng.randn(n_samples, n_features)
        dictionary = rng.randn(n_components, n_features)
        for algorithm, n_jobs in itertools.product(algorithms, [1, 3]):
            code = sparse_encode(X_, dictionary, algorithm=algorithm,
                                 n_jobs=n_jobs)
            assert_equal(code.shape, (n_samples, n_components))
Author: Lavanya-Basavaraju | Project: scikit-learn | Lines: 10 | Source: test_dict_learning.py


Example 9: predict

    def predict(self, imgs, neuron_idx=None, penalty_lambda=None, algorithm=None):
        """ get neuron response to images

        Parameters
        ----------
        imgs

        Returns
        -------

        """
        imgs_array = make_2d_input_matrix(imgs)
        if neuron_idx is None:
            dict_to_use = self.w
        else:
            dict_to_use = self.w[neuron_idx:(neuron_idx + 1), :]

        if penalty_lambda is None:
            _lambda = self._lambda
        else:
            _lambda = penalty_lambda
        assert np.isscalar(_lambda)

        if algorithm is None:
            _algorithm = self.algorithm
        else:
            _algorithm = algorithm


        # let's call sparse encoder to do it!
        # no scaling at all!
        # having /nsample in objective function is exactly the same as solving each problem separately.
        # the underlying function called is elastic net, and that function fits each column of y separately.
        # each column of y is each stimulus. This is because when passing imgs_array and dict_to_use to Elastic Net,
        # they are transposed. That is, y = imgs_array.T
        #
        # in the code there's also a subtle detail, where alpha is divided by number of pixels in each stimulus.
        # I haven't figured that out, but seems that's simply a detail for using ElasticNet to do this.
        if _algorithm in ['lasso_lars', 'lasso_cd']:
            response = sparse_encode(imgs_array, dict_to_use, alpha=_lambda, algorithm=_algorithm, max_iter=10000)
        else:
            assert _algorithm == 'spams'
            #print(imgs_array.dtype, dict_to_use.dtype, _lambda.shape)
            response = lasso(np.asfortranarray(imgs_array.T), D=np.asfortranarray(dict_to_use.T), lambda1=_lambda,
                             mode=2)
            response = response.T.toarray()  # because lasso returns sparse matrix...
        # this can be used for debugging, for comparison with SPAMS.
        # notice here I give per sample cost.
        self.last_cost_recon = 0.5 * np.sum((imgs_array - np.dot(response, dict_to_use)) ** 2, axis=1)
        self.last_cost_sparsity = _lambda * np.abs(response).sum(axis=1)
        assert self.last_cost_sparsity.shape == (imgs_array.shape[0], )
        assert self.last_cost_recon.shape == (imgs_array.shape[0],)
        self.last_cost = np.mean(self.last_cost_recon + self.last_cost_sparsity)

        return response
Author: leelabcnbc | Project: early-vision-toolbox | Lines: 55 | Source: sparse_coding.py


Example 10: test_sparse_encode

    def test_sparse_encode(self):
        iris = datasets.load_iris()
        df = pdml.ModelFrame(iris)

        _, dictionary, _ = decomposition.dict_learning(iris.data, 2, 1,
                                                       random_state=self.random_state)

        result = df.decomposition.sparse_encode(dictionary)
        expected = decomposition.sparse_encode(iris.data, dictionary)
        self.assertIsInstance(result, pdml.ModelFrame)
        tm.assert_index_equal(result.index, df.data.index)
        self.assert_numpy_array_almost_equal(result.values, expected)
Author: sinhrks | Project: pandas-ml | Lines: 12 | Source: test_decomposition.py


Example 11: learning_sparse_coding

def learning_sparse_coding(X, components=None):
    """
    http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.DictionaryLearning.html
    http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.sparse_encode.html
    """
    if components is None:
        print('Learning the dictionary...')
        t0 = time()
        diclearner = MiniBatchDictionaryLearning(n_components=100, verbose=True)
        components = diclearner.fit(X).components_
        np.savetxt('components_of_convfeat.txt', components)
        dt = time() - t0
        print('done in %.2fs.' % dt)

    codes = sparse_encode(X, components)
    np.savetxt('sparse_codes_of_convfeat.txt', codes)
Author: HunjaeJung | Project: imagenet2014-modified | Lines: 16 | Source: model_sparse.py


Example 12: sparse_coding

def sparse_coding(n_atom, input_x, out_dir):
    dictionary = get_dictionary(n_atom, input_x)
    code = sparse_encode(input_x, dictionary)

    np.set_printoptions(precision=3, suppress=True)
    #print code
    #print dictionary
    with open('{}/atoms'.format(out_dir), "w") as op:
        for component in dictionary:
            line = ', '.join(str(round(e, 3)) for e in component)
            op.write(line + '\n')
    with open('{}/codes'.format(out_dir), "w") as op:
        for coefficient in code:
            line = ', '.join(str(round(e, 3)) for e in coefficient)
            op.write(line + '\n')
    return code
Author: paramoecium | Project: dim_reduction_via_sparse_coding | Lines: 16 | Source: reduce.py


Example 13: test_dict_learning_online_partial_fit

def test_dict_learning_online_partial_fit():
    # this test was not actually passing before!
    raise SkipTest
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    dico1 = MiniBatchDictionaryLearning(n_components, n_iter=10, batch_size=1,
                                        shuffle=False, dict_init=V,
                                        random_state=0).fit(X)
    dico2 = MiniBatchDictionaryLearning(n_components, n_iter=1, dict_init=V,
                                        random_state=0)
    for ii, sample in enumerate(X):
        dico2.partial_fit(sample, iter_offset=ii * dico2.n_iter)
        # if ii == 1: break
    assert_true(not np.all(sparse_encode(X, dico1.components_, alpha=100) ==
                           0))
    assert_array_equal(dico1.components_, dico2.components_)
Author: 2011200799 | Project: scikit-learn | Lines: 18 | Source: test_dict_learning.py


Example 14: test_dict_learning_online_partial_fit

def test_dict_learning_online_partial_fit():
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
                                        batch_size=1,
                                        alpha=1, shuffle=False, dict_init=V,
                                        random_state=0).fit(X)
    dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
                                        n_iter=1, dict_init=V,
                                        random_state=0)
    for i in range(10):
        for sample in X:
            dict2.partial_fit(sample[np.newaxis, :])

    assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)
    assert_array_almost_equal(dict1.components_, dict2.components_,
                              decimal=2)
Author: hmshan | Project: scikit-learn | Lines: 19 | Source: test_dict_learning.py


Example 15: gabor_encode

    def gabor_encode(self):
        patches = extract_patches_2d(
            self.img, (self.patch_size, self.patch_size)
        )
        patches = patches.reshape(patches.shape[0], -1)
        # code = sparse_encode(patches, self.kernels, algorithm='threshold', alpha=1)
        code = sparse_encode(
            patches, self.kernels, algorithm='lars', n_nonzero_coefs=2)

        idx = np.std(code, axis=1) > 0.3
        selected_patches = patches #[idx]
        selected_code = code #[idx]
        min_code, max_code = np.min(selected_code), np.max(selected_code)
        # print selected_patches
        c = 0
        s = 21
        for i in xrange(selected_code.shape[0]):
            print i

            plt.subplot(s, s * 2, c)
            plt.xticks(())
            plt.gca().set_ylim([min_code, max_code])
            plt.yticks(())
            plt.plot(selected_code[i])
            c += 1

            plt.subplot(s, s * 2, c)
            plt.xticks(())
            plt.yticks(())
            plt.imshow(selected_patches[i].reshape(
                self.patch_size, self.patch_size), cmap='gray', interpolation='none')
            c += 1
        plt.show()

        orientations = np.argmax(code, axis=1)
        activations = np.std(code, axis=1)
        orientations[activations < self.activation_threshold] = self.zero_value
        # blank_batches = np.ones((patches.shape[0], self.patch_size, self.patch_size)) * orientations[:, None, None]
        # recon = reconstruct_from_patches_2d(blank_batches, (self.img_height, self.img_width))
        # return recon
        return orientations.reshape(self.map_height, self.map_width)
Author: ahmedassal | Project: ml-playground | Lines: 41 | Source: main.py


Example 16: FindTopSCV

def FindTopSCV(k, dic, Fout2, prompt):
    sh = (Fout2.shape[0], Fout2.shape[1], k, 3)
    cplist = np.zeros(sh)
    if prompt == 'SP':
        for j in range(Fout2.shape[1]):
            for i in range(Fout2.shape[0]):
                y = np.reshape(Fout2[i,j], (Fout2.shape[2],1))
                if y.all() == 0:
                    p = np.zeros(k)
                    p[0] = 1
                    lc = np.zeros((k,2))
                    lc[0,:] = [20,20]
                else:
                    try:
                        x_hat = CSRec_SP(k, dic, y)
                        (p,lc) = prob(k, x_hat, Fout2.shape[0])
                    except:
                        p = np.zeros(k)
                        p[0] = 1
                        lc = np.zeros((k,2))
                        lc[0,:] = [20,20]
                cplist[i,j,:,0] = p
                cplist[i,j,:,1:3] = lc
                print (i,j)
    elif prompt == 'OMP':
        for j in range(Fout2.shape[1]):
            for i in range(Fout2.shape[0]):
                y = np.reshape(Fout2[i,j], (Fout2.shape[2],1))
                # X = code * dic
                y = np.reshape(Fout2,(Fout2.shape[0]*Fout2.shape[1],Fout2.shape[2]))
                x_hat = sparse_encode(X = y, dictionary=dic.transpose(), n_nonzero_coefs=k)
                x_hat = x_hat.transpose()
                (p,lc) = prob(k, x_hat, Fout2.shape[0])
                cplist[i,j,:,0] = p
                cplist[i,j,:,1:3] = lc
                print (i,j)

    return cplist
Author: Sekunde | Project: medical_img_registration | Lines: 38 | Source: library.py


Example 17: range

print "X_train.shape", train_X.shape
print "Components shape", dl.components_.shape

# components = dl.components().reshape((n_components, n_features))
components = dl.components_

# Visualizing the components as images
component_titles = ["component %d" % i for i in range(components.shape[0])]
plot_gallery("Visualizing top components", components, component_titles, w, h, n_row=n_components / 10, n_col=10)
plt.show()

###############################################################################
# Sparse Encoding
print("\nSparse Encoding")
train_X_pca = np.zeros((len(train_X), n_components))
train_X_pca = sparse_encode(train_X[0:10], components, alpha=10, algorithm='omp')
np.set_printoptions(precision=3, suppress=True)
print train_X_pca
# for i in range(len(train_X)):
#     train_X_pca[i] = dl.transform(train_X[i])

test_X_pca = np.zeros((len(test_X), n_components))
test_X_pca = sparse_encode(test_X[0:10], components, alpha=10, algorithm='omp')
# for i in range(len(test_X)):
#     test_X_pca[i] = dl.transform(test_X[i])

print "train_X_pca.shape", train_X_pca.shape

###############################################################################
# Visualize reconstructed images
reconstructed_X = np.zeros((20, n_features))
Author: JonnyTran | Project: ML-algorithms | Lines: 31 | Source: sc-cifar-nonpatch.py


Example 18: sparse_encode

import numpy as np
from sklearn.decomposition import sparse_encode
HS = sparse_encode( np.random.randn(108,1600), np.random.randn(108,5000), alpha = 1./5000., algorithm='lasso_lars').T

Author: cc13ny | Project: galatea | Lines: 3 | Source: bug.py


Example 19: bow_feature_extract

	def bow_feature_extract(self, path):
		des = self.raw_feature_extract(path)
		out = sum(sparse_encode(des, self.mbdl.components_))
		out = np.array([out])
		return out
Author: andreydung | Project: bagofwords | Lines: 5 | Source: bow.py


Example 20: len

    trajectory['x'] = []
    trajectory['y'] = []

plt.show()


alpha_schedule = [ .2/5000., .5/5000., 1./5000., 2./5000., 5./5000. ]


assert num_trajectories == len(trajectories)

for j, alpha in enumerate(alpha_schedule):
    print 'j = ',j,'; alpha = ',alpha
    from sklearn.decomposition import sparse_encode
    print 'running SC ',j
    HS = sparse_encode( model.W.get_value(), X.T, alpha = alpha, algorithm='lasso_cd').T
    assert HS.shape == (5000,1600)
    print 'done encoding'

    HS = np.abs(HS)


    if np.any(np.isnan(HS)):
        print 'has nans'

    if np.any(np.isinf(HS)):
        print 'has infs'

    print 'HS shape ',HS.shape
    print 'HS subtensor shape ',HS[0:num_trajectories].shape
    act_prob = (HS[:,0:num_trajectories] > .01).mean(axis=0)
Author: cc13ny | Project: galatea | Lines: 31 | Source: s3c_sparsity_scale_trajectories.py



Note: The sklearn.decomposition.sparse_encode examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various programmers; copyright remains with the original authors. Please observe the corresponding project's license when distributing or using this code, and do not republish it without permission.

