Python decomposition.IncrementalPCA Class Code Examples


This article collects typical usage examples of the Python class sklearn.decomposition.IncrementalPCA. If you have been wondering what IncrementalPCA is, how it is used, or where to find worked examples, the curated class examples below should help.



The following presents 20 code examples of the IncrementalPCA class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
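
Before diving into the examples, here is a minimal sketch of the two typical ways to use IncrementalPCA: a whole-dataset fit_transform driven by an internal batch_size, and out-of-core partial_fit on explicit chunks. The shapes and parameters below are illustrative assumptions, not taken from any example on this page.

import numpy as np
from sklearn.decomposition import IncrementalPCA

X = np.random.rand(1000, 50)               # 1000 samples, 50 features

# Variant 1: fit on the whole array; batching is handled internally
ipca = IncrementalPCA(n_components=10, batch_size=200)
X_reduced = ipca.fit_transform(X)          # shape (1000, 10)

# Variant 2: stream chunks through partial_fit when X is too large for memory
ipca_oc = IncrementalPCA(n_components=10)
for chunk in np.array_split(X, 5):         # five chunks of 200 samples each
    ipca_oc.partial_fit(chunk)
X_reduced_oc = ipca_oc.transform(X)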

Example 1: reduceDataset

 def reduceDataset(self,nr=3,method='PCA'):
     '''It reduces the dimensionality of a given dataset using different techniques provided by the scikit-learn library.
      Methods available:
                         'PCA'
                         'FactorAnalysis'
                         'KPCArbf','KPCApoly'
                         'KPCAcosine','KPCAsigmoid'
                         'IPCA'
                         'FastICADeflation'
                         'FastICAParallel'
                         'Isomap'
                         'LLE'
                         'LLEmodified'
                         'LLEltsa'
     '''
     dataset=self.ModelInputs['Dataset']
     #dataset=self.dataset[Model.in_columns]
     #dataset=self.dataset[['Humidity','TemperatureF','Sea Level PressureIn','PrecipitationIn','Dew PointF','Value']]
     #PCA
     if method=='PCA':
         sklearn_pca = sklearnPCA(n_components=nr)
         reduced = sklearn_pca.fit_transform(dataset)
     #Factor Analysis
     elif method=='FactorAnalysis':
         fa=FactorAnalysis(n_components=nr)
         reduced=fa.fit_transform(dataset)
     #kernel pca with rbf kernel
     elif method=='KPCArbf':
         kpca=KernelPCA(nr,kernel='rbf')
         reduced=kpca.fit_transform(dataset)
     #kernel pca with poly kernel
     elif method=='KPCApoly':
         kpca=KernelPCA(nr,kernel='poly')
         reduced=kpca.fit_transform(dataset)
     #kernel pca with cosine kernel
     elif method=='KPCAcosine':
         kpca=KernelPCA(nr,kernel='cosine')
         reduced=kpca.fit_transform(dataset)
     #kernel pca with sigmoid kernel
     elif method=='KPCAsigmoid':
         kpca=KernelPCA(nr,kernel='sigmoid')
         reduced=kpca.fit_transform(dataset)
     #Incremental PCA
     elif method=='IPCA':
         ipca=IncrementalPCA(nr)
         reduced=ipca.fit_transform(dataset)
     #Fast ICA
     elif method=='FastICAParallel':
         fip=FastICA(nr,algorithm='parallel')
         reduced=fip.fit_transform(dataset)
     elif method=='FastICADeflation':
         fid=FastICA(nr,algorithm='deflation')
         reduced=fid.fit_transform(dataset)
     elif method == 'All':
         self.dimensionalityReduction(nr=nr)
         return self
     
     self.ModelInputs.update({method:reduced})
     self.datasetsAvailable.append(method)
     return self
Developer: UIUC-SULLIVAN | Project: ThesisProject_Andrea_Mattera | Lines: 60 | Source: Classes.py


Example 2: ipca

def ipca(mov, components=50, batch=1000):
    # vectorize the images
    num_frames, h, w = mov.shape
    frame_size = h * w
    frame_samples = np.reshape(mov, (num_frames, frame_size)).T
    
    # run IPCA to approximate the SVD
    
    ipca_f = IncrementalPCA(n_components=components, batch_size=batch)
    ipca_f.fit(frame_samples)
    
    # construct the reduced version of the movie vectors using only the 
    # principal component projection
    
    proj_frame_vectors = ipca_f.inverse_transform(ipca_f.transform(frame_samples))
        
    # get the temporal principal components (pixel time series) and 
    # associated singular values
    
    eigenseries = ipca_f.components_.T

    # the columns of eigenseries are approximately orthogonal, so we can
    # approximately obtain the eigenframes by right-multiplying the
    # projected frame matrix by eigenseries
    
    eigenframes = np.dot(proj_frame_vectors, eigenseries)

    return eigenseries, eigenframes, proj_frame_vectors        
Developer: agiovann | Project: pyfluo | Lines: 28 | Source: segmentation.py
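
A hypothetical smoke test of the helper above; the synthetic movie array and its dimensions are illustrative assumptions, not part of the original project:

import numpy as np

mov = np.random.rand(200, 32, 32)          # 200 frames of 32x32 pixels
series, frames, proj = ipca(mov, components=10, batch=100)
print(series.shape)                        # (200, 10): one time series per component
print(frames.shape)                        # (1024, 10): one eigenframe per component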


Example 3: get_pca_array

def get_pca_array(list_chunks, topology):
    """
    Takes a list of mdtraj.Trajectory objects, featurizes them into
    pairwise distances between the backbone alpha carbons, and performs
    2-component Incremental PCA on the featurized trajectory.

    Parameters
    ----------
    list_chunks : list of mdtraj.Trajectory objects
    topology : str
        Name of the topology file

    Returns
    -------
    Y : np.array, shape (n_frames, n_components)

    """
    pca = IncrementalPCA(n_components=2)
    top = md.load_prmtop(topology)
    ca_backbone = top.select("name CA")
    pairs = top.select_pairs(ca_backbone, ca_backbone)
    pair_distances = []
    for chunk in list_chunks:
        X = md.compute_distances(chunk, pairs)
        pair_distances.append(X)
    distance_array = np.concatenate(pair_distances)
    print("No. of data points: %d" % distance_array.shape[0])
    print("No. of features (pairwise distances): %d" % distance_array.shape[1])
    Y = pca.fit_transform(distance_array)
    return Y
Developer: jeiros | Project: Scripts | Lines: 30 | Source: pca_analysis.py
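
A hypothetical way to build list_chunks for the function above, streaming an Amber trajectory with mdtraj so it is never fully loaded; both file names are placeholders:

import mdtraj as md

# Stream the trajectory in 1000-frame chunks rather than loading it whole
chunks = list(md.iterload('traj.nc', top='system.prmtop', chunk=1000))
Y = get_pca_array(chunks, 'system.prmtop')  # Y has shape (n_frames, 2)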


Example 4: ipca

def ipca():
    train_features, test_features = gf.get_tfidf()
    vectorizer = gf.get_tfidf()
    n_components = 250
    ipca = IncrementalPCA(n_components=n_components, batch_size=1250)
    start_time = time.time()
    print('start ipca on train')
    X_ipca = ipca.fit_transform(train_features)
    runtime = time.time() - start_time
    print('-----')
    print('%.2f seconds to ipca on train' % runtime)
    print('-----')
    train_features = None

    print('ipca train done')
    np.savetxt('train_features.csv', X_ipca, fmt='%.8e', delimiter=",")
    X_ipca = None
    print('ipca train file done')
    test_features = gf.get_tfidf(vectorizer, False)
    # transform only: the model was already fit on the training features
    Y_ipca = ipca.transform(test_features)
    test_features, vectorizer = None, None
    print('ipca test done')
    np.savetxt('test_features.csv', Y_ipca, fmt='%.8e', delimiter=",")
    Y_ipca = None
    print('ipca test file done')
Developer: SaarthakKhanna2104 | Project: Home-Depot-Product-Search-Relevance | Lines: 25 | Source: IPCA.py


Example 5: dimensionalityReduction

 def dimensionalityReduction(self,nr=5):
     '''It applies all the dimensionality reduction techniques available in this class:
     Techniques available:
                         'PCA'
                         'FactorAnalysis'
                         'KPCArbf','KPCApoly'
                         'KPCAcosine','KPCAsigmoid'
                         'IPCA'
                         'FastICADeflation'
                         'FastICAParallel'
                         'Isomap'
                         'LLE'
                         'LLEmodified'
                         'LLEltsa'
     '''
     dataset=self.ModelInputs['Dataset']
     sklearn_pca = sklearnPCA(n_components=nr)
     p_components = sklearn_pca.fit_transform(dataset)
     fa=FactorAnalysis(n_components=nr)
     factors=fa.fit_transform(dataset)
     kpca=KernelPCA(nr,kernel='rbf')
     rbf=kpca.fit_transform(dataset)
     kpca=KernelPCA(nr,kernel='poly')
     poly=kpca.fit_transform(dataset)
     kpca=KernelPCA(nr,kernel='cosine')
     cosine=kpca.fit_transform(dataset)
     kpca=KernelPCA(nr,kernel='sigmoid')
     sigmoid=kpca.fit_transform(dataset)
     ipca=IncrementalPCA(nr)
     i_components=ipca.fit_transform(dataset)
     fip=FastICA(nr,algorithm='parallel')
     fid=FastICA(nr,algorithm='deflation')
     ficaP=fip.fit_transform(dataset)
     ficaD=fid.fit_transform(dataset)
     '''isomap=Isomap(n_components=nr).fit_transform(dataset)
     try:
         lle1=LocallyLinearEmbedding(n_components=nr).fit_transform(dataset)
     except ValueError:
         lle1=LocallyLinearEmbedding(n_components=nr,eigen_solver='dense').fit_transform(dataset)
     try:
         
         lle2=LocallyLinearEmbedding(n_components=nr,method='modified').fit_transform(dataset)
     except ValueError:
         lle2=LocallyLinearEmbedding(n_components=nr,method='modified',eigen_solver='dense').fit_transform(dataset) 
     try:
         lle3=LocallyLinearEmbedding(n_components=nr,method='ltsa').fit_transform(dataset)
     except ValueError:
         lle3=LocallyLinearEmbedding(n_components=nr,method='ltsa',eigen_solver='dense').fit_transform(dataset)'''
     values=[p_components,factors,rbf,poly,cosine,sigmoid,i_components,ficaD,ficaP]#,isomap,lle1,lle2,lle3]
     keys=['PCA','FactorAnalysis','KPCArbf','KPCApoly','KPCAcosine','KPCAsigmoid','IPCA','FastICADeflation','FastICAParallel']#,'Isomap','LLE','LLEmodified','LLEltsa']
     self.ModelInputs.update(dict(zip(keys, values)))
     [self.datasetsAvailable.append(key) for key in keys ]
     
     #debug
     #dataset=pd.DataFrame(self.ModelInputs['Dataset'])
     #dataset['Output']=self.ModelOutput
     #self.debug['Dimensionalityreduction']=dataset
     ###
     return self
Developer: UIUC-SULLIVAN | Project: ThesisProject_Andrea_Mattera | Lines: 59 | Source: Classes.py


Example 6: get_pca

def get_pca(file_dir, s, t, i):
    from sklearn.decomposition import IncrementalPCA

    ipca = IncrementalPCA(n_components=48)
    for counter in range(s, t, i):
        features_file = np.load(file_dir + "/pca" + str(counter) + "_code.npy")
        ipca.partial_fit(features_file[:, 0:4096])
    return ipca
Developer: GMNetto | Project: CS2951t_Project | Lines: 8 | Source: perform_analysis_pca.py
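
A hypothetical call to get_pca above, assuming feature files named pca0_code.npy, pca100_code.npy, and so on under ./features; the directory, range, and step are illustrative:

import numpy as np

ipca = get_pca('./features', 0, 1000, 100)  # partial_fit on pca0 ... pca900
batch = np.load('./features/pca0_code.npy')
reduced = ipca.transform(batch[:, 0:4096])  # shape (n_samples, 48)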


Example 7: test_incremental_pca_num_features_change

def test_incremental_pca_num_features_change():
    """Test that changing n_components will raise an error."""
    rng = np.random.RandomState(1999)
    n_samples = 100
    X = rng.randn(n_samples, 20)
    X2 = rng.randn(n_samples, 50)
    ipca = IncrementalPCA(n_components=None)
    ipca.fit(X)
    assert_raises(ValueError, ipca.partial_fit, X2)
Developer: 0x0all | Project: scikit-learn | Lines: 9 | Source: test_incremental_pca.py


Example 8: create_pool_pca_from_files

def create_pool_pca_from_files(file_dir, dir_output, s, t, i):
    from sklearn.decomposition import IncrementalPCA
    ipca = IncrementalPCA(n_components=number_dim_pca)
    # First pass: incrementally fit the PCA on each feature file
    for counter in range(s, t, i):
        features_file = np.load(file_dir + '/pca' + str(counter) + '_code.npy')
        ipca.partial_fit(features_file[:, 0:4096])
    # Second pass: project each file and keep its trailing metadata columns
    for counter in range(s, t, i):
        out_file = dir_output + 'pca_red_' + str(counter) + '_code.npy'
        features_file = np.load(file_dir + '/pca' + str(counter) + '_code.npy')
        features_red = ipca.transform(features_file[:, 0:4096])
        np.save(out_file, np.append(features_red, features_file[:, 4096:], axis=1))
Developer: GMNetto | Project: CS2951t_Project | Lines: 11 | Source: build_analysis.py


Example 9: ipca

def ipca(data, labels, new_dimension):
    print "start incremental pca..."

    if hasattr(data, "todense"):
        data = np.array(data.todense())

    start = time.time()
    pca = IncrementalPCA(n_components=new_dimension)
    reduced = pca.fit_transform(data)
    end = time.time()
    return (reduced, end-start)
Developer: sebastian-alfers | Project: master-thesis | Lines: 11 | Source: dimensionality_reduction.py


Example 10: PCA_Train

def PCA_Train(data, result_fold, n_components=128):
    print_info("PCA training (n_components=%d)..." % n_components)

    pca = IncrementalPCA(n_components=n_components)
    pca.fit(data)

    joblib.dump(pca, result_fold + "pca_model.m")

    print_info("PCA done.")

    return pca
Developer: anguoyang | Project: face_verification_demo | Lines: 11 | Source: joint_bayesian.py
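
Because PCA_Train persists the fitted model with joblib, it can later be reloaded to project new data. A minimal sketch, with the path and input array as placeholders:

import joblib   # older scikit-learn versions expose this as sklearn.externals.joblib
import numpy as np

pca = joblib.load("./results/pca_model.m")  # "./results/" stands in for result_fold
new_data = np.random.rand(5, 4096)          # placeholder; feature count must match training
reduced = pca.transform(new_data)           # project onto the 128 components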


Example 11: train_pca

def train_pca(file_dir, s, t, i):
    from sklearn.decomposition import IncrementalPCA
    global timer_pca
    timer_pca = Timer()
    timer_pca.tic()
    ipca = IncrementalPCA(n_components=pca_dimensions)
    for counter in range(s, t, i):
        features_file = np.load(file_dir + '/pca' + str(counter) + '_code.npy')
        ipca.partial_fit(features_file[:, 0:4096])
        timer_pca.toc()
    return ipca
Developer: GMNetto | Project: CS2951t_Project | Lines: 11 | Source: perform_analysis.py


Example 12: train_pca_model

def train_pca_model(collection_name, feature_name, n_components, iterations=100, batch_size=20):
    collection = collection_from_name(collection_name)
    model = IncrementalPCA(n_components=n_components)

    partial_unpickle_data = partial(unpickle_data, feature_name=feature_name)

    for _ in range(iterations):
        feature = map(partial_unpickle_data, collection.aggregate([{'$sample': {'size': batch_size}}]))
        feature = np.hstack(feature).T

        model.partial_fit(feature)

    return model
Developer: arturmiller | Project: MachineLearning | Lines: 13 | Source: incremental_function_trainer.py


Example 13: test_incremental_pca_inverse

def test_incremental_pca_inverse():
    """Test that the projection of data can be inverted."""
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean

    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
    Y = ipca.transform(X)
    Y_inverse = ipca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
Developer: 0x0all | Project: scikit-learn | Lines: 14 | Source: test_incremental_pca.py


Example 14: test_singular_values

def test_singular_values():
    # Check that the IncrementalPCA output has the correct singular values

    rng = np.random.RandomState(0)
    n_samples = 1000
    n_features = 100

    X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
                                      effective_rank=10, random_state=rng)

    pca = PCA(n_components=10, svd_solver='full', random_state=rng).fit(X)
    ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
    assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)

    # Compare to the Frobenius norm
    X_pca = pca.transform(X)
    X_ipca = ipca.transform(X)
    assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
                              np.linalg.norm(X_pca, "fro")**2.0, 12)
    assert_array_almost_equal(np.sum(ipca.singular_values_**2.0),
                              np.linalg.norm(X_ipca, "fro")**2.0, 2)

    # Compare to the 2-norms of the score vectors
    assert_array_almost_equal(pca.singular_values_,
                              np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
    assert_array_almost_equal(ipca.singular_values_,
                              np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2)

    # Set the singular values and see what we get back
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 110

    X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
                                      effective_rank=3, random_state=rng)

    pca = PCA(n_components=3, svd_solver='full', random_state=rng)
    ipca = IncrementalPCA(n_components=3, batch_size=100)

    X_pca = pca.fit_transform(X)
    X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
    X_pca[:, 0] *= 3.142
    X_pca[:, 1] *= 2.718

    X_hat = np.dot(X_pca, pca.components_)
    pca.fit(X_hat)
    ipca.fit(X_hat)
    assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
    assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
Developer: allefpablo | Project: scikit-learn | Lines: 49 | Source: test_incremental_pca.py


Example 15: generate_pca_compression

def generate_pca_compression(X, n_components=16, batch_size=100):
    """
    Compresses the data using sklearn PCA implementation.

    :param X: Data (n_samples, n_features)
    :param n_components: Number of dimensions for PCA to keep
    :param batch_size: Batch size for incremental PCA

    :return: X_prime (the compressed representation), pca
    """

    pca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
    pca.fit(X)

    return pca.transform(X), pca
Developer: TieSKey | Project: database_dcnn | Lines: 15 | Source: compression.py


Example 16: test_n_components_none

def test_n_components_none():
    # Ensures that n_components == None is handled correctly
    rng = np.random.RandomState(1999)
    for n_samples, n_features in [(50, 10), (10, 50)]:
        X = rng.rand(n_samples, n_features)
        ipca = IncrementalPCA(n_components=None)

        # First partial_fit call, ipca.n_components_ is inferred from
        # min(X.shape)
        ipca.partial_fit(X)
        assert ipca.n_components_ == min(X.shape)

        # Second partial_fit call, ipca.n_components_ is inferred from
        # ipca.components_ computed from the first partial_fit call
        ipca.partial_fit(X)
        assert ipca.n_components_ == ipca.components_.shape[0]
Developer: allefpablo | Project: scikit-learn | Lines: 16 | Source: test_incremental_pca.py


Example 17: __init__

 def __init__(self, components):
   PCAnalyzer.__init__(self)
   if isinstance(components, int):
     self.n_components = components
   self.pca = IncrementalPCA(n_components=components, batch_size=500)
   self.num_seen = 0
   self.type = 'incremental'
Developer: DaMSL | Project: ddc | Lines: 7 | Source: pca.py


Example 18: __init__

class MyPCA:

    def __init__(self, filename=None):
        if not filename:
            self.model = IncrementalPCA(NUM_COMP)
        else:
            with open(filename, 'rb') as f:  # pickle requires binary mode
                self.model = pickle.load(f)

    def train(self, X):
        self.model.partial_fit(X)

    def transform(self, X):
        return self.model.transform(X)

    def dump(self, filename):
        with open(filename, 'wb') as f:
            pickle.dump(self.model, f)
Developer: avg14 | Project: galaxyzoo | Lines: 18 | Source: mypca.py
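
A hypothetical round trip with the MyPCA wrapper above; the batches, arrays, and file name are placeholders, and NUM_COMP comes from the surrounding module:

import numpy as np

batches = [np.random.rand(200, 64) for _ in range(5)]  # placeholder data; assumes NUM_COMP <= 64
pca = MyPCA()                        # fresh IncrementalPCA with NUM_COMP components
for batch in batches:
    pca.train(batch)
pca.dump('pca_model.pkl')            # persist the fitted model via pickle

pca_loaded = MyPCA('pca_model.pkl')  # reload later
reduced = pca_loaded.transform(np.random.rand(10, 64))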


Example 19: test_incremental_pca_partial_fit

def test_incremental_pca_partial_fit():
    """Test that fit and partial_fit get equivalent results."""
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean

    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    batch_size = 10
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
    pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    # Add one to make sure endpoint is included
    batch_itr = np.arange(0, n + 1, batch_size)
    for i, j in zip(batch_itr[:-1], batch_itr[1:]):
        pipca.partial_fit(X[i:j, :])
    assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
Developer: 0x0all | Project: scikit-learn | Lines: 18 | Source: test_incremental_pca.py


Example 20: test_whitening

def test_whitening():
    """Test that PCA and IncrementalPCA transforms match to sign flip."""
    X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
                                      effective_rank=2, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 9]:
        pca = PCA(whiten=True, n_components=nc).fit(X)
        ipca = IncrementalPCA(whiten=True, n_components=nc,
                              batch_size=250).fit(X)

        Xt_pca = pca.transform(X)
        Xt_ipca = ipca.transform(X)
        assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
        Xinv_ipca = ipca.inverse_transform(Xt_ipca)
        Xinv_pca = pca.inverse_transform(Xt_pca)
        assert_almost_equal(X, Xinv_ipca, decimal=prec)
        assert_almost_equal(X, Xinv_pca, decimal=prec)
        assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
Developer: 0x0all | Project: scikit-learn | Lines: 19 | Source: test_incremental_pca.py



Note: The sklearn.decomposition.IncrementalPCA class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

