• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python cluster.SpectralClustering类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中sklearn.cluster.SpectralClustering的典型用法代码示例。如果您正苦于以下问题:Python SpectralClustering类的具体用法?Python SpectralClustering怎么用?Python SpectralClustering使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



在下文中一共展示了SpectralClustering类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: spectral_clustering

def spectral_clustering(matrix, N):
    """Partition the rows of *matrix* into N spectral clusters.

    Returns a list of N lists; entry c holds the row indices that were
    assigned to cluster c.
    """
    model = SpectralClustering(n_clusters=N)
    assignments = model.fit_predict(matrix)
    groups = [[] for _ in range(N)]
    for row_idx, cluster_id in enumerate(assignments):
        groups[cluster_id].append(row_idx)
    return groups
开发者ID:seba-1511,项目名称:specialists,代码行数:7,代码来源:specialist.py


示例2: fast_app_spe_cluster

def fast_app_spe_cluster(data, label, k, n_cluster):
    """Fast approximate spectral clustering (KASP-style).

    First compresses *data* to k representative points with k-means, then
    spectral-clusters those representatives, and finally propagates each
    representative's cluster label back to the samples mapped to it.

    Parameters: data - sample matrix; label - unused here (kept for the
    caller's signature); k - number of k-means representatives;
    n_cluster - number of final spectral clusters.
    Returns the list of per-sample cluster labels.
    """
    # time.clock() was removed in Python 3.8; perf_counter is the
    # recommended monotonic wall-clock replacement for timing.
    start_time = time.perf_counter()
    k_means = KMeans(n_clusters=k)
    k_means.fit(data)
    y_centers = k_means.cluster_centers_
    # predict() returns, for every sample, the index of its closest
    # center - exactly what the original O(m*k) hand-rolled distance
    # loop computed, but vectorized inside sklearn.
    x_to_centers_table = k_means.predict(data)
    # Spectral clustering on the representatives only.
    spe_cluster = SpectralClustering(n_clusters=n_cluster)
    spe_cluster.fit(y_centers)
    spe_label = spe_cluster.labels_
    # Lift the representative labels back to the full sample set.
    x_label = [spe_label[center_idx] for center_idx in x_to_centers_table]
    spend_time = time.perf_counter() - start_time
    print("spend time is %f seconds" % spend_time)
    return x_label
开发者ID:Yayong-guan,项目名称:mlcode,代码行数:29,代码来源:fast_appromate_spe_cluster.py


示例3: create_word2vec_cluster

def create_word2vec_cluster(word2vec_model):
    """Spectral-cluster the word vectors of a word2vec model.

    Uses roughly one cluster per 1000 vocabulary words, fits the model,
    pickles it to disk, and returns the fitted clusterer.
    NOTE(review): `.syn0` is the legacy gensim attribute name - confirm
    against the gensim version in use.
    """
    word_vectors = word2vec_model.syn0
    # Floor division: n_clusters must be an int; plain `/` yields a float
    # under Python 3 and SpectralClustering rejects it.
    num_clusters = word_vectors.shape[0] // 1000
    spectral_cluster_model = SpectralClustering(n_clusters=num_clusters)
    idx = spectral_cluster_model.fit_predict(word_vectors)
    pickle.dump(spectral_cluster_model, open(r"C:\Ofir\Tau\Machine Learning\Project\project\k_means_model.pkl", "wb"))
    return spectral_cluster_model
开发者ID:ogreenz,项目名称:Sentiment_Analysis_Movie_Reviews,代码行数:7,代码来源:clean_util.py


示例4: compute_centroid_set

    def compute_centroid_set(self, **kwargs):
        """Cluster repeated subsets of the document vectors; return centroids.

        Draws subsets of self.docv via subset_iterator, spectral-clusters
        each subset under a precomputed cosine-affinity matrix, and collects
        the unit-normalized centroid of every cluster.  Returns all centroids
        stacked into a single array.
        """
        sample_iter = subset_iterator(X=self.docv, m=self.subcluster_m, repeats=self.subcluster_repeats)

        n_sub = self.subcluster_kn
        clf = SpectralClustering(n_clusters=n_sub, affinity="precomputed")

        collected = []

        for X in sample_iter:
            # Drop rows that are exactly the zero vector: cosine affinity
            # is undefined for them.
            keep = (X ** 2).sum(axis=1) != 0
            X = X[keep]

            labels = clf.fit_predict(cosine_affinity(X))

            # One unit-norm centroid per cluster.
            dim = X.shape[1]
            centroids = np.zeros((n_sub, dim))
            for cluster_id in range(n_sub):
                mu = X[labels == cluster_id].mean(axis=0)
                centroids[cluster_id] = mu / np.linalg.norm(mu)

            collected.append(centroids)

        return np.vstack(collected)
开发者ID:NIHOPA,项目名称:pipeline_word2vec,代码行数:30,代码来源:metacluster.py


示例5: call_spectral

def call_spectral(num_cluster, mode_, data, update_flag):
    """Spectral clustering over a symmetrized k-NN connectivity graph.

    Standardizes *data*, builds a 10-nearest-neighbor connectivity graph,
    symmetrizes it, and clusters it with a precomputed-affinity spectral
    model.  If update_flag is truthy, returns the raw label array;
    otherwise returns (label_dict, unique_dict), both keyed by stringified
    running indices with float values.  `mode_` is accepted but unused.
    """
    X = StandardScaler().fit_transform(data)
    spectral = SpectralClustering(n_clusters=num_cluster, eigen_solver='arpack',
                                  affinity='precomputed')
    connectivity = kneighbors_graph(X, n_neighbors=10)
    # Symmetrize: kneighbors_graph is directed, the affinity must not be.
    connectivity = 0.5 * (connectivity + connectivity.T)
    spectral.fit(connectivity)
    labels = spectral.labels_

    if update_flag:
        return labels

    # "0", "1", ... -> label of the sample at that position.
    label_dict = {str(i): float(label) for i, label in enumerate(labels)}
    # print() calls: the original Python-2 print statements are a
    # SyntaxError under Python 3.
    print(label_dict)

    # "0", "1", ... -> each distinct label value, in sorted order.
    unique_dict = {}
    for i, uniq in enumerate(np.unique(labels)):
        print(uniq)
        unique_dict[str(i)] = float(uniq)
    print(unique_dict)

    return label_dict, unique_dict
开发者ID:benaneesh,项目名称:cluster,代码行数:29,代码来源:algorithm_manager.py


示例6: scikit_pca

def scikit_pca(model, rel_wds, plot_lims, title, cluster="kmeans"):
    """Plot all word vectors of a word2vec model in 2-D PCA space.

    *cluster* chooses the coloring: "kmeans" (8 clusters) or "spectral".
    Returns the explained-variance ratio of the 2-component PCA.
    """
    X, keys = make_data_matrix(model)

    for idx, key in enumerate(keys):
        X[idx,] = model[key]

    # Label the vectors with the requested clustering algorithm.
    if cluster == "kmeans":
        labels = KMeans(n_clusters=8).fit_predict(X)
    elif cluster == "spectral":
        labels = SpectralClustering().fit_predict(X)

    # Standardize, then project to two principal components for plotting.
    X_std = StandardScaler().fit_transform(X)
    pca = PCA(n_components=2)
    X_2d = pca.fit_transform(X_std)

    scatter_plot(X_2d[:, 0], X_2d[:, 1], rel_wds, labels, title, keys, plot_lims)

    return pca.explained_variance_ratio_
开发者ID:quinngroup,项目名称:sm_w2v,代码行数:26,代码来源:plot_utils.py


示例7: spectral_clustering

def spectral_clustering(G, graph_name, num_clusters):
    """Split graph G into spectral clusters and write each one as GEXF.

    Clusters the RBF affinity of G's adjacency matrix (n_clusters is
    currently hard-coded to 2; `num_clusters` is accepted but unused),
    writes every cluster's induced subgraph under Constants.SPECTRAL_PATH,
    and returns the list of subgraphs.
    """
    subgraphs = []
    write_directory = os.path.join(Constants.SPECTRAL_PATH,graph_name)
    if not os.path.exists(write_directory):
        os.makedirs(write_directory)
    # Materialize the nodes: networkx >= 2.0 returns a NodeView here,
    # which does not support the integer indexing used below.
    nodeList = list(G.nodes())
    matrix_data = nx.to_numpy_matrix(G, nodelist = nodeList)
    spectral = SpectralClustering(n_clusters=2,
                                          eigen_solver='arpack',
                                          affinity="rbf")
    spectral.fit(matrix_data)
    label = spectral.labels_

    # Group node names by their assigned cluster label.
    clusters = {}
    for nodeIndex, nodeLabel in enumerate(label):
        if nodeLabel not in clusters:
            clusters[nodeLabel] = []
        clusters[nodeLabel].append(nodeList[nodeIndex])

    # Write one GEXF file per cluster's induced subgraph.
    for clusterIndex, subGraphNodes in enumerate(clusters.keys()):
        subgraph = G.subgraph(clusters[subGraphNodes])
        subgraphs.append(subgraph)
        nx.write_gexf(subgraph, os.path.join(write_directory,graph_name+str(clusterIndex)+"_I"+Constants.GEXF_FORMAT))
    return subgraphs
开发者ID:subincm,项目名称:hierarchical_nw_align,代码行数:28,代码来源:spectral_clustering.py


示例8: main

def main(cm_file, perm_file, steps, labels_file, limit_classes=None):
    """Run optimization and generate output.

    Loads a confusion matrix (JSON) and optional labels, spectral-clusters
    the matrix into 14 groups, prints the silhouette score, and prints the
    resulting label grouping.  `perm_file`, `steps`, and `limit_classes`
    are accepted but unused in this block.
    """
    # Load confusion matrix
    with open(cm_file) as f:
        cm = json.load(f)
        cm = np.array(cm)

    # Load labels; fall back to integer indices when no file exists.
    if os.path.isfile(labels_file):
        with open(labels_file, "r") as f:
            labels = json.load(f)
    else:
        labels = list(range(len(cm)))

    n_clusters = 14  # hyperparameter
    spectral = SpectralClustering(n_clusters=n_clusters,
                                  eigen_solver='arpack',
                                  affinity="nearest_neighbors")
    spectral.fit(cm)
    if hasattr(spectral, 'labels_'):
        # astype(int): the np.int alias was removed in NumPy 1.24.
        y_pred = spectral.labels_.astype(int)
    else:
        y_pred = spectral.predict(cm)
    sscore = silhouette_score(cm, y_pred)
    print("silhouette_score={} with {} clusters"
          .format(sscore, n_clusters))
    # Bucket the original labels by predicted cluster and report sizes.
    grouping = [[] for _ in range(n_clusters)]
    for label, y in zip(labels, y_pred):
        grouping[y].append(label)
    for group in grouping:
        print("  {}: {}".format(len(group), group))
开发者ID:directorscut82,项目名称:msthesis-experiments,代码行数:31,代码来源:spektral_clust.py


示例9: spectral_clustering

def spectral_clustering(k, X, G, W=None, run_times=5):
    """Weighted spectral clustering on a precomputed Gram matrix.

    Scales G by the square root of the weight matrix W on both sides
    (W defaults to the identity), then runs sklearn's SpectralClustering
    with `run_times` k-means initializations.  Returns the label vector.
    """
    # `W is None` is the idiomatic sentinel check; the original
    # `type(W) == type(None)` comparison is an anti-pattern.
    if W is None:
        W = np.eye(len(X))
    W2 = np.sqrt(W)
    # Gtilde = sqrt(W) G sqrt(W): symmetric weighting of the Gram matrix.
    Gtilde = W2.dot(G.dot(W2))
    sc = SpectralClustering(k, affinity='precomputed', n_init=run_times)
    zh = sc.fit_predict(Gtilde)
    return zh
开发者ID:neurodata,项目名称:non-parametric-clustering,代码行数:8,代码来源:wrapper.py


示例10: spectral_clustering2

def spectral_clustering2(similarity, concepts=2, euclid=False):
    """Spectral-cluster a similarity matrix into `concepts` groups.

    With euclid=True the matrix is treated as raw data and a
    nearest-neighbors affinity is built from it; otherwise it is used
    directly as a precomputed affinity (negative entries clipped to 0,
    as required for similarity kernels).  Returns the label vector.
    """
    if euclid:
        model = SpectralClustering(n_clusters=concepts, affinity='nearest_neighbors')
        return model.fit_predict(similarity)
    else:
        model = SpectralClustering(n_clusters=concepts, affinity='precomputed')
        # Clip on a copy: the original zeroed negatives in the caller's
        # array in place, silently mutating the input.
        return model.fit_predict(np.clip(similarity, 0, None))
开发者ID:thran,项目名称:experiments2.0,代码行数:8,代码来源:clusterings.py


示例11: run

 def run(self, features, number_of_clusters=2, restarts=10, delta=3.0):
     """Spectral-cluster *features*; return a one-element list of labels.

     number_of_clusters == 1 is short-circuited (everything gets label 0).
     `delta` is the similarity kernel width passed to get_similarity;
     `restarts` is forwarded as the number of k-means initializations.
     """
     if number_of_clusters == 1:
         result = numpy.zeros(len(features), dtype=numpy.int32)
         return [result]
     # n_clusters=...: the legacy `k=` keyword was renamed to n_clusters
     # in scikit-learn 0.17 and later removed entirely.
     classifier = SpectralClustering(n_clusters=number_of_clusters, n_init=restarts)
     similarity = get_similarity(features, delta)
     classifier.fit(similarity)
     return [classifier.labels_]
开发者ID:jspobst,项目名称:spikepy,代码行数:8,代码来源:clustering_spectral_sklearn.py


示例12: get_coregulatory_states

def get_coregulatory_states(corr_matrices, similarity_matrix, n_clusters):
    """Cluster correlation matrices by similarity; average each cluster.

    Returns (coreg_states, labels) where coreg_states maps each cluster
    label to the element-wise mean of the correlation matrices assigned
    to that cluster.
    """
    model = SpectralClustering(n_clusters=n_clusters, affinity='precomputed')
    labels = model.fit_predict(similarity_matrix)

    coreg_states = {
        cluster_id: corr_matrices[labels == cluster_id, :, :].mean(axis=0)
        for cluster_id in np.unique(labels)
    }
    return coreg_states, labels
开发者ID:dimenwarper,项目名称:scimitar,代码行数:8,代码来源:coexpression.py


示例13: dist_spectral

def dist_spectral(x, y):
    """Spectral-cluster the 2-D points (x[s], y[s]) into 3 groups.

    NOTE(review): the number of points is taken from the module-level
    `dataset` variable, not from len(x) - confirm they always agree.
    Returns the cluster label vector.
    """
    points = np.array([np.array([x[s], y[s]]) for s in range(dataset.shape[0])])
    model = SpectralClustering(n_clusters=3, eigen_solver='arpack',
                               affinity="nearest_neighbors")
    return model.fit_predict(points)
开发者ID:cguls,项目名称:DataScienceFinal,代码行数:9,代码来源:final.py


示例14: spectral

def spectral(k, X, G, run_times=10):
    """Spectral clustering from sklearn library. 
    run_times is the number of times the algorithm is gonna run with different
    initializations.
    
    """
    # G is consumed as a precomputed affinity matrix; X is unused here
    # but kept for a signature parallel to the sibling helpers.
    model = SpectralClustering(k, affinity='precomputed', n_init=run_times)
    return model.fit_predict(G)
开发者ID:neurodata,项目名称:non-parametric-clustering,代码行数:9,代码来源:run_clustering.py


示例15: spectral_clustering

def spectral_clustering(S, X, config):
    '''
    Computes spectral clustering from an input similarity matrix.
    Returns the labels associated with the clustering.
    '''
    from sklearn.cluster import SpectralClustering

    # S is accepted but unused; clustering runs on X under a cosine affinity.
    n_clusters = int(config["n_clusters"])
    model = SpectralClustering(affinity='cosine', n_clusters=n_clusters)
    return model.fit_predict(X)
开发者ID:NIHOPA,项目名称:pipeline_word2vec,代码行数:10,代码来源:similarity.py


示例16: test_affinities

def test_affinities():
    """Both nearest-neighbors and RBF affinities must recover two blobs."""
    X, y = make_blobs(n_samples=40, random_state=1,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.4)

    # nearest neighbors affinity: perfect agreement with the true labels.
    nn_model = SpectralClustering(n_clusters=2, affinity="nearest_neighbors",
                                  random_state=0)
    assert_equal(adjusted_rand_score(y, nn_model.fit(X).labels_), 1)

    # default RBF affinity with gamma=2: same perfect recovery.
    rbf_model = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    assert_equal(adjusted_rand_score(y, rbf_model.fit(X).labels_), 1)
开发者ID:GbalsaC,项目名称:bitnamiP,代码行数:10,代码来源:test_spectral.py


示例17: run

	def run(self, k):
		"""Cluster self.X into k groups; store and return the allocation.

		When self.data_is_kernel is true, self.X is treated as a
		precomputed kernel; otherwise sklearn builds the (RBF) affinity
		itself and we keep the matrix it produced.
		"""
		if self.data_is_kernel:
			model = SpectralClustering(n_clusters=k, gamma=self.gammav, affinity='precomputed')
			self.allocation = model.fit_predict(self.X)
			self.kernel = self.X
		else:
			# X is raw data: let the model compute its own affinity matrix.
			model = SpectralClustering(n_clusters=k, gamma=self.gammav)
			self.allocation = model.fit_predict(self.X)
			self.kernel = model.affinity_matrix_

		return self.allocation
开发者ID:juliaprocess,项目名称:chieh_libs,代码行数:11,代码来源:spectral_clustering.py


示例18: spectral

def spectral(k, X, G, z, run_times=10):
    """Spectral clustering from sklearn library. 
    run_times is the number of times the algorithm is gonna run with different
    initializations.
    
    """
    # Cluster the precomputed affinity G, then score the prediction
    # against the ground-truth labels z.  X is unused here.
    model = SpectralClustering(k, affinity='precomputed', n_init=run_times)
    predicted = model.fit_predict(G)
    accuracy = metric.accuracy(z, predicted)
    vi = metric.variation_information(z, predicted)
    return accuracy, vi
开发者ID:neurodata,项目名称:non-parametric-clustering,代码行数:11,代码来源:run_clustering_bk.py


示例19: cluster_faces_CNN

def cluster_faces_CNN(name = '[email protected]', img_list = 'faces_list.txt'):
    """Spectral-cluster CNN face embeddings and build an HTML report.

    Loads the precomputed face-similarity matrix for *name*, clusters it,
    keeps clusters whose mean intra-cluster similarity exceeds 0.5 as
    unique people (sorted by cluster size), groups the face image names by
    cluster, and hands everything to create_face_group_html_CNN.

    The original was Python-2 only (print statements, xrange, cPickle,
    text-mode unpickling); this version is Python 3.
    """
    import pickle  # cPickle is Python-2 only; pickle reads the same format

    root = '/Users/wangyufei/Documents/Study/intern_adobe/face_recognition_CNN/'+name + '/'
    # 'rb' + with-block: pickled data is binary, and the original never
    # closed this handle on the error path.
    with open(root + model_name + 'similarity_matrix.cPickle', 'rb') as f:
        affinity_matrix = pickle.load(f)

    clusterer = SpectralClustering(
        affinity='precomputed',
        n_clusters=min(8, affinity_matrix.shape[0] - 1),
        eigen_solver='arpack',
        n_neighbors=min(5, affinity_matrix.shape[0]))
    a = clusterer.fit_predict(affinity_matrix)

    # cluster id -> list of face indices
    groups = {}
    for face_idx, group_id in enumerate(a):
        groups.setdefault(group_id, []).append(face_idx)

    # Keep clusters whose mean pairwise similarity is above 0.5.
    unique_person_id = []
    for kk in groups:
        min_similarity = np.inf
        max_similarity = -np.inf
        mean_similarity = 0
        this_group_ids = groups[kk]
        for j in range(len(this_group_ids)):
            for i in range(j + 1, len(this_group_ids)):
                sim = affinity_matrix[this_group_ids[i], this_group_ids[j]]
                if sim < min_similarity:
                    min_similarity = sim
                if sim > max_similarity:
                    max_similarity = sim
                mean_similarity += sim
        # Average over the number of unordered pairs; max(1, ...) guards
        # against division by zero for singleton clusters.
        mean_similarity /= max(1, len(this_group_ids)*(len(this_group_ids) - 1) / 2)
        print(len(this_group_ids), mean_similarity, max_similarity, min_similarity)
        if mean_similarity > 0.5:
            unique_person_id.append(kk)

    # [cluster id, cluster size], largest clusters first.
    important_person = [[pid, len(groups[pid])] for pid in unique_person_id]
    important_person.sort(key=lambda x: x[1], reverse=True)

    # Read the image file names (basename only, newline stripped).
    imgs_list = []
    with open(root + img_list, 'r') as data:
        for line in data:
            line = line[:-1]
            imgs_list.append(line.split('/')[-1])

    # cluster id -> list of image file names
    face_groups = {}
    for group_id, img_name in zip(a, imgs_list):
        face_groups.setdefault(group_id, []).append(img_name)

    create_face_group_html_CNN(name, face_groups, important_person)
开发者ID:feiyu1990,项目名称:intern,代码行数:54,代码来源:face_recognition_CNN.py


示例20: spectral_clustering

def spectral_clustering(crime_rows, column_names, num_clusters, affinity='rbf', n_neighbors=0,
        assign_labels='kmeans'):
    """Spectral-cluster crime locations and format the result.

    Parameters
    ----------
    crime_rows : rows whose first two fields are the x/y coordinates;
        the remaining fields are auxiliary crime attributes.
    column_names : column names, forwarded to _format_clustering.
    num_clusters : the dimension of the projection subspace (n_clusters).
    affinity : 'nearest_neighbors', 'precomputed', 'rbf', or any kernel
        supported by sklearn.metrics.pairwise_kernels.  Only kernels that
        produce similarity scores (non-negative values that increase with
        similarity) should be used; this is not checked by the algorithm.
    n_neighbors : neighbor count used when building the affinity matrix
        with the nearest-neighbors method; ignored for affinity='rbf'.
    assign_labels : 'kmeans' or 'discretize' - the strategy used to assign
        labels in the embedding space; discretization is less sensitive to
        random initialization.
    """
    # Split coordinates from the remaining attributes.
    crime_xy = [row[0:2] for row in crime_rows]
    crime_info = [row[2:] for row in crime_rows]
    model = SpectralClustering(
            n_clusters=num_clusters,
            affinity=affinity,
            n_neighbors=n_neighbors,
            assign_labels=assign_labels)
    print("Running spectral clustering....")
    print("length crimexy")
    print(len(crime_xy))
    # NOTE(review): the model is fit on a random sample of at most 3000
    # points, so the labels correspond to that sampled subset.
    labels = model.fit_predict(
            random_sampling(crime_xy, num_samples=3000))
    print("Formatting......")
    return _format_clustering(labels, crime_xy, crime_info,
            column_names, num_clusters=num_clusters)
开发者ID:egaebel,项目名称:crime-on-the-move-back-end--Python,代码行数:53,代码来源:clustering.py



注:本文中的sklearn.cluster.SpectralClustering类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python cluster.Ward类代码示例发布时间:2022-05-27
下一篇:
Python cluster.MiniBatchKMeans类代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap