This article compiles typical usage examples of the Python class sklearn.cluster.AffinityPropagation. If you are wondering what AffinityPropagation is for, how to use it, or where to find real examples, the curated class examples below may help.
The following presents 20 code examples of the AffinityPropagation class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
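Before the examples, here is a minimal sketch of the core API that all of the snippets below rely on: construct the estimator, call fit, then read labels_ and cluster_centers_indices_. The toy data and default parameters are illustrative assumptions, not taken from any of the projects listed below.

from sklearn.cluster import AffinityPropagation
import numpy as np

# Illustrative toy data (assumption): two loose groups of points in 2-D
X = np.array([[1.0, 1.1], [1.2, 0.9], [0.9, 1.0],
              [8.0, 8.2], [7.9, 8.1], [8.1, 7.9]])

af = AffinityPropagation()            # defaults: damping=0.5, affinity='euclidean'
af.fit(X)
print(af.labels_)                     # cluster label assigned to each sample
print(af.cluster_centers_indices_)    # indices of the exemplar (center) samples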
Example 1: clustering
def clustering(self):
    # Calculate similarity matrix
    X = self.create_tfidf_vector()
    X = X.toarray()
    pca = PCA(n_components=300, copy=False)
    X = pca.fit(X).transform(X)
    S = cosine_similarity(X, X)
    # Run affinity propagation
    af = AffinityPropagation()
    af.fit(S)
    # Formulate result
    tmp_clusters = defaultdict(list)
    goal_clusters = defaultdict(list)
    cluster_centers_indices = af.cluster_centers_indices_
    labels = af.labels_
    count = 0
    for label in labels:
        tmp_clusters[
            self.goal_list[cluster_centers_indices[label]]].append(
            self.goal_list[count])
        count += 1
    # 2nd-layer clustering of each cluster
    for goal, item_list in tmp_clusters.items():
        subclusters = self.subcluster_by_editdistance(goal, item_list)
        for subgoal, items in subclusters.items():
            goal_clusters[subgoal] = items
    return goal_clusters
Author: a33kuo | Project: procedural_knowledge | Lines: 27 | Source: goal_cluster.py
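A note on Example 1: S is a cosine-similarity matrix, but the estimator is constructed with its default affinity='euclidean', so fit(S) treats each row of S as a feature vector rather than using S as the similarity matrix itself. If the intent were to cluster on the precomputed similarities directly, a sketch like the following would pass them explicitly (this is an assumption about intent, not the project's actual code):

# Hedged alternative: feed the cosine-similarity matrix as precomputed affinities
af = AffinityPropagation(affinity='precomputed')
af.fit(S)                              # S must be a square (n_samples, n_samples) similarity matrix
labels = af.labels_
exemplar_indices = af.cluster_centers_indices_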
Example 2: main
def main():
    '''
    >>> main() # stuff happens
    '''
    args = parse_args()
    setup_logging(args.log, verbose=args.verbose)
    chunks = sequence_chunk_generator(args.fasta_file,
                                      chunk_size=args.chunk_size)
    hasher = HashingVectorizer(analyzer='char',
                               n_features=2 ** 18,
                               ngram_range=(args.ngram_min, args.ngram_max),
                               )
    estimator = AffinityPropagation()
    for chunk in chunks:
        logging.info('hashing chunk')
        chunk_vector = hasher.transform([str(i.seq) for i in chunk])
        logging.info('clustering')
        estimator.fit(chunk_vector)
        logging.info('got %s clusters' % len(set(estimator.labels_)))
Author: audy | Project: bfc | Lines: 28 | Source: ooc.py
Example 3: clusterSimilarityWithSklearnAPC
def clusterSimilarityWithSklearnAPC(data_file, damping=0.9, max_iter=200, convergence_iter=15, preference='min'):
    """
    Compare Sparse Affinity Propagation (SAP) result with SKlearn Affinity Propagation (AP) Clustering result.
    Please note that the convergence condition for Sklearn AP is "no change in the number of estimated clusters",
    while for SAP the condition is "no change in the cluster assignment".
    So SAP may take more iterations, and there will be slight differences in the final cluster assignment (exemplars for each sample).
    """
    # loading data
    simi_mat = loadMatrix(data_file)
    simi_mat_dense = simi_mat.todense()
    # get preference
    if preference == 'min':
        preference = np.min(simi_mat_dense)
    elif preference == 'median':
        preference = np.median(simi_mat_dense)
    print('{0}, start SKlearn Affinity Propagation'.format(datetime.now()))
    af = AffinityPropagation(damping=damping, preference=preference, affinity='precomputed', verbose=True)
    af.fit(simi_mat_dense)
    cluster_centers_indices, labels = af.cluster_centers_indices_, af.labels_
    sk_exemplars = np.asarray([cluster_centers_indices[i] for i in labels])
    print('{0}, start Fast Sparse Affinity Propagation Cluster'.format(datetime.now()))
    sap = SAP(preference=preference, convergence_iter=convergence_iter, max_iter=max_iter, damping=damping, verboseIter=100)
    sap_exemplars = sap.fit_predict(simi_mat_dense)
    # Calculate similarity between sk_exemplars and sap_exemplars
    exemplars_similarity = sparseAP_cy.arrSamePercent(np.array(sk_exemplars), np.array(sap_exemplars))
    return exemplars_similarity
Author: bioinfocao | Project: pysapc | Lines: 30 | Source: test_sap.py
Example 4: clusterAffinityPropagation
def clusterAffinityPropagation(self):
    """
    Cluster the embeddings with affinity propagation
    :return:
    """
    affin = AffinityPropagation()
    affin.fit(self.emb1.m)
    aflabels1 = affin.labels_
    afclusters1 = dict()
    word2cluster1 = dict()
    for i, l in enumerate(aflabels1):
        points = afclusters1.setdefault(l, list())
        points.append(self.emb1.rd[i])
    for l, c in afclusters1.items():
        for w in c:
            word2cluster1[w] = l
    self.cluster1 = afclusters1
    self.word2cluster1 = word2cluster1
    affin.fit(self.emb2.m)
    aflabels2 = affin.labels_
    afclusters2 = dict()
    word2cluster2 = dict()
    for i, l in enumerate(aflabels2):
        points = afclusters2.setdefault(l, list())
        points.append(self.emb2.rd[i])
    for l, c in afclusters2.items():
        for w in c:
            word2cluster2[w] = l
    self.cluster2 = afclusters2
    self.word2cluster2 = word2cluster2
Author: juliakreutzer | Project: loons | Lines: 30 | Source: visualize.py
Example 5: classify_core
def classify_core(self, N_CLUSTERS, clusterType, data_for_trial_type, begin_time, end_time):
    BEGIN_TIME_FRAME = begin_time * self.griddy.TIME_GRID_SPACING
    END_TIME_FRAME = end_time * self.griddy.TIME_GRID_SPACING
    data = data_for_trial_type[:, BEGIN_TIME_FRAME:END_TIME_FRAME, self.griddy.VEL_X]
    labels = None
    if clusterType == 'kmeans':
        kmeans = KMeans(n_clusters=N_CLUSTERS)
        kmeans.fit(data)
        labels = kmeans.labels_
    elif clusterType == 'affinity_propagation':
        ap = AffinityPropagation(damping=0.75)
        ap.fit(data)
        labels = ap.labels_
        N_CLUSTERS = np.max(self.labels) + 1
    elif clusterType == 'DBSCAN':
        dbscan = DBSCAN()
        dbscan.fit(data)
        labels = dbscan.labels_
        N_CLUSTERS = np.max(labels) + 1
        print 'N_CLUSTERS=' + str(N_CLUSTERS)
    elif clusterType == 'AgglomerativeClustering':
        ac = AgglomerativeClustering(n_clusters=N_CLUSTERS)
        ac.fit(data)
        labels = ac.labels_
    else:
        print 'ERROR: clusterType: ' + clusterType + ' is not recognized'
    return (labels, N_CLUSTERS)
Author: SashaRayshubskiy | Project: osmotropotaxis_analysis_python | Lines: 31 | Source: fly_trajectory_classifier.py
Example 6: cluster
def cluster(mat, doc_indices):
    X = mat[:, doc_indices].T
    # Other clustering algorithms can easily be swapped in:
    # http://scikit-learn.org/stable/modules/classes.html#module-sklearn.cluster
    clust = AffinityPropagation()
    clust.fit(X)
    return zip(doc_indices, clust.labels_)
Author: consciousgaze | Project: cs224u | Lines: 7 | Source: distributedwordreps.py
Example 7: cluster
def cluster(scope):
    # Setup data
    df = pd.read_sql('playtype_data', db_engine)
    # Manipulate data into scope
    if scope == 'Team':
        df = df.drop('Player', 1).groupby('Team', as_index=False).mean()
    elif scope == 'Player':
        df = df.drop('Team', 1)
    else:
        raise Exception('This is never supposed to happen')
    # Normalize the data
    df[FEATURES] = (df[FEATURES] - df[FEATURES].mean()) / (df[FEATURES].max() - df[FEATURES].min())
    # Run clustering
    clstr = AffinityPropagation()
    clstr.fit(df[FEATURES])
    # Clump results
    df['cluster'] = clstr.labels_
    df = df.sort('cluster')
    # Convert results to JSON for frontend
    return clusters_to_json(df, scope)
Author: qmac | Project: nba-analysis | Lines: 25 | Source: cluster.py
Example 8: affinity_propagation_cluster_analysis
def affinity_propagation_cluster_analysis(x, y, preference):
    # NOT WORKING BECAUSE I DONT REALLY UNDERSTAND WHAT IT DOES...
    # ADAPTED FROM:
    # http://scikit-learn.org/stable/auto_examples/cluster/plot_affinity_propagation.html#example-cluster-plot-affinity-propagation-py
    X = np.hstack((x.reshape((x.shape[0], 1)), y.reshape((y.shape[0], 1))))
    af = AffinityPropagation()
    af = af.fit(X)
    cluster_centers_indices = af.cluster_centers_indices_
    labels = af.labels_
    labels_unique = np.unique(labels)
    n_clusters_ = len(labels_unique)
    #print("number of estimated clusters : %d" % n_clusters_)
    colors = 'bgrcmykbgrcmykbgrcmykbgrcmykbgrcmykbgrcmykbgrcmyk'  # cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
    for i in xrange(len(np.unique(labels))):
        my_members = labels == i
        cluster_center = X[cluster_centers_indices[i]]
        plt.scatter(X[my_members, 0], X[my_members, 1], s=90, c=colors[i], alpha=0.7)
        plt.scatter(cluster_center[0], cluster_center[1], marker='+', s=280, c=colors[i])
        for j in X[my_members]:
            plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], c=colors[i], linestyle='--')
    tolx = (X[:, 0].max() - X[:, 0].min()) * 0.03
    toly = (X[:, 1].max() - X[:, 1].min()) * 0.03
    plt.xlim(X[:, 0].min() - tolx, X[:, 0].max() + tolx)
    plt.ylim(X[:, 1].min() - toly, X[:, 1].max() + toly)
    plt.show()
    return labels
Author: armatita | Project: GEOMS2 | Lines: 28 | Source: cerena_multivariate_utils.py
Example 9: cluster
def cluster(self, feat_mtx, df_lm_allusers):
    # clustering artists based on AffinityPropagation
    start = time.time()
    af = AffinityPropagation()
    af.fit(feat_mtx)
    self.labels = af.labels_
    self.af = af
    # adding cluster labels to least misery dataframe and sorting by rank and cluster
    # df_least_misery_clustered = self.df_least_misery.copy() --> changing to df_lm_allusers
    print 'number of labels: ', len(self.labels)
    print 'labels', self.labels
    # print 'least misery clustered length', len(df_least_misery_clustered)
    df_least_misery_clustered = df_lm_allusers.copy()
    print 'len df least misery: ', len(df_least_misery_clustered)
    df_least_misery_clustered['cluster'] = self.labels
    df_least_misery_clustered[['cluster', self.score_col]] = df_least_misery_clustered[['cluster', self.score_col]].astype(float)
    ''' will do different sorting if not using rank '''
    # now set to false as looking for highest score
    df_least_misery_clustered = df_least_misery_clustered.sort(['cluster', self.score_col], ascending=False)
    self.df_least_misery_clustered = df_least_misery_clustered
    end = time.time()
    print 'clustering completed in: ', end - start
    return df_least_misery_clustered
Author: bsbell21 | Project: CapstoneProject | Lines: 27 | Source: pipeline_full_131214.py
Example 10: cluster_trajectories
def cluster_trajectories(curves):
    """Given a list of curves, cluster_trajectories will cluster them."""
    n_curves = len(curves)
    X_2B_clstrd = np.zeros((n_curves, 4))
    X_2B_clstrd[:, 0] = np.array([curves[k][0, 0] for k in range(n_curves)])
    X_2B_clstrd[:, 1] = np.array([curves[k][1, 0] for k in range(n_curves)])
    X_2B_clstrd[:, 2] = np.array([curves[k][0, -1] for k in range(n_curves)])
    X_2B_clstrd[:, 3] = np.array([curves[k][1, -1] for k in range(n_curves)])
    for col in range(4):
        X_2B_clstrd[:, col] /= X_2B_clstrd[:, col].std()

    def distance_metric(a, b):
        # A distance metric on R^4 modulo the involution
        # (x0,x2,x3,x4) -> (x3,x4,x1,x2)
        d = lambda a, b: np.sqrt(np.sum((a - b)**2))
        T = lambda x: np.array([x[2], x[3], x[0], x[1]])
        return min(d(a, b), d(T(a), b))

    from sklearn.cluster import AffinityPropagation
    clusterer = AffinityPropagation(affinity='precomputed', convergence_iter=100)
    aff = np.zeros((n_curves, n_curves))
    for i in range(n_curves):
        for j in range(i + 1, n_curves):
            aff[i, j] = np.exp(-distance_metric(X_2B_clstrd[i], X_2B_clstrd[j])**2)
            aff[j, i] = aff[i, j]
    #clusterer.Affinity = aff
    cluster_labels = clusterer.fit_predict(aff)
    out = []
    for label in set(cluster_labels):
        cluster = map(lambda k: curves[k], filter(lambda k: cluster_labels[k] == label, range(n_curves)))
        out.append(cluster)
    return map(align_cluster, out)
Author: hoj201 | Project: pedestrian_forecasting | Lines: 33 | Source: cluster.py
Example 11: loadKmeansData
def loadKmeansData(dataArrayTest, dataArrayTrain, k, m='load'):
    if m == 'load':
        centroidRead = open('centroid', 'r')
        labelClusterRead = open('labelCluster', 'r')
        labelPreRead = open('labelPre', 'r')
        centroid = pickle.load(centroidRead)
        labelCluster = pickle.load(labelClusterRead)
        labelPre = pickle.load(labelPreRead)
    else:
        dataArrayTestNorm = preprocessing.normalize(dataArrayTest)
        dataArrayTrainNorm = preprocessing.normalize(dataArrayTrain)
        #clf=MiniBatchKMeans(init='k-means++', n_clusters=k, n_init=10)
        clf = AffinityPropagation()
        #clf=DBSCAN(min_samples=30)
        pre = clf.fit(dataArrayTrainNorm)
        centroid = pre.cluster_centers_
        centroidWrite = open('centroid', 'w')
        #pickle.dump(centroid,centroidWrite)
        labelCluster = pre.labels_
        labelClusterWrite = open('labelCluster', 'w')
        #pickle.dump(labelCluster,labelClusterWrite)
        labelPre = clf.predict(dataArrayTestNorm)
        labelPreWrite = open('labelPre', 'w')
        #pickle.dump(labelPre,labelPreWrite)
    return centroid, labelCluster, labelPre
Author: 357589873 | Project: mlstudy | Lines: 30 | Source: cup99KnNN.py
Example 12: clusterise_data
def clusterise_data(data_obj):
    """ Assigns a cluster label to each day present in the data received
    using three different algorithms: MeanShift, Affinity Propagation,
    or KMeans.
    @param data_obj: List of dictionaries
    """
    L = len(data_obj)
    # Simply converts data_obj to a 2D list for computation
    List2D = [[None for _ in range(4)] for _ in range(L - 1)]
    for i in range(L - 1):  # don't include current day
        # wake_up and sleep_duration are the most important factors
        List2D[i][0] = 5 * data_obj[i]["wake_up"]
        List2D[i][1] = 1 * data_obj[i]["sleep"]
        List2D[i][2] = 5 * data_obj[i]["sleep_duration"]
        List2D[i][3] = 0.5 * data_obj[i]["activity"]
    points = NumpyArray(List2D)  # converts 2D list to numpy array
    if ALGO == "Affinity Propagation":
        labels = AffinityPropagation().fit_predict(points)
    elif ALGO == "KMeans":
        labels = KMeans(init='k-means++', n_clusters=5, n_init=10).fit_predict(points)
    elif ALGO == "MeanShift":
        bandwidth = estimate_bandwidth(points, quantile=0.2, n_samples=20)
        labels = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit_predict(points)
    else:
        raise Exception("Algorithm not defined: " + str(ALGO))
    for i in range(L - 1):
        data_obj[i]["cluster"] = labels[i]
    for unique_label in remove_duplicates(labels):
        debug_print(ALGO + ": Cluster " + str(unique_label) + " contains " + str(labels.tolist().count(unique_label)) + " data points")
    debug_print(ALGO + ": Silhouette coefficient " + str(metrics.silhouette_score(points, labels, metric='euclidean') * 100) + "%")
Author: qdm12 | Project: Staminaputations | Lines: 33 | Source: api_clustering.py
Example 13: execute
def execute(args):
    ##############################################################################
    if len(args) < 1:
        usage()
        sys.exit()
    names, labels_true, X = parse(args[0])
    indices = [int(i) for i in args[1:]]
    relevant_names = names[1:]
    if len(indices) > 0:
        X = np.asarray([[sample[i] for i in indices] for sample in X])
        relevant_names = [relevant_names[i] for i in indices]
    print "Clustering on", str(relevant_names) + "..."
    ##############################################################################
    # Compute Affinity Propagation
    af = AffinityPropagation(preference=-50)
    # cluster_centers_indices = af.cluster_centers_indices_
    # labels = af.labels_
    #
    # n_clusters_ = len(cluster_centers_indices)
    y_pred = af.fit_predict(X)
    if y_pred is None or len(y_pred) is 0 or type(y_pred[0]) is np.ndarray:
        return 0
    counts = get_cluster_counts(labels_true, y_pred)
    print counts
Author: nmusgrave | Project: Conducere | Lines: 28 | Source: oldAffinityPropagationCluster.py
Example 14: affinity_propagation
def affinity_propagation(crime_rows, column_names):
    """
    damping : float, optional, default: 0.5
        Damping factor between 0.5 and 1.
    convergence_iter : int, optional, default: 15
        Number of iterations with no change in the number of estimated
        clusters that stops the convergence.
    max_iter : int, optional, default: 200
        Maximum number of iterations.
    preference : array-like, shape (n_samples,) or float, optional
        Preferences for each point - points with larger values of preferences
        are more likely to be chosen as exemplars.
        The number of exemplars, ie of clusters, is influenced by the input
        preferences value. If the preferences are not passed as arguments,
        they will be set to the median of the input similarities.
    affinity : string, optional, default=``euclidean``
        Which affinity to use. At the moment precomputed and euclidean are
        supported. euclidean uses the negative squared euclidean distance
        between points.
    """
    crime_xy = [crime[0:2] for crime in crime_rows]
    crime_info = [crime[2:] for crime in crime_rows]
    print("Running Affinity Propagation")
    # TODO: Parameterize this
    affinity_prop = AffinityPropagation()
    #affinity_propagation_labels = affinity_prop.fit_predict(crime_xy)
    affinity_prop.fit(random_sampling(crime_xy, num_samples=5000))
    affinity_propagation_labels = affinity_prop.predict(crime_xy)
    print("formatting....")
    return _format_clustering(affinity_propagation_labels, crime_xy, crime_info,
                              column_names)
Author: egaebel | Project: crime-on-the-move-back-end--Python | Lines: 31 | Source: clustering.py
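The docstring in Example 14 lists the main AffinityPropagation parameters, but the function itself leaves them at their defaults (note the TODO). A minimal sketch of passing them explicitly might look like the following; the specific values are illustrative assumptions, not taken from the project.

# Hypothetical parameter values, for illustration only
affinity_prop = AffinityPropagation(
    damping=0.9,            # slower, more stable message updates (0.5 <= damping < 1)
    max_iter=500,           # cap on message-passing iterations
    convergence_iter=30,    # stop after this many iterations with a stable cluster count
    preference=-50,         # lower preference -> fewer exemplars, hence fewer clusters
)
affinity_prop.fit(crime_xy)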
Example 15: affinity_propagation
def affinity_propagation(feature_matrix):
    sim = feature_matrix * feature_matrix.T
    sim = sim.todense()
    ap = AffinityPropagation()
    ap.fit(sim)
    clusters = ap.labels_
    return ap, clusters
Author: 000Nelson000 | Project: text-analytics-with-python | Lines: 8 | Source: document_clustering.py
Example 16: cluster_prop
def cluster_prop(self, filtered_data):
    prop_dict = {}
    for review in filtered_data:
        for dicti in review['line']:
            if not prop_dict.has_key(dicti["prop"][0]):
                prop_dict[dicti["prop"][0]] = {"freq": 0, "data": [], "idx": []}
            prop_dict[dicti["prop"][0]]['idx'].append(review['index'])
            prop_dict[dicti["prop"][0]]["freq"] += 1
            prop_dict[dicti["prop"][0]]["data"].append(dicti)
    d_list = []
    word_list = []
    for word in prop_dict:
        try:
            d_list.append(self.wmodel[word])
            word_list.append(word)
        except:
            pass
    Aprop = AffinityPropagation(damping=0.6, convergence_iter=100, max_iter=10000)
    Aprop.fit(d_list)
    cluster_dict = {}
    for idx, each in enumerate(Aprop.labels_):
        vec = d_list[idx]
        if not cluster_dict.has_key(each):
            cluster_dict[each] = {"word": [], "freq": 0, "seed": "", "sim": 0.0}
        cluster_dict[each]["word"].append(word_list[idx])
    total_freq = 0
    for each in cluster_dict.keys():
        target_group_id = each
        group_id = each
        last_group_id = target_group_id
        cluster_freq = 0
        max_seed = ""
        max_freq = 0
        for idx, data in enumerate(cluster_dict[each]["word"]):
            cluster_freq += prop_dict[data]["freq"]
            if prop_dict[data]["freq"] > max_freq:
                max_freq = prop_dict[data]["freq"]
                max_seed = data
        cluster_dict[each]["freq"] = cluster_freq
        cluster_dict[each]["seed"] = max_seed
    return (cluster_dict, prop_dict, Aprop)
Author: DevinJeon | Project: soma0612 | Lines: 54 | Source: cluster.py
Example 17: clustering_affinity_propagation
def clustering_affinity_propagation(data_res):
    """
    Executes sklearn's affinity propagation function with the given data frame
    """
    af = AffinityPropagation()
    af.fit(data_res)
    predictions = af.predict(data_res)
    cluster_centers = af.cluster_centers_
    return predictions, cluster_centers, af
Author: luisc29 | Project: ide-usage-data | Lines: 11 | Source: 6-mining.py
Example 18: cluster_articles
def cluster_articles():
    ms = MongoStore()
    articles = [a for a in ms.get_pending_articles()]
    if len(articles) > 0:
        tfidf = TfidfVectorizer(tokenizer=preprocess)
        good_articles = [article for article in articles
                         if article["text_content"].strip() != ""]
        texts = [article["text_content"] for article in good_articles]
        X_tfidf = tfidf.fit_transform(texts)
        print X_tfidf
        ap = AffinityPropagation(damping=0.95, max_iter=4000,
                                 convergence_iter=400, copy=True, preference=-4,
                                 affinity='euclidean', verbose=True)
        C = ap.fit_predict(X_tfidf)
        print X_tfidf.shape, C.shape
        print C
        centers = ap.cluster_centers_indices_
        clusters = []
        for c, center in enumerate(centers):
            members = np.where(C == c)[0]
            K = cosine_similarity(X_tfidf[members], X_tfidf[center])
            member_sims = [(m, float(k)) for m, k in zip(members, K)]
            member_sims.sort(key=lambda x: x[1], reverse=True)
            cluster = {"articles": [], "date": datetime.now(), "summarized": False}
            if len([member for member, sim in member_sims if sim > .55]) >= 3:
                print texts[center][:75].replace("\n", " ")
                for member, sim in member_sims:
                    print "\t{:3.3f} ".format(sim),
                    print good_articles[member]["title"][:60].replace("\n", " ")
                    cluster["articles"].append((good_articles[member]["_id"], sim))
            else:
                continue
            clusters.append(cluster)
        if len(clusters) > 0:
            ms.insert_clusters(clusters)
        ms.set_clustered_flag(articles)
Author: kedz | Project: newsblaster | Lines: 54 | Source: cluster.py
Example 19: affinityprop
def affinityprop(lngs, lats, city, cluster_diameter):
    city_area = city["area"]
    city_lng = city["lng"]
    city_lat = city["lat"]
    lngs = np.array(lngs)  # *(math.cos(city["lat"])**2)
    affinity = AffinityPropagation(damping=0.75, max_iter=200, convergence_iter=15, copy=True, preference=None, affinity='euclidean', verbose=False)
    affinity.fit(np.array([lngs, lats]).transpose())
    cluster_labels = np.array(affinity.labels_)
    return labels_to_index(cluster_labels)
Author: avisochek | Project: scastrap_data_pipeline | Lines: 11 | Source: clustering_algorithms.py
Example 20: cluster_concepts
def cluster_concepts(context="location"):
    """
    Cluster related concepts of a specific type into different categories
    """
    db = Database()
    concept_category = ConceptCategory()
    cmd = "SELECT * FROM %s" % (context)
    context_res = db.query_db(cmd)
    concept_list = []
    concept_matrix = []
    for item in context_res:
        concept_list = []
        concept_matrix = []
        if context == "action":
            context_id, context_chinese, context_name = item[:3]
        elif context == "location":
            context_id, context_name, context_chinese = item
        cmd = (
            "SELECT b.name, b.id FROM %s_concept AS a, concept AS b \
            WHERE a.%s_id = %s AND a.concept_id = b.id"
            % (context, context, context_id)
        )
        concept_res = db.query_db(cmd)
        if len(concept_res) == 0:
            continue
        for item in concept_res:
            concept, concept_id = item
            concept_vector = concept_category.concept_axes.row_named(concept)
            concept_list.append((concept_id, concept))
            concept_matrix.append(concept_vector)
        # Run affinity propagation
        S = cosine_similarity(concept_matrix, concept_matrix)
        af = AffinityPropagation()
        af.fit(S)
        cluster_centers_indices = af.cluster_centers_indices_
        labels = af.labels_
        count = 0
        clusters = defaultdict(list)
        for label in labels:
            clusters[concept_list[cluster_centers_indices[label]][1]].append(concept_list[count])
            count += 1
        category_num = 0
        for key, value in clusters.items():
            category_num += 1
            for concept in value:
                cmd = (
                    "UPDATE %s_concept SET category = %d WHERE \
                    %s_id = %s AND concept_id = %s"
                    % (context, category_num, context, context_id, concept[0])
                )
                db.query_db(cmd)
                print concept[1].encode("utf-8") + " ",
            print ""
        print "----------" + context_chinese.encode("utf-8") + "----------"
Author: a33kuo | Project: language-learner | Lines: 54 | Source: database.py
Note: The sklearn.cluster.AffinityPropagation class examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For redistribution and use, please refer to each project's License. Do not republish without permission.