This article collects typical usage examples of the scipy.cluster.hierarchy.fclusterdata function in Python. If you are wondering exactly what fclusterdata does and how to use it, the hand-picked code examples below may help.
Twenty code examples of fclusterdata are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
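Before the project examples, here is a minimal, self-contained sketch of the basic call (the data values are invented for illustration): fclusterdata takes an (n_observations, n_features) array plus a threshold t, builds a linkage internally, and returns a flat array of 1-based cluster labels.

import numpy as np
from scipy.cluster.hierarchy import fclusterdata

# Two well-separated groups of 2-D points (toy data).
X = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.1, 4.9]])

# Cut the single-linkage tree at Euclidean distance 1.0.
labels = fclusterdata(X, t=1.0, criterion='distance', metric='euclidean', method='single')
print(labels)  # e.g. [1 1 2 2] -- one label per row of X, starting at 1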
Example 1: hierarchicalClusteringScipy

def hierarchicalClusteringScipy(self, vectorLayer, attributesList, normalize, clusterThreshold, linkageMethod, criterion, metric, depth, max_clust, outputFieldName):
    import scipy.cluster.hierarchy as hcluster
    from numpy import array
    fullObjectsList = []
    features = vectorLayer.getFeatures()
    for feature in features:
        fullObjectsList.append([])
        for attribute in attributesList:
            if feature[attribute[0]]:
                fullObjectsList[len(fullObjectsList) - 1].append(feature[attribute[0]])
            else:
                fullObjectsList[len(fullObjectsList) - 1].append(0)
    # NORMALIZING
    if normalize:
        i = 0
        maxValues = []
        while i < len(attributesList):
            maxValues.append(max(abs(item[i]) for item in fullObjectsList))
            i += 1
        j = 0
        while j < len(fullObjectsList):
            i = 0
            while i < len(fullObjectsList[j]):
                fullObjectsList[j][i] = (fullObjectsList[j][i] * 1.0) / (maxValues[i] * 1.0)
                i += 1
            j += 1
    data = array(fullObjectsList)
    if criterion == 'maxclust':
        clusters = hcluster.fclusterdata(data, t=max_clust, criterion=criterion, method=linkageMethod,
                                         metric=metric, depth=depth)
    else:
        clusters = hcluster.fclusterdata(data, t=clusterThreshold, criterion=criterion, method=linkageMethod,
                                         metric=metric, depth=depth)
    vectorLayerDataProvider = vectorLayer.dataProvider()
    # Create the output field if it does not exist (QgsField and QVariant come from the QGIS/Qt APIs)
    if vectorLayer.fields().indexFromName(outputFieldName) == -1:
        vectorLayerDataProvider.addAttributes([QgsField(outputFieldName, QVariant.Int)])
    vectorLayer.updateFields()
    vectorLayer.startEditing()
    attrIdx = vectorLayer.fields().indexFromName(outputFieldName)
    features = vectorLayer.getFeatures()
    i = 0
    for feature in features:
        vectorLayer.changeAttributeValue(feature.id(), attrIdx, int(clusters[i]))
        i += 1
    vectorLayer.updateFields()
    vectorLayer.commitChanges()

Author: silenteddie | Project: attributeBasedClustering | Lines: 58 | Source file: attribute_based_clustering_dialog.py
Example 2: cluster_peaks_by_lane

def cluster_peaks_by_lane(peak_pos, hdist=8.0, return_sorted=True):
    """Group peaks into lanes by clustering their x-positions.

    :param peak_pos: sequence of peak positions, indexed as [y, x] aka [row, col].
    :param hdist: horizontal distance threshold used to cut the linkage tree.
    :param return_sorted: if True, return an OrderedDict of lanes sorted by mean x-position.
    :return: dict mapping lane_id -> array of peak positions.
    Refs:
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fclusterdata.html
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fcluster.html
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
        https://web.archive.org/web/20100619134310/http://www.plantbio.ohiou.edu/epb/instruct/multivariate/Week7Lectures.PDF
    Linkage methods:
        single linkage - produces "chains"
        complete linkage - produces "spherical" clusters
        intermediate linkage -
    Other clustering methods:
        UPGMA -
        WPGMA -
        UPGMC -
        WPGMC -
        K-means - cluster into exactly K number of clusters
    """
    if hdist is None:
        hdist = 8.0
    hdist = float(hdist)  # ensure float/numeric input (must come after the None check)
    xpos = np.array([[pos[1]] for pos in peak_pos])
    # maybe add a little bit of y-position to the mix?
    # xpos = np.array([[pos[1], pos[0]/100] for pos in peak_pos])
    # fclusterdata(X, t) is for N observations each with M variables:
    lane_clusters = fclusterdata(xpos, t=hdist, criterion='distance', metric='euclidean', depth=2, method='single')
    # lane_clusters = linkage(xpos)  # defaults to 'single', 'euclidean'
    # group lane-clustered peaks: lane_id -> array of peak pos.
    peaks_by_lane = defaultdict(list)
    for lane_id, pos in zip(lane_clusters, peak_pos):
        peaks_by_lane[lane_id].append(list(pos))
    # convert each lane's list of peaks to an ndarray
    for lane_id in peaks_by_lane:
        peaks_by_lane[lane_id] = np.array(peaks_by_lane[lane_id])
    if return_sorted:
        # sort by mean x-position (indexing as [y, x] aka [row, col])
        peaks_by_lane = OrderedDict(sorted(peaks_by_lane.items(), key=lambda kv: kv[1][:, 1].mean()))
    return peaks_by_lane

Author: scholer | Project: gelutils | Lines: 54 | Source file: band_quantification.py
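A hedged usage sketch for cluster_peaks_by_lane (the peak coordinates are invented; the snippet's module is assumed to import numpy as np, fclusterdata, defaultdict and OrderedDict):

peak_pos = [(100, 10), (200, 12), (150, 50), (250, 52)]  # (row, col) peaks in two lanes
lanes = cluster_peaks_by_lane(peak_pos, hdist=8.0)
for lane_id, peaks in lanes.items():
    print(lane_id, peaks.tolist())
# expect two lanes: one with x around 10-12, one with x around 50-52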
Example 3: pcaCode

def pcaCode():
    # Question: PCA descriptors, or PCA final profiles?
    # Principal Component Analysis
    pca = deco.PCA(n_components=10)
    Xp = pca.fit_transform(X)
    # Z = hier.linkage(X)
    Y = hier.fclusterdata(X, 1.15)
    print("Num. Clusters (no PCA): %s" % max(Y))
    Yp = hier.fclusterdata(Xp, 1.15)
    print("Num. Clusters (with PCA): %s" % max(Yp))

Author: akuefler | Project: ASLrecog | Lines: 12 | Source file: process_faces.py
Example 4: cluster_lane_peaks_to_bands

def cluster_lane_peaks_to_bands(lane_peaks, vdist=5.0, img=None):
    vdist = float(vdist)  # ensure float/numeric input
    # Special case: the lane only has a single peak, so there is nothing to cluster.
    if len(lane_peaks) < 2:
        this_lane_bands_peaks = {0: lane_peaks}  # ensure we have a dict of peaks
    else:
        # cluster peaks by row (y-coordinate):
        band_clusters = fclusterdata(lane_peaks, t=vdist, criterion='distance', metric='euclidean', depth=2, method='single')
        # group, method (1) using defaultdict:
        # cannot use dict.fromkeys, because it only takes static default values, not types/functions.
        this_lane_bands_peaks = defaultdict(list)
        for band_id, pos in zip(band_clusters, lane_peaks):
            this_lane_bands_peaks[band_id].append(pos)
        # alternative grouping methods: (2) zip, sort, then groupby
    # convert the list of peaks for each band to an ndarray:
    for band_id in this_lane_bands_peaks:
        this_lane_bands_peaks[band_id] = np.array(this_lane_bands_peaks[band_id])
    return this_lane_bands_peaks

Author: scholer | Project: gelutils | Lines: 26 | Source file: band_quantification.py
Example 5: cluster

def cluster(points, thresh):
    # the x, y, z points must first be separated out
    ndata = [[], [], []]
    npts = (len(points) - 2) / 3
    for j in range(0, npts):
        x = float(points[2 + 3 * j])
        y = float(points[3 + 3 * j])
        z = float(points[4 + 3 * j])
        ndata[0].append(x)
        ndata[1].append(y)
        ndata[2].append(z)
    data = np.asarray(ndata)
    clusterlist = hcluster.fclusterdata(np.transpose(data), thresh, criterion="distance")
    nclusters = findLargest(clusterlist)
    # initialize an array of the right size
    # http://stackoverflow.com/questions/7745562/appending-to-2d-lists-in-python
    clusters = [[] for i in range(nclusters)]
    # assign points to the correct cluster
    for i in range(0, npts):
        clusters[clusterlist[i] - 1].append([ndata[0][i], ndata[1][i], ndata[2][i]])
    return [data, clusterlist, clusters]

Author: jlakowski | Project: dancemechanics | Lines: 26 | Source file: mocaplib.py
Example 6: cluster_qs

def cluster_qs(qs, k=None, threshold=1.5):
    """Cluster q vectors into discrete groups.

    Classifies each of the q vectors into a number of clusters. The number of
    clusters used is decided by the parameters passed:
        * If the k parameter is supplied, the q vectors are grouped into k clusters using kmeans.
        * If only the threshold parameter is supplied, the q vectors are split into groups based on cophenetic distance.

    :param qs: list of q vectors to cluster. Each element should be a numpy array of length three.
    :param k: number of clusters to use (optional).
    :param threshold: cophenetic distance cut-off point for new clusters (optional).
    :returns: tuple (clusters, k)
        Where:
            list -- clusters is a list of cluster indices to which each q belongs
            int -- k is the number of clusters used
    """
    if k is not None:
        centroids = kmeans_plus_plus(qs, k)
        _, clusters = kmeans2(qs, centroids, minit='matrix')
        if len(set(clusters)) != k:
            raise ValueError("Could not group the satellite reflections "
                             "into {} clusters. Please check that you have "
                             "at least {} satellites.".format(k, k))
    else:
        clusters = hcluster.fclusterdata(qs, threshold, criterion="distance")
    return clusters, len(set(clusters))

Author: mantidproject | Project: mantid | Lines: 25 | Source file: fractional_indexing.py
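A hedged usage sketch of the threshold branch (the q vectors are invented; kmeans_plus_plus and kmeans2 are only needed for the k branch):

import numpy as np
qs = np.array([[0.10, 0.0, 0.0], [0.11, 0.0, 0.0],
               [0.0, 0.20, 0.0], [0.0, 0.21, 0.0]])
clusters, k = cluster_qs(qs, threshold=0.05)
print(clusters, k)  # expect two groups, e.g. [1 1 2 2] and k == 2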
Example 7: searchForColorPoints

def searchForColorPoints(im, criteria):
    points = []
    pointColors = []
    hsvIm = cv2.cvtColor(im, cv2.COLOR_BGR2HSV_FULL)
    for i in range(11, im.shape[1] - 11, 10):
        for j in range(11, im.shape[0] - 11, 10):
            b = block(hsvIm, (i, j), 8)
            if b[:, :, 0].std() > 25:
                continue
            color = (b[:, :, 0].mean(), b[:, :, 1].mean(), b[:, :, 2].mean())
            matchedColor = matchColor(color, criteria)
            if matchedColor >= 0:
                points.append((i, j))
                pointColors.append(matchedColor)
    points = np.array(points, np.float16)
    cluster = fclusterdata(points, 10, "distance")
    centroids = []
    for i in range(len(criteria)):
        centroids.append([])
    for i in range(1, cluster.max() + 1):
        b = cluster == i
        c = np.zeros((1, 2), np.int16)
        for p in points[b.argsort()[len(b) - sum(b):]]:
            c = c + p / sum(b)
        centroids[pointColors[b.argsort()[len(b) - sum(b)]]].append(c[0])
    return centroids

Author: martianboy | Project: Questionnaire | Lines: 33 | Source file: colorImage.py
Example 8: identify

def identify(image, colors):
    global pixelCounters
    num_colors = 1
    data = numpy.zeros((1000, 2))  # buffer for sampled pixel coordinates
    n = 0
    a = 0
    for x in xrange(0, image.shape[0]):
        for y in xrange(0, image.shape[1]):
            a += 1
            if (a & 0b1111111) != 0:  # only sample every 128th pixel
                continue
            for i in range(num_colors):
                hue = image[x, y, 0]
                sat = image[x, y, 1]
                val = image[x, y, 2]
                if hue >= 0 and hue < 10 and sat > 150 and val > 50:
                    data[n, 0] = x
                    data[n, 1] = y
                    n += 1
    if n < 2:
        return (None, None)
    t = 30
    data = data[0:n, :]
    clusters = hcluster.fclusterdata(data, t, criterion="distance")
    return (data, clusters)

Author: jwbowler | Project: maslab-staff-2013 | Lines: 33 | Source file: opencvhelloworld.py
Example 9: magic_fragmentation

def magic_fragmentation(self):
    """Takes the atom objects and tries to separate two fragments with a
    distance-based hierarchical clustering. Always check the result before
    relying on these fragmentations!"""
    # hardcoded number of fragments, for now always 2!
    nr_frags = 2
    coordinates = self.dimer.get_positions()
    # centroids, _ = kmeans(coordinates, nr_frags)
    # assign indices to clusters (bitmask!)
    cluster_indices = fclusterdata(coordinates, self.magic_cutoff, criterion="distance")
    # compress the whole coordinates to fragments
    # coords_frag1 = np.array(list(itertools.compress(coordinates.tolist(), cluster_indices)))
    # invert the bitmask
    # cluster_indices = cluster_indices ^ 1
    # coords_frag2 = np.array(list(itertools.compress(coordinates.tolist(), cluster_indices)))
    self.frag1 = deepcopy(self.dimer)
    self.frag2 = deepcopy(self.dimer)
    # Delete the atoms of the other fragment from each copy with list comprehensions.
    del self.frag1[[atom.index for pos, atom in enumerate(self.frag1) if cluster_indices[pos] != 1]]
    del self.frag2[[atom.index for pos, atom in enumerate(self.frag2) if cluster_indices[pos] != 2]]
    print("Finished automatic fragmentation, please remember to check the result!")
    self.__check_fragments__()
    self.__set_charges__()
    self.__get_frontiers__()

Author: schober-ch | Project: fodft_tools | Lines: 28 | Source file: fodft_tools.py
Example 10: _agglomerative_cluster_encounters

def _agglomerative_cluster_encounters(X_data, seconds_thresh):
    """ Agglomerative encounter clustering algorithm
    Input: Length-N array of data to cluster
    Output: Length-N array of cluster indexes
    """
    label_arr = hier.fclusterdata(X_data, seconds_thresh, criterion='distance')
    return label_arr

Author: byteyoo | Project: ibeis | Lines: 7 | Source file: preproc_encounter.py
Example 11: calc_best_result

def calc_best_result(coords, threshold=0.01):
    """
    Calculates the most likely result based on clustering of the provided coordinates.

    We assume that the biggest cluster represents the value most of the best agents
    have agreed on. The method uses SciPy's hierarchy.fclusterdata function.

    Parameters
    ----------
    coords : list of two-element tuples
        coordinates to guess the result from
    threshold : float
        see the documentation for scipy.cluster.hierarchy.fclusterdata

    Returns
    -------
    x : float
        x coordinate of the result
    y : float
        y coordinate of the result
    """
    coords = np.array(coords)
    t = coords[:, 0].std()
    idx = hierarchy.fclusterdata(coords, threshold * t)
    best = int(stats.mode(idx)[0][0])
    ans = np.array([coords[i] for i in range(len(coords)) if idx[i] == best])
    return namedtuple('Ans', 'x, y')(ans[:, 0].mean(), ans[:, 1].mean())

Author: tomachalek | Project: marg | Lines: 27 | Source file: marg.py
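A hedged usage sketch (the agent guesses are invented; the module is assumed to import numpy as np, scipy.stats as stats, scipy.cluster.hierarchy as hierarchy, and namedtuple):

guesses = [(1.00, 1.00), (1.02, 0.98), (0.99, 1.01), (5.0, 5.0)]  # three agents agree, one outlier
result = calc_best_result(guesses, threshold=0.01)
print(result.x, result.y)  # expected to land near (1.0, 1.0) when the tight group forms the modal cluster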
Example 12: merge_paths

def merge_paths(rides):
    waypoints = list(itertools.chain(*[ride.route.waypoints for ride in rides]))
    waypoints = sorted(waypoints, key=lambda x: x.country)
    logger.info("Merging {} rides with {} total waypoints".format(len(rides), len(waypoints)))
    for country, group in itertools.groupby(waypoints, key=lambda x: x.country):
        waypoints = list(group)
        country_lat_lng_points = [(x.lat, x.lng) for x in waypoints]
        country_xyz_points = [latlng_to_xyz(lat, lng) for lat, lng in country_lat_lng_points]
        logger.debug("Processing {} with {} waypoints".format(country, len(country_xyz_points)))
        wh = whiten(country_xyz_points)
        k_guess = max(1, len(country_xyz_points) / BEARABLE_CLUSTER_SIZE)
        k_centroids = kmeans(wh, k_guess)[0]
        k_labels = vq(wh, k_centroids)[0]
        k_labeled = sorted(zip(country_xyz_points, country_lat_lng_points, waypoints, k_labels), key=lambda x: x[3])
        logger.debug("Got {} miniclusters".format(len(k_centroids)))
        for key, gr in itertools.groupby(k_labeled, key=lambda x: x[3]):
            gr = list(gr)
            k_waypoints = [x[2] for x in gr]
            k_lat_lng_points = [x[1] for x in gr]
            k_xyz_points = [x[0] for x in gr]
            logger.debug("Running {} minicluster with {} waypoints".format(key, len(k_waypoints)))
            cluster_labels = fclusterdata(np.array(k_xyz_points), 0.2, criterion="distance", metric="euclidean")
            centroids = cluster_centroids(zip(k_lat_lng_points, cluster_labels))
            logger.debug("Got {} hierarchical clusters".format(len(set(cluster_labels))))
            for i in range(0, len(k_waypoints)):
                new_lat, new_lng = centroids[cluster_labels[i] - 1]
                k_waypoints[i].lat = new_lat
                k_waypoints[i].lng = new_lng

Author: agentcooper | Project: cabviewExpress | Lines: 34 | Source file: merger.py
Example 13: compute_encounters

def compute_encounters(hs, back, seconds_thresh=15):
    '''
    Clusters encounters together (by time, not space).
    An encounter is a meeting, localized in time and space, between a camera
    and a group of animals. Animals are identified within each encounter.
    '''
    if 'seconds_thresh' not in vars():
        seconds_thresh = 15
    gx_list = hs.get_valid_gxs()
    datetime_list = hs.gx2_exif(gx_list, tag='DateTime')
    unixtime_list = [io.exiftime_to_unixtime(datetime_str) for datetime_str in datetime_list]
    unixtime_list = np.array(unixtime_list)
    X = np.vstack([unixtime_list, np.zeros(len(unixtime_list))]).T
    print('[scripts] clustering')
    # Build a mapping from clusterxs to member gxs
    gx2_clusterid = fclusterdata(X, seconds_thresh, criterion='distance')
    clusterx2_gxs = [[] for _ in xrange(gx2_clusterid.max())]
    for gx, clusterx in enumerate(gx2_clusterid):
        clusterx2_gxs[clusterx - 1].append(gx)  # IDs are 1-based
    clusterx2_nGxs = np.array(map(len, clusterx2_gxs))
    print('cluster size stats: %s' % helpers.printable_mystats(clusterx2_nGxs))
    # Change IDs such that a higher number = more gxs
    gx2_ex = [None] * len(gx2_clusterid)
    gx2_eid = [None] * len(gx2_clusterid)
    ex2_clusterx = clusterx2_nGxs.argsort()
    ex2_gxs = [None] * len(ex2_clusterx)
    for ex in xrange(len(ex2_clusterx)):
        clusterx = ex2_clusterx[ex]
        gxs = clusterx2_gxs[clusterx]
        ex2_gxs[ex] = gxs
        for gx in gxs:
            nGx = len(gxs)
            USE_STRING_ID = True
            if USE_STRING_ID:
                # String ID
                eid = 'ex=%r_nGxs=%d' % (ex, nGx)
            else:
                # Float ID
                eid = ex + (nGx / 10 ** np.ceil(np.log(nGx) / np.log(10)))
            gx2_eid[gx] = eid
            gx2_ex[gx] = ex
    hs.tables.gx2_ex = np.array(gx2_ex)
    hs.tables.gx2_eid = np.array(gx2_eid)
    # Give info to GUI
    extra_cols = {'eid': lambda gx_list: [gx2_eid[gx] for gx in iter(gx_list)]}
    back.append_header('gxs', 'eid')
    back.populate_image_table(extra_cols=extra_cols)
    return locals()

Author: Erotemic | Project: hotspotter | Lines: 58 | Source file: scripts.py
Example 14: clust

def clust(fp_list):
    np_fps = []
    for fp in fp_list:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fps.append(arr)
    thresh = 6.5
    clusters = hcluster.fclusterdata(np_fps, thresh, criterion="distance")
    return clusters

Author: dkdeconti | Project: PAINS-train | Lines: 9 | Source file: hclust_PAINS.py
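A hedged usage sketch, assuming RDKit is installed and the snippet's module imports numpy and DataStructs (the SMILES strings are invented):

from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem

smiles = ["CCO", "CCN", "c1ccccc1O", "c1ccccc1N"]
fps = [AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(s), 2, nBits=1024)
       for s in smiles]
labels = clust(fps)
print(labels)  # one cluster label per fingerprint, labels start at 1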
Example 15: clusterization

def clusterization(data, clastersNum=2):
    import scipy.cluster.hierarchy as hcluster
    data = np.array(data)
    # clusters = hcluster.fclusterdata(np.transpose(data), 3, criterion='maxclust', metric='euclidean', depth=1)
    # clusters = hcluster.fclusterdata(data, 2, criterion='maxclust', metric='euclidean', depth=1)
    thresh = 1.5
    clusters = hcluster.fclusterdata(data, thresh, criterion="distance")
    return np.array(clusters)

Author: forregg | Project: cbTester | Lines: 10 | Source file: trade.py
Example 16: clusterAndPlotAverages

def clusterAndPlotAverages(distmatrix, labeldates, data, noOfClusters=0, cutoff=0, clustersize=0):
    '''Runs hierarchical clustering on the given distance matrix using UPGMA
    and plots each cluster's average day. Set either noOfClusters or cutoff as
    a keyword argument, and specify clustersize to plot only clusters with a
    minimum size.'''
    if noOfClusters == 0 and cutoff == 0:
        raise ValueError('Call clusterAndPlotAverages specifying either cutoff or noOfClusters')
    if cutoff == 0:  # method="average" == UPGMA
        clusters = hierarchy.fclusterdata(distmatrix, noOfClusters, criterion='maxclust', metric='euclidean', method='average')
    if noOfClusters == 0:
        clusters = hierarchy.fclusterdata(distmatrix, cutoff, criterion='distance', metric='euclidean', method='average')
    groupedDays = []
    for i in range(max(clusters)):
        groupedDays.append([])
    for i in range(len(clusters)):
        groupedDays[clusters[i] - 1].append(i)
    for group in groupedDays:
        if len(group) > clustersize:
            averageday(data, group, labeldates)

Author: estofado | Project: glucoluster | Lines: 20 | Source file: main.py
Example 17: add_band_product_id_annotation

def add_band_product_id_annotation(df, vdist=5.0):
    # maybe use fcluster instead of fclusterdata? - no: fcluster(Z) takes a pre-calculated linkage matrix Z.
    # manually calculate Z?
    product_clusters_ids = fclusterdata(
        df.ypos[:, np.newaxis], t=vdist,
        criterion='distance', metric='euclidean', depth=2, method='single'
    )
    df['product_id'] = product_clusters_ids

Author: scholer | Project: gelutils | Lines: 11 | Source file: band_quantification.py
Example 18: Counting_Clusters

def Counting_Clusters(col, row, tot):
    # sqrt, numpy and fclusterdata are assumed to come from module-level imports in the source
    sizes = []
    pixels = [[col[i], row[i]] for i, x in enumerate(col)]
    if len(pixels) > 1:
        results = fclusterdata(pixels, sqrt(2.), criterion="distance", method="single")
        y = numpy.bincount(results)
        ii = numpy.nonzero(y)[0]
        j = 0
        previous = 0
        for result, hit, TOT in zip(results, pixels, tot):
            i = 0
            if y[result] > 1:
                if previous != result:
                    while i <= y[result] - 1:
                        if j < len(results):
                            j += 1
                        i += 1
                    if j == len(results):
                        break
                    else:
                        sizes.append(y[result])
                        previous = result
            if y[result] == 1:
                if j < len(results):
                    sizes.append(y[result])
                    j += 1
    else:
        oneHitClusters = [[pixels[0][0], pixels[0][1], tot[0]]]
    return sizes

Author: StevenGreen1 | Project: TimepixCalibration | Lines: 53 | Source file: DataConversion.py
Example 19: run_cosine_clustering

def run_cosine_clustering(self, method="greedy", th_clustering=0.55):
    if not hasattr(self, "topic_word"):
        raise ValueError("Thresholding not done yet.")
    # Swap the NaNs for zeros. Turn into a numpy array and grab the parent names.
    data = self.docdf.fillna(0)
    data_array = np.array(data)
    peak_names = list(data.columns.values)
    # Create a matrix with the normalised values (each parent ion has magnitude 1).
    l = np.sqrt((data_array ** 2).sum(axis=0))
    norm_data = np.divide(data_array, l)
    if method.lower() == "hierarchical":  # scipy hierarchical clustering
        clustering = hierarchy.fclusterdata(
            norm_data.transpose(), th_clustering, criterion="distance", metric="euclidean", method="single"
        )
    elif method.lower() == "greedy":  # greedy cosine clustering
        cosine_sim = np.dot(norm_data.transpose(), norm_data)
        finished = False
        total_intensity = data_array.sum(axis=0)
        n_features, n_parents = data_array.shape
        clustering = np.zeros((n_parents,), int)
        current_cluster = 1
        thresh = th_clustering
        count = 0
        while not finished:
            # Find the parent with the max intensity left
            current = np.argmax(total_intensity)
            total_intensity[current] = 0.0
            count += 1
            clustering[current] = current_cluster
            # Find other parents with cosine similarity over the threshold
            friends = np.where((cosine_sim[current, :] > thresh) * (total_intensity > 0.0))[0]
            clustering[friends] = current_cluster
            total_intensity[friends] = 0.0
            # When points are clustered, their total_intensity is set to zero.
            # If there is nothing left above zero, quit.
            left = np.where(total_intensity > 0.0)[0]
            if len(left) == 0:
                finished = True
            current_cluster += 1
    else:
        raise ValueError("Unknown clustering method")
    return peak_names, clustering

Author: sdrogers | Project: MS2LDA | Lines: 52 | Source file: lda_for_fragments.py
Example 20: generate_linkage_clusters

def generate_linkage_clusters(bags_of_words_file, t=0., method='single', metric='braycurtis'):
    """Performs clustering on the bags of words listed in the input file.

    This function reads a list of bags of words and clusters them hierarchically,
    without using the metrics defined in this module. The bags of words are
    listed one per line in the input file.
    """
    # first of all we need to read the vocabulary
    # we also count the lines
    voc = set()
    n_of_bags = 0
    bags_of_words_file.seek(0)
    for line in bags_of_words_file:
        voc.update(line.split())
        n_of_bags += 1
    # this is the space inside which the multiset vectors live
    space = sorted(voc)
    # we create the numpy array that will store the bags vectors
    data = np.zeros((n_of_bags, len(space)))
    # now we store the bags as vectors in memory
    i = 0
    bags_of_words_file.seek(0)
    for line in bags_of_words_file:
        m = Multiset(line.split())
        data[i] = m.to_vector(space)
        i += 1
    # now we can perform the clustering
    clusters = fclusterdata(data, t, metric=metric, method=method)
    # and we return a dict of clusters
    clusters_dict = dict()
    bags_of_words_file.seek(0)
    i = 0
    for line in bags_of_words_file:
        line = line.strip()
        c = clusters[i]
        v = clusters_dict.get(c, [])
        v.append(line)
        clusters_dict[c] = v
        i += 1
    return clusters_dict

Author: jhlau | Project: acceptability_prediction | Lines: 52 | Source file: multiset.py
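A hedged usage sketch (Multiset comes from the same module; the bags of words here are invented):

import io
bags = io.StringIO("apple apple banana\napple banana banana\ncar car wheel\n")
clusters_dict = generate_linkage_clusters(bags, t=0.5, method='single', metric='braycurtis')
print(clusters_dict)  # maps cluster id -> the original lines in that cluster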
Note: the scipy.cluster.hierarchy.fclusterdata examples above were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs and similar platforms. The snippets were selected from projects contributed by many developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.