This article collects typical usage examples of the KMeans class from pyspark.mllib.clustering in Python. If you have been wondering what the KMeans class does, how to call it, or what working code that uses it looks like, the curated examples below should help.
Twenty code examples of the KMeans class are shown, sorted by popularity by default. You can upvote the examples you find useful; your votes help our system recommend better Python code examples.
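Before turning to the examples, here is a minimal, self-contained sketch of the RDD-based API that most of them build on. The toy data points and the choice of k=2 are illustrative assumptions rather than code taken from any of the projects cited below. Note also that several examples pass a runs= argument to KMeans.train; that parameter was deprecated in Spark 1.6 and removed in Spark 2.0, so it may need to be dropped on newer versions.

# Minimal usage sketch of pyspark.mllib.clustering.KMeans (toy data, assumed values)
import numpy as np
from pyspark import SparkContext
from pyspark.mllib.clustering import KMeans

sc = SparkContext("local[*]", "KMeansMinimalExample")
# Two obvious groups of 2-D points
points = sc.parallelize([
    np.array([0.0, 0.0]), np.array([0.1, 0.1]),
    np.array([9.0, 9.0]), np.array([9.1, 9.1]),
])
model = KMeans.train(points, 2, maxIterations=10, initializationMode="k-means||")
print(model.clusterCenters)          # one NumPy array per cluster
print(model.predict([0.05, 0.05]))   # cluster index for a new point
sc.stop()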
Example 1: initializeModels
def initializeModels(self):
    try:
        if self.kmeansDF:
            logger.info("Already loaded this DataFrame")
            pass
    except AttributeError:
        self.kmeansDF = None

    commandsDF = self.bashDF.map(lambda row: Row(date=row.date,
                                                 source=row.source,
                                                 username=row.username,
                                                 exec_as=row.exec_as,
                                                 srcip=row.srcip,
                                                 command=row.command.split(" "))).toDF()
    commandsDF.cache()

    word2Vec = Word2Vec(vectorSize=100, minCount=1, inputCol="command", outputCol="features")
    w2model = word2Vec.fit(commandsDF)
    resultDF = w2model.transform(commandsDF)
    resultDF.cache()

    kmeans = KMeans(k=650, seed=42, featuresCol="features", predictionCol="prediction",
                    maxIter=10, initSteps=3)
    kmodel = kmeans.fit(resultDF)

    kmeansDF = kmodel.transform(resultDF)
    kmeansDF.cache()
    kmeansDF.coalesce(1).write.parquet('/user/jleaniz/ml/kmeans', mode='append')

    outliers = kmeansDF.groupBy("prediction").count().filter('count < 10') \
        .withColumnRenamed("prediction", "cluster")
    self.outlierCmds = outliers.join(kmeansDF, kmeansDF.prediction == outliers.cluster)
Author: jleaniz, Project: bdsa, Lines: 31, Source: engine.py
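Although this article is about pyspark.mllib.clustering.KMeans, Example 1 actually uses the DataFrame-based pyspark.ml API (KMeans with featuresCol/maxIter and Word2Vec with inputCol/outputCol). The imports it presumably relies on, reconstructed from the class names used rather than copied from the original file, are:

# Likely imports for this snippet (pyspark.ml, not pyspark.mllib; inferred)
from pyspark.sql import Row
from pyspark.ml.feature import Word2Vec
from pyspark.ml.clustering import KMeans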
Example 2: test_kmeans_deterministic
def test_kmeans_deterministic(self):
    from pyspark.mllib.clustering import KMeans
    X = range(0, 100, 10)
    Y = range(0, 100, 10)
    data = [[x, y] for x, y in zip(X, Y)]
    clusters1 = KMeans.train(self.sc.parallelize(data), 3, initializationMode="k-means||", seed=42)
    clusters2 = KMeans.train(self.sc.parallelize(data), 3, initializationMode="k-means||", seed=42)
    centers1 = clusters1.centers
    centers2 = clusters2.centers
    for c1, c2 in zip(centers1, centers2):
        # TODO: Allow small numeric difference.
        self.assertTrue(array_equal(c1, c2))
Author: vidur89, Project: spark, Lines: 13, Source: tests.py
Example 3: train_subquantizers
def train_subquantizers(sc, split_vecs, M, subquantizer_clusters, model, seed=None):
    """
    Project each data point into its local space and compute subquantizers by clustering
    each fine split of the locally projected data.
    """
    b = sc.broadcast(model)

    def project_local(x):
        x = np.concatenate(x)
        coarse = b.value.predict_coarse(x)
        return b.value.project(x, coarse)

    projected = split_vecs.map(project_local)

    # Split the vectors into the subvectors
    split_vecs = projected.map(lambda x: np.split(x, M))
    split_vecs.cache()

    subquantizers = []
    for split in xrange(M):
        data = split_vecs.map(lambda x: x[split])
        data.cache()
        sub = KMeans.train(data, subquantizer_clusters, initializationMode='random',
                           maxIterations=10, seed=seed)
        data.unpersist()
        subquantizers.append(np.vstack(sub.clusterCenters))

    return (subquantizers[:len(subquantizers) / 2], subquantizers[len(subquantizers) / 2:])
Author: svebk, Project: DeepSentiBank_memex, Lines: 27, Source: train_model_wpca.py
Example 4: test_kmeans
def test_kmeans(self):
    from pyspark.mllib.clustering import KMeans
    data = [[0, 1.1], [0, 1.2], [1.1, 0], [1.2, 0]]
    clusters = KMeans.train(self.sc.parallelize(data), 2, initializationMode="k-means||")
    self.assertEquals(clusters.predict(data[0]), clusters.predict(data[1]))
    self.assertEquals(clusters.predict(data[2]), clusters.predict(data[3]))
Author: vidur89, Project: spark, Lines: 7, Source: tests.py
Example 5: main
def main():
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    logger.info('Loading pickled noun to vector dictionary')
    # Load noun to vector dictionary
    with open(NOUN_TO_VECT_DICT_FILE_LOC, 'rb') as pickled:
        noun_to_vect_dict = pickle.load(pickled)
    # Create vector array from mapping
    vectors = np.array(noun_to_vect_dict.values())
    max_k = int(sqrt(len(vectors) / 2.0))
    # Define search space for k
    numbers_of_clusters = reversed(range(MIN_K, max_k))
    # For each k
    for i, k in enumerate(numbers_of_clusters):
        # Initialize Spark Context
        sc = ps.SparkContext()
        # Load data
        data = sc.parallelize(vectors, 1024)
        logger.info('Trial %i of %i, %i clusters', (i + 1), max_k - 1, k)
        # Calculate cluster
        kmeans_model = KMeans.train(data, k, maxIterations=10, runs=10,
                                    initializationMode='k-means||')
        logger.info('Calculating WSSSE')
        # Calculate WSSSE
        WSSSE = data.map(lambda point: error(kmeans_model, point)) \
                    .reduce(lambda x, y: x + y)
        logger.info('Writing WSSSE')
        # Write k and WSSSE
        with open(path.join(OUT_FILES_LOC, 'elbow_data.txt'), 'a') as elbow_data:
            elbow_data.write(str(k) + '\t' + str(WSSSE) + '\n')
        sc.stop()
Author: gushecht, Project: noungroups, Lines: 35, Source: spark_clustering.py
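The error() helper called above is not shown in this snippet; it is defined elsewhere in the source project. A rough, hypothetical reconstruction, based on the one-argument variants defined inline in Examples 11 and 13 below, would be the Euclidean distance from a point to the centre of its assigned cluster (the exact helper in the original repository may differ):

# Hypothetical reconstruction of the error() helper used in this example
import numpy as np

def error(model, point):
    # Distance from the point to the centre of the cluster it is assigned to
    center = model.clusterCenters[model.predict(point)]
    return np.sqrt(np.sum((np.asarray(point) - np.asarray(center)) ** 2))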
Example 6: main
def main(sc):
    stopset = set(stopwords.words('english'))
    tweets = sc.textFile('hdfs:/adi/sample.txt')
    words = tweets.map(lambda word: word.split(" "))
    wordArr = []
    for wArr in words.collect():
        tempArr = []
        for w in wArr:
            if w not in stopset:
                tempArr.append(w)
        wordArr.append(tempArr)
    # Open a file
    # print wordArr
    # tokens = sc.textFile("hdfs:/adi/tokens1.txt")

    # Load documents (one per line).
    documents = sc.textFile("hdfs:/adi/tokens1.txt").map(lambda line: line.split(" "))
    numDims = 100000
    hashingTF = HashingTF(numDims)
    tf = hashingTF.transform(documents)
    tf.cache()
    idf = IDF().fit(tf)
    tfidf = idf.transform(tf)
    tfidf.count()
    model = KMeans.train(tfidf, 5)
    model.save(sc, "tweetModel1")
    print("Final centers: " + str(model.clusterCenters))
    # print("Total Cost: " + str(model.computeCost(data)))
    sc.stop()
Author: aditcoding, Project: zfs, Lines: 31, Source: ml.py
Example 7: fit
def fit(self, Z):
    """Compute k-means clustering.

    Parameters
    ----------
    Z : ArrayRDD or DictRDD containing array-like or sparse matrix
        Train data.

    Returns
    -------
    self
    """
    X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
    check_rdd(X, (np.ndarray, sp.spmatrix))
    if self.init == 'k-means||':
        self._mllib_model = MLlibKMeans.train(
            X.unblock(),
            self.n_clusters,
            maxIterations=self.max_iter,
            initializationMode="k-means||")
        self.cluster_centers_ = self._mllib_model.centers
        # return the fitted estimator, as promised in the docstring
        return self
    else:
        models = X.map(lambda X: super(SparkKMeans, self).fit(X))
        models = models.map(lambda model: model.cluster_centers_).collect()
        return super(SparkKMeans, self).fit(np.concatenate(models))
Author: KartikPadmanabhan, Project: sparkit-learn, Lines: 25, Source: k_means_.py
Example 8: KMeansModel
def KMeansModel(dataPath, label, k, character, master):
    sc = SparkContext(master)
    data = sc.textFile(dataPath).map(lambda line: line.replace(character, ','))

    if label == 0:
        label_sum = data.map(lambda line: line.split(',')) \
                        .map(lambda data: (float(data[0]), 1)) \
                        .reduceByKey(add).collect()
        label = data.map(lambda line: line.split(',')) \
                    .map(lambda data: float(data[0])).collect()
        train_data = data.map(lambda line: line.split(',')) \
                         .map(lambda x: map(lambda part: float(part), x[1:len(x)]))
    else:
        label_sum = data.map(lambda line: line.split(',')) \
                        .map(lambda data: (float(data[-1]), 1)) \
                        .reduceByKey(add).collect()
        label = data.map(lambda line: line.split(',')) \
                    .map(lambda data: float(data[-1])).collect()
        train_data = data.map(lambda line: line.split(',')) \
                         .map(lambda x: map(lambda part: float(part) if part is not None else '', x[:len(x) - 1]))

    model = km.train(train_data, k)
    predict_data = train_data.collect()
    train = len(predict_data)
    acc = 0
    for i in range(len(label_sum)):
        ksum = np.zeros(k, dtype=int)
        cur_label = label_sum[i][0]
        for j in range(train):
            if label[j] == cur_label:
                ksum[model.predict(predict_data[j])] += 1
        acc += max(ksum)

    string = "KMeans Result: \n"
    center = model.centers
    for i in range(k):
        cur = str(i) + ":" + str(center[i]) + '\n'
        string += cur
    string = string + "Acc: " + str((float(acc) / train) * 100) + "%"
    sc.stop()
    return string
Author: Tomlong, Project: MLlib-UI, Lines: 33, Source: mlKmeans.py
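This snippet relies on several names that are not shown: km, add, np, and SparkContext. A plausible set of imports, inferred from how those names are used rather than taken from the original file, would be:

# Imports this snippet appears to rely on (inferred, not shown in the original)
import numpy as np
from operator import add
from pyspark import SparkContext
from pyspark.mllib.clustering import KMeans as km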
Example 9: main
def main():
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    # Load in pickled noun to vector dictionary
    logger.info('Loading pickled noun to vector dictionary')
    # Load noun to vector dictionary
    with open(NOUN_TO_VECT_DICT_FILE_LOC, 'rb') as f:
        noun_to_vect_dict = pickle.load(f)
    # Create vectors array
    vectors = noun_to_vect_dict.values()
    # Initialize Spark Context
    sc = ps.SparkContext('local[*]')
    # Load data
    data = sc.parallelize(vectors, 1024)
    # Create and fit a KMeans model to the data
    logger.info('Fitting KMeans model')
    kmeans_model = KMeans.train(data, N_CLUSTERS, maxIterations=10, runs=10,
                                initializationMode='k-means||')
    # Create a list of labels corresponding to vectors
    logger.info('Labeling vectors')
    labels = [kmeans_model.predict(vector) for vector in vectors]
    # Write to text file
    logger.info('Writing labels to file')
    with open(path.join(OUT_FILE_LOC, 'labels.txt'), 'w') as f:
        for label in labels:
            f.write(str(label) + '\n')
Author: gushecht, Project: noungroups, Lines: 30, Source: spark_cluster_labels.py
Example 10: kMeans
def kMeans(vecs, clusterNum):
    clusters = KMeans.train(vecs, clusterNum, maxIterations=10, runs=10, initializationMode="random")
    if pv.outputDebugMsg:
        Utils.logMessage("\nKmean cluster finished")
    return clusters
Author: yfliu87, Project: VestAccountDetection, Lines: 7, Source: ClusterModule.py
Example 11: clusterKMeanSpark
def clusterKMeanSpark(matrix, k):
    m = transformInRealMatrix(matrix)
    sc = SparkContext(appName="Jsonizer: Remove stop words")
    parsedData = sc.parallelize(m)
    y = []
    x = []
    clustersControl = range(k, k + 1)
    for kc in clustersControl:
        clusters = KMeans.train(parsedData, kc, maxIterations=50000, runs=200,
                                initializationMode="k-means||", epsilon=0.0001)
        clu = []

        def error(point, clust):
            center = clust.centers[clust.predict(point)]
            return sqrt(sum([x ** 2 for x in (point - center)]))

        WSSSE = parsedData.map(lambda point: error(point, clusters)).reduce(lambda x, y: x + y)
        for n in m:
            clu += [clusters.predict(np.array(n))]
        x += [kc]
        y += [WSSSE]
        # print(kc, WSSSE)

    # plt.plot(x, y)
    # plt.ylabel('some numbers')
    # plt.show()
    ret = [[] for i in range(0, max(clu) + 1)]
    for i in range(0, len(clu)):
        ret[clu[i]] += [i]
    sc.stop()
    return ret
Author: luca-zamboni, Project: Big-Data, Lines: 34, Source: aggregator.py
Example 12: train_model
def train_model(self, dataframe, k, model_name):
    '''
    Train a k-means model on the given data.
    :param dataframe: DataFrame whose columns are all used as training features
    :param k: number of clusters
    :param model_name: name under which the trained model is saved
    :return: None
    '''
    data = self.prepare_data(dataframe)
    # train to get model
    model = KMeans.train(data, k)
    # create model saving path
    path = self.base + model_name
    # try to delete the old model if it exists
    try:
        import subprocess
        subprocess.call(["hadoop", "fs", "-rm", "-f", path])
    except:
        pass
    # save new model on hdfs
    model.save(self.sc, path)
    # print all cluster centers of the model
    for c in model.clusterCenters:
        l = []
        for i in c:
            i = decimal.Decimal(i).quantize(decimal.Decimal('0.01'))
            l.append(float(i))
        print(l)
Author: summer-apple, Project: spark, Lines: 32, Source: kmeans_analyse.py
Example 13: kmeans
def kmeans(iterations, theRdd):
    def error(point):
        center = clusters.centers[clusters.predict(point)]
        return sqrt(sum([x ** 2 for x in (point - center)]))

    clusters = KMeans.train(theRdd, iterations, maxIterations=10,
                            runs=10, initializationMode="random")
    WSSSE = theRdd.map(lambda point: error(point)).reduce(lambda x, y: x + y)
    return WSSSE, clusters
Author: 4sp1r3, Project: monad, Lines: 8, Source: TravelRecommendation_version_1.2.py
Example 14: spark_KMeans
def spark_KMeans(train_data):
    maxIterations = 10
    runs = 20
    numClusters = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
    errors = []
    for k in numClusters:
        model = KMeans.train(train_data, k, maxIterations=maxIterations, runs=runs,
                             initializationMode='random', seed=10, initializationSteps=5,
                             epsilon=1e-4)
        WSSSE = model.computeCost(train_data)
        errors.append(WSSSE)

    plt.plot(numClusters, errors, 'ro')
    plt.xlabel(r'k')
    plt.ylabel(r'inertia')
    plt.title(r'inertia v.s. k')
    plt.savefig('kmeans_cross_validation.png')

    bestModel = KMeans.train(train_data, 6, maxIterations=maxIterations, runs=runs,
                             initializationMode='random', seed=10, initializationSteps=5,
                             epsilon=1e-4)
    return bestModel
Author: DataLAUSDEclassProject, Project: spark, Lines: 18, Source: spark_cluster.py
Example 15: main
def main(arg1, arg2):
    sc = SparkContext(appName="KMeans")
    lines = sc.textFile(arg1)
    data = lines.map(parseVector)
    k = int(arg2)
    model = KMeans.train(data, k)
    print("Final centers: " + str(model.clusterCenters))
    print("Total Cost: " + str(model.computeCost(data)))
    sc.stop()
Author: Riuchando, Project: Spark, Lines: 9, Source: kmeansSpark.py
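The parseVector function used here is defined elsewhere in the project. In the stock Spark k-means examples it typically splits a whitespace-separated line into a NumPy array of floats; a sketch along those lines (an assumption, since the original definition is not shown) is:

# Assumed shape of the parseVector helper (not shown in the original file)
import numpy as np

def parseVector(line):
    # One whitespace-separated vector per input line
    return np.array([float(x) for x in line.split(' ')])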
Example 16: test_clustering
def test_clustering(self):
    from pyspark.mllib.clustering import KMeans
    data = [
        self.scipy_matrix(3, {1: 1.0}),
        self.scipy_matrix(3, {1: 1.1}),
        self.scipy_matrix(3, {2: 1.0}),
        self.scipy_matrix(3, {2: 1.1})
    ]
    clusters = KMeans.train(self.sc.parallelize(data), 2, initializationMode="k-means||")
    self.assertEqual(clusters.predict(data[0]), clusters.predict(data[1]))
    self.assertEqual(clusters.predict(data[2]), clusters.predict(data[3]))
Author: drewrobb, Project: spark, Lines: 11, Source: test_linalg.py
Example 17: k_means
def k_means(loadTrainingFilePath, sc):
    # Load and parse the data
    loadTrainingFilePath = "../data/kmeans_data.txt"
    data = sc.textFile(loadTrainingFilePath)
    parsedData = data.map(lambda line: array([float(x) for x in line.split(' ')]))

    # Build the model (cluster the data)
    clusters = KMeans.train(parsedData, 3, maxIterations=10, runs=30, initializationMode="random")
    WSSSE = parsedData.map(lambda point: error(point)).reduce(lambda x, y: x + y)
    print("Within Set Sum of Squared Error = " + str(WSSSE))
Author: honeycombcmu, Project: SparkService, Lines: 11, Source: k_means.py
Example 18: cluster_data
def cluster_data(sc, qc):
    drivers = read_file_path(BASE_PATH)
    print "Number of drivers: %d" % len(drivers)

    # Load and parse the data
    for i, dr in enumerate(drivers):
        # extract driver number from path
        dr_num = re.search("[0-9]+$", dr.strip())
        if dr_num:
            dr_num = dr_num.group(0)
            if dr_num == '1018':
                continue
        else:
            print 'driver number error for %s' % dr
            continue
        dr_data = sc.textFile("hdfs://" + dr + "/" + dr_num + "_all_trips.txt")
        data = dr_data.map(lambda row: [float(x) for x in row.split(',')])
        if i == 0:
            all_data = data
        else:
            all_data = all_data.union(data)
        data.unpersist()

    print 'Total number of records: %d' % all_data.count()

    # Build the model (cluster the data), k = Number of clusters
    k = 5
    t = time()
    clusters = KMeans.train(all_data, k, maxIterations=100, runs=100, initializationMode="random")
    print 'KMeans took %.2f seconds' % (time() - t)

    # Compute cost
    WSSSE_map = all_data.map(lambda point: error(point, clusters))
    # Join cluster ID to original data
    all_data_w_cluster = all_data.map(lambda point: np.hstack((point, get_cluster_id(clusters, point))))
    # all_data_w_cluster.saveAsTextFile("hdfs:///usr/local/spark/kmeans/results.txt")

    for i in xrange(0, k):
        subset = all_data_w_cluster.filter(lambda x: x[-1] == i)
        print "Number of items in cluster %d: %d" % (i, subset.count())
        # Compute functions on different features:
        all_features_average = subset.sum() / subset.count()
        print 'Average of all features'
        print all_features_average

    WSSSE = all_data.map(lambda point: error(point, clusters)).reduce(lambda x, y: x + y)
    print("Within set sum of squared error: " + str(WSSSE))
Author: amy12xx, Project: AXADriverChallenge, Lines: 54, Source: kmeans_cc_spark.py
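Two helpers used above, error(point, clusters) and get_cluster_id(clusters, point), are defined elsewhere in that project. Hypothetical reconstructions consistent with how they are called (a Euclidean distance for the WSSSE term, and a cluster index that np.hstack can append to a feature row) might look like:

# Hypothetical reconstructions of the helpers assumed by this example
import numpy as np
from math import sqrt

def error(point, clusters):
    # Euclidean distance from a point to its assigned cluster centre
    center = clusters.clusterCenters[clusters.predict(point)]
    return sqrt(sum([x ** 2 for x in (np.asarray(point) - center)]))

def get_cluster_id(clusters, point):
    # Cluster index for the point; np.hstack will append it to the feature row
    return clusters.predict(point)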
Example 19: build_cluster_model
def build_cluster_model(tfidf_vectors_rdd, num_clusters, max_iterations, runs):
    """Perform the clustering of vectors using K-means.

    Returns:
        k means model learned from the training data in
        tfidf_vectors_rdd
    """
    # Build the model (cluster the training data)
    return KMeans.train(tfidf_vectors_rdd, num_clusters, maxIterations=max_iterations, runs=runs)
Author: rohithvsm, Project: spark_exercises, Lines: 11, Source: tweets_kmeans_classifier.py
Example 20: main
def main(noun_file_loc, model_file_loc, percent, n_trials, out_files_loc):
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    logger.info('Loading Word2Vec model')
    # Load trained Word2Vec model
    model = Word2Vec.load(model_file_loc)
    logger.info('Reading in list of nouns')
    # Read in list of sorted nouns
    sorted_nouns = []
    with open(noun_file_loc, 'r') as f:
        for line in f:
            sorted_nouns.append(line.strip())
    # Count number of nouns
    n_nouns = len(sorted_nouns)
    # Create dictionary to map nouns to vectors
    noun_to_vect_dict = {}
    # Calculate index to stop slice as percentage of total nouns
    n_nouns_to_keep = int(n_nouns * percent / 100.)
    logger.info('Keeping %i nouns, %i percent of %i',
                n_nouns_to_keep, percent, n_nouns)
    # Add nouns and vectors to dictionary
    for noun in sorted_nouns[0:n_nouns_to_keep]:
        noun_to_vect_dict[noun] = model[noun]
    vectors = np.array(noun_to_vect_dict.values())
    # Initialize Spark Context
    sc = ps.SparkContext('local[4]')
    # Load data
    data = sc.parallelize(vectors)
    # Define search space for k
    ns_clusters = [int(x) for x in np.linspace(2, n_nouns, n_trials)]
    # Open WSSSEs output file
    with open(path.join(out_files_loc, 'elbow_data.txt'), 'w') as elbow_data:
        # For each k
        for i, k in enumerate(ns_clusters):
            logger.info('Trial %i of %i, %i clusters', (i + 1), n_trials, k)
            # Calculate cluster
            kmeans_model = KMeans.train(data, k, maxIterations=10, runs=10,
                                        initializationMode='k-means||')
            # Calculate WSSSE
            WSSSE = data.map(lambda point: error(kmeans_model, point)) \
                        .reduce(lambda x, y: x + y)
            # Save centroids
            with open(path.join(out_files_loc, '_%i.pkl' % k), 'w') as f:
                pickle.dump(kmeans_model.clusterCenters, f)
            # Write k and WSSSE
            elbow_data.write('%i, %f\n' % (k, WSSSE))
Author: gushecht, Project: noungroups, Lines: 51, Source: spark_clustering+copy.py
Note: The pyspark.mllib.clustering.KMeans class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or reuse should follow each project's license. Do not reproduce without permission.