This article collects typical usage examples of the Python pyspark.sql.SQLContext class. If you are unsure what SQLContext is for or how to use it, the curated class examples below should help.
Twenty SQLContext code examples are shown below, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code samples.
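Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them follow: wrap a SparkContext in a SQLContext, build a DataFrame, register a temporary table, and query it with SQL. The data, table name, and app name below are made up for illustration; the calls themselves are the legacy Spark 1.x SQLContext API used throughout this page.

from pyspark import SparkContext
from pyspark.sql import SQLContext

sc = SparkContext("local[2]", "SQLContextOverview")
sqlContext = SQLContext(sc)

# Build a DataFrame from local tuples (hypothetical data) and query it with SQL.
df = sqlContext.createDataFrame([(1, "alpha"), (2, "beta")], ["id", "name"])
df.registerTempTable("demo")
print(sqlContext.sql("SELECT id, name FROM demo WHERE id > 1").collect())

sc.stop()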
Example 1: Spark_MapReduce_Parents
def Spark_MapReduce_Parents(keyword, tokensofprevlevel, graphcache):
    #tokensofprevlevelkeyword=tokensofprevlevel
    #tokensofprevlevelkeyword.append(keyword)
    md5hashparents = hashlib.md5(keyword).hexdigest()
    #md5hashparents = keyword
    md5hashparents = md5hashparents + "$parents"
    picklef_keyword = open("RecursiveGlossOverlap_MapReduce_Parents_Persisted.txt", "w")
    asfer_pickle_string_dump(keyword, picklef_keyword)
    picklef_keyword.close()
    cachevalue = graphcache.get(md5hashparents)
    if cachevalue:
        print "Spark_MapReduce_Parents(): hash = ", md5hashparents, "; returning from cache"
        return cachevalue
    else:
        #picklelock.acquire()
        spcon = SparkContext("local[2]", "Spark_MapReduce_Parents")
        #picklef_keyword=open("RecursiveGlossOverlap_MapReduce_Parents_Persisted.txt","w")
        #asfer_pickle_string_dump(keyword,picklef_keyword)
        #picklef_keyword.close()
        paralleldata = spcon.parallelize(tokensofprevlevel).cache()
        #k=paralleldata.map(lambda keyword: mapFunction_Parents(keyword,tokensofprevlevel)).reduceByKey(reduceFunction_Parents)
        k = paralleldata.map(mapFunction_Parents).reduceByKey(reduceFunction_Parents)
        sqlContext = SQLContext(spcon)
        parents_schema = sqlContext.createDataFrame(k.collect())
        parents_schema.registerTempTable("Interview_RecursiveGlossOverlap_Parents")
        query_results = sqlContext.sql("SELECT * FROM Interview_RecursiveGlossOverlap_Parents")
        dict_query_results = dict(query_results.collect())
        #print "Spark_MapReduce_Parents() - SparkSQL DataFrame query results:"
        #picklelock.release()
        graphcache.set(md5hashparents, dict_query_results[1])
        spcon.stop()
        print "graphcache_mapreduce_parents updated:", graphcache
        return dict_query_results[1]
Developer: shrinivaasanka | Project: asfer-github-code | Lines: 35 | Source: InterviewAlgorithmWithIntrinisicMerit_SparkMapReducer.py
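A hedged sketch of how the function above might be called: the keyword, token list, and cache object are hypothetical stand-ins (not taken from the original project), and mapFunction_Parents/reduceFunction_Parents are assumed to be defined in the same module.

# Hypothetical invocation (not from the original repository). graphcache can be
# any object exposing get(key)/set(key, value), e.g. a small dict-backed cache.
class DictCache(object):
    def __init__(self):
        self.store = {}
    def get(self, key):
        return self.store.get(key)
    def set(self, key, value):
        self.store[key] = value

graphcache = DictCache()
parents = Spark_MapReduce_Parents("chemistry", ["science", "alchemy", "matter"], graphcache)
print(parents)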
Example 2: test_save_load
def test_save_load(self):
    temp_path = tempfile.mkdtemp()
    sqlContext = SQLContext(self.sc)
    dataset = sqlContext.createDataFrame(
        [(Vectors.dense([0.0]), 0.0),
         (Vectors.dense([0.4]), 1.0),
         (Vectors.dense([0.5]), 0.0),
         (Vectors.dense([0.6]), 1.0),
         (Vectors.dense([1.0]), 1.0)] * 10,
        ["features", "label"])
    lr = LogisticRegression()
    grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
    evaluator = BinaryClassificationEvaluator()
    cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
    cvModel = cv.fit(dataset)
    cvPath = temp_path + "/cv"
    cv.save(cvPath)
    loadedCV = CrossValidator.load(cvPath)
    self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
    self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
    self.assertEqual(loadedCV.getEstimatorParamMaps(), cv.getEstimatorParamMaps())
    cvModelPath = temp_path + "/cvModel"
    cvModel.save(cvModelPath)
    loadedModel = CrossValidatorModel.load(cvModelPath)
    self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
Developer: Bella-Lin | Project: spark | Lines: 25 | Source: tests.py
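A short hedged continuation of the test above: once reloaded, the CrossValidatorModel can be applied like any fitted model and scored with the same evaluator. The names reuse those defined in the test; this step is not part of the original test.

# Hypothetical continuation: apply the reloaded model and score its predictions.
predictions = loadedModel.transform(dataset)
print(evaluator.evaluate(predictions))  # BinaryClassificationEvaluator defaults to areaUnderROC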
Example 3: test_nested_pipeline_persistence
def test_nested_pipeline_persistence(self):
    """
    Pipeline[HashingTF, Pipeline[PCA]]
    """
    sqlContext = SQLContext(self.sc)
    temp_path = tempfile.mkdtemp()
    try:
        df = sqlContext.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
        tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
        pca = PCA(k=2, inputCol="features", outputCol="pca_features")
        p0 = Pipeline(stages=[pca])
        pl = Pipeline(stages=[tf, p0])
        model = pl.fit(df)
        pipeline_path = temp_path + "/pipeline"
        pl.save(pipeline_path)
        loaded_pipeline = Pipeline.load(pipeline_path)
        self._compare_pipelines(pl, loaded_pipeline)
        model_path = temp_path + "/pipeline-model"
        model.save(model_path)
        loaded_model = PipelineModel.load(model_path)
        self._compare_pipelines(model, loaded_model)
    finally:
        try:
            rmtree(temp_path)
        except OSError:
            pass
Developer: Bella-Lin | Project: spark | Lines: 29 | Source: tests.py
Example 4: TestSQL
class TestSQL(PySparkTestCase):

    def setUp(self):
        PySparkTestCase.setUp(self)
        self.sqlCtx = SQLContext(self.sc)

    def test_basic_functions(self):
        rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
        srdd = self.sqlCtx.jsonRDD(rdd)
        srdd.count()
        srdd.collect()
        srdd.schemaString()
        srdd.schema()

        # cache and checkpoint
        self.assertFalse(srdd.is_cached)
        srdd.persist(StorageLevel.MEMORY_ONLY_SER)
        srdd.unpersist()
        srdd.cache()
        self.assertTrue(srdd.is_cached)
        self.assertFalse(srdd.isCheckpointed())
        self.assertEqual(None, srdd.getCheckpointFile())

        srdd = srdd.coalesce(2, True)
        srdd = srdd.repartition(3)
        srdd = srdd.distinct()
        srdd.intersection(srdd)
        self.assertEqual(2, srdd.count())

        srdd.registerTempTable("temp")
        srdd = self.sqlCtx.sql("select foo from temp")
        srdd.count()
        srdd.collect()
Developer: zjmwqx | Project: spark- | Lines: 33 | Source: tests.py
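jsonRDD and schemaString above are early Spark 1.x SchemaRDD calls. As a hedged reference, the closer-to-current equivalent (assuming Spark 1.4+, where read.json also accepts an RDD of JSON strings) looks like this:

rdd = sc.parallelize(['{"foo": "bar"}', '{"foo": "baz"}'])
df = sqlContext.read.json(rdd)   # replaces sqlContext.jsonRDD(rdd)
df.printSchema()                 # replaces srdd.schemaString()
df.registerTempTable("temp")
sqlContext.sql("select foo from temp").show()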
Example 5: main
def main():
    log = logging.getLogger(prog)
    log.setLevel(logging.INFO)
    # bit hackish and hard to keep aligned with docstring changes, not using this
    # usage = '\r\b\r\b\r' + __doc__ + "usage: %prog -j file.json -p directory.parquet"
    # parser = OptionParser(usage=usage, version='%prog ' + __version__)
    parser = OptionParser(version='%prog ' + __version__)
    parser.add_option('-j', '--json', dest='jsonFile', help='JSON input file/dir', metavar='<file/dir>')
    parser.add_option('-p', '--parquetDir', dest='parquetDir', help='Parquet output dir', metavar='<dir>')
    (options, args) = parser.parse_args()
    jsonFile = options.jsonFile
    parquetDir = options.parquetDir
    if args or not jsonFile or not parquetDir:
        usage(parser)
    conf = SparkConf().setAppName('HS PySpark JSON => Parquet')
    sc = SparkContext(conf=conf)
    sqlContext = SQLContext(sc)
    spark_version = sc.version
    log.info('Spark version detected as %s' % spark_version)
    if not isVersionLax(spark_version):
        die("Spark version couldn't be determined. " + support_msg('pytools'))
    if isMinVersion(spark_version, 1.4):
        json = sqlContext.read.json(jsonFile)
        json.write.parquet(parquetDir)
    else:
        log.warn('running legacy code for Spark <= 1.3')
        json = sqlContext.jsonFile(jsonFile)
        json.saveAsParquetFile(parquetDir)
Developer: zhumzhu | Project: pytools | Lines: 32 | Source: spark-json-to-parquet.py
Example 6: main
def main(argv):
    Conf = (SparkConf().setAppName("recommendation"))
    sc = SparkContext(conf=Conf)
    sqlContext = SQLContext(sc)
    dirPath = "hdfs://ec2-52-71-113-80.compute-1.amazonaws.com:9000/reddit/recommend/data/sr_userCount.parquet"
    rawDF = sqlContext.read.parquet(dirPath).persist(StorageLevel.MEMORY_AND_DISK_SER)
    # argv[1] is the dump of training data in hdfs
    # argv[2] is the user preferences

    # User hash lookup stored into Cassandra
    user_hash = rawDF.map(lambda (a, b, c): (a, hashFunction(a)))
    distinctUser = user_hash.distinct()
    userHashDF = sqlContext.createDataFrame(distinctUser, ["user", "hash"])
    userHashDF.write.format("org.apache.spark.sql.cassandra").options(table="userhash", keyspace=keyspace).save(mode="append")

    # Product hash lookup stored into Cassandra
    product_hash = rawDF.map(lambda (a, b, c): (b, hashFunction(b)))
    distinctProduct = product_hash.distinct()
    productHashDF = sqlContext.createDataFrame(distinctProduct, ["product", "hash"])
    productHashDF.write.format("org.apache.spark.sql.cassandra").options(table="producthash", keyspace=keyspace).save(mode="append")

    # Ratings for training: ALS requires a Java hash of the string, so each record
    # is hashed and stored as a Rating object for the algorithm to consume.
    ratings = rawDF.map(lambda (a, b, c): Rating(hashFunction(a), hashFunction(b), float(c)))
    model = ALS.trainImplicit(ratings, 10, 10, alpha=0.01, seed=5)
    model.save(sc, "hdfs://ec2-52-71-113-80.compute-1.amazonaws.com:9000/reddit/recommend/model")
    sc.stop()
Developer: Swebask | Project: RedditR--Insight-Data-Engineering-Project | Lines: 34 | Source: engine.py
Example 7: __init__
def __init__(self, predictionAndLabels):
    sc = predictionAndLabels.ctx
    sql_ctx = SQLContext(sc)
    df = sql_ctx.createDataFrame(predictionAndLabels,
                                 schema=sql_ctx._inferSchema(predictionAndLabels))
    java_model = callMLlibFunc("newRankingMetrics", df._jdf)
    super(RankingMetrics, self).__init__(java_model)
Developer: HubPeter | Project: spark | Lines: 7 | Source: evaluation.py
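For context, a minimal hedged example of how this constructor is usually reached: RankingMetrics is built from an RDD of (predicted ranking, ground-truth items) pairs. The data below is made up.

from pyspark.mllib.evaluation import RankingMetrics

predictionAndLabels = sc.parallelize([
    ([1, 6, 2, 7, 8], [1, 2, 3, 4, 5]),  # (predicted ranking, relevant items)
    ([4, 1, 5, 6, 2], [1, 2, 3]),
])
metrics = RankingMetrics(predictionAndLabels)
print(metrics.precisionAt(2))
print(metrics.meanAveragePrecision)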
Example 8: main
def main(n_part, hdfs_path):
    print "********************\n*"
    print "* Start main\n*"
    print "********************"
    conf = SparkConf().setAppName("Benchmark Spark SQL")
    sc = SparkContext(conf=conf)
    sqlContext = SQLContext(sc)
    rowsRDD = sc.textFile(hdfs_path).repartition(n_part).map(lambda x: recordToRows(x)).cache()
    df = sqlContext.createDataFrame(rowsRDD).cache()
    df.count()
    df.registerTempTable("msd_table")
    print "********************\n*"
    print "* Start queries\n*"
    print "********************"
    [ave_t1, std1, dt1, n1] = time_querry("SELECT * FROM msd_table WHERE msd_table.artist_name = 'Taylor Swift'", sqlContext)
    [ave_t2, std2, dt2, n2] = time_querry("SELECT COUNT(*) FROM msd_table WHERE msd_table.artist_name = 'Taylor Swift'", sqlContext, method=1)
    [ave_t3, std3, dt3, n3] = time_querry("SELECT * FROM msd_table WHERE msd_table.artist_hotness > 0.75", sqlContext)
    [ave_t4, std4, dt4, n4] = time_querry("SELECT COUNT(*) FROM msd_table WHERE msd_table.artist_hotness > 0.75", sqlContext, method=1)
    if n1 != n2:
        print "\t!!!!Error, counts disagree for the number of T.S. songs!"
    if n3 != n4:
        print "\t!!!!Error, counts disagree for the number of high paced songs!"
    print "********************\n*"
    print "* Results"
    print "\t".join(map(lambda x: str(x), [ave_t1, std1, dt1, ave_t2, std2, dt2, ave_t3, std3, dt3, ave_t4, std4, dt4]))
    print "********************"
Developer: drJAGartner | Project: benchmarking_scripts | Lines: 26 | Source: benchmark_spark_sql.py
Example 9: RunRandomForest
def RunRandomForest(tf, ctx):
    sqlContext = SQLContext(ctx)
    rdd = tf.map(parseForRandomForest)
    # The schema is encoded in a string.
    schema = ['genre', 'track_id', 'features']
    # Apply the schema to the RDD.
    songDF = sqlContext.createDataFrame(rdd, schema)
    # Register the DataFrame as a table.
    songDF.registerTempTable("genclass")
    labelIndexer = StringIndexer().setInputCol("genre").setOutputCol("indexedLabel").fit(songDF)

    trainingData, testData = songDF.randomSplit([0.8, 0.2])

    labelConverter = IndexToString().setInputCol("prediction").setOutputCol("predictedLabel").setLabels(labelIndexer.labels)

    rfc = RandomForestClassifier().setMaxDepth(10).setNumTrees(2).setLabelCol("indexedLabel").setFeaturesCol("features")
    #rfc = SVMModel([.5, 10, 20], 5)
    #rfc = LogisticRegression(maxIter=10, regParam=0.01).setLabelCol("indexedLabel").setFeaturesCol("features")

    pipeline = Pipeline(stages=[labelIndexer, rfc, labelConverter])
    model = pipeline.fit(trainingData)
    predictions = model.transform(testData)
    predictions.show()

    evaluator = MulticlassClassificationEvaluator().setLabelCol("indexedLabel").setPredictionCol("prediction").setMetricName("precision")
    accuracy = evaluator.evaluate(predictions)
    print 'Accuracy of RandomForest = ', accuracy * 100
    print "Test Error = ", (1.0 - accuracy) * 100
Developer: Sunhick | Project: music-cognita | Lines: 30 | Source: genre_classification.py
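A hedged compatibility note: on newer Spark releases, "precision" is no longer an accepted metricName for MulticlassClassificationEvaluator; the overall score used above is obtained with "accuracy" instead, roughly as follows.

# Assumes Spark 2.x+; the keyword-argument form is equivalent to the setter chain above.
evaluator = MulticlassClassificationEvaluator(
    labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)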
Example 10: mock_data
def mock_data(self):
    """Mock data to imitate read from database."""
    sqlContext = SQLContext(self.sc)
    mock_data_rdd = self.sc.parallelize([("A", 1, 1), ("B", 1, 0), ("C", 0, 2), ("D", 2, 4), ("E", 3, 5)])
    schema = ["id", "x", "y"]
    mock_data_df = sqlContext.createDataFrame(mock_data_rdd, schema)
    return mock_data_df
Developer: Sandy4321 | Project: spark-tdd-example | Lines: 7 | Source: test_clustering.py
Example 11: log_mapreducer
def log_mapreducer(logfilename, pattern, filt="None"):
    spcon = SparkContext()
    if filt == "None":
        input = open(logfilename, 'r')
        paralleldata = spcon.parallelize(input.readlines())
        patternlines = paralleldata.filter(lambda patternline: pattern in patternline)
        print "pattern lines", patternlines.collect()
        matches = patternlines.map(mapFunction).reduceByKey(reduceFunction)
    else:
        input = spcon.textFile(logfilename)
        matches = input.flatMap(lambda line: line.split()).filter(lambda line: filt in line).map(mapFunction).reduceByKey(reduceFunction)
    matches_collected = matches.collect()
    print "matches_collected:", matches_collected
    if len(matches_collected) > 0:
        sqlContext = SQLContext(spcon)
        bytes_stream_schema = sqlContext.createDataFrame(matches_collected)
        bytes_stream_schema.registerTempTable("USBWWAN_bytes_stream")
        query_results = sqlContext.sql("SELECT * FROM USBWWAN_bytes_stream")
        dict_query_results = dict(query_results.collect())
        print "----------------------------------------------------------------------------------"
        print "log_mapreducer(): pattern [", pattern, "] in [", logfilename, "] for filter [", filt, "]"
        print "----------------------------------------------------------------------------------"
        dict_matches = dict(matches_collected)
        sorted_dict_matches = sorted(dict_matches.items(), key=operator.itemgetter(1), reverse=True)
        print "pattern matching lines:", sorted_dict_matches
        print "----------------------------------------------------------------------------------"
        print "SparkSQL DataFrame query results:"
        print "----------------------------------------------------------------------------------"
        pprint.pprint(dict_query_results)
        print "----------------------------------------------------------------------------------"
        print "Cardinality of Stream Dataset:"
        print "----------------------------------------------------------------------------------"
        print len(dict_query_results)
        spcon.stop()
        return sorted_dict_matches
Developer: shrinivaasanka | Project: usb-md-github-code | Lines: 35 | Source: Spark_USBWWANLogMapReduceParser.py
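A hypothetical invocation of the function above; the log path and pattern are placeholders, and mapFunction/reduceFunction are assumed to be defined in the same module.

# Placeholder path and pattern, for illustration only.
top_matches = log_mapreducer("/var/log/usb_wwan.log", "bytes")
print(top_matches[:10])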
Example 12: main
def main(sc):
    sql_context = SQLContext(sc)
    all_data = get_all_data()
    # Input data: each row is a bag of words from a sentence or document.
    training_data = [(id_gen.next(), text.split(" ")) for text in all_data]
    documentdf = sql_context.createDataFrame(training_data, ["id", "text"])
    remover = StopWordsRemover(inputCol="text", outputCol="text_filtered")
    cleaned_document = remover.transform(documentdf)

    # Learn a mapping from words to Vectors.
    word2vec = Word2Vec(vectorSize=len(training_data),
                        inputCol="text_filtered",
                        outputCol="result")
    model = word2vec.fit(cleaned_document)
    matrix = column_similarities(model.transform(cleaned_document))

    # Use the size of the target data to keep only products of target data
    # against the rest, avoiding products of the target data with itself.
    values = matrix.entries.filter(
        lambda x: x.j >= TARGET_DATA_SIZE and x.i < TARGET_DATA_SIZE).sortBy(
        keyfunc=lambda x: x.value, ascending=False).map(
        lambda x: x.j).distinct().take(100)

    training_data_index = dict(training_data)
    for position, item in enumerate(values):
        line = " ".join(training_data_index[int(item)])
        print('%d -> %s' % (position, line.encode('utf-8')))
Developer: victorpoluceno | Project: socialbasebr-desafio | Lines: 30 | Source: main.py
Example 13: main
def main(argv):
    Conf = (SparkConf().setAppName("SimpleGraph"))
    sc = SparkContext(conf=Conf)
    sqlContext = SQLContext(sc)
    dirPath = "hdfs://ec2-52-71-113-80.compute-1.amazonaws.com:9000/reddit/data/" + argv[1] + ".parquet"
    rawDF = sqlContext.read.parquet(dirPath).registerTempTable("comments")

    # This is where the magic happens:
    # a SQL self-join that pairs users who have interacted with one another.
    df = sqlContext.sql("""
        SELECT t1.subreddit as Subreddit,
               t1.id as OrigId, t2.id as RespId,
               t1.author AS OrigAuth, t2.author AS RespAuth,
               t1.score AS OrigScore, t2.score AS RespScore,
               t1.ups AS OrigUps, t2.ups AS RespUps,
               t1.downs AS OrigDowns, t2.downs AS RespDowns,
               t1.controversiality AS OrigControv, t2.controversiality AS RespControv
        FROM comments t1 INNER JOIN comments t2
        ON CONCAT("t1_", t1.id) = t2.parent_id
        WHERE t1.author != '[deleted]' AND t2.author != '[deleted]'
        """)

    # Write the result out as Parquet: it compresses the data and is fast to read back.
    df.write.parquet("hdfs://ec2-52-71-113-80.compute-1.amazonaws.com:9000/reddit/data/" + argv[1] + "-selfjoin.parquet")
Developer: Swebask | Project: RedditR--Insight-Data-Engineering-Project | Lines: 27 | Source: self_joinDump.py
Example 14: main
def main(dataFile, outputPath):
    conf = SparkConf().setAppName("S3 Example").set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    sc = SparkContext(conf=conf)
    sqlContext = SQLContext(sc)
    raw_text = sc.textFile(dataFile).persist(StorageLevel.MEMORY_AND_DISK)

    csv_data = raw_text.map(lambda l: l.split(","))
    row_data = csv_data.map(lambda p: dataIO.dataStruc(p))

    interaction_df = sqlContext.createDataFrame(row_data)
    # features.save_hdfs_parquet(interaction_df, outputPath)
    dataIO.save_hdfs_parquet(interaction_df, outputPath)

    interaction_df.registerTempTable("interactions")
    tcp_interactions = sqlContext.sql("""
        SELECT duration, dst_bytes, protocol_type FROM interactions
        WHERE protocol_type = 'tcp' AND duration > 1000 AND dst_bytes = 0
        """)
    tcp_interactions.show()

    features.print_tcp_interactions(tcp_interactions)
    dataIO.print_from_dataio()
    features.print_from_feature()

    sc.stop()
Developer: yuantuo | Project: pysparkexample | Lines: 29 | Source: example.py
Example 15: main
def main(sc):
    sqlContext = SQLContext(sc)
    tasteProfileRdd = sc.textFile("userTaste/*")
    songRdd = sc.textFile("songsDict/*")

    # Load a text file and convert each line to a Row.
    tasteProfile = tasteProfileRdd.filter(lambda l: len(l) > 0)
    parsedSplits = tasteProfile.map(lambda l: l.split('\t'))
    userTaste = parsedSplits.map(lambda p: Row(userId=p[0], songId=p[1], playCount=p[2]))

    individualSong = songRdd.map(lambda l: l.split('|'))
    songData = individualSong.map(lambda s: Row(songId=s[0], featureSet=s[1]))

    # Infer the schema, and register the DataFrame as a table.
    schemaUserTaste = sqlContext.inferSchema(userTaste)
    schemaUserTaste.registerTempTable("userTaste")
    schemaSongData = sqlContext.inferSchema(songData)
    schemaSongData.registerTempTable("songData")

    test2 = sqlContext.sql("select * from songData limit 5")
    songIds = test2.map(lambda p: "songIds: " + p.songId)  # fixed: original read `s.songId`, but the lambda argument is `p`
    #test1 = sqlContext.sql("SELECT distinct * FROM userTaste limit 5")
    #songIds = test1.map(lambda p: "songIds: " + p.songId)

    for i in songIds.collect():
        print i
Developer: himaja20 | Project: MusicRecommenderSystem | Lines: 27 | Source: userPlayCountsSpark.py
Example 16: get_recommendations
class RecommendationEngine:
    """A travel recommendation engine
    """

    def get_recommendations(self, user_id):
        """Recommends travel for user
        """
        data = (1, 2, 3, 4, 5)
        even_rdd = self.sc.parallelize(data)
        #ratings = even_rdd.collect()
        reco = self.sqlContext.sql("SELECT c.contact_id, o.prod_id FROM contacts c, offres o WHERE o.continent_offre = c.continent and o.envie_offre = c.envie and o.moyen_offre = c.moyen").collect()
        return reco

    def __init__(self, sc):
        """Init the recommendation engine given a Spark context and a dataset path
        """
        logger.info("Starting up the Recommendation Engine: ")
        self.sc = sc
        self.sqlContext = SQLContext(sc)
        path_contacts = "data_v3/contacts/attempt_contactV3_perfect_match.json"
        df_contacts = self.sqlContext.jsonFile(path_contacts)
        df_contacts.registerTempTable("contacts")
        path_offres = "data_v3/offres/attempt_productV3_perfect_match.json"
        df_offres = self.sqlContext.jsonFile(path_offres)
        df_offres.registerTempTable("offres")
Developer: nicolasclaudon | Project: travel | Lines: 27 | Source: engine.py
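sqlContext.jsonFile is a Spark 1.x call that was removed in later releases. A hedged sketch of the same initialisation through the DataFrameReader interface (assuming Spark 1.4+; paths reuse those above):

df_contacts = self.sqlContext.read.json(path_contacts)   # replaces jsonFile(path_contacts)
df_contacts.registerTempTable("contacts")
df_offres = self.sqlContext.read.json(path_offres)
df_offres.registerTempTable("offres")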
Example 17: run
def run(self):
    jsonFile = self.options.jsonFile
    parquetDir = self.options.parquetDir
    if not jsonFile:
        self.usage('--json not defined')
    if not parquetDir:
        self.usage('--parquetDir not defined')
    if self.args:
        self.usage()

    conf = SparkConf().setAppName('HS PySpark JSON => Parquet')
    sc = SparkContext(conf=conf)
    sqlContext = SQLContext(sc)
    spark_version = sc.version
    log.info('Spark version detected as %s' % spark_version)
    if not isVersionLax(spark_version):
        die("Spark version couldn't be determined. " + support_msg('pytools'))
    if isMinVersion(spark_version, 1.4):
        json = sqlContext.read.json(jsonFile)
        json.write.parquet(parquetDir)
    else:
        log.warn('running legacy code for Spark <= 1.3')
        json = sqlContext.jsonFile(jsonFile)
        json.saveAsParquetFile(parquetDir)
Developer: gggordon | Project: pytools | Lines: 25 | Source: spark-json-to-parquet.py
Example 18: get_latest_data
def get_latest_data(self):
    from pyspark.sql import SparkSession
    import config
    import pandas as pd

    # initialise SparkContext via a SparkSession
    spark1 = SparkSession.builder \
        .master(config.sp_master) \
        .appName(config.sp_appname) \
        .config('spark.executor.memory', config.sp_memory) \
        .config("spark.cores.max", config.sp_cores) \
        .getOrCreate()
    sc = spark1.sparkContext

    # using SQLContext to read the parquet file
    from pyspark.sql import SQLContext
    sqlContext = SQLContext(sc)

    from datetime import datetime
    t1 = datetime.now()
    df = sqlContext.read.parquet(config.proj_path + '/datas/appid_datapoint_parquet1')

    # creating and querying from the temporary table
    df1 = df.registerTempTable('dummy')
    df1 = sqlContext.sql('select count(distinct application) as app_count, time_stamp, source from dummy group by source, time_stamp')

    # data cleaning
    self.p2_df = df1.toPandas()
    dates_outlook = pd.to_datetime(pd.Series(self.p2_df.time_stamp), unit='ms')
    self.p2_df.index = dates_outlook
    self.p2_df['date'] = self.p2_df.index.date
    self.p2_df = self.p2_df.sort_values(by='time_stamp')

    t2 = datetime.now()
    time_to_fetch = str(t2 - t1)
Developer: abhoopathi | Project: friendly-lamp | Lines: 35 | Source: p2_api.py
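Since the method above already builds a SparkSession, the extra SQLContext is redundant on Spark 2.x; a hedged sketch of the same read and query using the session directly (config names as in the original):

# Hedged alternative, assuming Spark 2.x: the SparkSession reads parquet
# and runs SQL directly, without constructing a separate SQLContext.
df = spark1.read.parquet(config.proj_path + '/datas/appid_datapoint_parquet1')
df.createOrReplaceTempView('dummy')
df1 = spark1.sql('select count(distinct application) as app_count, time_stamp, source '
                 'from dummy group by source, time_stamp')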
Example 19: test_persistence
def test_persistence(self):
    # Test save/load for LDA, LocalLDAModel, DistributedLDAModel.
    sqlContext = SQLContext(self.sc)
    df = sqlContext.createDataFrame([
        [1, Vectors.dense([0.0, 1.0])],
        [2, Vectors.sparse(2, {0: 1.0})],
    ], ["id", "features"])
    # Fit model
    lda = LDA(k=2, seed=1, optimizer="em")
    distributedModel = lda.fit(df)
    self.assertTrue(distributedModel.isDistributed())
    localModel = distributedModel.toLocal()
    self.assertFalse(localModel.isDistributed())
    # Define paths
    path = tempfile.mkdtemp()
    lda_path = path + "/lda"
    dist_model_path = path + "/distLDAModel"
    local_model_path = path + "/localLDAModel"
    # Test LDA
    lda.save(lda_path)
    lda2 = LDA.load(lda_path)
    self._compare(lda, lda2)
    # Test DistributedLDAModel
    distributedModel.save(dist_model_path)
    distributedModel2 = DistributedLDAModel.load(dist_model_path)
    self._compare(distributedModel, distributedModel2)
    # Test LocalLDAModel
    localModel.save(local_model_path)
    localModel2 = LocalLDAModel.load(local_model_path)
    self._compare(localModel, localModel2)
    # Clean up
    try:
        rmtree(path)
    except OSError:
        pass
Developer: bsangee | Project: spark | Lines: 35 | Source: tests.py
Example 20: __init__
def __init__(self, sparkContext):
    """Create a new HbaseContext.

    @param sparkContext: The SparkContext to wrap.
    """
    SQLContext.__init__(self, sparkContext)
    self._scala_HBaseSQLContext = self._get_hbase_ctx()
Developer: 06094051 | Project: Spark-SQL-on-HBase | Lines: 7 | Source: context.py
Note: The pyspark.sql.SQLContext class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.