Python feature.IDF Class Code Examples


This article collects typical usage examples of the pyspark.mllib.feature.IDF class in Python. If you are wondering how the Python IDF class works, how to call it, or what real usage looks like, the curated class code examples below may help.



A total of 20 code examples of the IDF class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
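
Before the individual examples, here is a minimal sketch of the pattern almost all of them share: tokenized documents are hashed into term-frequency vectors with HashingTF, an IDF model is fitted on those vectors, and the same vectors are then re-weighted into TF-IDF. The document data and variable names below (docs, and sc as an already created SparkContext) are illustrative placeholders, not taken from any of the projects cited later.

from pyspark.mllib.feature import HashingTF, IDF

# docs: an RDD where each element is a list of tokens (illustrative data)
docs = sc.parallelize([
    ["spark", "mllib", "tfidf"],
    ["spark", "hashing", "tf"],
    ["idf", "weights", "terms"],
])

hashingTF = HashingTF()           # default dimension is 2^20 hash buckets
tf = hashingTF.transform(docs)    # RDD of sparse term-frequency vectors
tf.cache()                        # fit() and transform() each pass over tf

idfModel = IDF(minDocFreq=2).fit(tf)   # terms seen in fewer than 2 documents get weight 0
tfidf = idfModel.transform(tf)         # RDD of TF-IDF vectors, in the same order as docs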

Example 1: main

def main():
    # Initialize the SparkContext
    sc = spark_context(spark_master)

    # Read the input file
    data = sc.textFile(hdfs_path)

    # Tokenize
    documents = data.map(tokenize)
    documents.cache()

    # TF
    hashingTF = HashingTF()
    tf = hashingTF.transform(documents)

    # IDF
    idf = IDF(minDocFreq=2).fit(tf)
    
    # TFIDF
    tfidf = idf.transform(tf)

    # Connect to MongoDB
    from pymongo import MongoClient
    mongo_client = MongoClient(mongo_host)
    mongo_client.admin.authenticate(mongo_user, mongo_pass, mechanism='SCRAM-SHA-1')
    clear_mongodb(mongo_client)

    # zip
    term_tfidf = documents.zip(tfidf).map(doc_tfidf)
    articles = term_tfidf.flatMap(lambda i: i).reduceByKey(lambda x, y: x + y)
    for article in articles.collect():
        item = {}
        item['text'] = article[0].encode('utf-8')
        item['size'] = int(article[1] * 10)
        send_mongodb(mongo_client, item)
Developer: yankaics | Project: zhangxinyun-spark | Lines: 35 | Source: tfidf.py


Example 2: tfidf

 def tfidf(self):
     self._create_rdd()
     hashingTF = HashingTF()
     tf = hashingTF.transform(self.token_rdd)
     idf = IDF(minDocFreq=2).fit(tf)
     tfidf = idf.transform(tf)
     return tfidf
Developer: nhu2000 | Project: wiki-search | Lines: 7 | Source: make_tfidf.py


Example 3: get_feature_vectors

def get_feature_vectors(sc, input_file, feature_dimensions):
    """Get feature vector from the lines in input_file_obj using
    TF/IDF.

    Returns:
        vectors RDD

    """

    # Load documents (one per line).
    tweet_file = sc.textFile(input_file)
    input_text_rdd = tweet_file.map(lambda line: _tokenize(line))
    input_text_rdd.cache()

    # The default feature dimension is 2^20; for a corpus with million
    # tweets recommended dimensions are 50000 or 100000. Use higher
    # dimensions for larger corpus of tweets.
    hashing_tf = HashingTF(feature_dimensions)
    tf = hashing_tf.transform(input_text_rdd)
    tf.cache()
    idf = IDF(minDocFreq=2).fit(tf)
    tfidf = idf.transform(tf)
    tfidf.cache()

    return input_text_rdd, tfidf
Developer: rohithvsm | Project: spark_exercises | Lines: 25 | Source: tweets_kmeans_classifier.py


Example 4: main

def main(sc):

    stopset = set(stopwords.words('english'))

    tweets = sc.textFile('hdfs:/adi/sample.txt')
    words = tweets.map(lambda word: word.split(" "))
    wordArr = []
    for wArr in words.collect():
        tempArr = []
        for w in wArr:
                if not w in stopset:
                        tempArr.append(w)
        wordArr.append(tempArr)
    # Open a file
   # print wordArr
    #tokens = sc.textFile("hdfs:/adi/tokens1.txt")

    # Load documents (one per line).
    documents = sc.textFile("hdfs:/adi/tokens1.txt").map(lambda line: line.split(" "))
    numDims = 100000
    hashingTF = HashingTF(numDims)
    tf = hashingTF.transform(documents)
    tf.cache()
    idf = IDF().fit(tf)
    tfidf = idf.transform(tf)
    tfidf.count()
    model = KMeans.train(tfidf, 5)
    model.save(sc,"tweetModel1")
    print("Final centers: " + str(model.clusterCenters))
#    print("Total Cost: " + str(model.computeCost(data)))
    sc.stop()
Developer: aditcoding | Project: zfs | Lines: 31 | Source: ml.py


Example 5: TFIDF

def TFIDF(source, destination):
    if destination[-1] != '/':
        destination=destination+'/'
## typically define the source message
    rdd=sc.wholeTextFiles(source).map(lambda (name,text): text.split())
    tf=HashingTF()
    tfVectors=tf.transform(rdd).cache()
    a = tfVectors.collect()
    # Storing the TF values above in individual files, one per link
    ind = 0
    for vector in a:
        dest_path = destination + "TF_%d"%ind + ".txt"
        ind = ind + 1
        file = open(dest_path,'w')
        file.write(str(vector))
        file.close()
    # Calculating IDF Values for each case.
    idf=IDF()
    idfModel=idf.fit(tfVectors)
    tfIdfVectors=idfModel.transform(tfVectors)
    # Writing TF-IDF values to a single file.
    file = open(destination+"TF-IDF.txt", 'w')
    file.write(str(tfIdfVectors.collect()))
    try:
        for i in range(0,100):
            print ""#Testing Printing"
    except KeyboardInterrupt:
            pass
Developer: rikinmathur | Project: EECS-6895-FINAL-PROJECT | Lines: 28 | Source: maanittf.py


Example 6: tfidf

def tfidf(rdd_doc):
    hasingTF = HashingTF()
    trainTf = hasingTF.transform(rdd_doc)
    trainTf.cache()
    idf = IDF().fit(trainTf)
    trainTfidf = idf.transform(trainTf)
    trainTfidf.cache()
    return trainTfidf, lambda x: hasingTF.indexOf(x)
Developer: hendrydong | Project: StackOverFlow_Analysis_PySpark | Lines: 8 | Source: tfidf_v2.py


Example 7: get_tfidf_features

def get_tfidf_features(txt):
    hashingTF = HashingTF()
    tf = hashingTF.transform(txt)
    tf.cache()
    idf = IDF().fit(tf)
    tfidf = idf.transform(tf)

    return tfidf
Developer: Veterun | Project: SparkPythonHanhan | Lines: 8 | Source: amazon_review_tfidf_normalized.py


Example 8: test_idf_model

 def test_idf_model(self):
     data = [
         Vectors.dense([1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3]),
         Vectors.dense([1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1]),
         Vectors.dense([1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0]),
         Vectors.dense([2, 1, 0, 3, 0, 0, 5, 0, 2, 3, 9])
     ]
     model = IDF().fit(self.sc.parallelize(data, 2))
     idf = model.idf()
     self.assertEqual(len(idf), 11)
Developer: HodaAlemi | Project: spark | Lines: 10 | Source: tests.py
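
A brief aside on this test (not part of the original snippet): the vector returned by model.idf() can be checked by hand, assuming the formula documented for MLlib's IDF, idf(t) = log((m + 1) / (df(t) + 1)), where m is the number of documents and df(t) is the number of documents containing term t (default minDocFreq of 0). The helper below is a plain-Python sketch of that check; expected_idf is a hypothetical name introduced here.

import math

def expected_idf(rows):
    """Recompute IDF weights for dense term-count rows (sketch, assumes minDocFreq=0)."""
    m = len(rows)                                   # number of documents
    dims = len(rows[0])
    df = [sum(1 for row in rows if row[t] > 0)      # document frequency of term t
          for t in range(dims)]
    return [math.log((m + 1.0) / (df[t] + 1.0)) for t in range(dims)]

data = [
    [1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3],
    [1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1],
    [1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0],
    [2, 1, 0, 3, 0, 0, 5, 0, 2, 3, 9],
]
print(expected_idf(data))  # 11 weights, one per column, to compare against model.idf()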


Example 9: tf_idf

def tf_idf(sc,title_token):
    hashingTF = HashingTF(100)
    title_token = sc.parallelize(title_token)
    tf = hashingTF.transform(title_token)
    print tf, ' tf'
   
    idf = IDF().fit(tf)
    tfidf = idf.transform(tf)
   
    return tfidf
Developer: IcedNecro | Project: AWO-61-backend | Lines: 10 | Source: service_func.py


Example 10: tfidf

 def tfidf(self, tokenizer):
     """
     Get TFIDF matrix rdd with spark tfidf functions
     """
     self._create_rdd(tokenizer)
     hashingTF = HashingTF()
     tf = hashingTF.transform(self.token_rdd)
     idf = IDF(minDocFreq=2).fit(tf)
     tfidf = idf.transform(tf)
     return self.rdd, idf, tfidf
Developer: xiaoyubai | Project: wiki-search | Lines: 10 | Source: model.py


Example 11: generate_tf_idf

def generate_tf_idf(twProfilesRdd,numFe):
    """
    Generate TF IDF tuple (gender,sparse vector) from rdd containing following tuples:
    (gender,(clean words tuple))
    """
    gtlp=generate_gender_tf(twProfilesRdd, numFe)
    idf=IDF()
    tfVectorsRDD=gtlp.map(lambda tp: tp[1])
    idfModel=idf.fit(tfVectorsRDD)
    idfRdd=idfModel.transform(tfVectorsRDD)
    return (idfRdd.zip(gtlp).map(lambda tp:(tp[1][0],tp[0])),idfModel)
Developer: Fiware | Project: incubated.Social-data-aggregator | Lines: 11 | Source: gra_usr_descr.py


Example 12: tf_idf_cal

def tf_idf_cal(words_rdd):
	hashingTF = HashingTF()
	tf = hashingTF.transform(words_rdd)

	idf = IDF().fit(tf)
	
	tfidf = idf.transform(tf).cache()

	tfidf_str = tfidf.map(lambda line: str(line)).cache()

	return tfidf_str
Developer: gitofsid | Project: MyBigDataCode | Lines: 11 | Source: tf_idf_amazon.py


Example 13: mySpark

def mySpark(minFreq, keyWord):

    # text cleaning function
    def removePunctuation(text):
        res=text.lower().strip()
        res=re.sub("[^0-9a-zA-Z ]", "", res)
        return res.split(" ")

    # Function for printing each element in RDD
    def println(x):
        for i in x:
            print i

    # Boilerplate Spark stuff:
    conf = SparkConf().setMaster("local").setAppName("SparkTFIDF")
    sc = SparkContext(conf = conf)

    # Load documents content (one per line) + cleaning.
    rawData = sc.textFile("list_berita-30.tsv")
    fields = rawData.map(lambda x: x.split("\t"))
    documents = fields.map(lambda x: removePunctuation(x[3]))

    # Get documents content without word mapping
    documentNames = fields.map(lambda x: x[3])

    # TF processing
    hashingTF = HashingTF(100000)  #100K hash buckets just to save some memory
    tf = hashingTF.transform(documents)

    # IDF & TF-IDF processing
    tf.cache()
    idf = IDF(minDocFreq=int(minFreq)).fit(tf)
    tfidf = idf.transform(tf)

    # Get keyword relevance with content and zip it
    keywordTF = hashingTF.transform(removePunctuation(keyWord))
    keywordHashValue = int(keywordTF.indices[0])
    keywordRelevance = tfidf.map(lambda x: x[keywordHashValue])
    zippedResults = keywordRelevance.zip(documentNames)

    # print result
    print "Best document for keywords is:"
    print zippedResults.max()
Developer: arsoedjono | Project: big-data | Lines: 43 | Source: tfidf_mod.py


Example 14: run_tf_idf_spark_mllib

def run_tf_idf_spark_mllib(df, numFeatures=1 << 20):
    tokenizer = Tokenizer(inputCol="body", outputCol="words")
    wordsData = tokenizer.transform(df)

    words = wordsData.select("words").rdd.map(lambda x: x.words)

    hashingTF = MllibHashingTF(numFeatures)
    tf = hashingTF.transform(words)
    tf.cache()

    idf = MllibIDF().fit(tf)
    tfidf = idf.transform(tf)

    # @TODO make this nicer
    tmp = sqlContext.createDataFrame(wordsData.rdd.zip(tfidf), ["data", "features"])
    tmp.registerTempTable("tmp")
    old_columns = ', '.join(map(lambda x: 'data.%s' % x, wordsData.columns))
    with_features = sqlContext.sql("SELECT %s, features FROM tmp" % old_columns)
    tmp = sqlContext.createDataFrame(with_features.rdd.zip(tf), ["data", "rawFeatures"])
    tmp.registerTempTable("tmp")
    old_columns = ', '.join(map(lambda x: 'data.%s' % x, with_features.columns))
    return sqlContext.sql("SELECT %s, rawFeatures FROM tmp" % old_columns)
Developer: ctavan | Project: bbuzz2016 | Lines: 22 | Source: bbuzz2016-backup.py


Example 15: extract_features

    def extract_features(self, feat='tfidf', **kwargs):
        """
        Converts each subtitle into its TF/TFIDF representation.
        Normalizes if necessary.

        Parameters
        --------
        Feat: 'tf' or 'tfidf'.
        kwargs: num_features, minDocFreq, or other arguments to be passed
        to the MLLib objects.

        Returns
        --------
        RDD of features with key.
        """

        # transform BOW into TF vectors
        num_features = kwargs.get('num_features', 10000)
        htf = HashingTF(num_features)
        feat_rdd = self.RDD.mapValues(htf.transform).cache()

        # transform TF vectors into IDF vectors
        if feat == 'tfidf':
            keys, tf_vecs = feat_rdd.keys(), feat_rdd.values()
            minDocFreq = kwargs.get('minDocFreq', 2)
            idf = IDF(minDocFreq=minDocFreq)
            idf_model = idf.fit(tf_vecs)
            idf_rdd = idf_model.transform(tf_vecs.map(lambda vec: vec.toArray()))
            feat_rdd = keys.zip(idf_rdd)

        if self.model_type == 'log_reg':
            normalizer = StandardScaler(withMean=True, withStd=True)
            keys, vecs = feat_rdd.keys(), feat_rdd.values()
            norm_model = normalizer.fit(vecs)
            norm_rdd = norm_model.transform(vecs.map(lambda vec: vec.toArray()))
            feat_rdd = keys.zip(norm_rdd)

        return feat_rdd
Developer: Nathx | Project: parental_advisory_ml | Lines: 38 | Source: spark_model.py


Example 16: use_naive_nayes

def use_naive_nayes():
    """
    Running the Naive Bayes from Spark's Mlib library
    """
    from pyspark.mllib.classification import NaiveBayes
    from pyspark.mllib.feature import HashingTF, IDF
    from pyspark.mllib.linalg import SparseVector, Vectors
    from pyspark.mllib.regression import LabeledPoint
    #loading the files
    path = "/Users/abhisheksingh29895/Desktop/courses/CURRENT/Advance_Machine_Learning/HW2/aclImdb/"
    train_pos = sc.textFile(path + "train/pos/*txt").map(lambda line: line.encode('utf8')).map(lambda line: line.split())
    train_neg = sc.textFile(path + "train/neg/*txt").map(lambda line: line.encode('utf8')).map(lambda line: line.split())
    test_pos = sc.textFile(path + "test/pos/*txt").map(lambda line: line.encode('utf8')).map(lambda line: line.split())
    test_neg = sc.textFile(path + "test/neg/*txt").map(lambda line: line.encode('utf8')).map(lambda line: line.split())
    #TF-IDF
    tr_pos = HashingTF().transform(train_pos)  ;  tr_pos_idf = IDF().fit(tr_pos)
    tr_neg = HashingTF().transform(train_neg)  ;  tr_neg_idf = IDF().fit(tr_neg)
    te_pos = HashingTF().transform(test_pos)  ;  te_pos_idf = IDF().fit(te_pos)
    te_neg = HashingTF().transform(test_neg)  ;  te_neg_idf = IDF().fit(te_neg)
    #IDF step
    tr_pos_tfidf = tr_pos_idf.transform(tr_pos)  ;  tr_neg_tfidf = tr_neg_idf.transform(tr_neg)
    te_pos_tfidf = te_pos_idf.transform(te_pos)  ;  te_neg_tfidf = te_neg_idf.transform(te_neg)
    #Creating labels
    pos_label = [1] * 12500  ;  pos_label = sc.parallelize(pos_label)
    neg_label = [0] * 12500  ;  neg_label = sc.parallelize(neg_label)  # negative class labelled 0
    # Combine using zip
    train_pos_file = pos_label.zip(tr_pos_tfidf).map(lambda x: LabeledPoint(x[0], x[1]))
    train_neg_file = neg_label.zip(tr_neg_tfidf).map(lambda x: LabeledPoint(x[0], x[1]))
    test_pos_file = pos_label.zip(te_pos_tfidf).map(lambda x: LabeledPoint(x[0], x[1]))
    test_neg_file = neg_label.zip(te_neg_tfidf).map(lambda x: LabeledPoint(x[0], x[1]))
    #Joining 2 RDDS to form the final training set
    train_file = train_pos_file.union(train_neg_file)
    test_file = test_pos_file.union(test_neg_file)
    # Fitting a Naive bayes model
    model = NaiveBayes.train(train_file)
    # Make prediction and test accuracy
    predictionAndLabel = test_file.map(lambda p: (model.predict(p[1]), p[0]))
    accuracy = 1.0 * predictionAndLabel.filter(lambda (x, v): x == v).count() / test_file.count()
    print ""
    print "Test accuracy is {}".format(round(accuracy,4))
Developer: Abhishek19895 | Project: Document_Classification | Lines: 40 | Source: hw2.py


Example 17: SparkContext

sc = SparkContext()

rdd = sc.wholeTextFiles("/usr/local/Cellar/BigDataAdvanced/Assignment1/TwitterStuff/TweetData").map(lambda (name,text):text.split())
tf = HashingTF()
tfVectors = tf.transform(rdd).cache()
a = tfVectors.collect()
count = 0 
for vec in a:
        print vec
        count = count + 1
        with open("TF_Tweet"+str(count)+".txt","w") as f:
                f.write(str(vec))
        f.close()

idf = IDF()
idfModel = idf.fit(tfVectors)
tfIdfVectors = idfModel.transform(tfVectors)
file = open("TF-IDF_tweet.txt", 'w')
file.write(str(tfIdfVectors.collect()))

#count = 0
#output=tfIdfVectors.collect()
#for vec in output:
#	print vec
#	count = count + 1
#	with open("TF_Wiki"+str(count)+".txt","w") as f:
#		f.write(str(vec))
#	f.close()
	
	
Developer: PalashMatey | Project: AdvBigData | Lines: 28 | Source: TF_IDF.py


Example 18: HashingTF

dim=pow(2,18)

hashingTF = HashingTF(dim)

tf=hashingTF.transform(tokens)

tf.cache()

v=tf.first()

print(v.size)
print(v.values)
print(v.indices)

idf = IDF().fit(tf)

tfidf=idf.transform(tf)

v2=tfidf.first()

print(v2.size)
print(v2.values)
print(v2.indices)

minMaxVals = tfidf.map(lambda v: (min(v.values),max(v.values)))
globalMin=minMaxVals.reduce(min)
globalMax=minMaxVals.reduce(max)
globalMinMax=(globalMin[0],globalMax[1])

###Using a TF-IDF model
Developer: stanworld | Project: ApacheSpark | Lines: 30 | Source: TextProcess.py


Example 19: islice

    # remove top 3 lines from document
    doc_wo_counters = documents.mapPartitionsWithIndex(lambda i, iter: islice(iter, 3, None) if i == 0 else iter)

    final_doc = doc_wo_counters.map(lambda x: (int(x[0]), doc_to_words(int(x[1]), int(x[2])).encode("utf8"))).reduceByKey(lambda x, y: x + " " + y)

    vect_rep = final_doc.map(lambda x: x[1])

    raw_document = sc.textFile("test.txt")
    vect_rep = raw_document.map(lambda line: line.encode("utf8").split(" "))

    
    # TfIDF
    hashingTF = HashingTF()
    tf = hashingTF.transform(vect_rep)
    tf.cache()
    idf = IDF().fit(tf)
    tfidf_vectors = idf.transform(tf)
    
    #Build the model (cluster the data)
    clusters = KMeans.train(tfidf_vectors, 10, maxIterations=100)
    
    # Evaluate clustering by computing Within Set Sum of Squared Errors
    def error(point):
        center = clusters.centers[clusters.predict(point)]
        return sqrt(sum([x**2 for x in (point.toArray() - center)]))

    WSSSE = tfidf_vectors.map(lambda point: error(point)).reduce(lambda x, y: x + y)
    print("Within Set Sum of Squared Error = " + str(WSSSE))

    # Save and load model
    clusters.save(sc, "myModelPath")
Developer: nitinsaroha | Project: kmeans-clustering | Lines: 31 | Source: kMeans-mllib.py


Example 20: tfIdf_cluster

def tfIdf_cluster(self, content, title, date, tfidf):
    tfidf_list = content
    inputRDD = sc.parallelize(tfidf_list)
    hasingTF = HashingTF(2 ** 20)
    trainTf = hasingTF.transform(inputRDD)
    idf = IDF().fit(trainTf)
    trainTfidf = idf.transform(trainTf)
    km = KMeans.train(trainTfidf, 2, maxIterations=100, runs=10)  # train a new model

    result = km.predict(trainTfidf)
    k_data = array(result.collect())

    grp1_news = []
    grp2_news = []

    # Store the crawled news items as [{}, {}] key/value dicts so the front end can use them easily
    for idx, grp in enumerate(k_data):

        if grp == 0:
            news = {
                'title': title[idx],
                'date': date[idx],
                'content': ''.join(content[idx].split()),
                'tfidf': tfidf[idx],
            }
            grp1_news.append(news)

        if grp == 1:
            news = {
                'title': title[idx],
                'date': date[idx],
                'content': ''.join(content[idx].split()),
                'tfidf': tfidf[idx],
            }
            grp2_news.append(news)

    # Begin counting TF-IDF terms for each news cluster ------------------------------------
    tfidf_word_grp1 = []  # holds TF-IDF terms with their counts
    all_tfidf_grp1 = []   # holds every TF-IDF term
    for post in grp1_news:
        tfidf = post['tfidf']
        for i in tfidf:
            all_tfidf_grp1.append(i)
    tfidf_dic1 = {}
    for ele in all_tfidf_grp1:
        if not ele in tfidf_dic1:
            tfidf_dic1[ele] = 1
        else:
            tfidf_dic1[ele] = tfidf_dic1[ele] + 1
    for i in range(0, len(tfidf_dic1)):
        data = {
            "text": tfidf_dic1.keys()[i],
            "size": (tfidf_dic1.values()[i]) * 1.5,
        }
        tfidf_word_grp1.append(data)

    tfidf_word_grp1.sort(key=lambda d: d['size'], reverse=True)  # sort terms by count, descending

    tfidf_word_grp1 = tfidf_word_grp1[0:50]
    tfidf_word_grp1 = json.dumps(tfidf_word_grp1)

    # ---------------------------------------------------------------------------------------------
    tfidf_word_grp2 = []  # holds TF-IDF terms with their counts
    all_tfidf_grp2 = []   # holds every TF-IDF term
    for post in grp2_news:
        tfidf = post['tfidf']
        for i in tfidf:
            all_tfidf_grp2.append(i)
    tfidf_dic2 = {}
    for ele in all_tfidf_grp2:
        if not ele in tfidf_dic2:
            tfidf_dic2[ele] = 1
        else:
            tfidf_dic2[ele] = tfidf_dic2[ele] + 1
    for i in range(0, len(tfidf_dic2)):
        data = {
            "text": tfidf_dic2.keys()[i],
            "size": (tfidf_dic2.values()[i]) * 1.5,
        }
        tfidf_word_grp2.append(data)
    tfidf_word_grp2.sort(key=lambda d: d['size'], reverse=True)  # sort terms by count, descending
    tfidf_word_grp2 = tfidf_word_grp2[0:50]
    tfidf_word_grp2 = json.dumps(tfidf_word_grp2)

    # End of TF-IDF term counting per news cluster ------------------------------------

    return grp1_news, grp2_news, tfidf_word_grp1, tfidf_word_grp2
Developer: gooa1c2e3 | Project: NAAG | Lines: 89 | Source: models.py



Note: The pyspark.mllib.feature.IDF class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.

