This article collects and summarizes typical usage examples of the nltk.wordpunct_tokenize function in Python. If you have been wondering what exactly Python's wordpunct_tokenize does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help.
The following presents 20 code examples of the wordpunct_tokenize function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
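As a quick reminder of what the function itself does: wordpunct_tokenize splits text with the regular expression \w+|[^\w\s]+, so runs of alphanumeric characters and runs of punctuation become separate tokens. A minimal sketch (the sample sentence is ours, not taken from any of the projects below):

import nltk

print(nltk.wordpunct_tokenize("Good muffins cost $3.88 in New York."))
# ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.']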
Example 1: do_it
def do_it(self, sources):
    for source in sources:
        words = nltk.wordpunct_tokenize(source.headline)
        words.extend(nltk.wordpunct_tokenize(source.summary))
        lowerwords = [x.lower() for x in words if len(x) > 1]
        self.ct += 1
        print self.ct, "TITLE", source.headline
        self.corpus.append(lowerwords)
        self.titles.append(source.headline)
        self.links.append(source.url)
    [[self.key_word_list.add(x) for x in self.top_keywords(self.nkeywords, doc, self.corpus)] for doc in self.corpus]
    self.ct = -1
    for doc in self.corpus:
        self.ct += 1
        print self.ct, "KEYWORDS", " ".join(self.top_keywords(self.nkeywords, doc, self.corpus))
    for document in self.corpus:
        vec = []
        [vec.append(self.tfidf(word, document, self.corpus) if word in document else 0) for word in self.key_word_list]
        self.feature_vectors.append(vec)
    self.n = len(self.corpus)
    mat = numpy.empty((self.n, self.n))
    for i in xrange(0, self.n):
        for j in xrange(0, self.n):
            mat[i][j] = nltk.cluster.util.cosine_distance(self.feature_vectors[i], self.feature_vectors[j])
    Z = linkage(mat, 'single')
    dendrogram(Z, color_threshold=self.t)
    clusters = self.extract_clusters(Z, self.t, self.n)
    stories = []
    for key in clusters:
        print "============================================="
        story = Story()
        for id in clusters[key]:
            story.add_source(sources[id])
            print id, self.titles[id], sources[id].url
        stories.append(story)
    return stories
Author: gitzain, Project: project-x, Lines of code: 60, Source: run2.py
Example 2: getDomainUnigram
def getDomainUnigram(self, directory=None):
    collocations = set()  # collocation items
    ewordlists = list()   # list of lists of words
    # extract words from essays
    if directory is not None:
        doclist = os.listdir(directory)
        for essay in doclist:
            dir_essay = directory + '/' + essay
            etext = open(dir_essay, 'r').read()
            tokens = nltk.wordpunct_tokenize(etext)
            tokens = [word.lower() for word in tokens]
            # stemming
            if self._stemoption == True:
                st = PorterStemmer()
                tokens = [st.stem(t) for t in tokens]
            # extract the collocations for the given essay
            e_bigram = set(Mytext(tokens).collocations())
            collocations = collocations | e_bigram
            ewordlists.append(tokens)
    else:  # use the mapped essays to calculate the candidate bigrams
        # need to call the mapessay function first
        for ins in self._data:
            if ins['essay'] is not None:
                etext = open(ins['essay'], 'r').read()
                tokens = nltk.wordpunct_tokenize(etext)
                tokens = [word.lower() for word in tokens]
                # stemming
                if self._stemoption == True:
                    st = PorterStemmer()
                    tokens = [st.stem(t) for t in tokens]
                # extract the collocations for the given essay
                e_bigram = set(Mytext(tokens).collocations())
                collocations = collocations | e_bigram
                ewordlists.append(tokens)

    # get the collection of all essays under the specified directory / associated essays
    collection_text = TextCollection(ewordlists)

    itemlist = list()
    for (a, b) in collocations:
        itemlist.append(a)
        itemlist.append(b)
    itemlist = list(set(itemlist))

    word_idf = []
    for i in range(len(itemlist)):
        word_idf.append((collection_text.idf(itemlist[i]), itemlist[i]))
    word_idf = sorted(word_idf, key=operator.itemgetter(0))

    ave = 0
    if len(word_idf) != 0:
        ave = sum(map(operator.itemgetter(0), word_idf)) / len(word_idf)

    wlist = [j for (i, j) in word_idf if i < ave]
    return wlist
Author: wencanluo, Project: Summarization, Lines of code: 60, Source: OrigReader.py
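The tokenize, lowercase and (optionally) stem pipeline used above is easy to reproduce outside the class. A minimal standalone sketch, with a hypothetical file path:

import nltk
from nltk.stem.porter import PorterStemmer

etext = open('essays/sample_essay.txt', 'r').read()  # hypothetical path
st = PorterStemmer()
tokens = [st.stem(w) for w in nltk.wordpunct_tokenize(etext.lower())]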
Example 3: get_utterances
def get_utterances(utterances, line, category, wgram, cgram):
    tknzr = TweetTokenizer()
    gram_list = []
    # WORD GRAMS
    if wgram == 1:    # unigram
        wgram_list = tknzr.tokenize(line)
    elif wgram == 2:  # uni + bigram
        # unigram list
        tokens = nltk.wordpunct_tokenize(line)
        # bigram list
        finder = BigramCollocationFinder.from_words(tokens)
        scored = finder.score_ngrams(bigram_measures.raw_freq)
        bigram_list = sorted(bigram for bigram, score in scored)
        # res
        wgram_list = tknzr.tokenize(line) + bigram_list
    elif wgram == 3:  # uni + bi + trigram
        # unigram list
        tokens = nltk.wordpunct_tokenize(line)
        # bigram list
        bi_finder = BigramCollocationFinder.from_words(tokens)
        bi_scored = bi_finder.score_ngrams(bigram_measures.raw_freq)
        bigram_list = sorted(bigram for bigram, biscore in bi_scored)
        # trigram list
        tri_finder = TrigramCollocationFinder.from_words(tokens)
        tri_scored = tri_finder.score_ngrams(trigram_measures.raw_freq)
        trigram_list = sorted(trigram for trigram, triscore in tri_scored)
        # res
        wgram_list = tknzr.tokenize(line) + bigram_list + trigram_list
    # CHAR GRAMS
    cgram_list = []
    if cgram == 1:    # uni-chargram
        cgram_list = [line[i:i+1] for i in range(len(line)-1)]
    elif cgram == 2:  # bi-chargram
        cgram_list = [line[i:i+2] for i in range(len(line)-1)]
    elif cgram == 3:  # tri-chargram
        cgram_list = [line[i:i+3] for i in range(len(line)-1)]
    # RESULT
    if category == 'QA':          # non-task
        utterances.append((wgram_list + cgram_list, 0))
    elif category == 'Shopping':  # task
        utterances.append((wgram_list + cgram_list, 1))
    elif category == 'Travel':    # task
        utterances.append((wgram_list + cgram_list, 2))
    elif category == 'Hotel':     # task
        utterances.append((wgram_list + cgram_list, 3))
    elif category == 'Food':      # task
        utterances.append((wgram_list + cgram_list, 4))
    elif category == 'Art':       # task
        utterances.append((wgram_list + cgram_list, 5))
    elif category == 'Weather':   # task
        utterances.append((wgram_list + cgram_list, 6))
    elif category == 'Friends':   # task
        utterances.append((wgram_list + cgram_list, 7))
    elif category == 'Chat':      # chat
        utterances.append((wgram_list + cgram_list, 8))
    else:
        print category, "ERROR"
Author: SharleneL, Project: SpellErrorDetection, Lines of code: 59, Source: sklearn_lr_detect.py
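The excerpt relies on module-level objects (bigram_measures, trigram_measures, the collocation finders and TweetTokenizer) that are not shown. A hedged sketch of what that setup and one call might look like; the sample utterance is made up:

import nltk
from nltk.tokenize import TweetTokenizer
from nltk.collocations import BigramCollocationFinder, TrigramCollocationFinder

# Module-level names the excerpt appears to assume.
bigram_measures = nltk.collocations.BigramAssocMeasures()
trigram_measures = nltk.collocations.TrigramAssocMeasures()

utterances = []
get_utterances(utterances, "can you find me a cheap hotel in rome", 'Hotel', wgram=2, cgram=2)
# utterances now holds one (feature_list, 3) tuple, since 'Hotel' maps to label 3.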
Example 4: getArticleKeywords
def getArticleKeywords(articles, maxLength=3):
    """ Parse titles of a number of articles and extract keywords that occur
    in them. A keyword is defined as a grouping of several words, with punctuation
    and stopwords (*nltk.corpus.stopwords.words('english')*) removed. Will
    also add keywords from every input Article into the corresponding entry
    in articles list.

    Arguments
    ----------
    articles - a list of Articles.
    maxLength - int, the largest number of tokens per keyword.

    Returns
    ----------
    2-tuple with numpy.ndarrays of shape (len(articles),) with
        * strings of keywords
        * ints with the number of occurrences of the given keyword in all titles

    Example
    ----------
    "A general theory of the plasma of an arc" would return keywords:
    ['A', 'general', 'theory', 'of', 'the', 'plasma', 'of', 'an', 'arc',
     'A general', 'general theory', 'theory of', 'of the', 'the plasma',
     'plasma of', 'of an', 'an arc', 'A general theory', 'general theory of',
     'theory of the', 'of the plasma', 'the plasma of', 'plasma of an', 'of an arc']
    Out of these, ['A','of','the','an','of the','of an'] would be filtered out.
    """
    # Identify keywords.
    tokens = []
    for title in [art.Title for art in articles]:
        tokens.extend(nltk.wordpunct_tokenize(title))

    # Filter out meaningless words and punctuation.
    tokens = filter(lambda s: not s.lower() in nltk.corpus.stopwords.words('english') and
                    not s in string.punctuation, tokens)

    # Find keywords (length 1, 2, or 3) and how often they occur in all the titles.
    keywords, frequencies = findNGrams(tokens, lengths=range(1, maxLength+1))
    keywords = numpy.array(keywords)
    frequencies = numpy.array(frequencies)
    sortedIndices = frequencies.argsort()[::-1]  # Go in descending order of frequencies.
    frequencies = frequencies[sortedIndices]
    keywords = keywords[sortedIndices]

    # Assign keywords to Articles.
    for i in range(len(articles)):
        artTitleTokens = nltk.wordpunct_tokenize(articles[i].Title)  # The tokens of this article's title.
        # Filter out meaningless words and punctuation.
        artTitleTokens = filter(lambda s: not s.lower() in nltk.corpus.stopwords.words('english') and
                                not s in string.punctuation, artTitleTokens)
        # Use the same algorithm but for this article only.
        artKeywords, artFreq = findNGrams(artTitleTokens, lengths=[1, 2, 3])
        articles[i].Keywords = artKeywords

    return keywords, frequencies
Author: AleksanderLidtke, Project: AnalyseScinetificArticles, Lines of code: 57, Source: DownloadArticles.py
Example 5: product_features
def product_features(product):
    name = nltk.FreqDist(normalize_words(nltk.wordpunct_tokenize(product['name'])))
    desc = nltk.FreqDist(normalize_words(nltk.wordpunct_tokenize(product['description'])))
    feats = {}
    for word in name.keys():
        feats['name(%s)' % word] = True
    for word in desc.keys():
        feats['description(%s)' % word] = True
    return feats
Author: DistrictDataLabs, Project: intro-to-nltk, Lines of code: 10, Source: products.py
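normalize_words is a project helper that is not part of the excerpt; with a simple stand-in for it, usage could look like the following sketch:

def normalize_words(words):
    # Stand-in for the project's helper: keep alphabetic tokens, lowercased.
    return [w.lower() for w in words if w.isalpha()]

product = {'name': 'Acme Coffee Grinder', 'description': 'Grinds whole beans quickly.'}
features = product_features(product)
# e.g. features['name(acme)'] is True, features['description(grinds)'] is True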
Example 6: do_it
def do_it(self):
    for feed in self.feeds:
        d = feedparser.parse(feed)
        for e in d['entries']:
            words = nltk.wordpunct_tokenize(self.clean_html(e['description']))
            words.extend(nltk.wordpunct_tokenize(e['title']))
            lowerwords = [x.lower() for x in words if len(x) > 1]
            self.ct += 1
            print self.ct, "TITLE", e['title']
            self.corpus.append(lowerwords)
            self.titles.append(e['title'])
            self.links.append(e['link'])
    [[self.key_word_list.add(x) for x in self.top_keywords(self.nkeywords, doc, self.corpus)] for doc in self.corpus]
    self.ct = -1
    for doc in self.corpus:
        self.ct += 1
        print self.ct, "KEYWORDS", " ".join(self.top_keywords(self.nkeywords, doc, self.corpus))
    for document in self.corpus:
        vec = []
        [vec.append(self.tfidf(word, document, self.corpus) if word in document else 0) for word in self.key_word_list]
        self.feature_vectors.append(vec)
    self.n = len(self.corpus)
    mat = numpy.empty((self.n, self.n))
    for i in xrange(0, self.n):
        for j in xrange(0, self.n):
            mat[i][j] = nltk.cluster.util.cosine_distance(self.feature_vectors[i], self.feature_vectors[j])
    Z = linkage(mat, 'single')
    dendrogram(Z, color_threshold=self.t)
    clusters = self.extract_clusters(Z, self.t, self.n)
    for key in clusters:
        print "============================================="
        for id in clusters[key]:
            print id, self.titles[id]
Author: gitzain, Project: project-x, Lines of code: 54, Source: cluster.py
Example 7: jaccard
def jaccard(sen_1, sen_2):
    tagged_sent = POSWrapper.pos_tag(nltk.wordpunct_tokenize(sen_1))
    words = [word for word, pos in tagged_sent if (pos == 'NN' or pos == 'NNS' or pos == 'JJ' or pos == '' or pos == 'VB' or pos == 'VBN' or pos == 'VBD' or pos == 'RB')]
    sen_set_1 = set(words)

    tagged_sent = POSWrapper.pos_tag(nltk.wordpunct_tokenize(sen_2))
    words = [word for word, pos in tagged_sent if (pos == 'NN' or pos == 'NNS' or pos == 'JJ' or pos == '' or pos == 'VB' or pos == 'VBN' or pos == 'VBD' or pos == 'RB')]
    sen_set_2 = set(words)

    jaccard_value = jaccard_distance(sen_set_1, sen_set_2)
    return jaccard_value
Author: anhtukhtn, Project: Similarity, Lines of code: 13, Source: Literal.py
Example 8: main
def main():
    stem = nltk.stem.LancasterStemmer()
    cleanword = lambda w: stem.stem(w.strip(w).lower())

    bib = btparse.load(sys.argv[1])

    aid = np.random.randint(len(bib))
    while ('abstract' in bib[aid].keys()) == False:
        aid = np.random.randint(len(bib))

    abstract = nltk.wordpunct_tokenize(bib[aid]['abstract'] + " " + bib[aid]['title'])
    q_vec0 = sorted([x[0] for x in nltk.pos_tag(abstract) if x[1] in ("NN")])
    q_vec = []
    q_val = []
    for w in q_vec0:
        w = cleanword(w)
        if len(w) > 2 and w not in ignore_list and re.search('\\\\', w) == None:
            if (w in q_vec) == False:
                q_vec.append(w)
                q_val.append(1)
            else:
                q_val[-1] += 1
    q_val = np.array(q_val)/np.sqrt(np.dot(q_val, q_val))

    prob = np.zeros(len(bib))
    if pytools:
        progress = pytools.ProgressBar("Analysing", len(bib))
        progress.draw()
    for ind, entry in enumerate(bib):
        if ind != aid and ('abstract' in bib[ind].keys()):
            abstract = nltk.wordpunct_tokenize(bib[ind]['abstract'] + " " + bib[ind]['title'])
            r_vec = sorted([x[0] for x in nltk.pos_tag(abstract) if x[1] in ("NN")])
            r_val = np.zeros(len(q_val))
            for w in r_vec:
                w = cleanword(w)
                if w in q_vec:
                    r_val[q_vec.index(w)] += 1
            mod = np.dot(r_val, r_val)
            if mod > 0:
                prob[ind] = np.dot(r_val/np.sqrt(mod), q_val)
        if pytools: progress.progress()
    if pytools: print ""

    # sort based on probability (best first)
    inds_sort = np.argsort(prob)[::-1]

    print 'similar papers to:\n\t%s\n\t\tby: %s\n' % (bib[aid]['title'], bib[aid]['author'])
    for i in range(10):
        best = inds_sort[i]
        print '%3d.\t%s\n\t\tby: %s\n\t\tid = %3d, prob = %f\n' % (i+1, bib[best]['title'], bib[best]['author'], best, prob[best])
Author: dfm, Project: pyarxiv, Lines of code: 50, Source: compareabstract-nltk.py
Example 9: feedTech
def feedTech(request):
    corpus = []
    titles = []
    ct = -1
    for feed in feeds:
        d = feedparser.parse(feed)
        for e in d['entries']:
            words = nltk.wordpunct_tokenize((e['description']))
            words.extend(nltk.wordpunct_tokenize(e['title']))
            lowerwords = [x.lower() for x in words if len(x) > 1]
            ct += 1
            print(ct, "TITLE", e['title'])
            corpus.append(lowerwords)
            titles.append(e['title'])
    return render(request, 'dash/feeds.html')
Author: satyam07, Project: BlueDash, Lines of code: 15, Source: views.py
Example 10: tag_files_for_cross_validation
def tag_files_for_cross_validation(file_list, tmp_models):
    # first clean CV files folder
    if os.path.exists(CV_FILES_PATH_DEFAULT):
        shutil.rmtree(CV_FILES_PATH_DEFAULT)
    if os.path.exists(CV_FILES_PATH_PUNCT):
        shutil.rmtree(CV_FILES_PATH_PUNCT)
    if os.path.exists(CV_FILES_PATH_LOWER):
        shutil.rmtree(CV_FILES_PATH_LOWER)
    if os.path.exists(CV_FILES_PATH_LOWER_PUNCT):
        shutil.rmtree(CV_FILES_PATH_LOWER_PUNCT)

    # then create new CV folders
    os.makedirs(CV_FILES_PATH_DEFAULT)
    os.makedirs(CV_FILES_PATH_PUNCT)
    os.makedirs(CV_FILES_PATH_LOWER)
    os.makedirs(CV_FILES_PATH_LOWER_PUNCT)

    for file_name in file_list:
        path = ORIGINAL_STORIES + '/' + file_name + '.txt'
        if not os.path.isfile(path):
            print('File ' + path + ' does not exist!')
            continue

        content = get_content(path)
        content_lower = content.lower()

        tokenized_content = nltk.wordpunct_tokenize(content)
        tokenized_content_punct = nltk.word_tokenize(content)
        tokenized_content_lower = nltk.wordpunct_tokenize(content_lower)
        tokenized_content_lower_punct = nltk.word_tokenize(content_lower)

        tagged_content = tag_tokens_with_model(tokenized_content, tmp_models.default, lowercase=False, message=False)
        tagged_file_path = CV_FILES_PATH_DEFAULT + '/' + file_name + '.tsv'
        write_tagged_content_to_file(tagged_content, tagged_file_path, message=False)

        tagged_content = tag_tokens_with_model(tokenized_content_punct, tmp_models.punct, lowercase=False,
                                               message=False)
        tagged_file_path = CV_FILES_PATH_PUNCT + '/' + file_name + '.tsv'
        write_tagged_content_to_file(tagged_content, tagged_file_path, message=False)

        tagged_content = tag_tokens_with_model(tokenized_content_lower, tmp_models.lower, lowercase=True, message=False)
        tagged_file_path = CV_FILES_PATH_LOWER + '/' + file_name + '.tsv'
        write_tagged_content_to_file(tagged_content, tagged_file_path, message=False)

        tagged_content = tag_tokens_with_model(tokenized_content_lower_punct, tmp_models.lower_punct, lowercase=True,
                                               message=False)
        tagged_file_path = CV_FILES_PATH_LOWER_PUNCT + '/' + file_name + '.tsv'
        write_tagged_content_to_file(tagged_content, tagged_file_path, message=False)
Author: thorina, Project: strojno-ucenje, Lines of code: 48, Source: models.py
Example 11: calculate_language_scores
def calculate_language_scores(text):
    """
    Calculate the probability of the given text being written in several languages and
    return a dictionary that looks like {'french': 2, 'spanish': 4, 'english': 0}.

    :param text: Text to analyze.
    :type text: str

    :return: Dictionary with languages and unique stopwords seen in the analyzed text.
    :rtype: dict(str -> int)

    :raises: TypeError
    """
    if not isinstance(text, basestring):
        raise TypeError("Expected basestring, got '%s' instead" % type(text))
    if not text:
        return {}

    languages_ratios = {}

    # Split the text into separate tokens, using natural language punctuation signs.
    tokens = wordpunct_tokenize(text)
    tokenized_words = [word.lower() for word in tokens]

    for language in stopwords.fileids():
        stopwords_set = set(stopwords.words(language))
        words_set = set(tokenized_words)
        common_elements = words_set.intersection(stopwords_set)
        languages_ratios[language] = len(common_elements)  # language "score"

    return languages_ratios
Author: Autoscan, Project: golismero, Lines of code: 31, Source: natural_language.py
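Because the function returns a plain dict of per-language stopword counts, the most likely language is simply the key with the highest score. A small usage sketch (the sample text is ours):

scores = calculate_language_scores("Ceci est un petit texte ecrit en francais.")
best_guess = max(scores, key=scores.get)
print(best_guess)  # expected to be 'french' for this sample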
Example 12: translateHinglishTweets
def translateHinglishTweets(tweets_text):
    counter = 0
    tweets_text_translated = []
    n = len(tweets_text)

    open_file = open("dictionary.pickle", "rb")
    dictionary = pickle.load(open_file)
    open_file.close()

    english_stopwords_set = set(stopwords.words('english'))

    for i in range(n):
        text = tweets_text[i]
        translated_text = ""
        tokens = wordpunct_tokenize(text)
        words = [word.lower() for word in tokens]
        for word in words:
            if word in english_stopwords_set:
                translated_text = translated_text + " " + word
            elif (word in dictionary):
                #print word + "-" + dictionary[word]
                translated_text = translated_text + " " + dictionary[word]
                counter = counter + 1
            else:
                translated_text = translated_text + " " + word
        tweets_text_translated.append(translated_text)

    #print counter
    return tweets_text_translated
Author: anant14014, Project: TwitterHinglishTranslation, Lines of code: 29, Source: analyzeTweets.py
Example 13: statScore
def statScore(text, d_index):
    tokens = nltk.wordpunct_tokenize(text)
    val = 0
    for token in tokens:
        w_index = vocabulary.index(token)
        val = val + self.stat_lte[w_index][d_index]
    return val
Author: saurabhmaurya06, Project: ADM, Lines of code: 7, Source: sentenceScoring.py
Example 14: tokenize
def tokenize(text):
    """This handles tokenizing and normalizing everything."""
    return [
        token.lower()
        for token in nltk.wordpunct_tokenize(text)
        if token.isalnum()
    ]
Author: erochest, Project: c18sgml, Lines of code: 7, Source: add_pos.py
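A quick check of what this normalizer produces (the sample string is ours):

print(tokenize("Hello, world! It's 2024."))
# ['hello', 'world', 'it', 's', '2024']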
Example 15: convert_to_weka
def convert_to_weka(src, des, voc):
    stemmer = nltk.LancasterStemmer()
    word_reg = re.compile('[0-9A-Za-z]+')
    des.write('@relation review_rate\n')
    des.write('\n')
    for word in voc:
        des.write('@attribute ' + word + ' real\n')
    des.write('@attribute rate {s1,s2,s3,s4,s5}\n')
    des.write('\n')
    des.write('@data\n')
    for line in iter(src.readline, ''):
        feature_vector = []
        try:
            rate, title, review = [item.strip() for item in line.split('\t')[5:8]]
        except (IndexError, ValueError):
            continue
        ws = set([])
        for w in nltk.wordpunct_tokenize(title + ' ' + review):
            m = word_reg.match(w)
            if m:
                ws.add(stemmer.stem(m.group(0).lower()))
        for w in voc:
            if w in ws:
                feature_vector.append('1')
            else:
                feature_vector.append('0')
        des.write(','.join(feature_vector) + ',' + 's' + str(int(math.ceil(float(rate)))) + '\n')
    return
Author: yaocheng-cs, Project: misc, Lines of code: 32, Source: converter.py
Example 16: findBestWorstDress
def findBestWorstDress(tweeters):
    possibleBestDress = []
    possibleWorstDress = []
    bestDressPat = re.compile(".*best dress.*", re.IGNORECASE)
    worstDressPat = re.compile(".*worst dress.*", re.IGNORECASE)
    pat = ""
    for twtr in tweeters:
        for twt in twtr.tweets:
            properNoun = []
            if bestDressPat.match(twt.text):
                pat = "best"
            elif worstDressPat.match(twt.text):
                pat = "worst"
            else:
                continue
            firstHalfOfTweet = re.search("(?i).*(?=%s)" % pat, twt.text)
            tokenizedText = nltk.wordpunct_tokenize(firstHalfOfTweet.group())
            if tokenizedText:
                properNoun = extractProperNouns(tokenizedText)
            for pn in properNoun:
                if len(pn.split()) == 2:
                    if pat == 'best':
                        possibleBestDress.append(pn)
                    else:
                        possibleWorstDress.append(pn)

    bestData = collections.Counter(possibleBestDress)
    worstData = collections.Counter(possibleWorstDress)

    print("\n\nList of Best Dressed:\n========================")
    for host in bestData.most_common()[0:5]:
        print(host[0])

    print("\n\nList of Worst Dressed:\n========================")
    for host in worstData.most_common()[0:5]:
        print(host[0])
Author: ChosunOne, Project: Tweet_Analysis, Lines of code: 35, Source: gg.py
Example 17: write_to_mod_html_file
def write_to_mod_html_file(sentences, locs, tex):
    global count
    g_dic = group_locs_by_sentences(locs)
    ll = []
    for l in g_dic.keys():
        ll.append(l)
    ll.sort(cmp=cmp_by_ind)
    for (x, y) in ll:
        l = g_dic[(x, y)]
        sen = sentences[x]
        slash_n_split = sen.splitlines()
        wds = reg_remove_special_chars.sub(r' ', slash_n_split[y])
        words = nltk.wordpunct_tokenize(wds)
        l.sort(cmp=cmp_by_ind)
        for (h, k) in l:
            words[h] = """<i style="color:red">""" + words[h]
            words[k] = words[k] + '</i>'
        words = ' '.join(words)
        slash_n_split[y] = words
        sentences[x] = '\n'.join(slash_n_split)
    t = '\n'.join(sentences)
    f = open('html/%d_mod.html' % count, "w")
    t = reg_replace_slashn.sub(r'<br/>', t)
    f.write(t)
    f.close()
    count += 1
Author: sainath-vellal, Project: birdinginfo, Lines of code: 28, Source: 4.py
Example 18: word_feats
def word_feats(words):
    feats = {}
    words = words.strip()
    hasbadw = 0
    hasyou = 0
    sentences = 0
    for sentense in re.split(r' *[\.\?!]["\)\]]* *', words):
        sentences += 1
        for word in nltk.wordpunct_tokenize(sentense):
            for curse in badwords:
                if word.lower().endswith(curse.lower()) or word.lower().startswith(curse.lower()):
                    hasbadw += 1
                    break
            if word.lower() in ("you", "u", "ur", "your", "urs", "urz", "yours"):
                hasyou += 1
    feats["you"] = hasyou
    feats["badw"] = hasbadw
    feats["length"] = len(words)
    feats["caps"] = len(re.findall('[A-Z]', words))
    feats["smalls"] = len(re.findall('[a-z]', words))
    feats["sentences"] = sentences
    feats["capsratio"] = float(feats["caps"])/len(words)
    featslist = []
    for k, v in feats.iteritems():
        featslist.append(v)
    return featslist
Author: hrishikeshio, Project: insult, Lines of code: 29, Source: rf.py
Example 19: _calculate_languages_ratios
def _calculate_languages_ratios(text):
    """
    Calculate the probability of the given text being written in several languages and
    return a dictionary that looks like {'french': 2, 'spanish': 4, 'english': 0}

    @param text: Text whose language is to be detected
    @type text: str

    @return: Dictionary with languages and unique stopwords seen in the analyzed text
    @rtype: dict
    """
    languages_ratios = {}

    # nltk.wordpunct_tokenize() splits all punctuation into separate tokens
    tokens = wordpunct_tokenize(text)
    words = [word.lower() for word in tokens]

    # For each language included in NLTK, count the unique stopwords appearing in the analyzed text
    for language in stopwords.fileids():
        stopwords_set = set(stopwords.words(language))
        words_set = set(words)
        common_elements = words_set.intersection(stopwords_set)
        languages_ratios[language] = len(common_elements)  # language "score"

    return languages_ratios
Author: annamarie-g, Project: capstone_project, Lines of code: 30, Source: clean_dataframe.py
Example 20: get_bigram_dict
def get_bigram_dict(filename):
    input_file = codecs.open(filename, 'r', encoding='utf8')
    content = input_file.read()
    dic = {}
    tokens = nltk.wordpunct_tokenize(content)
    finder = BigramCollocationFinder.from_words(tokens)
    return finder.ngram_fd
Author: jasoncao11, Project: myscripts, Lines of code: 7, Source: bigram_fre.py
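finder.ngram_fd is an NLTK FreqDist keyed by bigram tuples, so with NLTK 3's FreqDist API the most frequent pairs can be read off directly. A usage sketch with a hypothetical input file:

fd = get_bigram_dict('corpus.txt')  # hypothetical file
for bigram, count in fd.most_common(10):
    print(bigram, count)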
Note: The nltk.wordpunct_tokenize examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not reproduce without permission.