This article collects typical usage examples of the Python function nltk.corpus.sentiwordnet.senti_synsets. If you have been wondering how senti_synsets is actually used in practice, the curated code samples below should help.
A total of 20 code examples of the senti_synsets function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
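Before the examples, here is a minimal standalone sketch of calling senti_synsets directly. It is illustrative only and assumes NLTK is installed and the wordnet and sentiwordnet corpora have already been downloaded (e.g. via nltk.download('wordnet') and nltk.download('sentiwordnet')):

# Minimal usage sketch: query SentiWordNet for a word, optionally restricted to a part of speech.
from nltk.corpus import sentiwordnet as swn

synsets = list(swn.senti_synsets('happy', 'a'))  # 'a' = adjective; omit the POS argument to search all parts of speech
if synsets:
    first = synsets[0]
    print(first.synset.name(), first.pos_score(), first.neg_score(), first.obj_score())

Each SentiSynset exposes pos_score(), neg_score() and obj_score(); most of the examples below simply take the first synset returned and read those three scores.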
Example 1: analiseSentimento
def analiseSentimento(resposta):
    texto = resposta['corpo']
    frases = sentencesTokenizer.tokenize(texto)
    palavras = []
    for frase in frases:
        palavras.extend(wordsTokenizer.tokenize(frase))
    posTags = pos_tag(palavras)
    positivo = 0
    negativo = 0
    for palavra, tag in posTags:
        synsets = None
        if tag.startswith('J'):
            synsets = sentiwordnet.senti_synsets(palavra, wordnet.ADJ)
        elif tag.startswith('V'):
            synsets = sentiwordnet.senti_synsets(palavra, wordnet.VERB)
        elif tag.startswith('N'):
            synsets = sentiwordnet.senti_synsets(palavra, wordnet.NOUN)
        elif tag.startswith('R'):
            synsets = sentiwordnet.senti_synsets(palavra, wordnet.ADV)
        else:
            synsets = sentiwordnet.senti_synsets(palavra, '')
        if synsets != None:
            synsets = list(synsets)
            if len(synsets) > 0:
                synset = synsets[0]
                positivo = positivo + synset.pos_score()
                negativo = negativo + synset.neg_score()
    if positivo > negativo:
        return (resposta, 'positivo')
    elif negativo > positivo:
        return (resposta, 'negativo')
    else:
        return (resposta, 'neutro')
Developer: vbozelli, Project: Sentiment-Analysis, Lines: 33, Source: analise_sentimento_sentiwordnet_com_stopwords.py
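The tag-prefix dispatch in Example 1 (the same pattern reappears in Examples 4, 14 and 15) can be factored into a small helper. The sketch below is a hypothetical refactoring, not part of the original project; penn_to_wordnet is an invented name:

# Hypothetical helper: map a Penn Treebank tag to the matching WordNet POS constant.
# Returns None for tags with no corresponding WordNet category.
from nltk.corpus import wordnet

def penn_to_wordnet(tag):
    if tag.startswith('J'):
        return wordnet.ADJ
    if tag.startswith('V'):
        return wordnet.VERB
    if tag.startswith('N'):
        return wordnet.NOUN
    if tag.startswith('R'):
        return wordnet.ADV
    return None

With such a helper, the if/elif ladder collapses to a single senti_synsets(palavra, penn_to_wordnet(tag)) call. Note that passing None searches every part of speech, whereas Example 1 passes '' for unmapped tags, which simply yields no synsets.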
Example 2: getting_sentiment
def getting_sentiment(word,pos):
    flag = 0
    if 'NN' in pos:
        tag = 'n'
    elif 'JJ' in pos:
        tag = 'a'
        if pos == 'JJS':
            flag = 1
    elif 'VB' in pos:
        tag = 'v'
    elif 'RB' in pos:
        tag = 'r'
    else:
        tag = ''
    stemmer = WordNetLemmatizer()
    if tag != '':
        x = stemmer.lemmatize(word,tag)
    else:
        x = stemmer.lemmatize(word)
    try:
        score = float(score_dic[x]) #* float(m1)
    except KeyError:
        if len(swn.senti_synsets(x,tag)) > 0:
            score = swn.senti_synsets(x,tag)[0].pos_score() * 5
        else:
            score = 100
    if flag == 1 and score != -100 and score < 4:
        score = score + 1
    elif flag == 1 and score != -100 and score > -4 and score < 0:
        score = score - 1
    print word + '--->' + str(score)
    return score
Developer: farhan0581, Project: Recommendation-system, Lines: 34, Source: main_file.py
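Example 2 calls len() on the result of senti_synsets and indexes it with [0]. That worked with older NLTK releases, where senti_synsets returned a list, but recent NLTK versions return a lazy filter object, so both operations fail. Several later examples (8, 12, 13 and 18) rely on the same assumption. A minimal adaptation, reusing the x, tag and swn names from Example 2, is to materialize the result first:

# Compatibility sketch: wrap senti_synsets() in list() before calling len() or indexing.
synsets = list(swn.senti_synsets(x, tag))
if synsets:
    score = synsets[0].pos_score() * 5
else:
    score = 100

The examples that already do list(swn.senti_synsets(...)) (e.g. Examples 1, 5 and 7) work unchanged on current NLTK.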
Example 3: get_score
def get_score(adjective):
    if adjective not in d:
        scores = swn.senti_synsets(adjective)
        pos_scores = [i.pos_score() for i in scores]
        neg_scores = [i.neg_score() for i in scores]
        obj_scores = [i.obj_score() for i in scores]
        pos_score = maxi(pos_scores)
        neg_score = maxi(neg_scores)
        obj_score = maxi(obj_scores)
        if len(scores) == 0:
            d[adjective] = (-1,-1,-1,-1,-1,-1)
            return (-1,-1,-1,-1,-1,-1)
        scores_ad = swn.senti_synsets(adjective,pos='ar')
        pos_scores_ad = [i.pos_score() for i in scores_ad]
        neg_scores_ad = [i.neg_score() for i in scores_ad]
        obj_scores_ad = [i.obj_score() for i in scores_ad]
        pos_score_ad = maxi(pos_scores_ad)
        neg_score_ad = maxi(neg_scores_ad)
        obj_score_ad = maxi(obj_scores_ad)
        d[adjective] = (pos_score,neg_score,obj_score,pos_score_ad,neg_score_ad,obj_score_ad)
    else:
        (pos_score,neg_score,obj_score,pos_score_ad,neg_score_ad,obj_score_ad) = d[adjective]
    if pos_score == -1:
        return -100
    if pos_score_ad > neg_score_ad:
        return pos_score_ad
    elif pos_score_ad < neg_score_ad:
        return -neg_score_ad
    elif pos_score > neg_score:
        return pos_score
    elif pos_score < neg_score:
        return -neg_score
    else:
        return 0
Developer: priyanshu2501, Project: feature-specific-opinion-summarization, Lines: 35, Source: feature_reduction.py
Example 4: processoFeatures
def processoFeatures(resposta):
    frases = tokenizerFrases.tokenize(resposta["corpo"])
    palavras = []
    palavrasTexto = {}
    for frase in frases:
        palavrasTemp = tokenizerPalavras.tokenize(frase)
        for palavra in palavrasTemp:
            palavras.append(palavra)  # also collect the tokens, otherwise pos_tag below receives an empty list
            palavrasTexto[palavra] = True
    posTags = pos_tag(palavras)
    positivo = 0
    negativo = 0
    for palavra, tag in posTags:
        synsets = None
        if tag.startswith("J"):
            synsets = sentiwordnet.senti_synsets(palavra, wordnet.ADJ)
        elif tag.startswith("V"):
            synsets = sentiwordnet.senti_synsets(palavra, wordnet.VERB)
        elif tag.startswith("N"):
            synsets = sentiwordnet.senti_synsets(palavra, wordnet.NOUN)
        elif tag.startswith("R"):
            synsets = sentiwordnet.senti_synsets(palavra, wordnet.ADV)
        else:
            synsets = sentiwordnet.senti_synsets(palavra, "")
        if synsets != None:
            synsets = list(synsets)
            if len(synsets) > 0:
                synset = synsets[0]
                positivo = positivo + synset.pos_score()
                negativo = negativo + synset.neg_score()
    if positivo > negativo:
        return (palavrasTexto, "positivo")
    elif negativo > positivo:
        return (palavrasTexto, "negativo")
    else:
        return (palavrasTexto, "neutro")
Developer: vbozelli, Project: Sentiment-Analysis, Lines: 35, Source: criar_classificador_com_stopwords.py
Example 5: sentiwordnetSentimentWordsPresenceFeatures
def sentiwordnetSentimentWordsPresenceFeatures(wordsTagged):
    features = {}
    for word, tag in wordsTagged:
        wordnetTag = translateFromNltkToWordnetTag(tag)
        wordNegated = isWordNegated(word)
        word = stripNegation(word)
        if wordnetTag:
            synsets = list(sentiwordnet.senti_synsets(word, wordnetTag))
            if not synsets:
                synsets = list(sentiwordnet.senti_synsets(word))
        else:
            synsets = list(sentiwordnet.senti_synsets(word))
        if len(synsets) > 0:
            synset = synsets[0]
            if synset.pos_score() > 0.5:
                if wordNegated:
                    features["neg_word_presence"] = True
                else:
                    features["pos_word_presence"] = True
            if synset.neg_score() > 0.5:
                if wordNegated:
                    features["pos_word_presence"] = True
                else:
                    features["neg_word_presence"] = True
    return features
Developer: ekedziora, Project: sentiment, Lines: 25, Source: featureExtractors.py
Example 6: senti_analisys
def senti_analisys(tokens):
    #print tokens
    scorePosTot = 0
    scoreNegTot = 0
    scoreObjTot = 0
    scoreObjNorm = scoreNegNorm = scorePosNorm = 0
    count = 0
    for token,part in tokens:
        if part.startswith("JJ") or part.startswith("NN") or part.startswith("VB"):
            scorePos = 0
            scoreNeg = 0
            scoreObj = 0
            #print swn.senti_synsets(token)
            #if token == "wonderful":
            # print "i'm "+token,swn.senti_synsets(token)
            if swn.senti_synsets(token) != []:
                list_synset = list(swn.senti_synsets(token))
                dim_synset = list_synset.__len__()
                for i in list_synset:
                    scorePos += i.pos_score()
                    scoreNeg += i.neg_score()
                    scoreObj += i.obj_score()
                scorePos = scorePos / dim_synset
                scoreNeg = scoreNeg / dim_synset
                scoreObj = scoreObj / dim_synset
            #print "The token is: "+token + "\n\tscore pos: "+str(round(scorePos,2)) + "\n\tscore neg: "+str(round(scoreNeg,2))+\
            #"\n\tscore obj: " + str(round(scoreObj,2))
            scorePosTot += scorePos
            scoreNegTot += scoreNeg
            scoreObjTot += scoreObj
            count += 1
    if count != 0:
        scorePosNorm = scorePosTot / count
        scoreNegNorm = scoreNegTot / count
        scoreObjNorm = scoreObjTot / count
    #print "NORM: "+ "\n\tscorePOS: "+str(round(scorePosNorm,2)) + "\n\tscoreNEG: "+str(round(scoreNegNorm,2)) \
    #+ "\n\tscoreOBJ: "+str(round(scoreObjNorm,2))
    if scoreNegNorm < scorePosNorm :
        #print "POSITIVE"
        return 1,scorePosNorm
    elif scoreNegNorm > scorePosNorm:
        #print "NEGATIVE"
        return -1,-scoreNegNorm
    else:
        #print "OBJECTIVE"
        return 0,0
Developer: samzek, Project: sentiment_analysis, Lines: 55, Source: SentiAnalisys.py
Example 7: get_net_pos_neg
def get_net_pos_neg(word):
    netPos = 0
    netNeg = 0
    if len(list(swn.senti_synsets(word))) != 0:
        sentisyn = list(swn.senti_synsets(word))
        for item in sentisyn:
            netPos += item.pos_score()
            netNeg += item.neg_score()
    return netPos, netNeg
Developer: saatvikshah1994, Project: SmartMM, Lines: 11, Source: emotion_recognizer.py
Example 8: strip_proppers_POS
def strip_proppers_POS(text, search):
    text = text.decode('utf-8', 'ignore')
    tokens = nltk.word_tokenize(text.lower())
    tagged = nltk.tag._pos_tag(tokens, tagset, tagger)
    res = []
    search_index = [i for i, val in enumerate(tokens)
                    if (p.singular_noun(val) == search or
                        (not p.singular_noun(val) and val == search))
                    ]
    words = [(word, pos) for word, pos in tagged if (pos[0] == "J") and
             len(word) > 2 and
             word not in stop and
             not p.singular_noun(word) and
             eng_check.check(word) and
             not any(ccc.isdigit() for ccc in word)]
    adj_count = 0
    for a in range(0, len(tagged)):
        if tagged[a] in words:
            if tagged[a][1][0] == "J":
                adj = tagged[a][0]
                dist = min([abs(a-s) for s in search_index])
                score = 0
                adj_synset = swn.senti_synsets(adj, 'a')
                if len(adj_synset) <= 0:
                    adj_synset = swn.senti_synsets(adj, 'v')
                if len(adj_synset) <= 0:
                    synonyms = []
                    for ss in wn.synsets(adj):
                        for j in ss.lemma_names():
                            synonyms.append(j)
                    if len(synonyms) > 1:
                        synonym_count = 0
                        for s in range(0, len(synonyms)):
                            if synonym_count < 2 and synonyms[s] != adj:
                                w1 = synonyms[s]
                                adj_synset1 = swn.senti_synsets(w1, 'a')
                                if len(adj_synset1) > 0:
                                    score += adj_synset1[0].pos_score()\
                                             - adj_synset1[0].neg_score()
                                    synonym_count += 1
                        score = score/2
                else:
                    score = adj_synset[0].pos_score() \
                            - adj_synset[0].neg_score()
                try:
                    res.append((adj, score/(pow(dist, 2))))
                    adj_count += 1
                except:
                    pass
    return (res, adj_count)
Developer: Zephrys, Project: Bro, Lines: 53, Source: ranks.py
Example 9: getNumberOfAppearances
def getNumberOfAppearances(self,tokens):
    total = 0
    for token in tokens:
        if len(s.senti_synsets(token)) > 0:
            total += 1
    return total
Developer: adna9, Project: twitter-sentiment-analysis, Lines: 7, Source: SentiWordNetLexicon.py
Example 10: main
def main():
    # str=['s','a','f','e','v','i','w','j','k','x','k']
    # for i in str:
    #     for j in str:
    #
    i = raw_input("Length:")
    str = raw_input("Letter sequence:")
    f = open("combinations.txt",'w')
    k = list(itertools.permutations(str,int(i)))
    for i in k:
        for l in i:
            f.write(l)
        f.write('\n')
    f.close()
    h = []
    with open('combinations.txt') as hai:
        h = [word.lower().strip() for word in hai ]
    dic = {}
    for o in h:
        if o not in dic:
            dic[o] = 0
        else:
            dic[o] += 1
    m = open("out.txt",'w')
    for l in dic:
        v = list(swn.senti_synsets(l))
        if v:
            m.write(l)
            m.write('\n')
Developer: sebinsphilip, Project: 4pics1word, Lines: 34, Source: generator.py
Example 11: getSentimentOfWord
def getSentimentOfWord(self, word):
    try:
        sentSet = list(swn.senti_synsets(word))
    except:
        #print("swn.senti_synsets(word) threw an error")
        return 0
    #if not found, assume objective word
    if len(sentSet) == 0:
        #print('empty sentSet for word '+word)
        return 0
    #else:
    #print('non empty sentSet for word '+word)
    totalPos = 0
    totalNeg = 0
    totalObj = 0
    for sentiword in sentSet:
        totalPos += sentiword.pos_score()
        totalNeg += sentiword.neg_score()
        totalObj += sentiword.obj_score()
    totalPos = totalPos / len(sentSet)
    totalNeg = totalNeg / len(sentSet)
    totalObj = totalObj / len(sentSet)
    #determine sentiment
    if (totalPos >= totalObj) and (totalPos >= totalNeg):
        return 1
    if (totalNeg >= totalObj) and (totalNeg >= totalPos):
        return -1
    if (totalObj >= totalPos) and (totalObj >= totalNeg):
        return 0
Developer: shaypal5, Project: OrZukFinalProject, Lines: 33, Source: SentimentWordFrequencyModel.py
Example 12: get_sentiment_score
def get_sentiment_score(ls):
    '''
    input type sentence
    this method estimate a score for the sentence based on the swn model
    '''
    from nltk.tokenize import word_tokenize
    import re
    word_list = word_tokenize(ls.content)
    punctuation = re.compile(r'[-.?!,":;()|0-9]') # remove these punctuations and number
    word_list = [punctuation.sub("", word) for word in word_list]
    word_list = filter(None, word_list) #filters empty
    ls.tokens = word_list
    pos_score = 0.0
    neg_score = 0.0
    num_valid_word = 0.1
    for w in word_list:
        try:
            res = swn.senti_synsets(w)
            pos_score += res[0]._pos_score
            neg_score += res[0]._neg_score
            num_valid_word += 1
        except:
            pass
    ls.score = (pos_score - neg_score)/num_valid_word
Developer: zhilongz, Project: Smart_Review_Summarization, Lines: 28, Source: swnModel.py
Example 13: main
def main():
    args = sys.argv
    if(len(args) != 2):
        print('usage: python proj filename')
        return -1
    lines = []
    values = []
    data = open(args[1], 'r')
    for line in data:
        temp = line.split('.')
        for sen in temp:
            tokens = word_tokenize(sen.strip('\n').strip(',').strip('-'))
            if tokens != []:
                lines.append(tokens)
    total_pos = 0
    total_neg = 0
    for line in lines:
        pos = 0.0
        neg = 0.0
        pcount = 0
        ncount = 0
        for word in line:
            sp = 0
            sn = 0
            sub_pos = 0
            sub_neg = 0
            x = swn.senti_synsets(word)
            for a in x:
                if(a.pos_score() > 0):
                    sub_pos += 1
                    sp += a.pos_score()
                if a.neg_score() > 0:
                    sub_neg += 1
                    sn += a.neg_score()
            # if(sub_pos != 0):
            #     sp /= sub_pos
            # if(sub_neg != 0):
            #     sn /= sub_neg
            pos += sp
            neg += sn
            if sp > 0:
                pcount += 1
            if sn > 0:
                ncount += 1
        if(pos == 0) or (neg == 0):
            values.append((pos, neg))
            total_pos += pos
            total_neg += neg
        else:
            values.append((pos/(pos+neg), neg/(pos+neg)))
            total_pos += (pos/(pos+neg))
            total_neg += (neg/(pos+neg))
    print(str(total_pos / len(values)) + ',' + str(total_neg / len(values)))
    for x in range(0, len(lines)):
        # print('sentence: ' + str(lines[x]))
        print(str(values[x][0]) + ',' + str(values[x][1]))
Developer: JacobPawlak, Project: sensingSemanticsCathacks, Lines: 60, Source: test.py
Example 14: sentiwordnetSentimentScoreFeatures
def sentiwordnetSentimentScoreFeatures(wordsTagged):
    posScoreSum = 0.0
    negScoreSum = 0.0
    for word, tag in wordsTagged:
        wordnetTag = translateFromNltkToWordnetTag(tag)
        word = stripNegation(word)
        if wordnetTag:
            synsets = list(sentiwordnet.senti_synsets(word, wordnetTag))
        else:
            synsets = list(sentiwordnet.senti_synsets(word))
        if len(synsets) > 0:
            synset = synsets[0]
            posScoreSum += synset.pos_score()  # accumulate over all words rather than keeping only the last one
            negScoreSum += synset.neg_score()
    return {"pos_neg_score": posScoreSum - negScoreSum}
Developer: ekedziora, Project: sentiment, Lines: 16, Source: featureExtractors.py
Example 15: score
def score(self, tokens):
    pos_value = 0.0
    neg_value = 0.0
    obj_value = 0.0
    #TODO disambiguation via POS tagging using nps_chat or Brown Corpus
    nltk_tagged = nltk.pos_tag(tokens)
    #print(nltk_tagged)
    lengthOfData = 0
    for word in nltk_tagged:
        meanings = list(swn.senti_synsets(word[0], self.get_wordnet_pos(word[1])))
        if len(meanings) > 0:
            wordSynset0 = meanings[0]
            pos_value += wordSynset0.pos_score()
            neg_value += wordSynset0.neg_score()
            obj_value += wordSynset0.obj_score()
            lengthOfData += 1
    if lengthOfData > 0:
        pos_value = pos_value
        neg_value = neg_value
        obj_value = obj_value/lengthOfData
    return [ pos_value , neg_value, obj_value]
Developer: BonShillings, Project: tweet-mood-analyzer, Lines: 31, Source: unigram_swn_feature_parser.py
Example 16: getSentimentFeatures
def getSentimentFeatures(feats, text, prefix):
    pos_sum = 0
    neg_sum = 0
    most_positive = 0
    most_negative = 0
    for string in text.lower().split(' '):
        if len(string) > 0 and string[0] == '#':
            string = string[1:]
        senti_synset = list(swn.senti_synsets(string))
        if len(senti_synset) > 0:
            senti_synset = senti_synset[0] #just use the 1st one for now
            pos_score = senti_synset.pos_score()
            if pos_score > most_positive: most_positive = pos_score
            pos_sum += pos_score
            neg_score = senti_synset.neg_score()
            if neg_score > most_negative: most_negative = neg_score
            neg_sum += neg_score
    feats[prefix + 'POS_SUM'] = pos_sum
    feats[prefix + 'NEG_SUM'] = neg_sum
    feats[prefix + 'MEAN_POS_NEG'] = (pos_sum + neg_sum) / 2.0
    feats[prefix + 'POS_NEG_GAP'] = pos_sum - neg_sum
    feats[prefix + 'SINGLE_POS_GAP'] = most_positive - (pos_sum + neg_sum) / 2.0
    feats[prefix + 'SINGLE_NEG_GAP'] = most_negative - (pos_sum + neg_sum) / 2.0
Developer: atran3, Project: sarcasm_detection, Lines: 25, Source: basicFeaturizer.py
Example 17: sentiment
def sentiment(word):
    # print word
    posScore = 0
    negScore = 0
    if word[:1] == "~" and len(getAntonyms(word[1:])) != 0:
        word = getAntonyms(word[1:]).keys()[0]
    opinions = swn.senti_synsets(word)
    for o in list(opinions):
        negScore += o.neg_score()
        posScore += o.pos_score()
    # print "POS " + str(posScore)
    # print "NEG " + str(negScore)
    negWords = ['rude','arrogant','boring','difficult','terrible','hard','dull','long','tricky','impossible','long','intimidating','ridiculous','tough','challenging']
    posWords = ['exciting', 'cool','smart','incredible','super','great','good','excellent','engaging','clear','entertaining','interesting','easy','straightforward','helpful','amazing','awesome','related','funny','doable']
    if word.lower() in negWords:
        return 'neg'
    elif word.lower() in posWords:
        return 'pos'
    if word[:1] == "~":
        if word[1:].lower() in negWords:
            return 'pos'
        elif word[1:].lower() in posWords:
            return 'neg'
    if posScore > negScore:
        return 'pos'
    elif posScore < negScore:
        return 'neg'
    else:
        return 'neut'
Developer: Handroo, Project: CSCI544_Team49, Lines: 30, Source: SentenceGenerator.py
Example 18: SentimentAnalysis_RGO_Belief_Propagation
def SentimentAnalysis_RGO_Belief_Propagation(nxg):
    #Bayesian Pearl Belief Propagation is done by
    #assuming the senti scores as probabilities with positive
    #and negative signs and the Recursive Gloss Overlap
    #definition graph being the graphical model.
    #Sentiment as a belief potential is passed through
    #the DFS tree of this graph.
    dfs_positive_belief_propagated=1.0
    core_positive_belief_propagated=1.0
    dfs_negative_belief_propagated=1.0
    core_negative_belief_propagated=1.0
    core_xnegscore=core_xposscore=1.0
    dfs_knegscore=dfs_kposscore=dfs_vposscore=dfs_vnegscore=1.0
    sorted_core_nxg=sorted(nx.core_number(nxg).items(),key=operator.itemgetter(1), reverse=True)
    kcore_nxg=nx.k_core(nxg,6,nx.core_number(nxg))
    for x in sorted_core_nxg:
        xsset = swn.senti_synsets(x[0])
        if len(xsset) > 2:
            core_xnegscore = float(xsset[0].neg_score())*10.0
            core_xposscore = float(xsset[0].pos_score())*10.0
        if core_xnegscore == 0.0:
            core_xnegscore = 1.0
        if core_xposscore == 0.0:
            core_xposscore = 1.0
        core_positive_belief_propagated *= float(core_xposscore)
        core_negative_belief_propagated *= float(core_xnegscore)
    print "Core Number: RGO_sentiment_analysis_belief_propagation: %f, %f" % (float(core_positive_belief_propagated), float(core_negative_belief_propagated))
    #for k,v in nx.dfs_edges(nxg):
    for k,v in nx.dfs_edges(kcore_nxg):
        ksynset = swn.senti_synsets(k)
        vsynset = swn.senti_synsets(v)
        if len(ksynset) > 2:
            dfs_knegscore = float(ksynset[0].neg_score())*10.0
            dfs_kposscore = float(ksynset[0].pos_score())*10.0
        if len(vsynset) > 2:
            dfs_vnegscore = float(vsynset[0].neg_score())*10.0
            dfs_vposscore = float(vsynset[0].pos_score())*10.0
        dfs_kposscore_vposscore = float(dfs_kposscore*dfs_vposscore)
        dfs_knegscore_vnegscore = float(dfs_knegscore*dfs_vnegscore)
        if dfs_kposscore_vposscore == 0.0:
            dfs_kposscore_vposscore = 1.0
        if dfs_knegscore_vnegscore == 0.0:
            dfs_knegscore_vnegscore = 1.0
        dfs_positive_belief_propagated *= float(dfs_kposscore_vposscore)
        dfs_negative_belief_propagated *= float(dfs_knegscore_vnegscore)
    print "K-Core DFS: RGO_sentiment_analysis_belief_propagation: %f, %f" % (float(dfs_positive_belief_propagated),float(dfs_negative_belief_propagated))
    return (dfs_positive_belief_propagated, dfs_negative_belief_propagated, core_positive_belief_propagated, core_negative_belief_propagated)
Developer: shrinivaasanka, Project: asfer-github-code, Lines: 47, Source: SocialNetworkAnalysis_WebSpider.py
Example 19: __getitem__
def __getitem__(self, k):
    synsets = list(swn.senti_synsets(k))
    if synsets:
        p, n = synsets[0].pos_score(), synsets[0].neg_score()
        v = (float(p) - float(n), float(p) + float(n))
        return v
    else:
        return None
Developer: clips, Project: pattern, Lines: 8, Source: __init__.py
Example 20: remove_nonSentiWord
def remove_nonSentiWord(word):
    shouldInclude = False
    if len(list(swn.senti_synsets(word))) == 0:
        # print word, " not in sentiWordNet"
        if len(list(wn.synsets(word))) == 0:
            # print word, " not in wordNet"
            shouldInclude = False
        else:
            shouldInclude = True
    else:
        synSet = list(swn.senti_synsets(word))
        # print "Word: ", word
        for item in synSet:
            # print "+ ", item.pos_score(), " - ", item.neg_score(), " Neutral ", item.obj_score()
            if item.pos_score() > sentiWordNet_ThreshHold or item.neg_score() > sentiWordNet_ThreshHold:
                shouldInclude = True
                break
    return shouldInclude
Developer: saatvikshah1994, Project: SmartMM, Lines: 18, Source: emotion_recognizer.py
Note: The nltk.corpus.sentiwordnet.senti_synsets examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright of each fragment remains with its original author. For redistribution and reuse, refer to the corresponding project's license; do not republish without permission.