This article collects typical usage examples of the Python class nltk.corpus.util.LazyCorpusLoader. If you have been wondering what LazyCorpusLoader is for, how to use it, or what real code that uses it looks like, the hand-picked class examples below should help.
Twenty code examples of the LazyCorpusLoader class are presented, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
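Before the individual examples, here is a minimal sketch of the pattern they all share. It assumes the state_union corpus has already been fetched (for instance with nltk.download('state_union')); NLTK's own built-in corpora are declared with this same class. LazyCorpusLoader receives the corpus directory name under nltk_data/corpora, the reader class to wrap, and whatever arguments that reader expects (typically a file-id regular expression). Nothing is located or parsed until the first attribute access, at which point the loader builds the real reader and transparently delegates to it.

import nltk
from nltk.corpus.util import LazyCorpusLoader
from nltk.corpus.reader import PlaintextCorpusReader

nltk.download('state_union')        # one-time fetch into nltk_data/corpora/

# Constructing the loader is cheap: no files are located or opened here.
state_union = LazyCorpusLoader(
    'state_union',                  # directory name under nltk_data/corpora/
    PlaintextCorpusReader,          # reader class instantiated on first use
    r'(?!\.).*\.txt')               # file-id regex handed to that reader

# The first attribute access triggers nltk.data.find('corpora/state_union'),
# builds the real PlaintextCorpusReader and delegates the call to it.
print(state_union.fileids()[:3])

Every example below follows this shape, varying only the reader class (ChasenCorpusReader, KNBCorpusReader, PlaintextCorpusReader, and so on) and the corpus-specific arguments.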
Example 1: test
def test():
    from nltk.corpus.util import LazyCorpusLoader

    jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
    assert isinstance(jeita.tagged_words()[0][1], basestring)
Developer ID: Kuew, Project: hashtagify, Lines of code: 7, Source file: chasen.py
Example 2: test
def test():
    from nltk.corpus.util import LazyCorpusLoader

    jeita = LazyCorpusLoader(
        'jeita', ChasenCorpusReader, r'.*chasen', encoding='utf-8')

    assert isinstance(jeita.tagged_words()[0][1], basestring)
Developer ID: Akira55, Project: nltk, Lines of code: 8, Source file: chasen.py
Example 3: test
def test():
    from nltk.corpus.util import LazyCorpusLoader

    knbc = LazyCorpusLoader("knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp")
    assert isinstance(knbc.words()[0], basestring)
    assert isinstance(knbc.sents()[0][0], basestring)
    assert type(knbc.tagged_words()[0]) == tuple
    assert type(knbc.tagged_sents()[0][0]) == tuple
Developer ID: ongxuanhong, Project: jazzparser-master-thesis, Lines of code: 9, Source file: knbc.py
Example 4: test
def test():
    from nltk.corpus.util import LazyCorpusLoader

    knbc = LazyCorpusLoader(
        'knbc/corpus1', KNBCorpusReader, r'.*/KN.*', encoding='euc-jp')

    assert isinstance(knbc.words()[0], string_types)
    assert isinstance(knbc.sents()[0][0], string_types)
    assert isinstance(knbc.tagged_words()[0], tuple)
    assert isinstance(knbc.tagged_sents()[0][0], tuple)
Developer ID: DrDub, Project: nltk, Lines of code: 9, Source file: knbc.py
Example 5: demo
def demo():
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")

    print "/".join(jeita.words()[22100:22140])

    print "\nEOS\n".join(
        ["\n".join("%s/%s" % (w[0], w[1].split("\t")[2]) for w in sent) for sent in jeita.tagged_sents()[2170:2173]]
    )
Developer ID: Kuew, Project: hashtagify, Lines of code: 11, Source file: chasen.py
Example 6: demo
def demo():
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    jeita = LazyCorpusLoader(
        'jeita', ChasenCorpusReader, r'.*chasen', encoding='utf-8')

    print '/'.join(jeita.words()[22100:22140])

    print '\nEOS\n'.join(['\n'.join("%s/%s" % (w[0], w[1].split('\t')[2]) for w in sent)
                          for sent in jeita.tagged_sents()[2170:2173]])
Developer ID: Akira55, Project: nltk, Lines of code: 12, Source file: chasen.py
Example 7: demo
def demo():
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    root = nltk.data.find('corpora/knbc/corpus1')
    fileids = [f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
               if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)]

    def _knbc_fileids_sort(x):
        cells = x.split('-')
        return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))

    knbc = LazyCorpusLoader('knbc/corpus1', KNBCorpusReader,
                            sorted(fileids, key=_knbc_fileids_sort), encoding='euc-jp')

    print knbc.fileids()[:10]
    print ''.join(knbc.words()[:100])

    print '\n\n'.join('%s' % tree for tree in knbc.parsed_sents()[:2])

    knbc.morphs2str = lambda morphs: '/'.join(
        "%s(%s)" % (m[0], m[1].split(' ')[2]) for m in morphs if m[0] != 'EOS'
        ).encode('utf-8')

    print '\n\n'.join('%s' % tree for tree in knbc.parsed_sents()[:2])

    print '\n'.join(' '.join("%s/%s" % (w[0], w[1].split(' ')[2]) for w in sent)
                    for sent in knbc.tagged_sents()[0:2])
Developer ID: B-Rich, Project: Fem-Coding-Challenge, Lines of code: 29, Source file: knbc.py
Example 8: parse_wsj
def parse_wsj(processes=8):
    ptb = LazyCorpusLoader(  # Penn Treebank v3: WSJ portions
        'ptb', CategorizedBracketParseCorpusReader, r'wsj/\d\d/wsj_\d\d\d\d.mrg',
        cat_file='allcats.txt', tagset='wsj')

    fileids = ptb.fileids()
    params = []
    for f in fileids:
        corpus = zip(ptb.parsed_sents(f), ptb.tagged_sents(f))
        for i, (parsed, tagged) in enumerate(corpus):
            params.append((f, i, parsed, tagged))

    p = Pool(processes)
    p.starmap(get_best_parse, sorted(params, key=lambda x: (x[0], x[1])))
Developer ID: jonpiffle, Project: ltag_parser, Lines of code: 14, Source file: run_parser.py
Example 9: main
def main():
    # matplotlib.use('Qt5Agg')
    # import matplotlib.pyplot as plt

    download('punkt')

    # Download and load the english europarl corpus
    downloader.download('europarl_raw')
    english = LazyCorpusLoader('europarl_raw/english', EuroparlCorpusReader, r'ep-.*\.en', encoding='utf-8')

    words = english.words()

    # Calculate the frequency distribution of the words in the corpus
    word_frequency_distribution = FreqDist([word.lower() for word in words])

    # Get the sentences of the corpus, all in lower case, with infrequent words replaced by the token "<unknown>"
    sentences = [
        ['start0'] + [word.lower() if word_frequency_distribution[word.lower()] >= 10 else '<unknown>' for word in
                      sentence] + ['end0']
        for sentence in english.sents()]

    # create train and test dataset
    train = sentences[0:int(len(sentences) * 0.8)]
    test = sentences[int(len(sentences) * 0.8):]

    vocabulary = list(word_frequency_distribution)
    vocabulary_length = word_frequency_distribution.B()

    # Calculate bigrams
    bigrams_train = list(chain.from_iterable(ngrams_sentences(train, 2)))

    # Calculate the conditional frequency distribution for bigrams
    bigrams_fd = ConditionalFreqDist(((f,), s) for f, s in bigrams_train)

    # Calculate the conditional probability distribution for bigrams
    cpd_bigram = ConditionalProbDist(bigrams_fd, LaplaceProbDist, vocabulary_length)

    lower_case_letters = string.ascii_lowercase
    error_test = copy.deepcopy(test)
    for sentence in error_test:
        word = random.randrange(1, len(sentence) - 1)
        sentence[word] = random.choice(vocabulary)
        word = random.choice(sentence[1:-2])
        word = random.randrange(1, len(sentence) - 1)
        letter = random.randrange(0, len(sentence[word]))
        sentence[word] = sentence[word][0:letter] + random.choice(lower_case_letters) + sentence[word][letter + 1:]

    corrected = viterbi(error_test[25][:-1], vocabulary, cpd_bigram)

    print('Corrected:{}'.format(corrected))
    print('Original:{}'.format(test[25]))
Developer ID: BabisK, Project: M36209P, Lines of code: 50, Source file: ex3.py
Example 10: read_knbc
def read_knbc(train_file, test_file, reference_file):
    root = nltk.data.find('corpora/knbc/corpus1')
    fileids = [f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
               if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)]

    knbc = LazyCorpusLoader('knbc/corpus1', KNBCorpusReader,
                            sorted(fileids, key=_knbc_fileids_sort), encoding='euc-jp')

    sentences = knbc.sents()

    write_train(sentences[0:4000], train_file)
    write_test(sentences[4000:-1], test_file)
    write_reference(sentences[4000:-1], reference_file)
Developer ID: LeopoldC, Project: cross-language_IR, Lines of code: 14, Source file: knbc_to_xml.py
Example 11: demo
def demo():
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    root = nltk.data.find("corpora/knbc/corpus1")
    fileids = [
        f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*") if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)
    ]

    def _knbc_fileids_sort(x):
        cells = x.split("-")
        return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))

    knbc = LazyCorpusLoader("knbc/corpus1", KNBCorpusReader, sorted(fileids, key=_knbc_fileids_sort), encoding="euc-jp")

    print knbc.fileids()[:10]
    print "".join(knbc.words()[:100])

    print "\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2])

    knbc.morphs2str = lambda morphs: "/".join(
        "%s(%s)" % (m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS"
    ).encode("utf-8")

    print "\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2])

    print "\n".join(" ".join("%s/%s" % (w[0], w[1].split(" ")[2]) for w in sent) for sent in knbc.tagged_sents()[0:2])
Developer ID: ongxuanhong, Project: jazzparser-master-thesis, Lines of code: 28, Source file: knbc.py
Example 12: treebank_tagger_demo
def treebank_tagger_demo():
    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader import PlaintextCorpusReader
    from nltk_contrib.coref.util import TreebankTaggerCorpusReader

    state_union = LazyCorpusLoader(
        'state_union', PlaintextCorpusReader, r'(?!\.svn).*\.txt')
    state_union = TreebankTaggerCorpusReader(state_union)

    print 'Treebank tagger demo...'
    print 'Tagged sentences:'
    for sent in state_union.tagged_sents()[500:505]:
        print sent
        print
    print
    print 'Tagged words:'
    for word in state_union.tagged_words()[500:505]:
        print word
    print
Developer ID: Sandy4321, Project: nltk_contrib, Lines of code: 19, Source file: util.py
Example 13: __init__
def __init__(self, languages=['nl', 'en', 'fr', 'de', 'es', 'th', 'pt', 'pl', "id", "ru", "it", "ru", "tr"]):
    logger.info("Build " + self.__class__.__name__ + " ... ")
    self.language_trigrams = {}
    self.langid = LazyCorpusLoader('langid', LangIdCorpusReader, r'(?!\.).*\.txt')
    self.__mutex = threading.Semaphore()

    for lang in languages:
        self.language_trigrams[lang] = FreqDist()
        for f in self.langid.freqs(fileids=lang + "-3grams.txt"):
            self.language_trigrams[lang].inc(f[0], f[1])

    logger.info("Build " + self.__class__.__name__ + ": done!")
Developer ID: soldierkam, Project: pynews, Lines of code: 10, Source file: lang.py
Example 14: from_nltk
def from_nltk(cls):
    """Returns a fully populated Propbank with the help of NLTK's interface"""
    ptb = LazyCorpusLoader(
        'ptb',
        CategorizedBracketParseCorpusReader,
        r'wsj/\d\d/wsj_\d\d\d\d.mrg',
        cat_file='allcats.txt'
    )
    propbank_ptb = LazyCorpusLoader(
        'propbank', PropbankCorpusReader,
        'prop.txt', 'frames/.*\.xml', 'verbs.txt',
        lambda filename: filename.upper(),
        ptb
    )  # Must be defined *after* ptb corpus.

    role_dict = {}
    for roleset_xml in propbank_ptb.rolesets():
        role = Role.fromxml(roleset_xml)
        role_dict[role.roleset_id] = role

    instance_dict = defaultdict(dict)
    pb_instances = propbank_ptb.instances()
    for instance in pb_instances:
        instance.fileid = instance.fileid.lower()
        file_num = instance.fileid.split("/")[-1].split(".")[0].replace("wsj_", "")
        sentnum = str(instance.sentnum)
        predicate = instance.predicate
        tree = instance.tree

        if isinstance(predicate, nltk.corpus.reader.propbank.PropbankTreePointer):
            key = Propbank.pointer_to_word(predicate, tree)
        elif isinstance(predicate, nltk.corpus.reader.propbank.PropbankSplitTreePointer):
            key = tuple([Propbank.pointer_to_word(p, tree) for p in predicate.pieces])
        else:
            ### TODO: Investigate when this is the case ###
            # assert False
            continue

        pb_instance = PropbankInstance(instance.fileid, file_num, sentnum, key, instance.roleset, instance.arguments)
        instance_dict[(file_num, sentnum)][key] = pb_instance

    return Propbank(role_dict, instance_dict)
Developer ID: jonpiffle, Project: xtag_verbnet, Lines of code: 43, Source file: propbank.py
Example 15: treebank_chunk_tagger_demo
def treebank_chunk_tagger_demo():
    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader import PlaintextCorpusReader
    from nltk_contrib.coref.util import TreebankChunkTaggerCorpusReader

    state_union = LazyCorpusLoader(
        'state_union', PlaintextCorpusReader, r'(?!\.svn).*\.txt')
    state_union = TreebankChunkTaggerCorpusReader(state_union)

    print 'Treebank chunker demo...'
    print 'Chunked sentences:'
    for sent in state_union.chunked_sents()[500:505]:
        print sent
        print
    print
    print 'Parsed sentences:'
    for tree in state_union.parsed_sents()[500:505]:
        print tree
        print
    print
Developer ID: Sandy4321, Project: nltk_contrib, Lines of code: 20, Source file: util.py
Example 16: demo
def demo(**kwargs):
    import nltk
    from nltk_contrib.coref import NLTK_COREF_DATA
    from nltk_contrib.coref.muc import muc6_documents, muc7_documents
    from nltk_contrib.coref.muc import MUCCorpusReader

    nltk.data.path.insert(0, NLTK_COREF_DATA)

    muc6 = LazyCorpusLoader('muc6/', MUCCorpusReader, muc6_documents)
    for sent in muc6.iob_sents()[:]:
        for word in sent:
            print word
        print
    print
    for sent in muc6.mentions(depth=None):
        for mention in sent:
            print mention
        if sent: print
    print

    muc7 = LazyCorpusLoader('muc7/', MUCCorpusReader, muc7_documents)
    for sent in muc7.iob_sents()[:]:
        for word in sent:
            print word
        print
    print
    for sent in muc7.mentions(depth=None):
        for mention in sent:
            print mention
        if sent: print
    print
Developer ID: knowlp, Project: nltk_contrib, Lines of code: 28, Source file: muc.py
Example 17: __init__
def __init__(self, languages=LangIDDict().keys()):
    self.language_trigrams = {}
    self.langid = LazyCorpusLoader('langid', LangIdReader, r'(?!\.).*\.txt')

    for lang in languages:
        self.language_trigrams[lang] = FreqDist()
        for f in self.langid.freqs(fileids=lang + "-3grams.txt"):
            self.language_trigrams[lang].inc(f[0], f[1])

    self.language_dicts = dict([
        (id, dict([(trigram, float(value) / float(fdist.N())) for trigram, value in fdist.items()]))
        for id, fdist in self.language_trigrams.items()
    ])
Developer ID: harixxy, Project: solutions, Lines of code: 13, Source file: f7_language.py
Example 18: dictionary_backoff
def dictionary_backoff(option_tone, backoff):
    '''Creates a dictionary according to the option: tonal/nontonal'''
    if option_tone == "tonal":
        bambara_dict_toolbox = BambaraTagging("cookbook/bambara", ["bamadaba.txt"], option_tone, "POS")
        bambara_dict_toolbox.copy_files()
        reader = LazyCorpusLoader("cookbook/bambara/", ToolboxCorpusReader, ["bamadaba.txt"])
        entries = reader.entries("bamadaba.txt")  # tonal
        words = reader.words("bamadaba.txt")  # tonal
        pos = reader.words("bamadaba.txt", key="ps")  # tonal
    else:
        bambara_dict_toolbox = BambaraTagging("cookbook/bambara", ["bamadaba_non_tonal.txt"], option_tone, "POS")
        bambara_dict_toolbox.copy_files()
        reader = LazyCorpusLoader("cookbook/bambara/", ToolboxCorpusReader, ["bamadaba_non_tonal.txt"])
        entries = reader.entries("bamadaba_non_tonal.txt")  # non-tonal
        words = reader.words("bamadaba_non_tonal.txt")  # non-tonal
        pos = reader.words("bamadaba_non_tonal.txt", key="ps")  # non-tonal

    own_model = get_alt_pos(entries, pos, reader, option_tone)
    print("Dictionary created")

    dic = UnigramTagger(model=own_model, backoff=backoff)
    return dic
Developer ID: Batene, Project: Bamanankan, Lines of code: 21, Source file: create_reader.py
Example 19: LangDetector
class LangDetector(object):

    def __init__(self, languages=LangIDDict().keys()):
        self.language_trigrams = {}
        self.langid = LazyCorpusLoader('langid', LangIdReader, r'(?!\.).*\.txt')

        for lang in languages:
            self.language_trigrams[lang] = FreqDist()
            for f in self.langid.freqs(fileids=lang + "-3grams.txt"):
                self.language_trigrams[lang].inc(f[0], f[1])

        self.language_dicts = dict([
            (id, dict([(trigram, float(value) / float(fdist.N())) for trigram, value in fdist.items()]))
            for id, fdist in self.language_trigrams.items()
        ])

    def detect(self, text):
        words = nltk_word_tokenize(text.lower())
        trigrams = {}
        scores = dict([(lang, 0) for lang in self.language_trigrams.keys()])

        trigcount = [(trigram, 1.0) for match in words for trigram in self.get_word_trigrams(match)]
        if len(trigcount) > 0:
            trigdf = pandas.DataFrame(trigcount, columns=["key", "value"])
            trigrams = trigdf.groupby("key")["value"].sum().to_dict()
        else:
            trigrams = {}

        total = sum(trigrams.values())

        maxscore, maxid = 0, ""
        for trigram, count in trigrams.items():
            trishare = (float(count) / float(total))
            for lang, frequencies in filter(lambda (l, f): trigram in f, self.language_dicts.iteritems()):
                scores[lang] += frequencies[trigram] * trishare
                if scores[lang] > maxscore:
                    maxid, maxscore = lang, scores[lang]

        return sorted(scores.items(), key=lambda x: x[1], reverse=True)
Developer ID: harixxy, Project: solutions, Lines of code: 39, Source file: f7_language.py
Example 20: LazyCorpusLoader
from nltk.tokenize import RegexpTokenizer
from nltk.corpus.util import LazyCorpusLoader
from nltk.corpus.reader import *
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
import sys, os
import cPickle
from feats import words_in_sentence

pathname = os.path.dirname(sys.argv[0])
nltk.data.path.append(os.path.abspath(pathname) + '/data')

movie_reviews = LazyCorpusLoader(
    sys.argv[1], CategorizedPlaintextCorpusReader,
    r'(?!\.).*\.txt', cat_pattern=r'(neg|pos)/.*',
    encoding='utf-8')

train_test_ratio = 2.0 / 3

def pickleObject():
    obj = classifier
    savefile = open('classifier.pickle', 'w')
    cPickle.dump(obj, savefile, cPickle.HIGHEST_PROTOCOL)

def pickleFeats():
    obj = words_in_sentence
    savefile = open('feats.pickle', 'w')
    cPickle.dump(obj, savefile, cPickle.HIGHEST_PROTOCOL)
Developer ID: asketak, Project: IB030-sentiment, Lines of code: 32, Source file: classifier.py
Note: The nltk.corpus.util.LazyCorpusLoader class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution or use of the code should follow the corresponding project's license. Do not reproduce this article without permission.