• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python parse.load_parser函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中nltk.parse.load_parser函数的典型用法代码示例。如果您正苦于以下问题:Python load_parser函数的具体用法?Python load_parser怎么用?Python load_parser使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了load_parser函数的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: __init__

 def __init__(self, gramfile=None):
     """
     Build a discourse parser from a feature-based grammar file.

     :param gramfile: name of file where grammar can be loaded;
         falls back to the book's discourse grammar when falsy
     :type gramfile: str
     """
     # Truthiness check mirrors the original `if gramfile else` default.
     self._gramfile = gramfile or 'grammars/book_grammars/discourse.fcfg'
     self._parser = load_parser(self._gramfile)
开发者ID:2ricecrackerfolder,项目名称:twittermood,代码行数:7,代码来源:discourse.py


示例2: batch_parse

def batch_parse(inputs, grammar, trace=0):
    """
    Convert input sentences into syntactic trees.

    :param inputs: sentences to be parsed
    :type inputs: list of str
    :param grammar: L{FeatureGrammar} or name of feature-based grammar
    :rtype: dict
    :return: a mapping from input sentences to a list of L{Tree}s
    """
    # Imports live inside the function to avoid circular dependencies.
    from nltk.grammar import FeatureGrammar
    from nltk.parse import FeatureChartParser, load_parser

    if isinstance(grammar, FeatureGrammar):
        parser = FeatureChartParser(grammar)
    else:
        parser = load_parser(grammar, trace=trace)

    # NOTE(review): tokenisation is a plain whitespace split; a real
    # tokenizer may be needed for punctuation-bearing input.
    return [parser.nbest_parse(sent.split()) for sent in inputs]
开发者ID:gijs,项目名称:nltk,代码行数:25,代码来源:util.py


示例3: parse_sents

def parse_sents(inputs, grammar, trace=0):
    """
    Convert input sentences into syntactic trees.

    :param inputs: sentences to be parsed
    :type inputs: list(str)
    :param grammar: ``FeatureGrammar`` or name of feature-based grammar
    :type grammar: nltk.grammar.FeatureGrammar
    :rtype: list(nltk.tree.Tree) or dict(list(str)): list(Tree)
    :return: a mapping from input sentences to a list of ``Tree``s
    """
    # Imports live inside the function to avoid circular dependencies.
    from nltk.grammar import FeatureGrammar
    from nltk.parse import FeatureChartParser, load_parser

    parser = (FeatureChartParser(grammar)
              if isinstance(grammar, FeatureGrammar)
              else load_parser(grammar, trace=trace))

    # NOTE(review): plain whitespace split; a real tokenizer may be needed.
    return [list(parser.parse(sent.split())) for sent in inputs]
开发者ID:prz3m,项目名称:kind2anki,代码行数:25,代码来源:util.py


示例4: hole_readings

def hole_readings(sentence, grammar_filename=None, verbose=False):
    """
    Parse ``sentence`` with a hole-semantics grammar and return all of its
    fully-plugged first-order readings (one formula tree per plugging,
    across every parse of the sentence).

    NOTE(review): Python 2 code (print statements); will not run on Python 3.

    :param sentence: whitespace-tokenisable sentence to parse
    :param grammar_filename: feature grammar to load; defaults to the
        sample hole-semantics grammar
    :param verbose: if True, print each intermediate representation
    :return: list of first-order formula trees
    """
    if not grammar_filename:
        grammar_filename = 'grammars/sample_grammars/hole.fcfg'

    if verbose: print 'Reading grammar file', grammar_filename
    
    parser = load_parser(grammar_filename)

    # Parse the sentence.
    tokens = sentence.split()
    trees = parser.nbest_parse(tokens)
    if verbose: print 'Got %d different parses' % len(trees)

    all_readings = []
    for tree in trees:
        # Get the semantic feature from the top of the parse tree.
        sem = tree.node['SEM'].simplify()

        # Print the raw semantic representation.
        if verbose: print 'Raw:       ', sem

        # Skolemize away all quantifiers.  All variables become unique.
        while isinstance(sem, logic.LambdaExpression):
            sem = sem.term
        skolemized = skolemize(sem)
        
        if verbose: print 'Skolemized:', skolemized

        # Break the hole semantics representation down into its components
        # i.e. holes, labels, formula fragments and constraints.
        hole_sem = HoleSemantics(skolemized)

        # Maybe show the details of the semantic representation.
        if verbose:
            print 'Holes:       ', hole_sem.holes
            print 'Labels:      ', hole_sem.labels
            print 'Constraints: ', hole_sem.constraints
            print 'Top hole:    ', hole_sem.top_hole
            print 'Top labels:  ', hole_sem.top_most_labels
            print 'Fragments:'
            for (l,f) in hole_sem.fragments.items():
                print '\t%s: %s' % (l, f)

        # Find all the possible ways to plug the formulas together.
        pluggings = hole_sem.pluggings()

        # Build FOL formula trees using the pluggings.
        readings = map(hole_sem.formula_tree, pluggings)

        # Print out the formulas in a textual format.
        if verbose: 
            for i,r in enumerate(readings):
                print
                print '%d. %s' % (i, r)
            print
        
        all_readings.extend(readings)
        
    return all_readings
开发者ID:jparise,项目名称:haitwu-appengine,代码行数:59,代码来源:hole.py


示例5: sentence_analysis

def sentence_analysis(sent, out=True):
    """
    Try to parse a (Portuguese) sentence with the 'pt_grammar.fcfg' grammar.

    :param sent: sentence to analyse; trailing/leading ',' and '.' are stripped
    :type sent: str
    :param out: if True, trace the chart parser and print each parse tree
    :type out: bool
    :return: True if the sentence parsed, False on any parsing error
    :rtype: bool
    """
    # Build the parser once; the trace level follows the verbosity flag
    # (the original duplicated the load_parser call in both branches).
    cp = parse.load_parser('pt_grammar.fcfg', trace=1 if out else 0)
    san = sent.strip(',.').lower()
    tokens = san.split()
    try:
        trees = cp.parse(tokens)
        for tree in trees:
            if out:
                print(tree)
        return True
    except Exception:
        # Narrowed from a bare 'except:', which would also swallow
        # KeyboardInterrupt and SystemExit.
        if out:
            print("Esta sentenca nao e valida ou a gramatica ainda nao esta completa...")
        return False
开发者ID:gangsterveggies,项目名称:trabalho3_IA,代码行数:17,代码来源:dcg.py


示例6: demo

def demo():
    """
    Demonstrate translating a natural-language query about movies into a
    SPARQL query via a semantic parse.

    NOTE(review): Python 2 code ('print trans.query'); will not run on Python 3.
    """
    # 'file:' prefix presumably loads the grammar from a local path rather
    # than the NLTK data index -- confirm against nltk.data.load conventions.
    cp = parse.load_parser('file:rdf.fcfg', trace=0)
    tokens = 'list the actors in the_shining'.split()
    # Take the single best parse and extract its semantic representation.
    trees = cp.nbest_parse(tokens)
    tree = trees[0]
    semrep = sem.root_semrep(tree)
    trans = SPARQLTranslator()
    trans.translate(semrep)
    print trans.query
开发者ID:Sandy4321,项目名称:nltk_contrib,代码行数:9,代码来源:rdfquery.py


示例7: parse_with_bindops

def parse_with_bindops(sentence, grammar=None, trace=0):
    """
    Use a grammar with Binding Operators to parse a sentence.

    :param sentence: whitespace-tokenisable sentence to parse
    :param grammar: grammar name/path; defaults to the book's storage grammar
    :param trace: parser trace level
    :return: the n-best parse trees for the sentence
    """
    grammar = grammar or 'grammars/book_grammars/storage.fcfg'
    parser = load_parser(grammar, trace=trace, chart_class=InstantiateVarsChart)
    # Whitespace tokenisation, then parse.
    return parser.nbest_parse(sentence.split())
开发者ID:BohanHsu,项目名称:developer,代码行数:10,代码来源:cooper_storage.py


示例8: __init__

 def __init__(self, gramfile=None):
     """
     Set up the discourse grammar and its chart parser.

     :param gramfile: name of file where grammar can be loaded;
         when None, the book's discourse grammar is used
     :type gramfile: str
     """
     default_grammar = "grammars/book_grammars/discourse.fcfg"
     self._gramfile = default_grammar if gramfile is None else gramfile
     self._parser = load_parser(self._gramfile)
开发者ID:sergeyromanov,项目名称:nltk,代码行数:10,代码来源:discourse.py


示例9: load_parser

'''
Created on 09 Ara 2012

@author: burakkerim
'''

import sys

from nltk.parse import load_parser 
# Build a chart parser from a local grammar file ('file:' prefix).
cp = load_parser('file:extended.fcfg')

# Test sentences for the extended grammar; the '##' groups are sentences
# the base grammar already handled, kept for regression reference.
# NOTE(review): this snippet is truncated -- the list literal below is
# never closed in the visible source.
sentences = [
             #----------------------------------
             # POSITIVES - already covered by the grammar
             #----------------------------------
##             ' ALREADY POSITIVES',
##             'Mary likes John',
##             'a boy disappeared',
##             'John eats sandwiches',
##             'a boy finds cats',
##             'the boy finds cats',
##             'Kim believes John likes her',
##             'the students vanished with the telescope',
##             'every woman likes John', 
##             'Kim believes John likes her',
             #----------------------------------
             # MISSING - add these to the grammar
##             #----------------------------------
##             ' POSITIVES',
             'the dog chased the cat which ate the mouse',
             'people chase Sue who ate the unicorn which Tom saw',
开发者ID:onuryilmaz-wip,项目名称:CFG-FeatureStructures,代码行数:31,代码来源:try.py


示例10: mute

from nltk import *
from nltk.corpus import machado
from nltk import grammar, parse
from nltk.parse.featurechart import InstantiateVarsChart

# Benchmark fixtures: Portuguese sentence tokenizer plus raw/tokenised
# texts from the Machado corpus.
# NOTE(review): fragment -- 'sys', 'codecs' and 'report' must be imported/
# defined elsewhere in the full file.
sent_tokenizer=nltk.data.load('tokenizers/punkt/portuguese.pickle')
raw_text1 = machado.raw('romance/marm05.txt')
raw_text2 = machado.raw('romance/marm04.txt')
raw_text3 = machado.raw('romance/marm03.txt')

ptext1 = nltk.Text(machado.words('romance/marm01.txt'))
ptext2 = nltk.Text(machado.words('romance/marm02.txt'))
ptext3 = nltk.Text(machado.words('romance/marm03.txt'))
ptext4 = nltk.Text(machado.words('romance/marm04.txt'))

# Feature-grammar chart parser (traced) and a Portuguese stemmer.
cp = parse.load_parser('grammars/book_grammars/feat0.fcfg', trace=1)
stemmer = nltk.stem.RSLPStemmer()

## Checking version of the benchmarking
if 'PyPy' in sys.version:
    version = 'PyPy {}'.format(sys.version)
else:
    version = 'CPython {}'.format(sys.version)

report.setup('PyPy' in version)

def mute():
    # Redirect stdout to /dev/null during noisy benchmark sections.
    sys.stdout = codecs.open('/dev/null','w','utf8') #use codecs to avoid decoding errors
def unmute():
    # Restore the interpreter's original stdout.
    sys.stdout = sys.__stdout__
开发者ID:fccoelho,项目名称:NLTK_Pypy_Benchmark,代码行数:30,代码来源:benchmark.py


示例11: hole_readings

def hole_readings(sentence, grammar_filename=None, verbose=False):
    """
    Parse ``sentence`` with a hole-semantics grammar and enumerate its
    fully-plugged first-order readings.

    :param sentence: whitespace-tokenisable sentence to parse
    :param grammar_filename: feature grammar to load; defaults to the
        sample hole-semantics grammar
    :param verbose: if True, print each intermediate representation
    :return: list of formula trees, one per plugging of every parse
    """
    grammar_filename = grammar_filename or "grammars/sample_grammars/hole.fcfg"

    if verbose:
        print("Reading grammar file", grammar_filename)

    parser = load_parser(grammar_filename)

    # Parse the (whitespace-tokenised) sentence.
    trees = parser.nbest_parse(sentence.split())
    if verbose:
        print("Got %d different parses" % len(trees))

    all_readings = []
    for tree in trees:
        # The SEM feature on the root label holds the underspecified semantics.
        sem = tree.label()["SEM"].simplify()

        if verbose:
            print("Raw:       ", sem)

        # Strip outer lambdas, then skolemize away all quantifiers so that
        # every variable becomes unique.
        while isinstance(sem, LambdaExpression):
            sem = sem.term
        skolemized = skolemize(sem)

        if verbose:
            print("Skolemized:", skolemized)

        # Decompose the representation into holes, labels, formula
        # fragments and constraints.
        hole_sem = HoleSemantics(skolemized)

        if verbose:
            print("Holes:       ", hole_sem.holes)
            print("Labels:      ", hole_sem.labels)
            print("Constraints: ", hole_sem.constraints)
            print("Top hole:    ", hole_sem.top_hole)
            print("Top labels:  ", hole_sem.top_most_labels)
            print("Fragments:")
            for fragment_label, fragment in hole_sem.fragments.items():
                print("\t%s: %s" % (fragment_label, fragment))

        # Each way of plugging the formulas together yields one FOL reading.
        readings = [hole_sem.formula_tree(plugging)
                    for plugging in hole_sem.pluggings()]

        if verbose:
            for index, reading in enumerate(readings):
                print()
                print("%d. %s" % (index, reading))
            print()

        all_readings.extend(readings)

    return all_readings
开发者ID:xim,项目名称:nltk,代码行数:63,代码来源:hole.py


示例12: sorted

        # NOTE(review): fragment -- starts mid-function; 'tb', 'fnagl',
        # 'notinlist', 'testtext' and 're' come from the enclosing
        # (unseen) scope.
        # Dump words from the text, most frequent first, that are missing
        # from the glossary word list.
        tbwc = tb.word_counts
        srtd = sorted(tbwc, key=tbwc.get, reverse=True)
        for w in srtd:
            if not w in fnagl:
                notinlist.append(w)
        with  open(r'notingloss.txt', 'w', encoding='utf-8') as f:
            for w in notinlist:
                print(w, file=f)

    # Disabled experiment: parse measurement ranges with a feature grammar.
    if (False):
        from nltk import grammar, parse

        sent = ' to 1·5–2·3 cm. tall'
        tokens = ['to', '15', '-', '23', 'cm', '.', 'in', 'diam.']
        # tokens = ['to','23','m','tall']
        cp = parse.load_parser('../resources/simplerange.fcfg', trace=2)
        trees = cp.parse(tokens)
        for tree in trees:
            print(tree)

    # Disabled experiment: link-grammar parse of the test text.
    if (False):
        import linkgrammar as lg

        sents = re.split(r'(?<=\.)\s+(?=[A-Z])|;\s+', testtext)

        p = lg.Parser(lang="en", verbosity=1, max_null_count=10)
        for sent in sents:
            print(sent)
            linkages = p.parse_sent(sent)
            for linkage in linkages[0:1]:
                print(linkage.num_of_links, linkage.constituent_phrases_nested)
开发者ID:ggosline,项目名称:taxonparser,代码行数:31,代码来源:floras_nltk.py


示例13: nbest_parse

	def nbest_parse(self, xx):
		"""
		Parse ``xx`` (a whitespace-separated sentence) with the hw2
		feature grammar and print each of the n-best parse trees.

		NOTE(review): Python 2 code ('print a'); will not run on Python 3.

		:param xx: sentence to parse
		:type xx: str
		"""
		parser = parse.load_parser('file:hw2.fcfg', trace =2)
		wordlist = xx.split()
		tree = parser.nbest_parse(wordlist)
		for a in tree : print a
开发者ID:enginertas,项目名称:codebase,代码行数:5,代码来源:hw2.py



注:本文中的nltk.parse.load_parser函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python chart.TreeEdge类代码示例发布时间:2022-05-27
下一篇:
Python distance.edit_distance函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap