
Python analysis.Token class code examples


This article collects typical usage examples of the whoosh.analysis.Token class in Python. If you are wondering how the Token class works, how to use it, or what real-world code that uses it looks like, the selected class code examples below may help.



A total of 16 code examples of the Token class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
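Before diving into the examples, the pattern they all share may be easier to see in a minimal, self-contained sketch: a custom Whoosh tokenizer builds a single Token, overwrites its attributes for each word, and yields it from a generator. The class name WhitespaceExampleTokenizer and the naive whitespace splitting below are invented for illustration and do not come from any of the examples that follow.

from whoosh.analysis import Tokenizer, Token

class WhitespaceExampleTokenizer(Tokenizer):
    """Yields one reused Token per whitespace-separated word (illustrative only)."""

    def __call__(self, value, positions=False, chars=False,
                 keeporiginal=False, removestops=True,
                 start_pos=0, start_char=0, mode='', **kwargs):
        t = Token(positions, chars, removestops=removestops, mode=mode, **kwargs)
        pos = start_pos
        offset = 0  # scan position inside `value`
        for word in value.split():
            offset = value.index(word, offset)
            t.text = word
            t.boost = 1.0
            if keeporiginal:
                t.original = word
            if positions:
                t.pos = pos
                pos += 1
            if chars:
                t.startchar = start_char + offset
                t.endchar = t.startchar + len(word)
            offset += len(word)
            yield t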

Example 1: __call__

    def __call__(self, text, **kargs):
        token = Token()

        seen = set()
        words_list = []

        # Collect unique, non-empty, non-punctuation words together with
        # their character offsets so each token below gets its own position.
        for (w, start_pos, stop_pos) in jieba.tokenize(text, mode='search'):
            w = w.strip()
            if not w:
                continue
            if w in seen:
                continue
            if w in punct:
                continue
            seen.add(w)
            words_list.append((w, start_pos, stop_pos))

        for (w, start_pos, stop_pos) in words_list:
            if not accepted_chars.match(w):
                if len(w) <= 1:
                    continue
            token.original = token.text = w
            token.pos = start_pos
            token.startchar = start_pos
            token.endchar = stop_pos
            yield token
Developer ID: Shu-Ji, Project: dht, Lines of code: 26, Source: tor_parser.py


Example 2: __call__

 def __call__(self, text, **kargs):
     token  = Token()
     start_pos = 0
     for w in group_words(text):
         token.original = token.text = w
         token.pos = start_pos
         token.startchar = start_pos
         token.endchar = start_pos + len(w)
         start_pos = token.endchar
         yield token
Developer ID: jannson, Project: iskdaemon, Lines of code: 10, Source: simdb.py


Example 3: __call__

 def __call__(self, text, **kargs):
     words = jieba.tokenize(text, mode="search")
     token = Token()
     for (w,start_pos,stop_pos) in words:
         if not accepted_chars.match(w) and len(w)<=1:
             continue
         token.original = token.text = w
         token.pos = start_pos
         token.startchar = start_pos
         token.endchar = stop_pos
         yield token
Developer ID: LoyukiL, Project: bot_reorganized, Lines of code: 11, Source: analyzer.py


Example 4: __call__

 def __call__(self,text,**kargs):
     words = tokenize_1(text)
     token  = Token()
     for (w,start_pos,stop_pos) in words:
         if not accepted_chars.match(w):
             if len(w) <= 1:
                 continue
         token.original = token.text = w
         token.pos = start_pos
         token.startchar = start_pos
         token.endchar = stop_pos
         yield token
Developer ID: blueicesir, Project: cppjiebapy, Lines of code: 12, Source: analyzer.py


Example 5: __call__

 def __call__(self, value, positions=False, chars=False, keeporiginal=False, removestops=True, start_pos=0, start_char=0, mode='', **kwargs):  
     assert isinstance(value, text_type), "%r is not unicode" % value  
     t = Token(positions, chars, removestops=removestops, mode=mode, **kwargs)  
     seglist=jieba.cut_for_search(value)  # segment the text with the jieba tokenizer (search mode)
     for w in seglist:  
         t.original = t.text = w  
         t.boost = 1.0  
         if positions:  
             t.pos=start_pos+value.find(w)  
         if chars:  
             t.startchar=start_char+value.find(w)  
             t.endchar=start_char+value.find(w)+len(w)  
         yield t  # yield each segmented word as a (reused) Token from the generator
Developer ID: luochuwei, Project: Generate-response-demo, Lines of code: 13, Source: generation_demo-flask.py


Example 6: __call__

 def __call__(self, value, positions=False, chars=False, keeporiginal=False,removestops=True,start_pos=0, start_char=0, mode='',**kwargs):
     assert isinstance(value, text_type), "%r is not unicode" % value 
     t = Token(positions, chars, removestops=removestops, mode=mode,**kwargs)
     seglist = value.split(' ')
     for w in seglist:
         t.original = t.text = w
         t.boost = 1.0
         if positions:
             t.pos=start_pos+value.find(w)
         if chars:
             t.startchar=start_char+value.find(w)
             t.endchar=start_char+value.find(w)+len(w)
         yield t         
Developer ID: luckistmaomao, Project: weibo, Lines of code: 13, Source: search.py


Example 7: __call__

 def __call__(self,text,**kargs):
     words = _cuttor.tokenize(text, search=True)
     token  = Token()
     for (w,start_pos,stop_pos) in words:
         if not accepted_chars.match(w):
             if len(w)>1:
                 pass
             else:
                 continue
         token.original = token.text = w
         token.pos = start_pos
         token.startchar = start_pos
         token.endchar = stop_pos
         yield token
Developer ID: ZoeyYoung, Project: Bookmarks_Cloud, Lines of code: 14, Source: analyzer.py


Example 8: __call__

    def __call__(self, value, positions=False, chars=False,
                 keeporiginal=False, removestops=True,
                 start_pos=0, start_char=0, mode='', **kwargs):
        t = Token(positions, chars, removestops=removestops, mode=mode,
                  **kwargs)

        seglist = jieba.cut(value, cut_all=False)
        for word in seglist:
            t.original = t.text = word
            t.boost = 1.0
            if positions:
                t.pos = start_pos + value.find(word)
            if chars:
                t.startchar = start_char + value.find(word)
                t.endchar = t.startchar + len(word)
            yield t
Developer ID: baiyanghese, Project: yuan, Lines of code: 16, Source: search.py


Example 9: __call__

    def __call__(self, value, mode='', positions=False, **kwargs):
        assert isinstance(value, unicode), "%r is not unicode" % value
        token = Token(**kwargs)
        tagger = MeCab.Tagger('mecabrc')
        result = tagger.parse(value.encode("utf8")).decode('utf8')

        cur = 0
        for match in re.compile("(\S+)\s+(\S+)\n").finditer(result):
            category = match.group(2).split(",")
            if 0 < len(category) and \
                    (category[0] == u'名詞' or category[0] == u'動詞' \
                         or category[0] == u'形容詞' or category[0] == u'副詞'):
                token.text = match.group(1)
                token.pos  = cur
                yield token
            cur += len(match.group(1))
Developer ID: faggion, Project: sample-codes, Lines of code: 16, Source: mecab.py


Example 10: _merge_matched_tokens

 def _merge_matched_tokens(self, tokens):
     token_ready = False
     for t in tokens:
         if not t.matched:
             yield t
             continue
         if not token_ready:
             token = Token(**t.__dict__)
             token_ready = True
         elif t.startchar <= token.endchar:
             if t.endchar > token.endchar:
                 token.text += t.text[token.endchar-t.endchar:]
                 token.endchar = t.endchar
         else:
             yield token
             token_ready = False
     if token_ready:
         yield token
Developer ID: alexeib2014, Project: Flask-Android, Lines of code: 18, Source: highlight.py


Example 11: __call__

    def __call__(self, value, start_pos=0, positions=False, **kwargs):
        """
        Tokenizer behaviour:

        Input: u"text/x.moin.wiki;charset=utf-8"
        Output: u"text/x.moin.wiki;charset=utf-8", u"text", u"x.moin.wiki", u"charset=utf-8"

        Input: u"application/pdf"
        Output: u"application/pdf", u"application", u"pdf"

        :param value: String for tokenization
        :param start_pos: The position number of the first token. For example,
            if you set start_pos=2, the tokens will be numbered 2,3,4,...
            instead of 0,1,2,...
        :param positions: Whether to record token positions in the token.
        """
        assert isinstance(value, unicode), "{0!r} is not unicode".format(value)
        if u'/' not in value: # Add '/' if the user forgot to do this
            value += u'/'
        pos = start_pos
        tk = Token()
        tp = Type(value)
        # we need to yield the complete contenttype in one piece,
        # so we can find it with Term(CONTENTTYPE, contenttype):
        if tp.type is not None and tp.subtype is not None:
            # note: we do not use "value" directly, so Type.__unicode__ can normalize it:
            tk.text = unicode(tp)
            if positions:
                tk.pos = pos
                pos += 1
            yield tk
        # now yield the pieces:
        tk.text = tp.type
        if positions:
            tk.pos = pos
            pos += 1
        yield tk
        if tp.subtype is not None:
            tk.text = tp.subtype
            if positions:
                tk.pos = pos
                pos += 1
            yield tk
        for key, value in tp.parameters.items():
            tk.text = u"{0}={1}".format(key, value)
            if positions:
                tk.pos = pos
                pos += 1
            yield tk
Developer ID: pombredanne, Project: moin2, Lines of code: 49, Source: analyzers.py


Example 12: __call__

 def __call__(self, value, positions=False, chars=False,
              keeporiginal=False, removestops=True,
              start_pos=0, start_char=0, mode='', **kwargs):
     assert isinstance(value, text_type), "%r is not unicode" % value
     t = Token(positions, chars, removestops=removestops, mode=mode,
         **kwargs)
     nlpir.Init(nlpir.PACKAGE_DIR, nlpir.UTF8_CODE)
     pynlpir.open()
     pynlpir.open(encoding='utf-8')
     seglist = pynlpir.segment(value,)
     for w in seglist:
         t.original = t.text = w
         t.boost = 1.0
         if positions:
             t.pos=start_pos+value.find(w)
         if chars:
             t.startchar=start_char+value.find(w)
             t.endchar=start_char+value.find(w)+len(w)
         yield t      # yield each segmented word as a (reused) Token from the generator
Developer ID: tianmaxingkonggrant, Project: tianmaflaskblog, Lines of code: 19, Source: ChineseAnalyzer.py


Example 13: __call__

 def __call__(self, value, positions=False, chars=False,
              keeporiginal=False, removestops=True,
              start_pos=0, start_char=0,
              tokenize=True, mode='', **kwargs):
     assert isinstance(value, text_type), "%r is not unicode" % value
     t = Token(positions, chars, removestops=removestops, mode=mode)
     if not tokenize:
         t.original = t.text = value
         t.boost = 1.0
         if positions:
             t.pos = start_pos
         if chars:
             t.startchar = start_char
             t.endchar = start_char + len(value)
         yield t
     else:
         if self.strip:
             strip = lambda s: s.strip()
         else:
             strip = lambda s: s
         pos = start_pos
         startchar = start_char
         for s, l in \
                 [(strip(s), len(s)) for s in
                  self.segmenter.tokenize(value)]:
             t.text = s
             t.boost = 1.0
             if keeporiginal:
                 t.original = t.text
             t.stopped = False
             if positions:
                 t.pos = pos
                 pos += 1
             if chars:
                 t.startchar = startchar
                 startchar += l
                 t.endchar = startchar
             yield t
Developer ID: knzm, Project: whoosh-igo, Lines of code: 38, Source: TinySegmenterTokenizer.py


Example 14: __call__

 def __call__(
     self,
     value,
     positions=False,
     chars=False,
     keeporiginal=False,
     removestops=True,
     start_pos=0,
     start_char=0,
     tokenize=True,
     mode="",
     **kwargs
 ):
     assert isinstance(value, text_type), "%r is not unicode" % value
     t = Token(positions, chars, removestops=removestops, mode=mode)
     if not tokenize:
         t.original = t.text = value
         t.boost = 1.0
         if positions:
             t.pos = start_pos
         if chars:
             t.startchar = start_char
             t.endchar = start_char + len(value)
         yield t
     else:
         pos = start_pos
         for m in self.tagger.parse(value):
             t.text = m.surface
             t.feature = m.feature
             # TODO: use base form.
             t.boost = 1.0
             if keeporiginal:
                 t.original = t.text
             t.stopped = False
             if positions:
                 t.pos = pos
                 pos += 1
             if chars:
                 t.startchar = start_char + m.start
                 t.endchar = t.startchar + len(m.surface)
             yield t
Developer ID: knzm, Project: whoosh-igo, Lines of code: 41, Source: IgoTokenizer.py


Example 15: frozenset

# STOP_WORDS = frozenset(('a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'can',
#                         'for', 'from', 'have', 'if', 'in', 'is', 'it', 'may',
#                         'not', 'of', 'on', 'or', 'tbd', 'that', 'the', 'this',
#                         'to', 'us', 'we', 'when', 'will', 'with', 'yet',
#                         'you', 'your',u'的',u'了',u'和',u'的',u'我',u'你',u'地',u'我们',u'我的',u'你们',u'你的',u'','_'))

STOP_WORDS = frozenset(line.strip() for line in open("stopwords.dic", 'r'))
print 'stopwords'

accepted_chars = re.compile(ur"[\u4E00-\u9FA5]+")

class ChineseTokenizer(Tokenizer):
    def __call__(self,text,**kargs):
        words = jieba.tokenize(text,mode="search")
        token  = Token()
        for (w,start_pos,stop_pos) in words:
            if not accepted_chars.match(w):
                if len(w)>1:
                    pass
                else:
                    continue
            token.original = token.text = w
            token.pos = start_pos
            token.startchar = start_pos
            token.endchar = stop_pos
            yield token

def ChineseAnalyzer(stoplist=STOP_WORDS,minsize=1,stemfn=stem,cachesize=50000):
    return ChineseTokenizer() | LowercaseFilter() | StopFilter(stoplist=stoplist,minsize=minsize)\
                                        |StemFilter(stemfn=stemfn, ignore=None,cachesize=cachesize)
Developer ID: darlinglele, Project: portal, Lines of code: 30, Source: analyzer.py
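An analyzer such as the ChineseAnalyzer defined above is normally attached to a field in a Whoosh schema. The following is a hedged usage sketch; the field names, the sample document, and the "indexdir" directory are made up for illustration and are not part of the quoted example.

import os
from whoosh import index
from whoosh.fields import Schema, TEXT, ID

# Hypothetical schema and index directory; "path", "content" and "indexdir"
# are illustrative names only.
schema = Schema(path=ID(stored=True),
                content=TEXT(analyzer=ChineseAnalyzer()))
if not os.path.exists("indexdir"):
    os.mkdir("indexdir")
ix = index.create_in("indexdir", schema)

writer = ix.writer()
writer.add_document(path=u"/doc1", content=u"全文检索的中文分词示例")
writer.commit()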


Example 16: __call__

 def __call__(self, value, positions=False, chars=False,
              keeporiginal=False, removestops=True,
              start_pos=0, start_char=0,
              tokenize=True, mode='', **kwargs):
     assert isinstance(value, text_type), "%r is not unicode" % value
     t = Token(positions, chars, removestops=removestops, mode=mode)
     if not tokenize:
         t.original = t.text = value
         t.boost = 1.0
         if positions:
             t.pos = start_pos
         if chars:
             t.startchar = start_char
             t.endchar = start_char + len(value)
         yield t
     else:
         pos = start_pos
         offset = start_char
         byte_offset = 0
         # TODO: support other encodings
         byte = value.encode('utf-8')
         m = self.tagger.parseToNode(byte)
         while m:
             if len(m.surface) == 0:
                 m = m.next
                 continue
             t.text = m.surface.decode('utf-8')
             t.feature = m.feature
             # TODO: use base form.
             t.boost = 1.0
             if keeporiginal:
                 t.original = t.text
             t.stopped = False
             if positions:
                 t.pos = pos
                 pos += 1
             if chars:
                 s = byte_offset + m.rlength - m.length
                 e = s + m.length
                 t.startchar = offset + \
                     len(byte[byte_offset:s].decode('utf-8'))
                 t.endchar = t.startchar + len(byte[s:e].decode('utf-8'))
                 offset = t.endchar
                 byte_offset = e
             m = m.next
             yield t
Developer ID: knzm, Project: whoosh-igo, Lines of code: 46, Source: MeCabTokenizer.py



Note: The whoosh.analysis.Token class examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation hosting platforms. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.

