Python lexers.PythonLexer Class Code Examples


This article collects typical usage examples of Python's pygments.lexers.PythonLexer class. If you have been wondering what exactly PythonLexer does, how to use it, or what it looks like in real code, the curated class examples below should help.



Twenty code examples of the PythonLexer class are shown below, ordered by popularity by default.
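
Before diving into the examples, a minimal self-contained sketch of driving PythonLexer by hand may serve as a baseline (the snippet being lexed and its variable names are illustrative only):

import pygments
from pygments.lexers import PythonLexer

code = "for i in range(len(items)):\n    pass\n"

lexer = PythonLexer()
lexer.add_filter('tokenmerge')  # merge adjacent tokens of the same type

for ttype, value in pygments.lex(code, lexer):
    print(ttype, repr(value))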

Example 1: checkBadLoopCollect

def checkBadLoopCollect(code):
    """
    Look for bad loops like 'for i in range(len(list))'
    Documentation: https://youtu.be/OSGv2VnC0go?t=4m47s
    """
    sequence = [(Token.Keyword, r'^for$'),
                (Token.Name, r'^\w+$'),
                (Token.Operator.Word, r'^in$'),
                (Token.Name.Builtin, r'^range$|^xrange$'),
                (Token.Punctuation, r'^\($'),
                (Token.Name.Builtin, r'^len$'),
                (Token.Punctuation, r'^\($'),
                (Token.Name, r'^\w+$')]
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    badLoopCollectIdiom = PythonIdiom('badLoop')

    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux -1
        badLoopCollectIdiom.addNew(lineNumber)
    log("badLoopCollect found in lines {0}".format(badLoopCollectIdiom.getLines()))

    return badLoopCollectIdiom
Developer: jjmerchante, Project: Pythonic, Lines: 28, Source file: pythonic.py
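
The helpers _findSeqInTokens, PythonIdiom, and log come from the surrounding pythonic.py module and are not shown in these excerpts (the same applies to Examples 2 and 4 below). Purely as an illustration of the calling convention, a matcher of the assumed shape might look like the following sketch; the real helper may well differ:

import re

def _findSeqInTokens(sequence, tokens):
    """Hypothetical reconstruction: scan `tokens` for a consecutive run
    matching the (token type, regex) pairs in `sequence`, skipping
    whitespace-only tokens; return a 1-based line offset, or -1."""
    lines = 1
    matched = 0
    for ttype, value in tokens:
        lines += value.count('\n')
        if not value.strip():
            continue  # whitespace between the tokens of interest
        wantType, wantRegex = sequence[matched]
        if ttype in wantType and re.match(wantRegex, value):
            matched += 1
            if matched == len(sequence):
                return lines
        else:
            # restart, re-testing the current token against the first pair
            matched = 0
            wantType, wantRegex = sequence[0]
            if ttype in wantType and re.match(wantRegex, value):
                matched = 1
    return -1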


Example 2: findBadUseImport

def findBadUseImport(code):
    """
    Find uses of 'from foo import *'
    Documentation: http://python.net/~goodger/projects/pycon/2007/idiomatic/handout.html#importing
                   https://docs.python.org/2/howto/doanddont.html#from-module-import
    """
    sequence = [(Token.Keyword.Namespace, r'^from$'),
                (Token.Name.Namespace, r'.*'),
                (Token.Keyword.Namespace, r'^import$'),
                (Token.Operator, r'\*')]
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    badUseImport = PythonIdiom('badImport')

    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux -1
        badUseImport.addNew(lineNumber)
    log("badUseImport found in lines {0}".format(badUseImport.getLines()))

    return badUseImport
Developer: jjmerchante, Project: Pythonic, Lines: 25, Source file: pythonic.py


Example 3: findDocstring

def findDocstring(code):
    """Find the use of documentation in the functions, classes or script
    Documentation: https://www.python.org/dev/peps/pep-0257/
    """
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')

    classDefToken = (Token.Keyword, '^class$')
    functDefToken = (Token.Keyword, '^def$')
    tokens = pygments.lex(code, lexer)

    docIdiom = PythonIdiom('docstring')
    docstringFound = defaultdict(int)
    typeDoc = 'module'
    lineNumber = 1


    for ttype, word in tokens:
        if _sameToken((ttype, word), classDefToken):
            typeDoc = 'class'
        elif _sameToken((ttype, word), functDefToken):
            typeDoc = 'function'
        elif ttype == Token.Literal.String.Doc:
            docstringFound[typeDoc] += 1
            docIdiom.addNew(lineNumber)
        lineNumber += _getNewLines((ttype, word))

    for typeDoc in docstringFound:
        log("type %s: %d found" % (typeDoc, docstringFound[typeDoc]))
    log('DocString found in lines: ' + str(docIdiom.getLines()))
    return docIdiom
Developer: jjmerchante, Project: Pythonic, Lines: 31, Source file: pythonic.py
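
_getNewLines is another unshown helper; judging by how the line counter is advanced, it presumably returns the number of line breaks a token contributes. A plausible one-liner (an assumption, not the project's code):

def _getNewLines(tokenPair):
    # Hypothetical: count the newlines carried by this token's text.
    _ttype, word = tokenPair
    return word.count('\n')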


Example 4: checkNotRange

def checkNotRange(code):
    """
    Check for 'for x in [0, 1, 2]' instead of 'for x in (x)range(...)'
    Documentation: https://youtu.be/OSGv2VnC0go?t=3m4s
    """
    sequence = [(Token.Keyword, r'^for$'),
                (Token.Name, r'^\w+$'),
                (Token.Operator.Word, r'^in$'),
                (Token.Punctuation, r'^\[$'),
                (Token.Literal.Number.Integer, r'^\d$')]

    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    notRangeIdiom = PythonIdiom('notRange')

    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux -1
        notRangeIdiom.addNew(lineNumber)
    log("badForIn found in lines {0}".format(notRangeIdiom.getLines()))
    return notRangeIdiom
Developer: jjmerchante, Project: Pythonic, Lines: 25, Source file: pythonic.py


Example 5: python_line_tokens

def python_line_tokens(code_lines, blank_lines=False):
    from pygments.lexers import PythonLexer
    lexer = PythonLexer()
    code_str = "".join(code_lines)
    all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
    line_tokens = []
    current_line = []

    for t in all_tokens:
        if t[1] == u"\n":
            line_tokens.append(current_line)
            current_line = []
        else:
            current_line.append(t)

    rows = []
    for i, tokens in enumerate(line_tokens):
        # Check for blank line
        line_str = code_lines[i].rstrip()
        if (not blank_lines) and len(line_str.strip()) == 0:
            continue

        for t in tokens:
            kind, value = str(t[0]), t[1]
            yield line_str, i, kind, value, t
Developer: synesthesiam, Project: eyecode, Lines: 25, Source file: __init__.py
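
Note that rows = [] above is vestigial: the function never returns it and instead yields one tuple per token. A usage sketch (the file path is illustrative):

with open("example.py") as f:  # hypothetical input file
    code_lines = f.readlines()

for line_str, i, kind, value, tok in python_line_tokens(code_lines):
    print(i + 1, kind, repr(value))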


Example 6: basicStructure

def basicStructure(code):
    sequence = []
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    for token in tokens:
        print(token)
Developer: jjmerchante, Project: Pythonic, Lines: 7, Source file: pythonicSimple.py


Example 7: python_prettify

def python_prettify(code, style):
    lexer = PythonLexer()
    lexer.add_filter(VisibleWhitespaceFilter(spaces="&nbsp"))
    pretty_code = highlight(
        code, lexer, HtmlFormatter(
            linenos=style, linenostart=0))
    # print(pretty_code)
    return format_html('{}', mark_safe(pretty_code))
Developer: ifjorissen, Project: vrfy, Lines: 8, Source file: pretty_code.py
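
This snippet relies on Django utilities as well as Pygments. The imports it assumes are reconstructed below for context (an assumption; the source module is not shown). Note also that spaces="&nbsp" appears to be missing the entity's trailing semicolon ("&nbsp;").

# Reconstructed import context for python_prettify (assumed, not quoted).
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from pygments import highlight
from pygments.filters import VisibleWhitespaceFilter
from pygments.formatters import HtmlFormatter
from pygments.lexers import PythonLexer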


Example 8: redent

def redent(s):
    """
    Shamelessly stolen from infobob(#python bot) code
    https://code.launchpad.net/~pound-python/infobat/infobob
    """
    lexer = PythonLexer()
    lexer.add_filter(_RedentFilter())
    return highlight(s, lexer, NullFormatter())
Developer: stranac, Project: raritythebot, Lines: 8, Source file: utils.py


Example 9: _analyse_source_code

    def _analyse_source_code(self, source_code):

        lexer = PythonLexer()
        token_source = lexer.get_tokens(source_code)
        for token_type, value in token_source:
            if len(value) > 3 and value.startswith('gl') and ord('A') <= ord(value[2]) <= ord('Z'):
                self.gl_functions.add(value)
            elif len(value) > 3 and value.startswith('GL_'):
                self.gl_constants.add(value)
Developer: rougier, Project: PyOpenGLng, Lines: 9, Source file: opengl-usage.py
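
The first branch selects names that look like OpenGL entry points: longer than three characters, starting with gl, with an uppercase third letter (glBegin, glVertex3f, and so on). Factored out for illustration (the function name here is ours, not the project's):

def looks_like_gl_function(value):
    # The same predicate as in the loop above, in isolation.
    return len(value) > 3 and value.startswith('gl') and 'A' <= value[2] <= 'Z'

assert looks_like_gl_function('glBegin')
assert not looks_like_gl_function('global_x')  # lowercase third letter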


Example 10: python_token_metrics

def python_token_metrics(code_lines, indent_size=4):
    from pygments.lexers import PythonLexer
    indent_regex = re.compile(r"^\s*")

    lexer = PythonLexer()
    code_str = "".join(code_lines)
    all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
    line_tokens = []
    current_line = []

    for t in all_tokens:
        if t[1] == u"\n":
            line_tokens.append(current_line)
            current_line = []
        else:
            current_line.append(t)

    rows = []
    for i, tokens in enumerate(line_tokens):
        line_number = i + 1

        # Check for blank line
        line_str = code_lines[i].rstrip()
        if len(line_str.strip()) == 0:
            rows.append([line_number, 0, 0, 0, 0, 0, 0])
            continue

        assert len(tokens) > 0, "No tokens for line"

        num_keywords = 0
        num_identifiers = 0
        num_operators = 0
        line_length = len(line_str)
        line_indent = len(indent_regex.findall(line_str)[0]) / indent_size

        # Indentation is not considered
        line_str_noindent = line_str.lstrip()
        line_length_noindent = len(line_str_noindent)
        whitespace_prop = line_str_noindent.count(" ") / float(line_length_noindent)

        for t in tokens:
            kind, value = str(t[0]), t[1]
            if kind.startswith(u"Token.Keyword"):
                num_keywords += 1
            elif kind.startswith(u"Token.Name"):
                num_identifiers += 1
            elif kind.startswith(u"Token.Operator"):
                num_operators += 1

        rows.append([line_number, line_length_noindent, num_keywords,
            num_identifiers, num_operators, whitespace_prop,
            line_indent])

    columns = ["line", "line_length", "keywords",
               "identifiers", "operators", "whitespace_prop",
               "line_indent"]
    return pandas.DataFrame(rows, columns=columns)
Developer: synesthesiam, Project: eyecode, Lines: 57, Source file: __init__.py
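
Besides the Pygments import shown, the function assumes re and pandas are imported at module level in its source file. A usage sketch (the file path is illustrative):

with open("example.py") as f:  # hypothetical input file
    df = python_token_metrics(f.readlines())
print(df[["line", "keywords", "identifiers", "operators"]].head())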


Example 11: testWorksAsExpected

    def testWorksAsExpected(self):
        code = '''
        """ Increment number of decision points in function."""
        #if tok and tok.text in McCabeKeywords:
        if (tok[0][0] == b'Keyword') and tok[1] in McCabeKeywords:
            self.metrics['mccabe'] += 1
        '''
        result = [(Token.Text, u'        '), (Token.Literal.String.Doc, u'""" Increment number of decision points in function."""'), (Token.Text, u'\n'), (Token.Text, u'        '), (Token.Comment, u'#if tok and tok.text in McCabeKeywords:'), (Token.Text, u'\n'), (Token.Text, u'        '), (Token.Keyword, u'if'), (Token.Text, u' '), (Token.Punctuation, u'('), (Token.Name, u'tok'), (Token.Punctuation, u'['), (Token.Literal.Number.Integer, u'0'), (Token.Punctuation, u']'), (Token.Punctuation, u'['), (Token.Literal.Number.Integer, u'0'), (Token.Punctuation, u']'), (Token.Text, u' '), (Token.Operator, u'=='), (Token.Text, u' '), (Token.Name, u'b'), (Token.Literal.String, u"'"), (Token.Literal.String, u'Keyword'), (Token.Literal.String, u"'"), (Token.Punctuation, u')'), (Token.Text, u' '), (Token.Operator.Word, u'and'), (Token.Text, u' '), (Token.Name, u'tok'), (Token.Punctuation, u'['), (Token.Literal.Number.Integer, u'1'), (Token.Punctuation, u']'), (Token.Text, u' '), (Token.Operator.Word, u'in'), (Token.Text, u' '), (Token.Name, u'McCabeKeywords'), (Token.Punctuation, u':'), (Token.Text, u'\n'), (Token.Text, u'            '), (Token.Name.Builtin.Pseudo, u'self'), (Token.Operator, u'.'), (Token.Name, u'metrics'), (Token.Punctuation, u'['), (Token.Literal.String, u"'"), (Token.Literal.String, u'mccabe'), (Token.Literal.String, u"'"), (Token.Punctuation, u']'), (Token.Text, u' '), (Token.Operator, u'+'), (Token.Operator, u'='), (Token.Text, u' '), (Token.Literal.Number.Integer, u'1'), (Token.Text, u'\n'), (Token.Text, u'        '), (Token.Text, u'\n')]

        lex = PythonLexer()
        tokenList = lex.get_tokens(code)
        self.assertEqual(list(tokenList), result)
Developer: GadgetSteve, Project: metrics, Lines: 12, Source file: test_lexer.py


Example 12: PythonTest

class PythonTest(unittest.TestCase):
    def setUp(self):
        self.lexer = PythonLexer()

    def test_cls_builtin(self):
        """
        Tests that a cls token gets interpreted as a Token.Name.Builtin.Pseudo

        """
        fragment = 'class TestClass():\n    @classmethod\n    def hello(cls):\n        pass\n'
        tokens = [
            (Token.Keyword, 'class'),
            (Token.Text, ' '),
            (Token.Name.Class, 'TestClass'),
            (Token.Punctuation, '('),
            (Token.Punctuation, ')'),
            (Token.Punctuation, ':'),
            (Token.Text, '\n'),
            (Token.Text, '    '),
            (Token.Name.Decorator, '@classmethod'),
            (Token.Text, '\n'),
            (Token.Text, '    '),
            (Token.Keyword, 'def'),
            (Token.Text, ' '),
            (Token.Name.Function, 'hello'),
            (Token.Punctuation, '('),
            (Token.Name.Builtin.Pseudo, 'cls'),
            (Token.Punctuation, ')'),
            (Token.Punctuation, ':'),
            (Token.Text, '\n'),
            (Token.Text, '        '),
            (Token.Keyword, 'pass'),
            (Token.Text, '\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
Developer: sol, Project: pygments, Lines: 35, Source file: test_python.py


Example 13: __init__

    def __init__(self, makecolors=True, style=standardcols):
        if makecolors:
            self.makecolorpairs()
        if style is None:
            style = standardcols
        self.style = style
        self.lexer = PythonLexer()
Developer: wackywendell, Project: ipycurses, Lines: 7, Source file: cursesparser.py


Example 14: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
                PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value
            else:
                yield index, token, value
Developer: BoPeng, Project: SOS, Lines: 7, Source file: converter.py
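
This override is the standard Pygments recipe for promoting selected identifiers to keywords by subclassing PythonLexer. A self-contained version of the same pattern (the EXTRA_KEYWORDS contents below are placeholders; SOS's real keyword set is not shown here):

from pygments.lexers import PythonLexer
from pygments.token import Name, Keyword

class ExtendedPythonLexer(PythonLexer):
    EXTRA_KEYWORDS = {'input', 'output', 'depends'}  # placeholder set

    def get_tokens_unprocessed(self, text):
        for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value
            else:
                yield index, token, value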


Example 15: __init__

    def __init__(self):
        """ constructor """
        self._lexer = PythonLexer()
        self._formatter = HtmlFormatter()
        self._document = QtGui.QTextDocument()
        self._document.setDefaultStyleSheet(self._formatter.get_style_defs())
        self._format_cache = dict()
Developer: knossos-project, Project: PythonQt, Lines: 7, Source file: PygmentsHighlighter.py


Example 16: PygmentsHighlighter

class PygmentsHighlighter(object):
    """ highlight python code with a QSyntaxHighlighter,
        callable class (e.g. function with a state) to """

    def __init__(self):
        """ constructor """
        self._lexer = PythonLexer()
        self._formatter = HtmlFormatter()
        self._document = QtGui.QTextDocument()
        self._document.setDefaultStyleSheet(self._formatter.get_style_defs())
        self._format_cache = dict()

    def __call__(self, code):
        """ makes this class callable, actually do the highlightning """
        index = 0
        for token, text in self._lexer.get_tokens(code):
            length = len(text)
            char_format = self._get_format(token)
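            # NOTE: `pygmentsHighlighter` below is undefined in this excerpt;
            # the original presumably applies the format via the enclosing
            # QSyntaxHighlighter's setFormat().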
            pygmentsHighlighter._setFormat(index, length, char_format)
            index += length

    def _get_format(self, token):
        """ get the QTextCharFormat for a token """
        if token in self._format_cache:
            return self._format_cache[token]

        # get format from document
        code, html = next(self._formatter._format_lines([(token, u"dummy")]))
        self._document.setHtml(html)
        char_format = QtGui.QTextCursor(self._document).charFormat()

        # cache result
        self._format_cache[token] = char_format

        return char_format
Developer: knossos-project, Project: PythonQt, Lines: 35, Source file: PygmentsHighlighter.py


Example 17: __init__

    def __init__(self,
            prompt='>>> ',
            continuation='... ',
            parent=None):
        QTextEdit.__init__(self, parent)
        self.shutting_down = False
        self.compiler = CommandCompiler()
        self.buf = self.old_buf = []
        self.history = History([''], dynamic.get('console_history', []))
        self.prompt_frame = None
        self.allow_output = False
        self.prompt_frame_format = QTextFrameFormat()
        self.prompt_frame_format.setBorder(1)
        self.prompt_frame_format.setBorderStyle(QTextFrameFormat.BorderStyle_Solid)
        self.prompt_len = len(prompt)

        self.doc.setMaximumBlockCount(int(prefs['scrollback']))
        self.lexer = PythonLexer(ensurenl=False)
        self.tb_lexer = PythonTracebackLexer()

        self.context_menu = cm = QMenu(self) # {{{
        cm.theme = ThemeMenu(cm)
        # }}}

        self.formatter = Formatter(prompt, continuation, style=prefs['theme'])
        p = QPalette()
        p.setColor(p.Base, QColor(self.formatter.background_color))
        p.setColor(p.Text, QColor(self.formatter.color))
        self.setPalette(p)

        self.key_dispatcher = { # {{{
                Qt.Key_Enter : self.enter_pressed,
                Qt.Key_Return : self.enter_pressed,
                Qt.Key_Up : self.up_pressed,
                Qt.Key_Down : self.down_pressed,
                Qt.Key_Home : self.home_pressed,
                Qt.Key_End : self.end_pressed,
                Qt.Key_Left : self.left_pressed,
                Qt.Key_Right : self.right_pressed,
                Qt.Key_Backspace : self.backspace_pressed,
                Qt.Key_Delete : self.delete_pressed,
        } # }}}

        motd = textwrap.dedent('''\
        # Python {0}
        # {1} {2}
        '''.format(sys.version.splitlines()[0], __appname__,
            __version__))

        sys.excepthook = self.unhandled_exception

        self.controllers = []
        QTimer.singleShot(0, self.launch_controller)


        with EditBlock(self.cursor):
            self.render_block(motd)
Developer: AEliu, Project: calibre, Lines: 57, Source file: console.py


Example 18: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword, value

            elif token is Name and value in self.EXTRA_FUNC:
                yield index, Name.Function, value
            else:
                yield index, token, value
Developer: TigerTopher, Project: FINALX, Lines: 9, Source file: IDE.py


Example 19: get_context

def get_context(string):
    """ Assuming the cursor is at the end of the specified string, get the
        context (a list of names) for the symbol at cursor position.
    """
    lexer = PythonLexer()
    context = []
    reversed_tokens = list(lexer.get_tokens(string))
    reversed_tokens.reverse()

    # Pygments often tacks on a newline when none is specified in the input.
    # Remove this newline.
    if reversed_tokens and reversed_tokens[0][1].endswith('\n') and \
            not string.endswith('\n'):
        reversed_tokens.pop(0)

    current_op = ''
    for token, text in reversed_tokens:

        if is_token_subtype(token, Token.Name):

            # Handle a trailing separator, e.g. 'foo.bar.'
            if current_op == '.':
                if not context:
                    context.insert(0, '')

            # Handle non-separator operators and punctuation.
            elif current_op:
                break

            context.insert(0, text)
            current_op = ''

        # Pygments doesn't understand that, e.g., '->' is a single operator
        # in C++. This is why we have to build up an operator from
        # potentially several tokens.
        elif token is Token.Operator or token is Token.Punctuation:
            current_op = text + current_op

        # Break on anything that is not an Operator, Punctuation, or Name.
        else:
            break

    return context
Developer: bkerler, Project: PythonQt, Lines: 43, Source file: PythonCompleter.py
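
A usage sketch; the expected results are inferred by tracing the logic above, so treat them as assumptions (the excerpt also assumes Token and is_token_subtype are imported from pygments.token):

print(get_context("foo.bar.ba"))  # expected: ['foo', 'bar', 'ba']
print(get_context("foo.bar."))    # expected: ['foo', 'bar', ''] (trailing separator)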


Example 20: __init__

    def __init__(self, parent, lexer=None):
        super(PygmentsHighlighter, self).__init__(parent)

        try:
            self._lexer = get_lexer_by_name(lexer)
        except Exception:
            self._lexer = PythonLexer()

        # Caches for formats and brushes.
        self._brushes = {}
        self._formats = {}
Developer: fabioz, Project: pyvmmonitor-qt, Lines: 11, Source file: pygments_highlighter.py



Note: The pygments.lexers.PythonLexer class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's License before distributing or reusing the code; do not reproduce without permission.

