
Python tokenize.tokenize Function Code Examples


This article collects typical usage examples of the tokenize.tokenize function from the Python standard library's tokenize module. If you are unsure what tokenize.tokenize does, how to call it, or what it looks like in real code, the curated examples below may help.



The following shows 20 code examples of the tokenize function, sorted by popularity by default.
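Before the examples, note an important API difference: most snippets below call tokenize.tokenize(readline, tokeneater), which is the Python 2 form, where the function drove a callback. In Python 3 the callback form is gone: tokenize.tokenize() takes only a readline callable that must yield bytes, and returns an iterator of TokenInfo tuples. A minimal Python 3 sketch:

import io
import tokenize

source = b"x = 1 + 2\n"
# tokenize.tokenize() wants a bytes-producing readline and yields
# TokenInfo namedtuples: (type, string, start, end, line).
for tok in tokenize.tokenize(io.BytesIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))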

Example 1: check

def check(file):
    if os.path.isdir(file) and not os.path.islink(file):
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    f = open(file)
    reset_globals()
    try:
        tokenize.tokenize(f.readline, tokeneater)
    except NannyNag, nag:
        badline = nag.get_lineno()
        return badline
Author: Bail-jw | Project: mediacomp-jes | Lines: 27 | Source: JESTabnanny.py
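Example 1 relies on the Python 2 callback form. A sketch of the same pattern on Python 3, where you iterate the token stream and unpack each token into the old-style eater yourself (tokeneater stands in for the example's own callback and is not defined here):

import tokenize

def check_py3(path, tokeneater):
    # The (readline, tokeneater) form was removed in Python 3; feed the
    # callback manually from the generator instead.
    with open(path, "rb") as f:
        for tok in tokenize.tokenize(f.readline):
            tokeneater(tok.type, tok.string, tok.start, tok.end, tok.line)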


Example 2: extracts

	def extracts(self):
		# calculate escapes
		make_escapes(self.options.escape)

		# calculate all keywords
		self.options.keywords.extend(default_keywords)

		# slurp through all the files
		eater = TokenEater(self.options)
		fp = self.pythonCode
		closep = 1
		try:
			# eater.set_filename(self.filename)
			try:
				tokenize.tokenize(fp.readline, eater)
			except tokenize.TokenError, e:
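				# NOTE: 'filename' below is undefined in this excerpt; the
				# upstream pygettext code supplies the name of the file
				# being processed here.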
				print >> sys.stderr, '%s: %s, line %d, column %d' % (
					e[0], filename, e[1][0], e[1][1])
		finally:
			if closep:
				fp.close()

		# write the output
		fp = sys.stdout
		closep = 0
		res=[]
		try:
			res=eater.write(fp)
		finally:
			if closep:
				fp.close()
		return res
Author: bigbigbug | Project: workspace | Lines: 32 | Source: k_pygettext.py


Example 3: format

    def format(self, formatter):
        """ Parse and send the colored source.
        """
        # store line offsets in self.lines
        self.lines = [0, 0]
        pos = 0
        while 1:
            pos = self.raw.find('\n', pos) + 1
            if not pos: break
            self.lines.append(pos)
        self.lines.append(len(self.raw))

        self._code_id = sha.new(self.raw.encode(config.charset)).hexdigest()
        self.request.write(formatter.code_area(1, self._code_id, 'ColorizedPython', self.show_num, self.num_start, self.num_step))
        self.formatter = formatter
        self.request.write(formatter.code_line(1))
        #len('%d' % (len(self.lines)-1, )))
        
        # parse the source and write it
        self.pos = 0
        text = StringIO.StringIO(self.raw)
        try:
            tokenize.tokenize(text.readline, self)
        except tokenize.TokenError, ex:
            msg = ex[0]
            line = ex[1][0]
            self.request.write("<b>ERROR: %s</b><br>%s\n" % (
                msg, self.formatter.text(self.raw[self.lines[line]:])))
Author: mikejamesthompson | Project: orgsites | Lines: 28 | Source: python.py


Example 4: inspect_traceback

def inspect_traceback(tb):
    """Inspect a traceback and its frame, returning source for the expression
    where the exception was raised, with simple variable replacement performed
    and the line on which the exception was raised marked with '>>'
    """
    log.debug('inspect traceback %s', tb)

    # we only want the innermost frame, where the exception was raised
    while tb.tb_next:
        tb = tb.tb_next
        
    frame = tb.tb_frame
    lines, exc_line = tbsource(tb)
        
    # figure out the set of lines to grab.
    inspect_lines, mark_line = find_inspectable_lines(lines, exc_line)
    src = StringIO(textwrap.dedent(''.join(inspect_lines)))
    exp = Expander(frame.f_locals, frame.f_globals)

    while inspect_lines:
        try:
            tokenize.tokenize(src.readline, exp)
        except tokenize.TokenError, e:
            # this can happen if our inspectable region happens to butt up
            # against the end of a construct like a docstring with the closing
            # """ on separate line
            log.debug("Tokenizer error: %s", e)
            inspect_lines.pop(0)
            mark_line -= 1
            src = StringIO(textwrap.dedent(''.join(inspect_lines)))
            exp = Expander(frame.f_locals, frame.f_globals)
            continue
        break
Author: thraxil | Project: gtreed | Lines: 33 | Source: inspector.py


Example 5: getTokens

def getTokens(command):
    """Return list of token tuples for command."""

    # In case the command is unicode try encoding it
    if isinstance(command, str):
        try:
            command = command.encode('utf-8')
        except UnicodeEncodeError:
            pass # otherwise leave it alone
                
    f = StringIO(command)
    # tokens is a list of token tuples, each looking like: 
    # (type, string, (srow, scol), (erow, ecol), line)
    tokens = []
    # Can't use list comprehension:
    #   tokens = [token for token in tokenize.generate_tokens(f.readline)]
    # because of need to append as much as possible before TokenError.
    try:
##        This code wasn't backward compatible with Python 2.1.3.
##
##        for token in tokenize.generate_tokens(f.readline):
##            tokens.append(token)

        # This works with Python 2.1.3 (with nested_scopes).
        if not PY3:
            def eater(*args):
                tokens.append(args)
            tokenize.tokenize_loop(f.readline, eater)
        else:
            tokenize.tokenize(f.readline)
    except tokenize.TokenError:
        # This is due to a premature EOF, which we expect since we are
        # feeding in fragments of Python code.
        pass
    return tokens    
Author: oneApple | Project: Phoenix | Lines: 35 | Source: introspect.py
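Two remarks on Example 5: in the PY3 branch the generator returned by tokenize.tokenize(f.readline) is never iterated, so no tokens are collected there; and on Python 3, tokenize.tokenize() requires a bytes readline while tokenize.generate_tokens() accepts str. A minimal sketch of a Python 3 version that actually fills the list (assuming command is a str):

import io
import tokenize

def get_tokens_py3(command):
    """Collect as many tokens as possible before a TokenError."""
    tokens = []
    try:
        # generate_tokens() accepts str input; tokenize.tokenize()
        # would need a bytes readline instead.
        for tok in tokenize.generate_tokens(io.StringIO(command).readline):
            tokens.append(tok)
    except tokenize.TokenError:
        # Premature EOF is expected when feeding code fragments.
        pass
    return tokens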


Example 6: format

    def format(self, formatter, form):
        """ Parse and send the colored source.
        """
        # store line offsets in self.lines
        self.lines = [0, 0]
        pos = 0
        while 1:
            pos = string.find(self.raw, '\n', pos) + 1
            if not pos: break
            self.lines.append(pos)
        self.lines.append(len(self.raw))

        # parse the source and write it
        self.pos = 0
        text = cStringIO.StringIO(self.raw)
        self.out.write("""<?xml version="1.0" encoding="utf-8" ?>
        <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
        <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
        <head>
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
        <meta name="generator" content="Color.py" />
        <title>%s</title>
        <link rel="stylesheet" href="../doc.css" type="text/css" />
        </head>
        <body>
        <pre class="literal-block">""" % self.title)
        try:
            tokenize.tokenize(text.readline, self)
        except tokenize.TokenError, ex:
            msg = ex[0]
            line = ex[1][0]
            self.out.write("<h3>ERROR: %s</h3>%s\n" % (
                msg, self.raw[self.lines[line]:]))
Author: auag92 | Project: n2dm | Lines: 33 | Source: Color.py


Example 7: _read_block

def _read_block(input, startlineno):
    r"""Read an indented block of expressions

    startlineno is *zero* origined line number.

    pre::
        input.readline  # must have readline function

    Examples:
    #>>> _read_block(StringIO('\tfoo:\n'), 0)
    #0
    >>> _read_block(StringIO('\tpost[]: True\n'), 0)
    ('post', [], [('True', 1)], 1)
    >>> _read_block(StringIO('\tpre: 5 + 6 > 10\n'), 0)
    ('pre', [], [('5 + 6 > 10', 1)], 1)
    >>> _read_block(StringIO('\tpost:\n\t\t5 + 6 < 12\n\t\t2 + 2 == 4\n'), 0)
    ('post', [], [('5 + 6 < 12', 2), ('2 + 2 == 4', 3)], 3)
    >>> _read_block(StringIO('\tpost[foo.bar]: # changes\n' \
    ...                      '\t\tlen(foo.bar) > 0\n'), 0)
    ('post', [['foo', 'bar']], [('len ( foo . bar ) > 0', 2)], 2)

    Handles double colons (for re-structured text)::
    >>> _read_block(StringIO('\tpre:: 5 + 6 > 10\n'), 0)
    ('pre', [], [('5 + 6 > 10', 1)], 1)
    """
    t = tokenizer(input, startlineno)
    try:
        tokenize.tokenize(input.readline, t.next)
    except Done:
        pass
    input.seek(t.offset)
    return (t.keyword, t.decls, t.exprs, t.endlineno)
Author: Bafou | Project: SVL | Lines: 32 | Source: contract.py
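Example 7 stops the Python 2 tokenizer midway by raising a Done sentinel from the callback. With the Python 3 generator API the same early exit is just a break; a small sketch:

import io
import tokenize

# Stop tokenizing once we move past the first physical line.
for tok in tokenize.generate_tokens(io.StringIO("a = 1\nb = 2\n").readline):
    if tok.start[0] > 1:
        break
    print(tok.string)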


Example 8: py_strings

def py_strings(dir, domain="none", exclude=()):
    """Retrieve all Python messages from `dir` that are in the `domain`.
    """
    eater = TokenEater()
    make_escapes(0)
    for filename in find_files(
            # We want to include cpy and vpy scripts as well
            # dir, '*.py', exclude=('extract.py', 'pygettext.py')+tuple(exclude)):  # noqa
            dir,
            '*.*py',
            exclude=('extract.py', 'pygettext.py') + tuple(exclude)
        ):
        fp = codecs.open(filename, 'r', DEFAULT_CHARSET)
        try:
            eater.set_filename(filename)
            try:
                tokenize.tokenize(fp.readline, eater)
            except tokenize.TokenError, e:
                print >> sys.stderr, '%s: %s, line %d, column %d' % (
                    e[0], filename, e[1][0], e[1][1])
        finally:
            fp.close()
    # One limitation of the Python message extractor is that it cannot
    # determine the domain of the string, since it is not contained anywhere
    # directly. The only way this could be done is by loading the module and
    # inspect the '_' function. For now we simply assume that all the found
    # strings have the domain the user specified.
    return eater.getCatalog()
Author: staeff | Project: i18ndude | Lines: 28 | Source: extract.py


Example 9: format

    def format(self, linenumber=True):
        """ Parse and send the colored source.
        """
        # store line offsets in self.lines
        self.lines = [0, 0]
        pos = 0
        while 1:
            pos = self.raw.find('\n', pos) + 1
            if not pos: break
            self.lines.append(pos)
        self.lines.append(len(self.raw))

        # write line numbers
        if linenumber:
            self.result.append('<table border="0"><tr><td align="right" valign="top">')
            self.result.append('<td align="right" valign="top"><pre><font face="Lucida,Courier New" color="%s">' % _colors[_TEXT])
            for idx in range(1, len(self.lines)-1):
                self.result.append('%3d \n' % idx)
            self.result.append('</font></pre></td><td valign="top">')

        # parse the source and write it
        self.pos = 0
        text = StringIO.StringIO(self.raw)
        self.result.append('<pre><font face="Lucida,Courier New">')
        try:
            tokenize.tokenize(text.readline, self)
        except tokenize.TokenError, ex:
            msg = ex[0]
            line = ex[1][0]
            self.result.append("<h3>ERROR: %s</h3>%s\n" % (
                msg, self.raw[self.lines[line]:]))
Author: LaoMa3953 | Project: ulipad | Lines: 31 | Source: colourize.py


Example 10: parse

    def parse(self, source):
        """ Parse and send the colored source.
        """
        self.source = string.expandtabs(source)
        self.tokenlist = []

        # store line offsets in self.offset
        self.offset = [0, 0]
        self.lines = 0
        pos = 0
        while pos < len(self.source):
            self.lines = self.lines + 1
            pos = string.find(self.source, '\n', pos) + 1
            if not pos: break
            self.offset.append(pos)
        self.offset.append(len(self.source))

        # parse the source
        self.pos = 0
        text = cStringIO.StringIO(self.source)
        try:
            tokenize.tokenize(text.readline, self)
        except tokenize.TokenError, ex:
            msg = ex[0]
            line = ex[1][0]
            raise ParseError("ERROR %s\n%s" % (
                msg, self.source[self.offset[line]:]))
Author: nkzxw | Project: solrex | Lines: 27 | Source: grok.py


Example 11: format

    def format(self, formatter):
        """ Parse and send the colored source.
        """
        # store line offsets in self.lines
        self.lines = [0, 0]
        pos = 0
        while 1:
            try:
                pos = self.raw.index('\n', pos) + 1
            except ValueError:
                break
            self.lines.append(pos)
        self.lines.append(len(self.raw))

        self.result = [] # collects output

        self._code_id = hash_new('sha1', self.raw.encode(config.charset)).hexdigest()
        self.result.append(formatter.code_area(1, self._code_id, 'ColorizedPython', self.show_num, self.num_start, self.num_step))
        self.formatter = formatter
        self.result.append(formatter.code_line(1))
        #len('%d' % (len(self.lines)-1, )))

        # parse the source and write it
        self.pos = 0
        text = StringIO.StringIO(self.raw)
        try:
            tokenize.tokenize(text.readline, self)
        except IndentationError, ex:
            msg = ex[0]
            errmsg = (self.formatter.linebreak() +
                      self.formatter.strong(1) + "ERROR: %s" % msg + self.formatter.strong(0) +
                      self.formatter.linebreak())
            self.result.append(errmsg)
Author: steveyen | Project: moingo | Lines: 33 | Source: text_python.py


Example 12: colorize

    def colorize(self):
        """
        Return an HTML string that renders the source code for the
        module that was specified in the constructor.
        """
        # Initialize all our state variables
        self.pos = 0
        self.cur_line = []
        self.context = []
        self.indents = []
        self.lineno = 1
        self.def_name = None

        # Load the module's text.
        self.text = open(self.module_filename).read()
        self.text = self.text.expandtabs().rstrip()+'\n'

        # Construct the line_offsets table.
        self.find_line_offsets()

        num_lines = self.text.count('\n')+1
        self.linenum_size = len(`num_lines+1`)
        
        # Call the tokenizer, and send tokens to our `tokeneater()`
        # method.  If anything goes wrong, then fall-back to using
        # the input text as-is (with no colorization).
        try:
            output = StringIO()
            self.out = output.write
            tokenize.tokenize(StringIO(self.text).readline, self.tokeneater)
            html = output.getvalue()
        except tokenize.TokenError, ex:
            html = self.text
Author: Angeleena | Project: selenium | Lines: 33 | Source: html_colorize.py


Example 13: stringioize

 def stringioize(self, string):
     """(internal)
     
     the following is really just a stupid hack to emulate the quirky
     behavior of the string tokenizer in java; it is a historical artifact
     that just isn't badly broken enough to require being removed yet.
     """
     self.tokens = []
     self._neg = None
     fd = StringIO.StringIO(string)
     tokenize.tokenize(fd.readline,self.eat)
     self.reset()
     sn = self.next()
     try:
         while sn.ttype != tokenize.ERRORTOKEN:
             sn = self.next()
         # this is the best part.  It works completely by accident.
         # After 3 tries, you end up with a """ on the end of your
         # string, which is a multi-line string -- the tokenizer
         # will throw an exception for that (god knows why it
         # doesn't throw an exception for an EOF in a single-line
         # string...)
         self.stringioize(string+'"')
     except:
         pass
         # import traceback
         # traceback.print_exc()
     self.reset()
Author: lhl | Project: songclub | Lines: 28 | Source: sentence.py


Example 14: findUsages

def findUsages():
    global directory, objMap, sharedFolders
    suffixes = (".py", ".csv", ".tsv")
    directories = [directory]
    # avoid folders that will be processed anyhow
    for shared in sharedFolders:
        skip = False
        tmpS = shared + "/"
        for folder in directories:
            tmpD = folder + "/"
            if platform.system() in ('Microsoft', 'Windows'):
                tmpS = tmpS.lower()
                tmpD = tmpD.lower()
            if tmpS.startswith(tmpD):
                skip = True
                break
        if not skip:
            directories.append(shared)

    for directory in directories:
        for root, dirnames, filenames in os.walk(directory):
            for filename in filter(lambda x: x.endswith(suffixes), filenames):
                currentFile = open(os.path.join(root, filename))
                if filename.endswith(".py"):
                    tokenize.tokenize(currentFile.readline, handle_token)
                elif filename.endswith(".csv"):
                    handleDataFiles(currentFile, ",")
                elif filename.endswith(".tsv"):
                    handleDataFiles(currentFile, "\t")
                currentFile.close()
    currentFile = open(objMap)
    tokenize.tokenize(currentFile.readline, handle_token)
    currentFile.close()
Author: kai66673 | Project: qt-creator | Lines: 33 | Source: findUnusedObjects.py


Example 15: check_roundtrip

    def check_roundtrip(self, f):
        """
        Test roundtrip for `untokenize`. `f` is an open file or a string.
        The source code in f is tokenized to both 5- and 2-tuples.
        Both sequences are converted back to source code via
        tokenize.untokenize(), and the latter tokenized again to 2-tuples.
        The test fails if the 3 pair tokenizations do not match.

        When untokenize bugs are fixed, untokenize with 5-tuples should
        reproduce code that does not contain a backslash continuation
        following spaces.  A proper test should test this.
        """
        # Get source code and original tokenizations
        if isinstance(f, str):
            code = f.encode('utf-8')
        else:
            code = f.read()
            f.close()
        readline = iter(code.splitlines(keepends=True)).__next__
        tokens5 = list(tokenize(readline))
        tokens2 = [tok[:2] for tok in tokens5]
        # Reproduce tokens2 from pairs
        bytes_from2 = untokenize(tokens2)
        readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__
        tokens2_from2 = [tok[:2] for tok in tokenize(readline2)]
        self.assertEqual(tokens2_from2, tokens2)
        # Reproduce tokens2 from 5-tuples
        bytes_from5 = untokenize(tokens5)
        readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__
        tokens2_from5 = [tok[:2] for tok in tokenize(readline5)]
        self.assertEqual(tokens2_from5, tokens2)
Author: chidea | Project: GoPythonDLLWrapper | Lines: 31 | Source: test_tokenize.py
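The roundtrip property Example 15 verifies can be reproduced in a few lines; a minimal Python 3 sketch:

import io
import tokenize

source = b"def f(a, b):\n    return a + b\n"
toks = list(tokenize.tokenize(io.BytesIO(source).readline))
# untokenize() on full 5-tuples returns bytes that tokenize back to the
# same (type, string) pairs, even where whitespace differs from the input.
rebuilt = tokenize.untokenize(toks)
retoks = list(tokenize.tokenize(io.BytesIO(rebuilt).readline))
assert [t[:2] for t in retoks] == [t[:2] for t in toks]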


Example 16: filter

def filter(filename):
    global name, module_has_docstring

    path,name = os.path.split(filename)
    root,ext  = os.path.splitext(name)

    output("namespace "+root+" {\n",(0,0))

    # set module name for tok_eater to use if there's a module doc string
    name = root

    sys.stderr.write('Filtering "'+filename+'"...')
    f = open(filename)
    tokenize.tokenize(f.readline, tok_eater)
    f.close()
    print_comment((0,0))

    output("\n",(0,0))
    output("}  // end of namespace\n",(0,0))

    if not module_has_docstring:
        # Put in default namespace documentation
        output('/** \\namespace '+root+' \n',(0,0))
        output('    \\brief Module "%s" */\n'%(root),(0,0))

    for s in outbuffer:
        outfile.write(s)
Author: JosephBoyd | Project: pylinda | Lines: 27 | Source: pythfilter.py


Example 17: findMultiLineQuote

 def findMultiLineQuote(s):
   quotelist = []
   def eatToken(type, string, begin, end, _, quotelist=quotelist):
     if type == token.STRING and RE_MULTI_LINE_QUOTE_BEGIN.match(string):
       quotelist.append((string, begin,end))
   tokenize.tokenize(StringIO(s).readline, eatToken)
   return quotelist
Author: derobert | Project: debianlink-xbmc | Lines: 7 | Source: spyceCompile.py


Example 18: __call__

 def __call__(self):
     """ Parse and send the colored source.
     """
     # store line offsets in self.lines
     self.lines = [0, 0]
     pos = 0
     while True:
         pos = self.raw.find(b'\n', pos) + 1
         if not pos:
             break
         self.lines.append(pos)
     self.lines.append(len(self.raw))
     # parse the source and write it
     self.pos = 0
     text = BytesIO(self.raw)
     self.out.write(b'<pre class="python">\n')
     try:
         if six.PY2:
             tokenize.tokenize(text.readline, self.format_tokenizer)
         else:
             for args in tokenize.tokenize(text.readline):
                 self.format_tokenizer(*args)
     except tokenize.TokenError as ex:
         msg = ex.args[0]
         line = ex.args[1][0]
         self.out.write(b"<h5 class='error>'ERROR: %s%s</h5>" % (
             msg, self.raw[self.lines[line]:]))
     self.out.write(b'\n</pre>\n')
     return safe_nativestring(self.out.getvalue())
Author: plone | Project: Products.PortalTransforms | Lines: 29 | Source: python.py


Example 19: format

    def format(self, formatter, form):
        ''' Parse and send the colored source.
        '''
        # Store line offsets in self.lines
        self.lines = [0, 0]
        pos = 0

        # Gather lines
        while 1:
            pos = string.find(self.raw, '\n', pos) + 1
            if not pos: break
            self.lines.append(pos)
        self.lines.append(len(self.raw))

        # Wrap text in a filelike object
        self.pos = 0
        text = cStringIO.StringIO(self.raw)

        # Html start
        self.doPageStart()

        # Parse the source.
        ## Tokenize calls the __call__ 
        ## function for each token till done.
        try:
            tokenize.tokenize(text.readline, self)
        except tokenize.TokenError, ex:
            msg = ex[0]
            line = ex[1][0]
            self.out.write("<h3>ERROR: %s</h3>%s\n" % (
                msg, self.raw[self.lines[line]:]))
Author: FreakTheMighty | Project: pyjamas | Lines: 31 | Source: pyColourize.py


Example 20: format

    def format(self, filename):
        global HEADER
        # store line offsets in self.lines
        self.lines = [0, 0]
        pos = 0
        while 1:
           pos = string.find(self.raw, '\n', pos) + 1
           if not pos: break
           self.lines.append(pos)
        self.lines.append(len(self.raw))

        # parse the source and write it
        self.pos = 0
        text = cStringIO.StringIO(self.raw)
        HEADER = HEADER.replace("$FILE", filename)
        if LOCAL_CONVERT:
           HEADER = HEADER.replace("$HIDE_INFO", "display: none;")
        self.out.write(HEADER)
        try:
           tokenize.tokenize(text.readline, self)
        except tokenize.TokenError, ex:
           msg = ex[0]
           line = ex[1][0]
           self.out.write("<h3>ERROR: %s</h3>%s\n" % (
              msg, self.raw[self.lines[line]:]))
           self.out.write('</font></pre>')
Author: AndroidMarv | Project: sikuli | Lines: 26 | Source: sikuli2html.py



Note: The tokenize.tokenize examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's license before redistributing or reusing the code, and do not reproduce this article without permission.

