Python tokenize.open Function Code Examples


This article collects typical usage examples of Python's tokenize.open function. If you have been wondering what exactly tokenize.open does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.



Twenty code examples of the open function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
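Before the examples, a minimal sketch of the pattern they all share may help: tokenize.open() opens a Python source file in text mode and detects its encoding from the PEP 263 coding cookie or BOM instead of assuming a default. The helper name read_source and the file name example.py below are placeholders for illustration only, not part of any example that follows.

import tokenize

def read_source(path):
    # tokenize.open() returns an ordinary text-mode file object whose
    # .encoding attribute records the encoding that was detected.
    with tokenize.open(path) as f:
        return f.read(), f.encoding

# Usage (assumes a file named example.py exists in the current directory):
# source, encoding = read_source("example.py")
# print(encoding)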

Example 1: convert_dataset_metadata

def convert_dataset_metadata(in_dir, out_dir):
    meta_dict = OrderedDict()
    meta_dict["BIDSVersion"] = "1.0.0"
    
    study_key_file = os.path.join(in_dir, "study_key.txt")
    if os.path.exists(study_key_file):
        meta_dict["Name"] = tokenize.open(study_key_file).read().strip()
    else:
        # Use the last path component as the dataset name; a trailing
        # separator leaves an empty final element after split().
        if in_dir.endswith(os.sep):
            meta_dict["Name"] = in_dir.split(os.sep)[-2]
        else:
            meta_dict["Name"] = in_dir.split(os.sep)[-1]
        
    ref_file = os.path.join(in_dir, "references.txt")
    if os.path.exists(ref_file):
        meta_dict["ReferencesAndLinks"] = tokenize.open(ref_file).read().strip()
        
    lic_file = os.path.join(in_dir, "license.txt")
    if os.path.exists(lic_file):
        meta_dict["License"] = tokenize.open(lic_file).read().strip()
        
    json.dump(meta_dict, open(os.path.join(out_dir,
                                           "dataset_description.json"), "w"),
                  sort_keys=True, indent=4, separators=(',', ': '))
              
    readme = os.path.join(in_dir, "README")
    if os.path.exists(readme):
        shutil.copy(readme, os.path.join(out_dir,"README"))
    elif os.path.exists(readme + ".txt"):
        shutil.copy(readme + ".txt", os.path.join(out_dir,"README"))
Developer: chrisfilo, Project: openfmri2bids, Lines: 30, Source: converter.py


Example 2: investigate_pep8_status

    def investigate_pep8_status(self, filename):
        sys.stdout.write("%s: " % (filename,))
        sys.stdout.flush()
        with tokenize.open(filename) as i:
            source = i.read()
        if not is_valid_source(source):
            return
        errors = self.errors_in_source(source)

        if errors:
            print(', '.join(errors))
        else:
            print('clean')
            return

        changed = True
        while changed:
            changed = False
            for error, source in list(self.best_examples.items()):
                self.note_source(source)
                target = self.example_file_for_error(error)
                if os.path.exists(target):
                    existing_length = len(tokenize.open(target).read())
                    if existing_length <= len(source):
                        continue
                    else:
                        print((
                            "A smaller example for %s (%d < %d). Simplifying "
                            "an example from %s"
                        ) % (
                            error,
                            len(source), existing_length,
                            self.trash_file(source)))

                else:

                    print(
                        '%s is new. Simplifying an example from %s' % (
                            error, self.trash_file(source)))

                changed = True
                example = self.find_minimal_example_from_source(
                    source,
                    is_example=lambda source:
                        error in self.errors_in_source(
                            source),
                )
                assert len(example) <= len(source)
                with open(target, 'w') as o:
                    o.write(example)
Developer: patrickod, Project: peppy, Lines: 50, Source: __main__.py


Example 3: main

def main():
    def make_callback(text):
        return count_calls_decorator(
            lambda file_, start, stop: log(text, file_, start, stop)
        )

    nci_callback = make_callback('None-coalescing `if` block')
    nco_callback = make_callback('[Possible] None-coalescing `or`')
    nct_callback = make_callback('None-coalescing ternary')
    sna_callback = make_callback('Safe navigation `and`')
    sni_callback = make_callback('Safe navigation `if` block')
    snt_callback = make_callback('Safe navigation ternary')

    files = sys.argv[1:]
    if files:
        expanded_files = []
        for file_ in files:
            if '*' in file_:
                expanded_files.extend(glob.glob(file_))
            else:
                expanded_files.append(file_)
    else:
        files = glob.glob(os.path.join(sys.prefix, 'Lib', '**', '*.py'))

    for file_ in files:
        try:
            source = tokenize.open(file_)
        except (SyntaxError, UnicodeDecodeError):
            continue

        with source:
            try:
                tree = ast.parse(source.read(), filename=file_)
            except SyntaxError:
                continue

            NoneCoalesceIfBlockVisitor(file_, nci_callback).visit(tree)
            NoneCoalesceOrVisitor(file_, nco_callback).visit(tree)
            NoneCoalesceTernaryVisitor(file_, nct_callback).visit(tree)
            SafeNavAndVisitor(file_, sna_callback).visit(tree)
            SafeNavIfBlockVisitor(file_, sni_callback).visit(tree)
            SafeNavTernaryVisitor(file_, snt_callback).visit(tree)

    print('Total None-coalescing `if` blocks: {}'
          .format(get_call_count(nci_callback)))

    print('Total [possible] None-coalescing `or`: {}'
          .format(get_call_count(nco_callback)))

    print('Total None-coalescing ternaries: {}'
          .format(get_call_count(nct_callback)))

    print('Total Safe navigation `and`: {}'
          .format(get_call_count(sna_callback)))

    print('Total Safe navigation `if` blocks: {}'
          .format(get_call_count(sni_callback)))

    print('Total Safe navigation ternaries: {}'
          .format(get_call_count(snt_callback)))
Developer: emilyemorehouse, Project: peps, Lines: 60, Source: find-pep505.py


Example 4: patch

    def patch(self, filename):
        self.current_file = filename

        with tokenize.open(filename) as fp:
            content = fp.read()

        old_content = content
        for operation in self.operations:
            content = operation.patch(content)

        if content == old_content:
            # no change
            self.check(content)
            if self.options.to_stdout:
                self.write_stdout(content)
            return False

        with open(filename, "rb") as fp:
            encoding, _ = tokenize.detect_encoding(fp.readline)

        if not self.options.quiet:
            print("Patch %s" % filename)
        if not self.options.to_stdout:
            with open(filename, "w", encoding=encoding) as fp:
                fp.write(content)
        else:
            self.write_stdout(content)
        self.check(content)
        return True
Developer: mscuthbert, Project: sixer, Lines: 29, Source: sixer.py


Example 5: read_py_file

def read_py_file(filepath):
    if sys.version_info < (3, ):
        return open(filepath, 'rU').read()
    else:
        # see https://docs.python.org/3/library/tokenize.html#tokenize.detect_encoding
        # first just see if the file is properly encoded
        try:
            with open(filepath, 'rb') as f:
                tokenize.detect_encoding(f.readline)
        except SyntaxError as err:
            # this warning is issued:
            #   (1) in badly authored files (contains non-utf8 in a comment line)
            #   (2) a coding is specified, but wrong and
            #   (3) no coding is specified, and the default
            #       'utf8' fails to decode.
            #   (4) the encoding specified by a pep263 declaration did not match
            #       with the encoding detected by inspecting the BOM
            raise CouldNotHandleEncoding(filepath, err)

        try:
            return tokenize.open(filepath).read()
            # this warning is issued:
            #   (1) if utf-8 is specified, but latin1 is used with something like \x0e9 appearing
            #       (see http://stackoverflow.com/a/5552623)
        except UnicodeDecodeError as err:
            raise CouldNotHandleEncoding(filepath, err)
Developer: landscapeio, Project: prospector, Lines: 26, Source: encoding.py


Example 6: main

def main():
    if len(sys.argv) > 1 and sys.argv[1] == "+diag":
        del sys.argv[1]
        diag = True
    else:
        diag = False

    if len(sys.argv) > 1 and sys.argv[1] == "+compile":
        del sys.argv[1]
        compile_only = True
    else:
        compile_only = False

    ddb_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    dmgr = DeviceManager(DeviceDB(ddb_path))

    with tokenize.open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
        testcase_vars = {'__name__': 'testbench', 'dmgr': dmgr}
        exec(testcase_code, testcase_vars)

    try:
        core = dmgr.get("core")
        if compile_only:
            core.compile(testcase_vars["entrypoint"], (), {})
        else:
            core.run(testcase_vars["entrypoint"], (), {})
    except CompileError as error:
        if not diag:
            exit(1)
Developer: m-labs, Project: artiq, Lines: 30, Source: embedding.py


Example 7: _verify_pre_check

def _verify_pre_check(filepath):
    """Check student code for certain issues."""
    # Make sure the program doesn't crash for students.
    # Could use some improvement for better logging and error reporting.
    try:
        # Check for inline "pylint:" comment, which may indicate a student
        # trying to disable a check.
        with tokenize.open(os.path.expanduser(filepath)) as f:
            for tok_type, content, _, _, _ in tokenize.generate_tokens(f.readline):
                if tok_type != tokenize.COMMENT:
                    continue
                match = pylint.utils.OPTION_RGX.search(content)
                if match is not None:
                    print('ERROR: string "pylint:" found in comment. ' +
                          'No check run on file `{}`\n'.format(filepath))
                    return False
    except IndentationError as e:
        print('ERROR: python_ta could not check your code due to an ' +
              'indentation error at line {}'.format(e.lineno))
        return False
    except tokenize.TokenError as e:
        print('ERROR: python_ta could not check your code due to a ' +
              'syntax error in your file')
        return False
    return True
Developer: nigef, Project: pyta, Lines: 25, Source: __init__.py


Example 8: check

def check(file):
    """check(file_or_dir)

    If file_or_dir is a directory and not a symbolic link, then recursively
    descend the directory tree named by file_or_dir, checking all .py files
    along the way. If file_or_dir is an ordinary Python source file, it is
    checked for whitespace related problems. The diagnostic messages are
    written to standard output using the print statement.
    """

    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("%r: listing directory" % (file,))
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    try:
        f = tokenize.open(file)
    except IOError as msg:
        errprint("%r: I/O Error: %s" % (file, msg))
        return

    if verbose > 1:
        print("checking %r ..." % file)

    try:
        process_tokens(tokenize.generate_tokens(f.readline))

    except tokenize.TokenError as msg:
        errprint("%r: Token Error: %s" % (file, msg))
        return

    except IndentationError as msg:
        errprint("%r: Indentation Error: %s" % (file, msg))
        return

    except NannyNag as nag:
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
            print("offending line: %r" % (line,))
            print(nag.get_msg())
        else:
            if ' ' in file: file = '"' + file + '"'
            if filename_only: print(file)
            else: print(file, badline, repr(line))
        return

    finally:
        f.close()

    if verbose:
        print("%r: Clean bill of health." % (file,))
Developer: 7modelsan, Project: kbengine, Lines: 60, Source: tabnanny.py


Example 9: _open

    def _open(self, filename):
        if filename.endswith('.py') and hasattr(tokenize, 'open'):
            # On Python 3.2 and newer, open Python files with tokenize.open().
            # This function uses the encoding cookie to get the encoding.
            return tokenize.open(filename)
        else:
            return open(filename)
Developer: andreesg, Project: collective.article, Lines: 7, Source: grep.py


Example 10: on_file

    def on_file(self, tree: MypyFile, type_map: Dict[Expression, Type]) -> None:
        self.last_xml = None
        path = os.path.relpath(tree.path)
        if stats.is_special_module(path):
            return
        if path.startswith('..'):
            return
        if 'stubs' in path.split('/'):
            return

        visitor = stats.StatisticsVisitor(inferred=True, typemap=type_map, all_nodes=True)
        tree.accept(visitor)

        root = etree.Element('mypy-report-file', name=path, module=tree._fullname)
        doc = etree.ElementTree(root)
        file_info = FileInfo(path, tree._fullname)

        with tokenize.open(path) as input_file:
            for lineno, line_text in enumerate(input_file, 1):
                status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
                file_info.counts[status] += 1
                etree.SubElement(root, 'line',
                                 number=str(lineno),
                                 precision=stats.precision_names[status],
                                 content=line_text[:-1])
        # Assumes a layout similar to what XmlReporter uses.
        xslt_path = os.path.relpath('mypy-html.xslt', path)
        transform_pi = etree.ProcessingInstruction('xml-stylesheet',
                'type="text/xsl" href="%s"' % cgi.escape(xslt_path, True))
        root.addprevious(transform_pi)
        self.schema.assertValid(doc)

        self.last_xml = doc
        self.files.append(file_info)
Developer: alexandrul, Project: mypy, Lines: 34, Source: report.py


Example 11: updatecache

def updatecache(filename, module_globals=None):
    """Update a cache entry and return its list of lines.
    If something's wrong, print a message, discard the cache entry,
    and return an empty list."""

    if filename in cache:
        if len(cache[filename]) != 1:
            del cache[filename]
    if not filename or (filename.startswith("<") and filename.endswith(">")):
        return []

    fullname = filename
    try:
        stat = os.stat(fullname)
    except OSError:
        basename = filename

        # Realise a lazy loader based lookup if there is one
        # otherwise try to lookup right now.
        if lazycache(filename, module_globals):
            try:
                data = cache[filename][0]()
            except (ImportError, OSError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return []
                cache[filename] = (len(data), None, [line + "\n" for line in data.splitlines()], fullname)
                return cache[filename][2]

        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        if os.path.isabs(filename):
            return []

        for dirname in sys.path:
            try:
                fullname = os.path.join(dirname, basename)
            except (TypeError, AttributeError):
                # Not sufficiently string-like to do anything useful with.
                continue
            try:
                stat = os.stat(fullname)
                break
            except OSError:
                pass
        else:
            return []
    try:
        with tokenize.open(fullname) as fp:
            lines = fp.readlines()
    except OSError:
        return []
    if lines and not lines[-1].endswith("\n"):
        lines[-1] += "\n"
    size, mtime = stat.st_size, stat.st_mtime
    cache[filename] = size, mtime, lines, fullname
    return lines
Developer: ClayMason, Project: BlackrockFBP, Lines: 60, Source: linecache.py


Example 12: check_spelling

def check_spelling():
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {'[Bb]ehaviour', '[Qq]uitted', '[Ll]ikelyhood', '[Ss]ucessfully',
             '[Oo]ccur[^r .]', '[Ss]eperator', '[Ee]xplicitely', '[Rr]esetted',
             '[Aa]uxillary', '[Aa]ccidentaly', '[Aa]mbigious', '[Ll]oosly',
             '[Ii]nitialis', '[Cc]onvienence', '[Ss]imiliar', '[Uu]ncommited',
             '[Rr]eproducable'}

    # Words which look better when splitted, but might need some fine tuning.
    words |= {'[Kk]eystrings', '[Ww]ebelements', '[Mm]ouseevent',
              '[Kk]eysequence', '[Nn]ormalmode', '[Ee]ventloops',
              '[Ss]izehint', '[Ss]tatemachine', '[Mm]etaobject',
              '[Ll]ogrecord', '[Ff]iletype'}

    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _py_files():
            with tokenize.open(fn) as f:
                if fn == os.path.join('.', 'scripts', 'misc_checks.py'):
                    continue
                for line in f:
                    for w in words:
                        if re.search(w, line) and fn not in seen[w]:
                            print('Found "{}" in {}!'.format(w, fn))
                            seen[w].append(fn)
                            ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Developer: B0073D, Project: qutebrowser, Lines: 33, Source: misc_checks.py


Example 13: check_spelling

def check_spelling(target):
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {'behaviour', 'quitted', 'likelyhood', 'sucessfully',
             'occur[^r .]', 'seperator', 'explicitely', 'resetted',
             'auxillary', 'accidentaly', 'ambigious', 'loosly',
             'initialis', 'convienence', 'similiar', 'uncommited',
             'reproducable'}

    # Words which look better when splitted, but might need some fine tuning.
    words |= {'keystrings', 'webelements', 'mouseevent', 'keysequence',
              'normalmode', 'eventloops', 'sizehint', 'statemachine',
              'metaobject', 'logrecord', 'monkeypatch', 'filetype'}

    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _py_files(target):
            with tokenize.open(fn) as f:
                if fn == os.path.join('scripts', 'misc_checks.py'):
                    continue
                for line in f:
                    for w in words:
                        if re.search(w, line) and fn not in seen[w]:
                            print("Found '{}' in {}!".format(w, fn))
                            seen[w].append(fn)
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Developer: JIVS, Project: qutebrowser, Lines: 31, Source: misc_checks.py


Example 14: on_file

    def on_file(self,
                tree: MypyFile,
                type_map: Dict[Expression, Type],
                options: Options) -> None:
        path = os.path.relpath(tree.path)
        visitor = stats.StatisticsVisitor(inferred=True, filename=tree.fullname(),
                                          typemap=type_map, all_nodes=True)
        tree.accept(visitor)

        class_name = os.path.basename(path)
        file_info = FileInfo(path, tree._fullname)
        class_element = etree.Element('class',
                                      filename=path,
                                      complexity='1.0',
                                      name=class_name)
        etree.SubElement(class_element, 'methods')
        lines_element = etree.SubElement(class_element, 'lines')

        with tokenize.open(path) as input_file:
            class_lines_covered = 0
            class_total_lines = 0
            for lineno, _ in enumerate(input_file, 1):
                status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
                hits = 0
                branch = False
                if status == stats.TYPE_EMPTY:
                    continue
                class_total_lines += 1
                if status != stats.TYPE_ANY:
                    class_lines_covered += 1
                    hits = 1
                if status == stats.TYPE_IMPRECISE:
                    branch = True
                file_info.counts[status] += 1
                line_element = etree.SubElement(lines_element, 'line',
                                                number=str(lineno),
                                                precision=stats.precision_names[status],
                                                hits=str(hits),
                                                branch=str(branch).lower())
                if branch:
                    line_element.attrib['condition-coverage'] = '50% (1/2)'
            class_element.attrib['branch-rate'] = '0'
            class_element.attrib['line-rate'] = get_line_rate(class_lines_covered,
                                                              class_total_lines)
            # parent_module is set to whichever module contains this file.  For most files, we want
            # to simply strip the last element off of the module.  But for __init__.py files,
            # the module == the parent module.
            parent_module = file_info.module.rsplit('.', 1)[0]
            if file_info.name.endswith('__init__.py'):
                parent_module = file_info.module

            if parent_module not in self.root_package.packages:
                self.root_package.packages[parent_module] = CoberturaPackage(parent_module)
            current_package = self.root_package.packages[parent_module]
            packages_to_update = [self.root_package, current_package]
            for package in packages_to_update:
                package.total_lines += class_total_lines
                package.covered_lines += class_lines_covered
            current_package.classes[class_name] = class_element
Developer: chadrik, Project: mypy, Lines: 59, Source: report.py


Example 15: test_getline

    def test_getline(self):
        with tokenize.open(self.file_name) as fp:
            for index, line in enumerate(fp):
                if not line.endswith('\n'):
                    line += '\n'

                cached_line = linecache.getline(self.file_name, index + 1)
                self.assertEqual(line, cached_line)
Developer: 1st1, Project: cpython, Lines: 8, Source: test_linecache.py


Example 16: build_from_file

    def build_from_file(self, filename):
        self.filename = filename
        modtxt = ""
        with tokenize.open(filename) as f:
            modtxt = f.read()
        self.source = modtxt
        modast = ast.parse(modtxt, mode="exec")
        self.build_from_ast(modast)
Developer: fredokun, Project: MrPython, Lines: 8, Source: prog_ast.py


Example 17: main

def main():
    if not len(sys.argv) == 2:
        print("Expected exactly one module filename", file=sys.stderr)
        exit(1)

    def process_diagnostic(diag):
        print("\n".join(diag.render()), file=sys.stderr)
        if diag.level in ("fatal", "error"):
            exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    with tokenize.open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
        testcase_vars = {'__name__': 'testbench'}
        exec(testcase_code, testcase_vars)

    device_db_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    device_mgr = DeviceManager(DeviceDB(device_db_path))

    dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.pyon")
    dataset_mgr = DatasetManager(DatasetDB(dataset_db_path))

    argument_mgr = ProcessArgumentManager({})

    def embed():
        experiment = testcase_vars["Benchmark"]((device_mgr, dataset_mgr, argument_mgr))

        stitcher = Stitcher(core=experiment.core, dmgr=device_mgr)
        stitcher.stitch_call(experiment.run, (), {})
        stitcher.finalize()
        return stitcher

    stitcher = embed()
    module = Module(stitcher)
    target = OR1KTarget()
    llvm_ir = target.compile(module)
    elf_obj = target.assemble(llvm_ir)
    elf_shlib = target.link([elf_obj])

    benchmark(lambda: embed(),
              "ARTIQ embedding")

    benchmark(lambda: Module(stitcher),
              "ARTIQ transforms and validators")

    benchmark(lambda: target.compile(module),
              "LLVM optimizations")

    benchmark(lambda: target.assemble(llvm_ir),
              "LLVM machine code emission")

    benchmark(lambda: target.link([elf_obj]),
              "Linking")

    benchmark(lambda: target.strip(elf_shlib),
              "Stripping debug information")
Developer: m-labs, Project: artiq, Lines: 58, Source: perf_embedding.py


Example 18: check_spelling

def check_spelling():
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {
        "[Bb]ehaviour",
        "[Qq]uitted",
        "Ll]ikelyhood",
        "[Ss]ucessfully",
        "[Oo]ccur[^r .]",
        "[Ss]eperator",
        "[Ee]xplicitely",
        "[Rr]esetted",
        "[Aa]uxillary",
        "[Aa]ccidentaly",
        "[Aa]mbigious",
        "[Ll]oosly",
        "[Ii]nitialis",
        "[Cc]onvienence",
        "[Ss]imiliar",
        "[Uu]ncommited",
        "[Rr]eproducable",
        "[Aa]n [Uu]ser",
    }

    # Words which look better when splitted, but might need some fine tuning.
    words |= {
        "[Ww]ebelements",
        "[Mm]ouseevent",
        "[Kk]eysequence",
        "[Nn]ormalmode",
        "[Ee]ventloops",
        "[Ss]izehint",
        "[Ss]tatemachine",
        "[Mm]etaobject",
        "[Ll]ogrecord",
        "[Ff]iletype",
    }

    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _get_files():
            with tokenize.open(fn) as f:
                if fn == os.path.join(".", "scripts", "dev", "misc_checks.py"):
                    continue
                for line in f:
                    for w in words:
                        if re.search(w, line) and fn not in seen[w]:
                            print('Found "{}" in {}!'.format(w, fn))
                            seen[w].append(fn)
                            ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Developer: r8b7xy, Project: qutebrowser, Lines: 56, Source: misc_checks.py


Example 19: check

def check(filenames, select=None, ignore=None, ignore_decorators=None):
    """Generate docstring errors that exist in `filenames` iterable.

    By default, the PEP-257 convention is checked. To specifically define the
    set of error codes to check for, supply either `select` or `ignore` (but
    not both). In either case, the parameter should be a collection of error
    code strings, e.g., {'D100', 'D404'}.

    When supplying `select`, only specified error codes will be reported.
    When supplying `ignore`, all error codes which were not specified will be
    reported.

    Note that ignored error code refer to the entire set of possible
    error codes, which is larger than just the PEP-257 convention. To your
    convenience, you may use `pydocstyle.violations.conventions.pep257` as
    a base set to add or remove errors from.

    Examples
    ---------
    >>> check(['pydocstyle.py'])
    <generator object check at 0x...>

    >>> check(['pydocstyle.py'], select=['D100'])
    <generator object check at 0x...>

    >>> check(['pydocstyle.py'], ignore=conventions.pep257 - {'D100'})
    <generator object check at 0x...>

    """
    if select is not None and ignore is not None:
        raise IllegalConfiguration('Cannot pass both select and ignore. '
                                   'They are mutually exclusive.')
    elif select is not None:
        checked_codes = select
    elif ignore is not None:
        checked_codes = list(set(violations.ErrorRegistry.get_error_codes()) -
                             set(ignore))
    else:
        checked_codes = violations.conventions.pep257

    for filename in filenames:
        log.info('Checking file %s.', filename)
        try:
            with tk.open(filename) as file:
                source = file.read()
            for error in ConventionChecker().check_source(source, filename,
                                                          ignore_decorators):
                code = getattr(error, 'code', None)
                if code in checked_codes:
                    yield error
        except (EnvironmentError, AllError, ParseError) as error:
            log.warning('Error in file %s: %s', filename, error)
            yield error
        except tk.TokenError:
            yield SyntaxError('invalid syntax in file %s' % filename)
Developer: PyCQA, Project: pydocstyle, Lines: 55, Source: checker.py


Example 20: _find_executable_linenos

def _find_executable_linenos(filename):
    try:
        with tokenize.open(filename) as f:
            prog = f.read()
            encoding = f.encoding
    except IOError as err:
        print('Not printing coverage data for %r: %s' % (filename, err), file=sys.stderr)
        return {}
    code = compile(prog, filename, 'exec')
    strs = _find_strings(filename, encoding)
    return _find_lines(code, strs)
Developer: johndpope, Project: sims4-ai-engine, Lines: 11, Source: trace.py



Note: the tokenize.open examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with those authors; please consult each project's License before distributing or reusing the code. Do not republish this article without permission.

