• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python urllib2._urlopen函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中urllib2._urlopen函数的典型用法代码示例。如果您正苦于以下问题:Python _urlopen函数的具体用法?Python _urlopen怎么用?Python _urlopen使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了_urlopen函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: get_all_obsolete

    def get_all_obsolete(self):
        """Return every PDB code that has ever been marked obsolete.

        Downloads the obsolete.dat status file from the PDB server and
        collects the first pdb_code column of each OBSLTE record.  The
        file looks like this:

         LIST OF OBSOLETE COORDINATE ENTRIES AND SUCCESSORS
        OBSLTE    31-JUL-94 116L     216L
        ...
        OBSLTE    14-JAN-04 1HKE     1UUZ
        """
        url = self.pdb_server + '/pub/pdb/data/status/obsolete.dat'
        codes = []
        with contextlib.closing(_urlopen(url)) as stream:
            for record in stream:
                # Only OBSLTE records carry obsolete entries; skip the
                # header and any other lines.
                if record.startswith("OBSLTE "):
                    code = record.split()[2]
                    # Guard against mis-reading the data: PDB codes are
                    # exactly four characters long.
                    assert len(code) == 4
                    codes.append(code)
        return codes
开发者ID:Dologan,项目名称:biopython,代码行数:35,代码来源:PDBList.py


示例2: get_filepath_or_buffer

def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
    """
    If the filepath_or_buffer is a url, translate and return the buffer
    passthru otherwise.

    Parameters
    ----------
    filepath_or_buffer : a url, filepath, or buffer
    encoding : the encoding to use to decode py3 bytes, default is 'utf-8'

    Returns
    -------
    a filepath_or_buffer, the encoding
    """

    if _is_url(filepath_or_buffer):
        # Fetch the URL and let the helper pick the stream/encoding pair.
        req = _urlopen(str(filepath_or_buffer))
        return maybe_read_encoded_stream(req, encoding)

    if _is_s3_url(filepath_or_buffer):
        try:
            import boto
        except ImportError:
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit and masked unrelated errors.
            raise ImportError("boto is required to handle s3 files")
        # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
        # are environment variables
        parsed_url = parse_url(filepath_or_buffer)
        conn = boto.connect_s3()
        b = conn.get_bucket(parsed_url.netloc)
        k = boto.s3.key.Key(b)
        k.key = parsed_url.path
        filepath_or_buffer = StringIO(k.get_contents_as_string())
        return filepath_or_buffer, None

    return filepath_or_buffer, None
开发者ID:Al-Harazmi,项目名称:pandas,代码行数:35,代码来源:common.py


示例3: get_recent_changes

    def get_recent_changes(self):
        """Returns three lists of the newest weekly files (added,mod,obsolete).

        Reads the directories with changed entries from the PDB server and
        returns a tuple of three URL's to the files of new, modified and
        obsolete entries from the most recent list. The directory with the
        largest numerical name is used.
        Returns None if something goes wrong.

        Contents of the data/status dir (20031013 would be used);
        drwxrwxr-x   2 1002     sysadmin     512 Oct  6 18:28 20031006
        drwxrwxr-x   2 1002     sysadmin     512 Oct 14 02:14 20031013
        -rw-r--r--   1 1002     sysadmin    1327 Mar 12  2001 README
        """
        url = self.pdb_server + '/pub/pdb/data/status/'
        with contextlib.closing(_urlopen(url)) as handle:
            # BUG FIX: the original used ``filter(str.isdigit, ...)[-1]``,
            # which only works on Python 2 where filter() returns a list;
            # on Python 3 filter() is a lazy iterator and cannot be indexed.
            # Collect the datestamp directory names explicitly instead.
            recent = None
            for line in handle.readlines():
                if isinstance(line, bytes):
                    # Python 3 urlopen yields bytes; normalise to text so
                    # the URL built below is a plain string.
                    line = line.decode('ascii', 'replace')
                name = line.split()[-1]
                if name.isdigit():
                    recent = name  # listing order: last match is most recent

        path = self.pdb_server + '/pub/pdb/data/status/%s/' % (recent)

        # Retrieve the lists
        added = self.get_status_list(path + 'added.pdb')
        modified = self.get_status_list(path + 'modified.pdb')
        obsolete = self.get_status_list(path + 'obsolete.pdb')
        return [added, modified, obsolete]
开发者ID:Dologan,项目名称:biopython,代码行数:27,代码来源:PDBList.py


示例4: urlopen

def urlopen(url):
    """Fetch *url* and return the raw response body (Python 2 code).

    Rewrites the query string to ask for 100 results per page instead of
    20, and appends the module-level ``key`` when no API key is present.
    """
    sys.stdout.flush()
    # Request the maximum page size to cut down the number of round trips.
    url = url.replace('&max-results=20', '&max-results=100')
    if '&key' not in url:
        # ``key`` is a module-level query-string fragment; presumably an
        # API key like '&key=...' -- TODO confirm against the module header.
        url += key
    print url
    # 60-second timeout guards against a hung connection.
    return _urlopen(url, timeout=60).read()
开发者ID:immissile,项目名称:42qu_github_mirror,代码行数:7,代码来源:dump_google_buzz.py


示例5: get_all_entries

 def get_all_entries(self):
     """Retrieve the big entries.idx index file from the PDB server
     and return the list of PDB codes it contains (Python 2 code).
     """
     print "retrieving index file. Takes about 5 MB."
     url = _urlopen(self.pdb_server + "/pub/pdb/derived_data/index/entries.idx")
     # Skip the two header lines; a valid record is longer than 4 chars
     # and starts with the 4-character PDB code.
     return [line[:4] for line in url.readlines()[2:] if len(line) > 4]
开发者ID:deepgenomics,项目名称:biopython,代码行数:8,代码来源:PDBList.py


示例6: urlopen

def urlopen(url, data=None, lang='en'):
    """Open *url* with language/UA headers, logging, and rate limiting."""
    headers = {
        "Accept-Language": "%s,en-us;q=0.7,en;q=0.3" % lang.lower(),
        "User-Agent": UA,
    }
    req = Request(url, data, headers)
    logging.debug("urlopen: %s", url)
    # Throttle successive requests so we do not hammer the remote server.
    time.sleep(URLOPEN_DELAY)
    return _urlopen(req)
开发者ID:thuvh,项目名称:filmmaster,代码行数:8,代码来源:import_ratings_helper.py


示例7: get_filepath_or_buffer

def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
                           compression=None):
    """
    If the filepath_or_buffer is a url, translate and return the buffer
    passthru otherwise.

    Parameters
    ----------
    filepath_or_buffer : a url, filepath, or buffer
    encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
    compression : str or None; 'infer' detects gzip from response headers

    Returns
    -------
    a filepath_or_buffer, the encoding, the compression
    """

    if _is_url(filepath_or_buffer):
        req = _urlopen(str(filepath_or_buffer))
        if compression == 'infer':
            content_encoding = req.headers.get('Content-Encoding', None)
            if content_encoding == 'gzip':
                compression = 'gzip'
            else:
                compression = None
        # cat on the compression to the tuple returned by the function
        to_return = list(maybe_read_encoded_stream(req, encoding, compression)) + \
                    [compression]
        return tuple(to_return)

    if _is_s3_url(filepath_or_buffer):
        try:
            import boto
        except ImportError:
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit and masked unrelated errors.
            raise ImportError("boto is required to handle s3 files")
        # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
        # are environment variables
        parsed_url = parse_url(filepath_or_buffer)

        try:
            conn = boto.connect_s3()
        except boto.exception.NoAuthHandlerFound:
            # No credentials configured: fall back to anonymous access.
            conn = boto.connect_s3(anon=True)

        b = conn.get_bucket(parsed_url.netloc, validate=False)
        if compat.PY2 and (compression == 'gzip' or
                           (compression == 'infer' and
                            filepath_or_buffer.endswith(".gz"))):
            k = boto.s3.key.Key(b, parsed_url.path)
            filepath_or_buffer = BytesIO(k.get_contents_as_string(
                encoding=encoding))
        else:
            k = BotoFileLikeReader(b, parsed_url.path, encoding=encoding)
            k.open('r')  # Expose read errors immediately
            filepath_or_buffer = k
        return filepath_or_buffer, None, compression

    return _expand_user(filepath_or_buffer), None, compression


示例8: fetch_film_info_from_criticker

def fetch_film_info_from_criticker(film_data):
    """Scrape the film-title element from a criticker.com film page.

    ``film_data`` must be a mapping with a ``'criticker_id'`` key.
    Network failures are logged rather than raised.  (Python 2 code:
    ``unicode`` and the ``except URLError, e`` syntax.)

    NOTE(review): ``title_page`` is assigned but never returned here --
    this block looks truncated; confirm against the full source file.
    """
    url = 'http://www.criticker.com/?f=' + film_data['criticker_id']
    title_page = None
    try:
        # 5-second timeout; criticker pages are decoded as latin-1.
        page = unicode(_urlopen(url, None, 5).read(), 'iso-8859-1')
        soup = BeautifulSoup(page)
        title_page = soup.find("div", attrs={"id":"fi_info_filmname"})
    except URLError, e:
        logger.error("URL Error: " + str(e.reason) + ": " + url)


示例9: get_filepath_or_buffer

def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
                           compression=None, mode=None):
    """
    If the filepath_or_buffer is a url, translate and return the buffer.
    Otherwise passthrough.

    Parameters
    ----------
    filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
                         or buffer
    encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
    mode : str, optional

    Returns
    -------
    tuple of ({a filepath_ or buffer or S3File instance},
              encoding, str,
              compression, str,
              should_close, bool)
    """
    filepath_or_buffer = _stringify_path(filepath_or_buffer)

    if _is_url(filepath_or_buffer):
        response = _urlopen(filepath_or_buffer)
        if response.headers.get('Content-Encoding', None) == 'gzip':
            # The transport layer gzipped the payload; force the caller to
            # decompress regardless of what was requested.
            compression = 'gzip'
        buf = BytesIO(response.read())
        response.close()
        return buf, encoding, compression, True

    if is_s3_url(filepath_or_buffer):
        from pandas.io import s3
        return s3.get_filepath_or_buffer(filepath_or_buffer,
                                         encoding=encoding,
                                         compression=compression,
                                         mode=mode)

    if is_gcs_url(filepath_or_buffer):
        from pandas.io import gcs
        return gcs.get_filepath_or_buffer(filepath_or_buffer,
                                          encoding=encoding,
                                          compression=compression,
                                          mode=mode)

    passthrough_types = (compat.string_types,
                         compat.binary_type,
                         mmap.mmap)
    if isinstance(filepath_or_buffer, passthrough_types):
        return _expand_user(filepath_or_buffer), None, compression, False

    if not is_file_like(filepath_or_buffer):
        raise ValueError("Invalid file path or buffer object type: {_type}"
                         .format(_type=type(filepath_or_buffer)))

    return filepath_or_buffer, None, compression, False
开发者ID:jakevdp,项目名称:pandas,代码行数:56,代码来源:common.py


示例10: get_all_entries

 def get_all_entries(self):
     """Download the PDB index file and return the list of PDB codes.

     The index file (entries.idx, ~5 MB) has two header lines; each
     remaining line begins with a four-character PDB code.
     """
     print("retrieving index file. Takes about 5 MB.")
     url = self.pdb_server + '/pub/pdb/derived_data/index/entries.idx'
     with contextlib.closing(_urlopen(url)) as handle:
         records = handle.readlines()
     # Skip the two header lines; ignore anything too short to hold a code.
     return [entry[:4] for entry in records[2:] if len(entry) > 4]
开发者ID:Dologan,项目名称:biopython,代码行数:11,代码来源:PDBList.py


示例11: get_seqres_file

 def get_seqres_file(self, savefile="pdb_seqres.txt"):
     """Retrieves a (big) file containing all the sequences of PDB entries
     and writes it to a file.

     @param savefile: output filename (default "pdb_seqres.txt")
     @type savefile: string
     """
     print("retrieving sequence file. Takes about 15 MB.")
     handle = _urlopen(self.pdb_server + "/pub/pdb/derived_data/pdb_seqres.txt")
     try:
         # ``with`` guarantees the output file is closed (and flushed) even
         # if the download stream raises midway; the original leaked both
         # handles on error.
         with open(savefile, "w") as outfile:
             outfile.writelines(handle.readlines())
     finally:
         handle.close()
开发者ID:deepgenomics,项目名称:biopython,代码行数:11,代码来源:PDBList.py


示例12: urlopen

def urlopen(url, data=None, *args, **kwargs):
    """Open *url* with optional Basic-auth / raw Authorization headers.

    Accepts either a URL string or a prebuilt Request.  Extra keyword
    arguments consumed here (removed before forwarding):

    * ``basic_auth`` -- (user, password) pair turned into an
      ``Authorization: Basic ...`` header.
    * ``authorization`` -- literal ``Authorization`` header value.

    All remaining arguments are forwarded to the underlying ``_urlopen``.
    """
    if not isinstance(url, Request):
        url = Request(url, data)
        data = None
    if 'basic_auth' in kwargs:
        if kwargs['basic_auth']:
            # BUG FIX: b64encode requires bytes on Python 3; the original
            # passed a str and crashed with TypeError.  Encode before and
            # decode after so the header value is text on both 2 and 3.
            creds = ':'.join(kwargs['basic_auth']).encode('utf-8')
            a = base64.b64encode(creds).decode('ascii')
            url.add_header('Authorization', 'Basic '+a)
        del(kwargs['basic_auth'])
    if 'authorization' in kwargs:
        if kwargs['authorization']:
            url.add_header('Authorization', kwargs['authorization'])
        del(kwargs['authorization'])
    if sys.version_info[0] == 2:
        url.add_header('Host', url.get_origin_req_host())
        return _urlopen(url, data, *args, **kwargs)
    else:
        url.add_header('Host', url.origin_req_host)
        # BUG FIX: the urlopen keyword is ``cadefault`` (singular);
        # ``cadefaults`` raised TypeError inside urllib.request.urlopen.
        # NOTE(review): cadefault is deprecated since 3.6 and removed in
        # 3.13 -- consider passing an ssl.SSLContext via ``context``.
        kwargs['cadefault'] = True
        return _urlopen(url, data, *args, **kwargs)
开发者ID:paoneJP,项目名称:oauth2-oidc-experiment,代码行数:20,代码来源:xurllib.py


示例13: get_status_list

    def get_status_list(self, url):
        """Fetch a weekly PDB status file and return its list of PDB codes.

        The status files parsed here contain exactly one PDB name per
        line.  Used by get_recent_files.
        """
        with contextlib.closing(_urlopen(url)) as handle:
            codes = []
            for raw_line in handle:
                entry = raw_line.strip()
                # Sanity check: every entry must be a 4-character PDB code.
                assert len(entry) == 4
                codes.append(entry)
        return codes
开发者ID:Dologan,项目名称:biopython,代码行数:14,代码来源:PDBList.py


示例14: get_filepath_or_buffer

def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
                           compression=None):
    """
    If the filepath_or_buffer is a url, translate and return the buffer.
    Otherwise passthrough.

    Parameters
    ----------
    filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
                         or buffer
    encoding : the encoding to use to decode py3 bytes, default is 'utf-8'

    Returns
    -------
    a filepath_or_buffer, the encoding, the compression
    """

    if _is_url(filepath_or_buffer):
        response = _urlopen(str(filepath_or_buffer))
        if response.headers.get('Content-Encoding', None) == 'gzip':
            # Transport-level gzip overrides whatever the caller asked for.
            compression = 'gzip'
        return BytesIO(response.read()), encoding, compression

    if _is_s3_url(filepath_or_buffer):
        from pandas.io import s3
        return s3.get_filepath_or_buffer(filepath_or_buffer,
                                         encoding=encoding,
                                         compression=compression)

    # Convert pathlib.Path/py.path.local or string
    filepath_or_buffer = _stringify_path(filepath_or_buffer)

    if isinstance(filepath_or_buffer,
                  (compat.string_types, compat.binary_type, mmap.mmap)):
        return _expand_user(filepath_or_buffer), None, compression

    if not is_file_like(filepath_or_buffer):
        raise ValueError("Invalid file path or buffer object type: {_type}"
                         .format(_type=type(filepath_or_buffer)))

    return filepath_or_buffer, None, compression
开发者ID:RogerThomas,项目名称:pandas,代码行数:46,代码来源:common.py


示例15: get_filepath_or_buffer

def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
    """
    If the filepath_or_buffer is a url, translate and return the buffer
    passthru otherwise.

    Parameters
    ----------
    filepath_or_buffer : a url, filepath, or buffer
    encoding : the encoding to use to decode py3 bytes, default is 'utf-8'

    Returns
    -------
    a filepath_or_buffer, the encoding
    """

    if _is_url(filepath_or_buffer):
        req = _urlopen(str(filepath_or_buffer))
        if compat.PY3:  # pragma: no cover
            if encoding:
                errors = 'strict'
            else:
                # No encoding requested: decode leniently as utf-8.
                errors = 'replace'
                encoding = 'utf-8'
            out = StringIO(req.read().decode(encoding, errors))
        else:
            encoding = None
            out = req
        return out, encoding

    if _is_s3_url(filepath_or_buffer):
        try:
            import boto
        except ImportError:
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit and masked unrelated errors.
            raise ImportError("boto is required to handle s3 files")
        # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
        # are environment variables
        parsed_url = parse_url(filepath_or_buffer)
        conn = boto.connect_s3()
        b = conn.get_bucket(parsed_url.netloc)
        k = boto.s3.key.Key(b)
        k.key = parsed_url.path
        filepath_or_buffer = StringIO(k.get_contents_as_string())
        return filepath_or_buffer, None

    return filepath_or_buffer, None
开发者ID:esc,项目名称:pandas,代码行数:45,代码来源:common.py


示例16: get_filepath_or_buffer

def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
                           compression=None):
    """
    If the filepath_or_buffer is a url, translate and return the buffer
    passthru otherwise.

    Parameters
    ----------
    filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
                         or buffer
    encoding : the encoding to use to decode py3 bytes, default is 'utf-8'

    Returns
    -------
    a filepath_or_buffer, the encoding, the compression
    """

    if _is_url(filepath_or_buffer):
        response = _urlopen(str(filepath_or_buffer))
        if compression == 'infer':
            # Infer from the transport header: either gzip or nothing.
            header = response.headers.get('Content-Encoding', None)
            compression = 'gzip' if header == 'gzip' else None
        # Append the (possibly updated) compression to the helper's tuple.
        pieces = list(maybe_read_encoded_stream(response, encoding,
                                                compression))
        pieces.append(compression)
        return tuple(pieces)

    if _is_s3_url(filepath_or_buffer):
        from pandas.io.s3 import get_filepath_or_buffer
        return get_filepath_or_buffer(filepath_or_buffer,
                                      encoding=encoding,
                                      compression=compression)

    # It is a pathlib.Path/py.path.local or string
    filepath_or_buffer = _stringify_path(filepath_or_buffer)
    return _expand_user(filepath_or_buffer), None, compression
开发者ID:lababidi,项目名称:pandas,代码行数:40,代码来源:common.py


示例17: get_filepath_or_buffer

def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
                           compression=None):
    """
    If the filepath_or_buffer is a url, translate and return the buffer.
    Otherwise passthrough.

    Parameters
    ----------
    filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
                         or buffer
    encoding : the encoding to use to decode py3 bytes, default is 'utf-8'

    Returns
    -------
    a filepath_or_buffer, the encoding, the compression
    """

    if _is_url(filepath_or_buffer):
        response = _urlopen(str(filepath_or_buffer))
        if response.headers.get('Content-Encoding', None) == 'gzip':
            # Transport-level gzip overrides whatever the caller asked for.
            compression = 'gzip'
        return BytesIO(response.read()), encoding, compression

    if _is_s3_url(filepath_or_buffer):
        from pandas.io import s3
        return s3.get_filepath_or_buffer(filepath_or_buffer,
                                         encoding=encoding,
                                         compression=compression)

    # It is a pathlib.Path/py.path.local or string
    filepath_or_buffer = _stringify_path(filepath_or_buffer)
    return _expand_user(filepath_or_buffer), None, compression
开发者ID:andrewkittredge,项目名称:pandas,代码行数:36,代码来源:common.py


示例18: urlopen

 def urlopen(*args, **kwargs):
     # Context-manager body: yield the open response and always close it
     # on exit (equivalent to wrapping it in contextlib.closing).
     response = _urlopen(*args, **kwargs)
     try:
         yield response
     finally:
         response.close()
开发者ID:jakevdp,项目名称:pandas,代码行数:3,代码来源:common.py


示例19: retrieve_pdb_file

    def retrieve_pdb_file(self, pdb_code, obsolete=0, compression=None, uncompress=None, pdir=None):
        """ Retrieves a PDB structure file from the PDB server and
        stores it in a local file tree.

        The .gz archive is downloaded, decompressed with the gzip module,
        and then removed, leaving only the uncompressed pdb<code>.ent file.
        If obsolete==1, the file will be saved in a special file tree.

        @param pdb_code: 4-character PDB code (handled case-insensitively)
        @type pdb_code: string

        @param obsolete: when true, fetch from (and store under) the
            obsolete branch of the archive instead of the current one
        @type obsolete: int

        @param compression: deprecated and ignored -- all archives are .gz
        @param uncompress: deprecated and ignored -- gzip module is used

        @param pdir: put the file in this directory (default: create a PDB-style directory tree)
        @type pdir: string

        @return: filename of the final uncompressed .ent file
        @rtype: string
        """
        # Alert the user about deprecated parameters
        if compression is not None:
            warnings.warn(
                "PDB file servers now only host .gz archives: " "the compression parameter will not do anything",
                BiopythonDeprecationWarning,
            )
        if uncompress is not None:
            warnings.warn(
                "Decompression is handled with the gzip module: " "the uncompression parameter will not do anything",
                BiopythonDeprecationWarning,
            )

        # Get the structure
        code = pdb_code.lower()
        filename = "pdb%s.ent.gz" % code
        # Current and obsolete entries live in different server subtrees,
        # both sharded by the middle two characters of the code.
        if not obsolete:
            url = self.pdb_server + "/pub/pdb/data/structures/divided/pdb/%s/pdb%s.ent.gz" % (code[1:3], code)
        else:
            url = self.pdb_server + "/pub/pdb/data/structures/obsolete/pdb/%s/pdb%s.ent.gz" % (code[1:3], code)

        # In which dir to put the pdb file?
        if pdir is None:
            if self.flat_tree:
                if not obsolete:
                    path = self.local_pdb
                else:
                    path = self.obsolete_pdb
            else:
                # Put in PDB-style directory tree
                if not obsolete:
                    path = os.path.join(self.local_pdb, code[1:3])
                else:
                    path = os.path.join(self.obsolete_pdb, code[1:3])
        else:
            # Put in specified directory
            path = pdir

        # Create the target directory if it does not exist yet.
        if not os.access(path, os.F_OK):
            os.makedirs(path)

        filename = os.path.join(path, filename)
        # the final uncompressed file
        final_file = os.path.join(path, "pdb%s.ent" % code)

        # Skip download if the file already exists
        if not self.overwrite:
            if os.path.exists(final_file):
                print "Structure exists: '%s' " % final_file
                return final_file

        # Retrieve the file
        print "Downloading PDB structure '%s'..." % pdb_code
        lines = _urlopen(url).read()
        open(filename, "wb").write(lines)

        # Uncompress the file
        gz = gzip.open(filename, "rb")
        out = open(final_file, "wb")
        out.writelines(gz.read())
        gz.close()
        out.close()
        # Remove the downloaded .gz archive; only the .ent file is kept.
        os.remove(filename)

        return final_file
开发者ID:deepgenomics,项目名称:biopython,代码行数:79,代码来源:PDBList.py


示例20: urlopen

def urlopen(id):
    """Fetch the resource for *id* using a randomly chosen API key."""
    api_key = choice(API_KEY)

    # URL is a module-level format template; strip stray whitespace first.
    target = URL % (id.strip(), api_key)
    return _urlopen(target, timeout=60).read()
开发者ID:immissile,项目名称:42qu_github_mirror,代码行数:5,代码来源:dump_google_profile.py



注:本文中的urllib2._urlopen函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python urllib2.addinfourl函数代码示例发布时间:2022-05-27
下一篇:
Python urllib2._parse_list_header函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap