Python utils.urljoin Function Code Examples


This article collects typical usage examples of the Python function utils.urljoin. If you have been wondering what urljoin does, how to call it, or what real-world uses look like, the curated code examples below should help.



Twenty code examples of the urljoin function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
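
As the examples show, utils.urljoin here is a project-local helper rather than urllib.parse.urljoin: it accepts any number of URL fragments and joins them with exactly one '/' between parts. The implementations used by the projects below are not reproduced on this page, so the following is only a minimal sketch, assuming semantics consistent with the calls shown:

    def urljoin(*fragments):
        """Join URL fragments with a single '/' between parts.

        The trailing slash of the final fragment (if any) is kept, which
        several callers below rely on when appending query strings.
        """
        head = [fragment.rstrip('/') for fragment in fragments[:-1]]
        return '/'.join(head + [fragments[-1]])

For example, urljoin('https://example.com/pub/', 'firefox/', 'nightly/') yields 'https://example.com/pub/firefox/nightly/'.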

Example 1: get_latest_build_date

    def get_latest_build_date(self):
        """ Returns date of latest available nightly build."""
        if self.application not in ('fennec'):
            url = urljoin(self.base_url, 'nightly', 'latest-%s/' % self.branch)
        else:
            url = urljoin(self.base_url, 'nightly', 'latest-%s-%s/' %
                          (self.branch, self.platform))

        self.logger.info('Retrieving the build status file from %s' % url)
        parser = DirectoryParser(url, authentication=self.authentication,
                                 timeout=self.timeout_network)
        parser.entries = parser.filter(r'.*%s\.txt' % self.platform_regex)
        if not parser.entries:
            message = 'Status file for %s build cannot be found' % \
                self.platform_regex
            raise errors.NotFoundError(message, url)

        # Read status file for the platform, retrieve build id,
        # and convert to a date
        headers = {'Cache-Control': 'max-age=0'}

        r = requests.get(url + parser.entries[-1],
                         auth=self.authentication, headers=headers)
        try:
            r.raise_for_status()

            return datetime.strptime(r.text.split('\n')[0], '%Y%m%d%H%M%S')
        finally:
            r.close()
Author: pombredanne, Project: mozdownload, Lines: 29, Source: scraper.py
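
The status file fetched above contains a 14-digit build id on its first line, which strptime turns into a datetime via the '%Y%m%d%H%M%S' format string. A quick illustration (the id value here is made up):

    >>> from datetime import datetime
    >>> datetime.strptime('20240527030201', '%Y%m%d%H%M%S')
    datetime.datetime(2024, 5, 27, 3, 2, 1)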


Example 2: path_regex

    def path_regex(self):
        """Return the regex for the path"""

        try:
            path = urljoin(self.monthly_build_list_regex,
                           self.builds[self.build_index])
            return path
        except IndexError:  # no build at build_index
            folder = urljoin(self.base_url, self.monthly_build_list_regex)
            raise NotFoundError("Specified sub folder cannot be found",
                                folder)
Author: bitshadow, Project: mozdownload, Lines: 11, Source: scraper.py


Example 3: path_regex

    def path_regex(self):
        """Return the regex for the path"""

        try:
            path = urljoin(self.monthly_build_list_regex,
                           self.builds[self.build_index])
            if self.application in APPLICATIONS_MULTI_LOCALE \
                    and self.locale != 'multi':
                path = urljoin(path, self.locale)
            return path
        except IndexError:  # no build at build_index
            folder = urljoin(self.base_url, self.monthly_build_list_regex)
            raise errors.NotFoundError("Specified sub folder cannot be found",
                                       folder)
Author: pombredanne, Project: mozdownload, Lines: 14, Source: scraper.py


Example 4: downloadRawFile

    def downloadRawFile ( self , remote , local=None ) :
        """Downloads a remote file to the local system.

        remote - path relative to repository base
        local - Optional local name for the file

        Returns the local file name or False if errors"""

        remote = utils.urljoin( self.base_url() , remote ) 

        if not local :
            (handle, fname) = tempfile.mkstemp()
        else :
            fname = local
            handle = os.open( fname , os.O_WRONLY | os.O_TRUNC | os.O_CREAT )
        try:
            response = urllib2.urlopen( remote )
            data = response.read(256)
            while data :
                os.write(handle, data)
                data = response.read(256)
            os.close(handle)
        except Exception, ex:
            repolib.logger.error( "Exception : %s" % ex )
            os.close(handle)
            os.unlink(fname)
            return False

        # Success: hand back the local file name, as documented above.
        return fname
Author: pombredanne, Project: repomirror, Lines: 27, Source: base.py
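
Note that the example above is Python 2 code (urllib2 and the "except Exception, ex" syntax). Purely for comparison, here is a minimal Python 3 sketch of the same download flow built on requests; the function name download_raw_file and the reuse of the urljoin sketch from the introduction are assumptions for illustration, not the repomirror project's actual API:

    import os
    import tempfile

    import requests

    def download_raw_file(base_url, remote, local=None, chunk_size=256):
        """Download base_url/remote to local (or a temp file).

        Returns the local file name, or False on error.
        """
        url = urljoin(base_url, remote)  # urljoin sketch from the introduction
        if local is None:
            handle, fname = tempfile.mkstemp()
            os.close(handle)  # reopened below via open()
        else:
            fname = local
        try:
            with requests.get(url, stream=True, timeout=60) as response:
                response.raise_for_status()
                with open(fname, 'wb') as target:
                    for chunk in response.iter_content(chunk_size):
                        target.write(chunk)
        except (requests.RequestException, OSError) as ex:
            if os.path.exists(fname):
                os.unlink(fname)
            return False
        return fname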


Example 5: get_build_info_for_index

    def get_build_info_for_index(self, build_index=None):
        url = urljoin(self.base_url, self.build_list_regex)

        self.logger.info('Retrieving list of builds from %s' % url)
        parser = DirectoryParser(url, authentication=self.authentication,
                                 timeout=self.timeout_network)
        parser.entries = parser.filter(r'^\d+$')

        if self.timestamp:
            # If a timestamp is given, retrieve the folder with the timestamp
            # as name
            parser.entries = self.timestamp in parser.entries and \
                [self.timestamp]

        elif self.date:
            # If date is given, retrieve the subset of builds on that date
            parser.entries = filter(self.date_matches, parser.entries)

        if not parser.entries:
            message = 'No builds have been found'
            raise NotFoundError(message, url)

        self.show_matching_builds(parser.entries)

        # If no index has been given, set it to the last build of the day.
        if build_index is None:
            build_index = len(parser.entries) - 1

        return (parser.entries, build_index)
Author: bitshadow, Project: mozdownload, Lines: 29, Source: scraper.py


Example 6: get_build_info

    def get_build_info(self):
        """Defines additional build information"""

        # Internally we access builds via index
        url = urljoin(self.base_url, self.candidate_build_list_regex)
        self.logger.info('Retrieving list of candidate builds from %s' % url)

        parser = DirectoryParser(url, authentication=self.authentication,
                                 timeout=self.timeout_network)
        if not parser.entries:
            message = 'Folder for specific candidate builds at %s has not ' \
                'been found' % url
            raise errors.NotFoundError(message, url)

        self.show_matching_builds(parser.entries)
        self.builds = parser.entries
        self.build_index = len(parser.entries) - 1

        if self.build_number and \
                ('build%s' % self.build_number) in self.builds:
            self.builds = ['build%s' % self.build_number]
            self.build_index = 0
            self.logger.info('Selected build: build%s' % self.build_number)
        else:
            self.logger.info('Selected build: build%d' %
                             (self.build_index + 1))
Author: pombredanne, Project: mozdownload, Lines: 26, Source: scraper.py


Example 7: get_build_info_for_date

    def get_build_info_for_date(self, date, has_time=False, build_index=None):
        url = urljoin(self.base_url, self.monthly_build_list_regex)

        self.logger.info('Retrieving list of builds from %s' % url)
        parser = DirectoryParser(url, authentication=self.authentication,
                                 timeout=self.timeout_network)
        regex = r'%(DATE)s-(\d+-)+%(BRANCH)s%(L10N)s$' % {
            'DATE': date.strftime('%Y-%m-%d'),
            'BRANCH': self.branch,
            'L10N': '' if self.locale == 'en-US' else '(-l10n)?'}
        parser.entries = parser.filter(regex)
        parser.entries = parser.filter(self.is_build_dir)

        if has_time:
            # If a time is included in the date, use it to determine the
            # build's index
            regex = r'.*%s.*' % date.strftime('%H-%M-%S')
            parser.entries = parser.filter(regex)

        if not parser.entries:
            date_format = '%Y-%m-%d-%H-%M-%S' if has_time else '%Y-%m-%d'
            message = 'Folder for builds on %s has not been found' % \
                date.strftime(date_format)
            raise NotFoundError(message, url)

        # If no index has been given, set it to the last build of the day.
        self.show_matching_builds(parser.entries)
        if build_index is None:
            build_index = len(parser.entries) - 1

        return (parser.entries, build_index)
Author: bitshadow, Project: mozdownload, Lines: 31, Source: scraper.py


Example 8: path_regex

    def path_regex(self):
        """Return the regex for the path to the build folder"""

        if self.locale_build:
            return self.build_list_regex

        return '%s/' % urljoin(self.build_list_regex, self.builds[self.build_index])
Author: parkouss, Project: mozdownload, Lines: 7, Source: scraper.py


Example 9: get_build_info_for_index

    def get_build_info_for_index(self, build_index=None):
        url = urljoin(self.base_url, self.build_list_regex)

        self.logger.info('Retrieving list of builds from %s' % url)
        parser = self._create_directory_parser(url)
        parser.entries = parser.filter(r'^\d+$')

        if self.timestamp:
            # If a timestamp is given, retrieve the folder with the timestamp
            # as name
            parser.entries = self.timestamp in parser.entries and \
                [self.timestamp]

        elif self.date:
            # If date is given, retrieve the subset of builds on that date
            parser.entries = filter(self.date_matches, parser.entries)

        if not parser.entries:
            message = 'No builds have been found'
            raise errors.NotFoundError(message, url)

        self.show_matching_builds(parser.entries)

        # If no index has been given, set it to the last build of the day.
        if build_index is None:
            # Find the most recent non-empty entry.
            build_index = len(parser.entries)
            for build in reversed(parser.entries):
                build_index -= 1
                if not build_index or self.is_build_dir(build):
                    break

        self.logger.info('Selected build: %s' % parser.entries[build_index])

        return (parser.entries, build_index)
Author: parkouss, Project: mozdownload, Lines: 35, Source: scraper.py


Example 10: __init__

    def __init__(self, destination=None, platform=None,
                 application='firefox', locale=None, extension=None,
                 username=None, password=None,
                 retry_attempts=0, retry_delay=10.,
                 is_stub_installer=False, timeout=None,
                 log_level='INFO',
                 base_url=BASE_URL):

        # Private properties for caching
        self._filename = None
        self._binary = None

        self.destination = destination or os.getcwd()

        if not locale:
            if application in APPLICATIONS_MULTI_LOCALE:
                self.locale = 'multi'
            else:
                self.locale = 'en-US'
        else:
            self.locale = locale

        self.platform = platform or self.detect_platform()

        self.session = requests.Session()
        if (username, password) != (None, None):
            self.session.auth = (username, password)

        self.retry_attempts = retry_attempts
        self.retry_delay = retry_delay
        self.is_stub_installer = is_stub_installer
        self.timeout_download = timeout
        # this is the timeout used in requests.get. Unlike "auth",
        # it does not work if we attach it on the session, so we handle
        # it independently.
        self.timeout_network = 60.

        logging.basicConfig(format=' %(levelname)s | %(message)s')
        self.logger = logging.getLogger(self.__module__)
        self.logger.setLevel(log_level)
        logging.getLogger('redo').setLevel(logging.INFO)

        # build the base URL
        self.application = application
        self.base_url = '%s/' % urljoin(
            base_url,
            APPLICATIONS_TO_FTP_DIRECTORY.get(self.application, self.application)
        )

        if extension:
            self.extension = extension
        else:
            if self.application in APPLICATIONS_MULTI_LOCALE and \
                    self.platform in ('win32', 'win64'):
                # builds for APPLICATIONS_MULTI_LOCALE only exist in zip
                self.extension = 'zip'
            else:
                self.extension = DEFAULT_FILE_EXTENSIONS[self.platform]

        self._retry_check_404(self.get_build_info)
Author: parkouss, Project: mozdownload, Lines: 60, Source: scraper.py


Example 11: buffer_sync

    def buffer_sync(self):
        """Bulk upsert of everything in self.buffer."""
        data = protocol.serialize_web(
            [x.to_broadcast_json(include_hidden=True) for x in self.buffer])
        url = utils.urljoin(self.baseurl, self.docid + "/", 'bulkupsert')
        self.s.post(url, data=data)
        for m in self.buffer:
            m.set('created', True)
        self.buffer = []
Author: yogeshc, Project: Bokeh, Lines: 10, Source: bbmodel.py


Example 12: fetch

    def fetch(self, typename=None, id=None):
        if typename is None:
            url = utils.urljoin(self.baseurl, self.docid)
            data = self.s.get(url).content
            specs = self.ph.deserialize_web(data)
            models = [ContinuumModel(
                x['type'], **x['attributes']) for x in specs]
            return models
        elif typename is not None and id is None:
            url = utils.urljoin(self.baseurl, self.docid + "/", typename)
            attrs = self.ph.deserialize_web(self.s.get(url).content)
            models = [ContinuumModel(typename, **x) for x in attrs]
            return models
        elif typename is not None and id is not None:
            url = utils.urljoin(self.baseurl, self.docid + "/",
                                typename + "/", id)
            attr = self.ph.deserialize_web(self.s.get(url).content)
            if attr is None:
                return None
            model = ContinuumModel(typename, **attr)
            return model
Author: fivejjs, Project: Bokeh, Lines: 20, Source: bbmodel.py


Example 13: is_build_dir

    def is_build_dir(self, dir):
        """Return whether or not the given dir contains a build."""

        url = urljoin(self.base_url, self.monthly_build_list_regex, dir)

        if self.application in MULTI_LOCALE_APPLICATIONS \
                and self.locale != 'multi':
            url = urljoin(url, self.locale)

        parser = DirectoryParser(url, authentication=self.authentication,
                                 timeout=self.timeout_network)

        pattern = re.compile(self.binary_regex, re.IGNORECASE)
        for entry in parser.entries:
            try:
                pattern.match(entry).group()
                return True
            except AttributeError:
                # No match (match() returned None), continue with next entry
                continue
        return False
Author: jayrajput, Project: mozdownload, Lines: 21, Source: scraper.py


Example 14: create

    def create(self, model, defer=False):
        if not model.get('docs'):
            model.set('docs', [self.docid])
        if defer:
            self.buffer.append(model)
        else:
            url = utils.urljoin(self.baseurl,
                                self.docid + "/",
                                model.typename)
            log.debug("create %s", url)
            self.s.post(url, data=self.ph.serialize_msg(model.to_json()))
        return model
Author: fivejjs, Project: Bokeh, Lines: 12, Source: bbmodel.py


Example 15: update

    def update(self, model, defer=False):
        model.set('doc', self.docid)
        if defer:
            self.buffer.append(model)
        else:
            url = utils.urljoin(self.baseurl,
                                self.docid + "/",
                                model.typename + "/",
                                model.id + "/")
            log.debug("update %s", url)  # was "create"; this method updates
            self.s.put(url, data=protocol.serialize_web(
                model.to_json(include_hidden=True)))
        return model
Author: yogeshc, Project: Bokeh, Lines: 13, Source: bbmodel.py


Example 16: create

    def create(self, model, defer=False):
        model.set('doc', self.docid)
        if defer:
            self.buffer.append(model)
        else:
            url = utils.urljoin(self.baseurl,
                                self.docid + "/",
                                model.typename + "/")
            log.debug("create %s", url)
            self.s.post(url, data=self.ph.serialize_msg(
                model.to_json(include_hidden=True)))
            model.set('created', True)
        return model
Author: dengemann, Project: Bokeh, Lines: 13, Source: bbmodel.py


Example 17: is_build_dir

    def is_build_dir(self, folder_name):
        """Return whether or not the given dir contains a build."""

        # Cannot move up to base scraper due to parser.entries call in
        # get_build_info_for_index (see below)
        url = '%s/' % urljoin(self.base_url, self.build_list_regex, folder_name)

        if self.application in APPLICATIONS_MULTI_LOCALE \
                and self.locale != 'multi':
            url = '%s/' % urljoin(url, self.locale)

        parser = self._create_directory_parser(url)

        pattern = re.compile(self.binary_regex, re.IGNORECASE)
        for entry in parser.entries:
            try:
                pattern.match(entry).group()
                return True
            except AttributeError:
                # No match (match() returned None), continue with next entry
                continue
        return False
Author: parkouss, Project: mozdownload, Lines: 22, Source: scraper.py


Example 18: __init__

    def __init__(self, directory, version, platform=None,
                 application='firefox', locale='en-US', extension=None,
                 authentication=None, retry_attempts=0, retry_delay=10.,
                 is_stub_installer=False, timeout=None, log_level='INFO',
                 base_url=BASE_URL):

        # Private properties for caching
        self._target = None
        self._binary = None

        self.directory = directory
        self.locale = locale
        self.platform = platform or self.detect_platform()
        self.version = version
        self.extension = extension or DEFAULT_FILE_EXTENSIONS[self.platform]
        self.authentication = authentication
        self.retry_attempts = retry_attempts
        self.retry_delay = retry_delay
        self.is_stub_installer = is_stub_installer
        self.timeout_download = timeout
        self.timeout_network = 60.

        self.logger = mozlog.getLogger(' ')
        self.logger.setLevel(getattr(mozlog, log_level.upper()))

        # build the base URL
        self.application = application
        self.base_url = urljoin(base_url, self.application)

        attempt = 0
        while True:
            attempt += 1
            try:
                self.get_build_info()
                break
            except (NotFoundError, requests.exceptions.RequestException), e:
                if self.retry_attempts > 0:
                    # Log only if multiple attempts are requested
                    self.logger.warning("Build not found: '%s'" % e.message)
                    self.logger.info('Will retry in %s seconds...' %
                                     (self.retry_delay))
                    time.sleep(self.retry_delay)
                    self.logger.info("Retrying... (attempt %s)" % attempt)

                if attempt >= self.retry_attempts:
                    if hasattr(e, 'response') and \
                            e.response.status_code == 404:
                        message = "Specified build has not been found"
                        raise NotFoundError(message, e.response.url)
                    else:
                        raise
Author: manzee, Project: mozdownload, Lines: 51, Source: scraper.py


Example 19: fetch

    def fetch(self, typename=None, id=None, include_hidden=False):
        query = urllib.urlencode({'include_hidden': include_hidden})
        if typename is None:
            url = utils.urljoin(self.baseurl, self.docid + "/") + "?" + query
            data = self.s.get(url).content
            specs = protocol.deserialize_web(data)
            models = [make_model(x['type'], client=self, **x['attributes'])
                      for x in specs]
            return models
        elif typename is not None and id is None:
            url = utils.urljoin(self.baseurl, self.docid + "/", typename + "/")
            url += "?" + query
            attrs = protocol.deserialize_web(self.s.get(url).content)
            models = [make_model(typename, client=self, **x) for x in attrs]
            return models
        elif typename is not None and id is not None:
            url = utils.urljoin(self.baseurl, self.docid + "/",
                                typename + "/", id + "/")
            url += "?" + query
            attr = protocol.deserialize_web(self.s.get(url).content)
            if attr is None:
                return None
            model = make_model(typename, client=self, **attr)
            return model
Author: yogeshc, Project: Bokeh, Lines: 24, Source: bbmodel.py


Example 20: get_build_info_for_index

    def get_build_info_for_index(self, build_index=None):
        url = urljoin(self.base_url, self.build_list_regex)

        self.logger.info('Retrieving list of builds from %s' % url)
        parser = DirectoryParser(url, authentication=self.authentication,
                                 timeout=self.timeout_network)
        parser.entries = parser.filter('.*-%s$' % self.changeset)

        if not parser.entries:
            raise NotFoundError('No builds have been found', url)

        self.show_matching_builds(parser.entries)

        self.logger.info('Selected build: %s' % parser.entries[0])

        return (parser.entries, 0)
Author: chetankm-cs, Project: mozdownload, Lines: 16, Source: scraper.py



Note: The utils.urljoin examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.

