• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python httpclient.grab_url函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中miro.httpclient.grab_url函数的典型用法代码示例。如果您正苦于以下问题:Python grab_url函数的具体用法?Python grab_url怎么用?Python grab_url使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了grab_url函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: check_for_updates

def check_for_updates(up_to_date_callback=None):
    """Query the appcast URL (beta or final channel) for a newer version.

    ``up_to_date_callback`` is an optional zero-argument callable that
    returns nothing; it is forwarded to the appcast handler.
    """
    import miro.plat
    # Some platform builds ship with autoupdate compiled out entirely.
    if miro.plat.AUTOUPDATE == False:
        logging.info("this platform has autoupdate disabled.  skipping.")
        return

    global check_in_progress
    if check_in_progress:
        # A previous check has not finished yet; do not start another.
        return
    check_in_progress = True
    logging.info("Checking for updates...")

    # Non-final releases poll the beta channel instead of the final one.
    if app.config.get(prefs.APP_FINAL_RELEASE) == u"0":
        url = app.config.get(prefs.AUTOUPDATE_BETA_URL)
        logging.info("Using the beta channel")
    else:
        url = app.config.get(prefs.AUTOUPDATE_URL)
        logging.info("Using the final channel")
    logging.info("check_for_updates: checking %s", url)

    def update_handler(data):
        return _handle_app_cast(data, up_to_date_callback)

    grab_url(url, update_handler, _handle_error)
开发者ID:codito,项目名称:miro,代码行数:30,代码来源:autoupdate.py


示例2: _scrape_youtube_url

def _scrape_youtube_url(url, callback):
    """Extract the YouTube video id from *url* and request its video info.

    Calls ``callback(None)`` when no id can be extracted or the request
    cannot be started.
    """
    check_u(url)

    parts = urlparse.urlsplit(url)
    query = cgi.parse_qs(parts[3])
    path = parts[2]

    video_id = None
    if path == u'/watch' and 'v' in query:
        # parse_qs maps each key to a list; an empty list means no value.
        values = query['v']
        if values:
            video_id = values[0]
    elif path.startswith('/v/'):
        match = re.compile(r'/v/([\w-]+)').match(path)
        if match is not None:
            video_id = match.group(1)

    if video_id is None:
        logging.warning('_scrape_youtube_url: unable to scrape YouTube Video URL')
        callback(None)
        return

    try:
        url = u"http://www.youtube.com/get_video_info?video_id=%s&el=embedded&ps=default&eurl=" % video_id

        def on_data(x):
            return _youtube_callback_step2(x, video_id, callback)

        def on_error(x):
            return _youtube_errback(x, callback)

        httpclient.grab_url(url, on_data, on_error)
    except StandardError:
        logging.exception("youtube_callback: unable to scrape YouTube Video URL")
        callback(None)
开发者ID:bluezone,项目名称:miro,代码行数:32,代码来源:flashscraper.py


示例3: callback

    def callback(headers):
        """Classify *url* from its response headers: subscribe to it as a
        feed, download it as a video, peek at the body when ambiguous, or
        hand it off as unknown.
        """
        if check_url_exists(url):
            return

        content_type = headers.get("content-type")
        if content_type:
            if filetypes.is_feed_content_type(content_type):
                # Definitely a feed -- subscribe and stop here.
                add_feeds([url])
                return

            if flashscraper.is_maybe_flashscrapable(url):
                download_video(
                    _build_entry(url, "video/x-flv", additional=metadata))
                return

            if filetypes.is_maybe_feed_content_type(content_type):
                # Ambiguous content type: fetch the body and sniff it.
                logging.info("%s content type is %s.  " "going to peek to see if it's a feed....", url, content_type)
                httpclient.grab_url(url, callback_peek, errback)
                return

        entry = _build_entry(url, content_type)
        first_enclosure = entry["enclosures"][0]
        if filetypes.is_video_enclosure(first_enclosure):
            download_video(entry)
        else:
            handle_unknown_callback(url)
开发者ID:cool-RR,项目名称:Miro,代码行数:29,代码来源:singleclick.py


示例4: add_subscription_url

def add_subscription_url(prefix, expected_content_type, url):
    """Download and apply the subscription file at ``url[len(prefix):]``.

    The content type of the response must match *expected_content_type*;
    any mismatch, parse failure, or download error is reported to the
    user via _complain_about_subscription_url.
    """
    real_url = url[len(prefix):]

    def on_download(info):
        if info.get('content-type') != expected_content_type:
            # Server handed back something other than a subscription file.
            _complain_about_subscription_url(_(
                "This %(appname)s podcast file has the wrong content type: "
                "%(url)s. Please notify the publisher of this file.",
                {"appname": app.config.get(prefs.SHORT_APP_NAME),
                 "url": real_url}
                ))
            return
        subscription_list = autodiscover.parse_content(info['body'])
        if subscription_list is None:
            _complain_about_subscription_url(_(
                "This %(appname)s podcast file has an invalid format: "
                "%(url)s.  Please notify the publisher of this file.",
                {"appname": app.config.get(prefs.SHORT_APP_NAME),
                 "url": real_url}
                ))
        else:
            subscription.Subscriber().add_subscriptions(subscription_list)

    def on_error(error):
        _complain_about_subscription_url(_(
            "Could not download the %(appname)s podcast file: %(url)s",
            {"appname": app.config.get(prefs.SHORT_APP_NAME),
             "url": real_url}
            ))

    httpclient.grab_url(real_url, on_download, on_error)
开发者ID:bbucommander,项目名称:miro,代码行数:34,代码来源:commandline.py


示例5: request_icon

    def request_icon(self):
        """Kick off (or skip) an asynchronous fetch of this item's icon.

        Bails out early when this cache entry has been removed, when an
        update is already in flight (setting needsUpdate so it reruns
        afterwards), or when the current file already satisfies the url.
        Otherwise fetches the icon over HTTP via httpclient.grab_url.
        """
        if self.removed:
            # Entry was discarded; just let the updater advance.
            app.icon_cache_updater.update_finished()
            return

        self.dbItem.confirm_db_thread()
        if self.updating:
            # A fetch is in progress; remember to redo it once it finishes.
            self.needsUpdate = True
            app.icon_cache_updater.update_finished()
            return

        if hasattr(self.dbItem, "get_thumbnail_url"):
            url = self.dbItem.get_thumbnail_url()
        else:
            url = self.url

        # Only verify each icon once per run unless the url changes
        if (url == self.url and self.filename
                and fileutil.access(self.filename, os.R_OK)):
            app.icon_cache_updater.update_finished()
            return

        self.updating = True

        # No need to extract the icon again if we already have it.
        # NOTE(review): local paths (absolute or file://) are routed through
        # error_callback -- presumably it handles the non-HTTP case; confirm.
        if url is None or url.startswith(u"/") or url.startswith(u"file://"):
            self.error_callback(url)
            return

        # Last try, get the icon from HTTP.
        httpclient.grab_url(url, lambda info: self.update_icon_cache(url, info),
                lambda error: self.error_callback(url, error))
开发者ID:CodeforEvolution,项目名称:miro,代码行数:32,代码来源:iconcache.py


示例6: query_7digital

 def query_7digital(self, release_id):
     """Look up *release_id* against 7digital, consulting the cache first."""
     if release_id in self.seven_digital_cache:
         self.handle_7digital_cache_hit(release_id)
         return
     # Cache miss: remember which release we asked about, then fetch it.
     self.release_id = release_id
     seven_digital_url = self._make_7digital_url(release_id)
     httpclient.grab_url(seven_digital_url,
                         self.seven_digital_callback,
                         self.seven_digital_errback)
开发者ID:ShriramK,项目名称:miro,代码行数:9,代码来源:echonest.py


示例7: query_echonest_with_code

 def query_echonest_with_code(self, code, version, metadata):
     """POST an audio fingerprint *code* to the echonest identify endpoint."""
     url = 'http://echonest.pculture.org/api/v4/song/identify?'
     request_vars = {
         'api_key': ECHO_NEST_API_KEY,
         'bucket': ['tracks', 'id:7digital'],
         'query': self._make_echonest_query(code, version, metadata),
     }
     httpclient.grab_url(url,
                         self.echonest_callback, self.echonest_errback,
                         post_vars=request_vars)
开发者ID:ShriramK,项目名称:miro,代码行数:10,代码来源:echonest.py


示例8: _scrape_vimeo_moogaloop_url

def _scrape_vimeo_moogaloop_url(url, callback):
    """Turn a vimeo moogaloop embed URL into a clip-info request."""
    try:
        clip_id = MEGALOOP_RE.match(url).group(2)
        url = u"http://www.vimeo.com/moogaloop/load/clip:%s" % clip_id
        on_data = lambda x: _scrape_vimeo_callback(x, callback)
        on_error = lambda x: _scrape_vimeo_errback(x, callback)
        httpclient.grab_url(url, on_data, on_error)
    except StandardError:
        # Malformed URL (no regex match) or a failure starting the request.
        logging.warning("Unable to scrape vimeo.com moogaloop URL: %s", url)
        callback(None)
开发者ID:codito,项目名称:miro,代码行数:10,代码来源:flashscraper.py


示例9: query_echonest_with_echonest_id

 def query_echonest_with_echonest_id(self, echonest_id):
     """Fetch the echonest song profile for an already-known echonest id."""
     query = urllib.urlencode([
         ('api_key', ECHO_NEST_API_KEY),
         ('bucket', 'tracks'),
         ('bucket', 'id:7digital'),
         ('id', echonest_id),
     ])
     url = 'http://echonest.pculture.org/api/v4/song/profile?' + query
     httpclient.grab_url(url,
                         self.echonest_callback, self.echonest_errback)
开发者ID:CodeforEvolution,项目名称:miro,代码行数:11,代码来源:echonest.py


示例10: _scrape_veohtv_video_url

def _scrape_veohtv_video_url(url, callback):
    """Rewrite a Veoh page URL into its movieList.html data request."""
    try:
        query = cgi.parse_qs(urlparse.urlsplit(url)[3])
        # Both parameters are required; a KeyError falls through to the
        # StandardError handler below.
        kind = query['type'][0]
        permalink_id = query['permalinkId'][0]
        url = u'http://www.veoh.com/movieList.html?type=%s&permalinkId=%s&numResults=45' % (kind, permalink_id)
        httpclient.grab_url(url, lambda x: _scrape_veohtv_callback(x, callback),
                           lambda x: _scrape_veohtv_errback(x, callback))
    except StandardError:
        logging.warning("unable to scrape Veoh URL: %s", url)
        callback(None)
开发者ID:bluezone,项目名称:miro,代码行数:12,代码来源:flashscraper.py


示例11: _scrape_vimeo_video_url_try_2

def _scrape_vimeo_video_url_try_2(url, callback, vimeo_id):
    """Try scraping vimeo URLs by scraping the javascript code.

    This method seems less reliable than the regular method, but it works
    for private videos.  See #19305
    """
    video_url = u'http://vimeo.com/%s' % vimeo_id

    def on_data(x):
        return _scrape_vimeo_download_try_2_callback(x, callback, vimeo_id)

    def on_error(x):
        return _scrape_vimeo_download_errback(x, callback, url)

    httpclient.grab_url(video_url, on_data, on_error)
开发者ID:CodeforEvolution,项目名称:miro,代码行数:13,代码来源:flashscraper.py


示例12: _scrape_vimeo_video_url

def _scrape_vimeo_video_url(url, callback):
    """Resolve a vimeo page URL through the moogaloop clip loader."""
    try:
        clip_id = VIMEO_RE.match(url).group(2)
        url = u"http://www.vimeo.com/moogaloop/load/clip:%s" % clip_id
        httpclient.grab_url(
            url,
            lambda x: _scrape_vimeo_callback(x, callback),
            lambda x: _scrape_vimeo_errback(x, callback))
    except (SystemExit, KeyboardInterrupt):
        # Never swallow interpreter shutdown or Ctrl-C.
        raise
    except:
        # Anything else (bad match, request failure) means we give up.
        logging.warning("Unable to scrape vimeo.com video URL: %s", url)
        callback(None)
开发者ID:nxmirrors,项目名称:miro,代码行数:13,代码来源:flashscraper.py


示例13: _scrape_vmix_video_url

def _scrape_vmix_video_url(url, callback):
    """Rewrite a VMix page URL into its videos.php data request."""
    try:
        query = cgi.parse_qs(urlparse.urlsplit(url)[3])
        url = (u"http://sdstage01.vmix.com/videos.php?type=%s&id=%s&l=%s" %
               (query['type'][0], query['id'][0], query['l'][0]))
        httpclient.grab_url(url, lambda x: _scrape_vmix_callback(x, callback),
                           lambda x: _scrape_vmix_errback(x, callback))

    except StandardError:
        # Missing query parameters or a failure starting the request.
        logging.warning("unable to scrape VMix Video URL: %s", url)
        callback(None)
开发者ID:bluezone,项目名称:miro,代码行数:15,代码来源:flashscraper.py


示例14: get_metainfo

    def get_metainfo(self):
        """Ensure torrent metainfo is available, reading or downloading it.

        file:// urls are read synchronously from disk; any other url is
        fetched asynchronously with httpclient.grab_url.  Already-loaded
        metainfo short-circuits straight to got_metainfo().
        """
        if self.metainfo is None:
            if self.url.startswith('file://'):
                path = get_file_url_path(self.url)
                try:
                    metainfoFile = open(path, 'rb')
                except IOError:
                    # The .torrent vanished from disk; surface a user-visible
                    # error instead of crashing.
                    self.handle_error(
                        _("Torrent file deleted"),
                        _("The torrent file for this item was deleted "
                          "outside of %(appname)s.",
                          {"appname": app.config.get(prefs.SHORT_APP_NAME)}
                          ))

                    return
                try:
                    metainfo = metainfoFile.read()
                finally:
                    metainfoFile.close()

                self.handle_metainfo(metainfo)
            else:
                # NOTE(review): check_description presumably vets the payload
                # before the download is accepted -- confirm in httpclient.
                self.description_client = httpclient.grab_url(self.url,
                        self.on_metainfo_download,
                        self.on_metainfo_download_error,
                        content_check_callback=self.check_description)
        else:
            self.got_metainfo()
开发者ID:nxmirrors,项目名称:miro,代码行数:28,代码来源:download.py


示例15: __init__

    def __init__(self, report, description, send_database):
        """Assemble a crash report and POST it asynchronously to BOGON_URL.

        :param report: crash log text (str or unicode).
        :param description: user-entered description (str or unicode).
        :param send_database: when true, attach a zipped backup of the
            support directory.
        Defines a 'finished' signal on this emitter.
        """
        signals.SignalEmitter.__init__(self)
        self.create_signal('finished')

        self.is_done = False

        backupfile = None
        if send_database:
            try:
                logging.info("Sending entire database")
                backupfile = self._backup_support_dir()
            except StandardError:
                # Best effort: a failed backup must not block the report.
                logging.exception("Failed to backup database")

        # Normalize byte strings to unicode via the user's locale, then
        # re-encode as utf-8 for transmission, dropping unencodable chars.
        if isinstance(report, str):
            report = report.decode(locale.getpreferredencoding())
        report = report.encode("utf-8", "ignore")
        if isinstance(description, str):
            description = description.decode(locale.getpreferredencoding())
        description = description.encode("utf-8", "ignore")
        post_vars = {"description": description,
                     "app_name": app.config.get(prefs.LONG_APP_NAME),
                     "log": report}
        if backupfile:
            post_files = {"databasebackup":
                              {"filename": "databasebackup.zip",
                               "mimetype": "application/octet-stream",
                               "handle": backupfile,
                               }}
        else:
            post_files = None
        logging.info("Sending crash report....")
        self.client = httpclient.grab_url(BOGON_URL,
                           self.callback, self.errback,
                           post_vars=post_vars, post_files=post_files)
开发者ID:ShriramK,项目名称:miro,代码行数:35,代码来源:controller.py


示例16: add_subscriptions

    def add_subscriptions(self, subscriptions_list, parent_folder=None):
        """
        We loop through the list of subscriptions, creating things as
        we go (if needed).  We also keep track of what we've added.

        Each type (folder, feed, site, download) gets dispatched to
        one of our methods.  Each dispatcher returns True if it's
        added the subscription, anything else if it's been ignored for
        some reason (generally because it's already present in the
        DB).

        The only exception to this is the 'folder' type, which has the
        same return signature as this method.

        Returns a tuple of dictionaries (added, ignored).  Each
        dictionary maps a subscription type (feed, site, download) to
        the number of added/ignored items in this subscription.
        """
        added = {}
        ignored = {}
        for subscription in subscriptions_list:
            subscription_type = subscription['type']
            # Dispatch dynamically: a handle_<type> method must exist on
            # this object, otherwise we raise ValueError below.
            handler = getattr(self, 'handle_%s' % subscription_type, None)
            if handler:
                # NOTE(review): the trackback URL appears to be a
                # fire-and-forget ping -- both callbacks discard the result.
                trackback = subscription.get('trackback')
                if trackback:
                    httpclient.grab_url(trackback, lambda x: None,
                                       lambda x: None)
                ret = handler(subscription, parent_folder)
                if ret:
                    if subscription_type == 'folder':
                        # Folders return (added, ignored) dicts of their own;
                        # merge their per-type lists into ours.
                        for key, value in ret[0].items():
                            added.setdefault(key, [])
                            added[key].extend(value)
                        for key, value in ret[1].items():
                            ignored.setdefault(key, [])
                            ignored[key].extend(value)
                    else:
                        added.setdefault(subscription_type, [])
                        added[subscription_type].append(subscription)
                else:
                    ignored.setdefault(subscription_type, [])
                    ignored[subscription_type].append(subscription)
            else:
                raise ValueError('unknown subscription type: %s' %
                                 subscription_type)
        return added, ignored
开发者ID:CodeforEvolution,项目名称:miro,代码行数:47,代码来源:subscription.py


示例17: query_echonest_with_tags

 def query_echonest_with_tags(self, metadata):
     """Search echonest by title/artist tags from *metadata*."""
     params = [
         ('api_key', ECHO_NEST_API_KEY),
         ('bucket', 'tracks'),
         ('bucket', 'id:7digital'),
         # In case there are multiple songs for the same artist/title, only
         # use the "hottest" song, AKA the most popular.
         ('results', '1'),
         ('sort', 'song_hotttnesss-desc'),
     ]
     params.extend(
         (key, metadata[key].encode('utf-8'))
         for key in ('title', 'artist') if key in metadata)
     url = ('http://echonest.pculture.org/api/v4/song/search?' +
             urllib.urlencode(params))
     httpclient.grab_url(url, self.echonest_callback,
                         self.echonest_errback)
开发者ID:ShriramK,项目名称:miro,代码行数:17,代码来源:echonest.py


示例18: _scrape_vimeo_video_url

def _scrape_vimeo_video_url(url, callback, countdown=10):
    """Ask vimeo's download endpoint for the clip behind *url*.

    ``countdown`` is accepted but unused here; it is kept for
    signature compatibility with callers.
    """
    try:
        clip_id = VIMEO_RE.match(url).group(2)
        url = 'http://vimeo.com/%s?action=download' % clip_id
        # NOTE(review): headers mimic an in-page XHR from a desktop
        # browser; presumably vimeo requires them -- confirm.
        request_headers = {
            'Referer': 'http://vimeo.com/%s' % clip_id,
            'X-Requested-With': 'XMLHttpRequest',
            'User-Agent': ('Mozilla/5.0 (X11; Linux x86_64) '
                           'AppleWebKit/536.11 (KHTML, like Gecko) '
                           'Chrome/20.0.1132.8 Safari/536.11')
            }
        httpclient.grab_url(
            url,
            lambda x: _scrape_vimeo_download_callback(x, callback),
            lambda x: _scrape_vimeo_download_errback(x, callback, url),
            extra_headers=request_headers)
    except StandardError:
        logging.exception("Unable to scrape vimeo.com video URL: %s", url)
        callback(None)
开发者ID:bluezone,项目名称:miro,代码行数:18,代码来源:flashscraper.py


示例19: check_for_updates

def check_for_updates(up_to_date_callback=None):
    """Query the AUTOUPDATE_URL appcast for a more recent version.

    ``up_to_date_callback`` is an optional zero-argument callable that
    returns nothing; it is forwarded to the appcast handler.
    """
    import miro.plat
    # Some platform builds ship with autoupdate compiled out entirely.
    if miro.plat.AUTOUPDATE == False:
        logging.info("this platform has autoupdate disabled.  skipping.")
        return

    global check_in_progress
    if check_in_progress:
        # A previous check has not finished yet; do not start another.
        return
    check_in_progress = True
    logging.info("Checking for updates...")
    url = app.config.get(prefs.AUTOUPDATE_URL)

    def update_handler(data):
        return _handle_app_cast(data, up_to_date_callback)

    grab_url(url, update_handler, _handle_error)
开发者ID:nxmirrors,项目名称:miro,代码行数:20,代码来源:autoupdate.py


示例20: start_download

    def start_download(self, resume=True):
        """Begin (or restart) the HTTP download for this item.

        :param resume: attempt to resume a partial download; downgraded
            to a fresh start when the on-disk state fails the sanity
            check.
        """
        if self.retryDC:
            # Cancel any pending scheduled retry before starting anew.
            self.retryDC.cancel()
            self.retryDC = None
        if resume:
            resume = self._resume_sanity_check()

        logging.info("start_download: %s", self.url)

        self.client = httpclient.grab_url(
            self.url, self.on_download_finished, self.on_download_error,
            header_callback=self.on_headers, write_file=self.filename,
            resume=resume)
        self.update_client()
        # Periodically refresh download statistics while the transfer runs.
        eventloop.add_timeout(self.CHECK_STATS_TIMEOUT, self.update_stats,
                'update http downloader stats')
开发者ID:nxmirrors,项目名称:miro,代码行数:16,代码来源:download.py



注:本文中的miro.httpclient.grab_url函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python item.Item类代码示例发布时间:2022-05-27
下一篇:
Python gtcache.ngettext函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap