
Python scraper_utils.gv_get_quality Function Code Examples


This article collects typical usage examples of the Python function salts_lib.scraper_utils.gv_get_quality. If you have been wondering what gv_get_quality does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the gv_get_quality function, ordered by popularity by default.
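All twenty examples share the same dispatch logic: resolve the direct hostname of a candidate stream URL, call gv_get_quality only when that host is 'gvideo' (a Google Video direct link), and otherwise derive the quality from a pixel height via height_get_quality or fall back to a QUALITIES constant. Below is a minimal sketch of that shared pattern; it is not taken from any of the scrapers, and the helper name pick_quality as well as the import paths are assumptions made for illustration.

 from salts_lib import scraper_utils
 from salts_lib.constants import QUALITIES

 def pick_quality(scraper, stream_url, height=None):
     # Hypothetical helper distilling the pattern used in the examples below.
     host = scraper_utils.get_direct_hostname(scraper, stream_url)
     if host == 'gvideo':
         # Google Video URLs carry their own quality marker, so decode it directly
         return scraper_utils.gv_get_quality(stream_url)
     elif height is not None:
         # map a known pixel height (e.g. 720) onto a QUALITIES constant
         return scraper_utils.height_get_quality(height)
     # no better information available: default to HIGH, as most examples do
     return QUALITIES.HIGH

Note that the snippets call host detection in two equivalent ways: as an instance method (self._get_direct_hostname(url)) in older scrapers and as a module-level function (scraper_utils.get_direct_hostname(self, url)) in newer ones.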

Example 1: get_sources

 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if source_url and source_url != FORCE_NO_MATCH:
         url = urlparse.urljoin(self.base_url, source_url)
         html = self._http_get(url, cache_limit=.5)
         fragment = dom_parser.parse_dom(html, 'div', {'class': '[^"]*screen[^"]*'})
         if fragment:
             js_src = dom_parser.parse_dom(fragment[0], 'script', ret='src')
             if js_src:
                 js_url = urlparse.urljoin(self.base_url, js_src[0])
                 html = self._http_get(js_url, cache_limit=.5)
             else:
                 html = fragment[0]
                 
             for match in re.finditer('<source[^>]+src="([^"]+)', html):
                 stream_url = match.group(1)
                 host = self._get_direct_hostname(stream_url)
                 if host == 'gvideo':
                     quality = scraper_utils.gv_get_quality(stream_url)
                 elif 'blogspot' in stream_url:
                     quality = scraper_utils.gv_get_quality(stream_url)
                 else:
                     _, _, height, _ = scraper_utils.parse_movie_link(stream_url)
                     quality = scraper_utils.height_get_quality(height)
                     stream_url += '|User-Agent=%s' % (scraper_utils.get_ua())
                     
                 hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                 hosters.append(hoster)
     return hosters
Developer: c0ns0le, Project: YCBuilds, Lines: 30, Source: rainierland_scraper.py


Example 2: __get_gk_links

 def __get_gk_links(self, link, iframe_url):
     sources = {}
     data = {'link': link}
     headers = XHR
     headers.update({'Referer': iframe_url, 'User-Agent': USER_AGENT})
     html = self._http_get(GK_URL, data=data, headers=headers, cache_limit=.25)
     js_data = scraper_utils.parse_json(html, GK_URL)
     if 'link' in js_data:
         if isinstance(js_data['link'], basestring):
             stream_url = js_data['link']
             if self._get_direct_hostname(stream_url) == 'gvideo':
                 temp = self._parse_google(stream_url)
                 for source in temp:
                     sources[source] = {'quality': scraper_utils.gv_get_quality(source), 'direct': True}
             else:
                 sources[stream_url] = {'quality': QUALITIES.HIGH, 'direct': False}
         else:
             for link in js_data['link']:
                 stream_url = link['link']
                 if self._get_direct_hostname(stream_url) == 'gvideo':
                     quality = scraper_utils.gv_get_quality(stream_url)
                 elif 'label' in link:
                     quality = scraper_utils.height_get_quality(link['label'])
                 else:
                     quality = QUALITIES.HIGH
                 sources[stream_url] = {'quality': quality, 'direct': True}
     return sources
Developer: freeworldxbmc, Project: KAOSbox-Repo, Lines: 27, Source: pubfilm_scraper.py


Example 3: get_sources

    def get_sources(self, video):
        source_url = self.get_url(video)
        sources = {}
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=0.5)
            for match in re.finditer("embeds\[(\d+)\]\s*=\s*'([^']+)", html):
                match = re.search('src="([^"]+)', match.group(2))
                if match:
                    iframe_url = match.group(1)
                    if "play-en.php" in iframe_url:
                        match = re.search('id=([^"&]+)', iframe_url)
                        if match:
                            proxy_link = match.group(1)
                            proxy_link = proxy_link.split("*", 1)[-1]
                            picasa_url = scraper_utils.gk_decrypt(self.get_name(), GK_KEY, proxy_link)
                            for stream_url in self._parse_google(picasa_url):
                                sources[stream_url] = {
                                    "quality": scraper_utils.gv_get_quality(stream_url),
                                    "direct": True,
                                }
                    else:
                        html = self._http_get(iframe_url, cache_limit=0.25)
                        temp_sources = self._parse_sources_list(html)
                        for source in temp_sources:
                            if "download.php" in source:
                                redir_html = self._http_get(source, allow_redirect=False, method="HEAD", cache_limit=0)
                                if redir_html.startswith("http"):
                                    temp_sources[redir_html] = temp_sources[source]
                                    del temp_sources[source]
                        sources.update(temp_sources)
                        for source in dom_parser.parse_dom(html, "source", {"type": "video/mp4"}, ret="src"):
                            sources[source] = {"quality": QUALITIES.HD720, "direct": True}

        for source in sources:
            host = self._get_direct_hostname(source)
            stream_url = source + "|User-Agent=%s" % (scraper_utils.get_ua())
            if host == "gvideo":
                quality = scraper_utils.gv_get_quality(source)
            else:
                quality = sources[source]["quality"]
                if quality not in Q_ORDER:
                    quality = QUALITY_MAP.get(sources[source]["quality"], QUALITIES.HIGH)

            hoster = {
                "multi-part": False,
                "url": stream_url,
                "host": host,
                "class": self,
                "quality": quality,
                "views": None,
                "rating": None,
                "direct": True,
            }
            hosters.append(hoster)

        return hosters
Developer: EPiC-APOC, Project: repository.xvbmc, Lines: 58, Source: moviesplanet_scraper.py


Example 4: get_sources

    def get_sources(self, video):
        source_url = self.get_url(video)
        sources = {}
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=0)
        for match in re.finditer("embeds\[(\d+)\]\s*=\s*'([^']+)", html):
            match = re.search('src="([^"]+)', match.group(2))
            if match:
                iframe_url = match.group(1)
                if 'play-en.php' in iframe_url:
                    match = re.search('id=([^"&]+)', iframe_url)
                    if match:
                        proxy_link = match.group(1)
                        proxy_link = proxy_link.split('*', 1)[-1]
                        picasa_url = scraper_utils.gk_decrypt(self.get_name(), GK_KEY, proxy_link)
                        for stream_url in scraper_utils.parse_google(self, picasa_url):
                            sources[stream_url] = {'quality': scraper_utils.gv_get_quality(stream_url), 'direct': True}
                else:
                    html = self._http_get(iframe_url, cache_limit=0)
                    temp_sources = scraper_utils.parse_sources_list(self, html)
                    for source in temp_sources:
                        if 'download.php' in source:
                            redir_html = self._http_get(source, allow_redirect=False, method='HEAD', cache_limit=0)
                            if redir_html.startswith('http'):
                                temp_sources[redir_html] = temp_sources[source]
                                del temp_sources[source]
                    sources.update(temp_sources)
                    for source in dom_parser2.parse_dom(html, 'source', {'type': 'video/mp4'}, req='src'):
                        sources[source.attrs['src']] = {'quality': QUALITIES.HD720, 'direct': True, 'referer': iframe_url}
                                
        for source, values in sources.iteritems():
            host = scraper_utils.get_direct_hostname(self, source)
            headers = {'User-Agent': scraper_utils.get_ua()}
            if 'referer' in values: headers['Referer'] = values['referer']
            stream_url = source + scraper_utils.append_headers(headers)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(source)
            else:
                quality = values['quality']
                if quality not in Q_ORDER:
                    quality = QUALITY_MAP.get(values['quality'], QUALITIES.HIGH)
                    
            hoster = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
            hosters.append(hoster)

        return hosters
Developer: CYBERxNUKE, Project: xbmc-addon, Lines: 48, Source: moviesplanet_scraper.py


Example 5: __get_links

    def __get_links(self, url):
        sources = []
        match = re.search('src="([^"]+)', url)
        if match:
            url = match.group(1).replace('\\/', '/')
            html = self._http_get(url, cache_limit=0)
            match = re.search('<script\s+src="([^\']+)\'\+(\d+)\+\'([^\']+)', html)
            if match:
                page_url = ''.join(match.groups())
                page_url += str(random.random())
                html = self._http_get(page_url, cache_limit=0)
                
            for match in re.finditer('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"', html):
                stream_url, height = match.groups()
                stream_url = stream_url.replace('\\&', '&').replace('\\/', '/')
                if 'v.asp' in stream_url and 'ok.ru' not in url:
                    stream_redirect = self._http_get(stream_url, allow_redirect=False, cache_limit=0)
                    if stream_redirect: stream_url = stream_redirect

                if self._get_direct_hostname(stream_url) == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                else:
                    quality = scraper_utils.height_get_quality(height)
                        
                host = self._get_direct_hostname(stream_url)
                stream_url += '|User-Agent=%s&Referer=%s' % (scraper_utils.get_ua(), urllib.quote(url))
                hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                sources.append(hoster)
        return sources
Developer: assli100, Project: kodi-openelec, Lines: 29, Source: sezonlukdizi_scraper.py


Example 6: get_sources

 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if source_url and source_url != FORCE_NO_MATCH:
         page_url = urlparse.urljoin(self.base_url, source_url)
         html = self._http_get(page_url, cache_limit=.5)
         match = re.search('var\s*video_id="([^"]+)', html)
         if match:
             video_id = match.group(1)
             url = urlparse.urljoin(self.base_url, VIDEO_URL)
             data = {'v': video_id}
             headers = XHR
             headers['Referer'] = page_url
             html = self._http_get(url, data=data, headers=headers, cache_limit=.5)
             sources = scraper_utils.parse_json(html, url)
             for source in sources:
                 match = re.search('url=(.*)', sources[source])
                 if match:
                     stream_url = urllib.unquote(match.group(1))
                     host = self._get_direct_hostname(stream_url)
                     if host == 'gvideo':
                         quality = scraper_utils.gv_get_quality(stream_url)
                     else:
                         quality = scraper_utils.height_get_quality(source)
                     stream_url += '|User-Agent=%s' % (scraper_utils.get_ua())
                     hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                     hosters.append(hoster)
     return hosters
Developer: assli100, Project: kodi-openelec, Lines: 28, Source: xmovies8_scraper.py


Example 7: get_sources

    def get_sources(self, video):
        source_url = self.get_url(video)
        sources = []
        if source_url and source_url != FORCE_NO_MATCH:
            page_url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(page_url, cache_limit=8)
            q_str = dom_parser.parse_dom(html, 'div', {'class': 'poster-qulabel'})
            if q_str:
                q_str = q_str[0].replace(' ', '').upper()
                page_quality = Q_MAP.get(q_str, QUALITIES.HIGH)
            else:
                page_quality = QUALITIES.HIGH
                
            for fragment in dom_parser.parse_dom(html, 'div', {'class': 'tab_box'}):
                match = re.search('file\s*:\s*"([^"]+)', fragment)
                if match:
                    stream_url = match.group(1)
                else:
                    stream_url = self.__get_ajax_sources(fragment, page_url)
                    
                if stream_url:
                    host = self._get_direct_hostname(stream_url)
                    if host == 'gvideo':
                        quality = scraper_utils.gv_get_quality(stream_url)
                    else:
                        quality = page_quality
                        
                    stream_url += '|User-Agent=%s&Referer=%s' % (scraper_utils.get_ua(), urllib.quote(page_url))
                    source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
                    sources.append(source)

        return sources
Developer: EPiC-APOC, Project: repository.xvbmc, Lines: 32, Source: moviego_scraper.py


Example 8: get_sources

 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     sources = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     page_url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(page_url, cache_limit=1)
     fragment = dom_parser2.parse_dom(html, 'div', {'class': 'player'})
     if not fragment: return hosters
     
     iframe_url = dom_parser2.parse_dom(fragment[0].content, 'iframe', req='src')
     if not iframe_url: return hosters
     
     html = self._http_get(iframe_url[0].attrs['src'], cache_limit=.25)
     sources.append(self.__get_embedded_sources(html))
     sources.append(self.__get_linked_sources(html))
     for source in sources:
         for stream_url in source['sources']:
             host = scraper_utils.get_direct_hostname(self, stream_url)
             if host == 'gvideo':
                 stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
                 quality = scraper_utils.gv_get_quality(stream_url)
                 hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                 hoster['subs'] = source.get('subs', True)
                 hosters.append(hoster)
 
     return hosters
Developer: CYBERxNUKE, Project: xbmc-addon, Lines: 27, Source: diziay_scraper.py


Example 9: get_sources

 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     sources = {}
     if source_url and source_url != FORCE_NO_MATCH:
         url = urlparse.urljoin(self.base_url, source_url)
         html = self._http_get(url, cache_limit=.5)
         fragment = dom_parser.parse_dom(html, 'div', {'class': 'meta-media'})
         if fragment:
             iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
             if iframe_url:
                 iframe_url = urlparse.urljoin(self.base_url, iframe_url[0])
                 html = self._http_get(iframe_url, cache_limit=.5)
                 for match in re.finditer('window.location.href\s*=\s*"([^"]+)', html):
                     stream_url = match.group(1)
                     host = self._get_direct_hostname(stream_url)
                     if host == 'gvideo':
                         sources[stream_url] = scraper_utils.gv_get_quality(stream_url)
                     else:
                         sources[stream_url] = QUALITIES.HIGH
                 
         for source in sources:
             # build one hoster entry per collected source URL
             host = self._get_direct_hostname(source)
             stream_url = source + '|User-Agent=%s' % (scraper_utils.get_ua())
             hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': sources[source], 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
             hosters.append(hoster)
     return hosters
Developer: freeworldxbmc, Project: KAOSbox-Repo, Lines: 27, Source: firemovies_scraper.py


Example 10: get_sources

 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     js_url = scraper_utils.urljoin(self.base_url, '/javascript/movies.js')
     html = self._http_get(js_url, cache_limit=48)
     if source_url.startswith('/'):
         source_url = source_url[1:]
     pattern = '''getElementById\(\s*"%s".*?play\(\s*'([^']+)''' % (source_url)
     match = re.search(pattern, html, re.I)
     if match:
         stream_url = match.group(1)
         if 'drive.google' in stream_url or 'docs.google' in stream_url:
             sources = scraper_utils.parse_google(self, stream_url)
         else:
             sources = [stream_url]
         
         for source in sources:
             stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
             host = scraper_utils.get_direct_hostname(self, source)
             if host == 'gvideo':
                 quality = scraper_utils.gv_get_quality(source)
                 direct = True
             elif 'youtube' in stream_url:
                 quality = QUALITIES.HD720
                 direct = False
                 host = 'youtube.com'
             else:
                 quality = QUALITIES.HIGH
                 direct = True
             hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
             hosters.append(hoster)
     return hosters
Developer: CYBERxNUKE, Project: xbmc-addon, Lines: 33, Source: piratejunkies_scraper.py


Example 11: __get_links_from_playlist

 def __get_links_from_playlist(self, grab_url, headers):
     sources = {}
     grab_url = grab_url.replace('\\', '')
     grab_html = self._http_get(grab_url, headers=headers, cache_limit=.5)
     js_data = scraper_utils.parse_json(grab_html, grab_url)
     try: playlist = js_data['playlist'][0]['sources']
     except: playlist = []
     for item in playlist:
         stream_url = item.get('file')
         if stream_url:
             if stream_url.startswith('/'):
                 stream_url = scraper_utils.urljoin(self.base_url, stream_url)
                 redir_url = self._http_get(stream_url, headers=headers, allow_redirect=False, method='HEAD')
                 if redir_url.startswith('http'):
                     stream_url = redir_url
             
             if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                 quality = scraper_utils.gv_get_quality(stream_url)
             elif 'label' in item:
                 quality = scraper_utils.height_get_quality(item['label'])
             else:
                 quality = QUALITIES.HIGH
             
             logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
             sources[stream_url] = {'quality': quality, 'direct': True}
             if not kodi.get_setting('scraper_url'): break
     return sources
Developer: CYBERxNUKE, Project: xbmc-addon, Lines: 27, Source: watch5s_scraper.py


Example 12: get_sources

    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            page_url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(page_url, cache_limit=.5)
            # exit early if trailer
            if re.search('Şu an fragman*', html, re.I):
                return hosters
            
            match = re.search('''url\s*:\s*"([^"]+)"\s*,\s*data:\s*["'](id=\d+)''', html)
            if match:
                url, data = match.groups()
                url = urlparse.urljoin(self.base_url, url)
                result = self._http_get(url, data=data, headers=XHR, cache_limit=.5)
                for match in re.finditer('"videolink\d*"\s*:\s*"([^"]+)","videokalite\d*"\s*:\s*"?(\d+)p?', result):
                    stream_url, height = match.groups()
                    stream_url = stream_url.replace('\\/', '/')
                    host = self._get_direct_hostname(stream_url)
                    if host == 'gvideo':
                        quality = scraper_utils.gv_get_quality(stream_url)
                    else:
                        quality = scraper_utils.height_get_quality(height)
                        stream_url += '|User-Agent=%s&Referer=%s' % (scraper_utils.get_ua(), urllib.quote(page_url))

                    hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                    hosters.append(hoster)
    
        return hosters
Developer: azumimuo, Project: family-xbmc-addon, Lines: 29, Source: dizimag_scraper.py


Example 13: get_sources

 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if source_url and source_url != FORCE_NO_MATCH:
         url = urlparse.urljoin(self.base_url, source_url)
         html = self._http_get(url, cache_limit=.5)
         match = re.search('<iframe[^>]+src="([^"]+watch=([^"]+))', html)
         if match:
             iframe_url, link_id = match.groups()
             data = {'link': link_id}
             headers = {'Referer': iframe_url}
             gk_url = urlparse.urljoin(self.base_url, GK_URL)
             html = self._http_get(gk_url, data=data, headers=headers, cache_limit=.5)
             js_data = scraper_utils.parse_json(html, gk_url)
             if 'link' in js_data:
                 for link in js_data['link']:
                     stream_url = link['link']
                     host = self._get_direct_hostname(stream_url)
                     if host == 'gvideo':
                         quality = scraper_utils.gv_get_quality(stream_url)
                     else:
                         quality = scraper_utils.height_get_quality(link['label'])
                     hoster = {'multi-part': False, 'url': stream_url, 'class': self, 'quality': quality, 'host': host, 'rating': None, 'views': None, 'direct': True}
                     hosters.append(hoster)
     return hosters
Developer: assli100, Project: kodi-openelec, Lines: 25, Source: yifystreaming_scraper.py


Example 14: get_sources

    def get_sources(self, video):
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=8)
        for _attrs, fragment in dom_parser2.parse_dom(html, 'div', {'class': 'movieplay'}):
            iframe_src = dom_parser2.parse_dom(fragment, 'iframe', req='src')
            if iframe_src:
                iframe_src = iframe_src[0].attrs['src']
                if re.search('o(pen)?load', iframe_src, re.I):
                    meta = scraper_utils.parse_movie_link(iframe_src)
                    quality = scraper_utils.height_get_quality(meta['height'])
                    links = {iframe_src: {'quality': quality, 'direct': False}}
                else:
                    links = self.__get_links(iframe_src, url)

                for link in links:
                    direct = links[link]['direct']
                    quality = links[link]['quality']
                    if direct:
                        host = scraper_utils.get_direct_hostname(self, link)
                        if host == 'gvideo':
                            quality = scraper_utils.gv_get_quality(link)
                        stream_url = link + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': url})
                    else:
                        host = urlparse.urlparse(link).hostname
                        stream_url = link
                        
                    source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': direct}
                    hosters.append(source)

        return hosters
Developer: CYBERxNUKE, Project: xbmc-addon, Lines: 33, Source: vebup_scraper.py


Example 15: get_sources

 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     sources = {}
     if source_url and source_url != FORCE_NO_MATCH:
         url = urlparse.urljoin(self.base_url, source_url)
         html = self._http_get(url, cache_limit=.5)
         fragment = dom_parser.parse_dom(html, 'div', {'class': 'meta-media'})
         if fragment:
             iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
             if iframe_url:
                 iframe_url = urlparse.urljoin(self.base_url, iframe_url[0])
                 html = self._http_get(iframe_url, cache_limit=.5)
                 for source in dom_parser.parse_dom(html, 'source', ret='src'):
                     source_url = urlparse.urljoin(self.base_url, source)
                     redir_url = self._http_get(source_url, allow_redirect=False, cache_limit=.5)
                     if redir_url.startswith('http'):
                         sources[redir_url] = scraper_utils.gv_get_quality(redir_url)
                     else:
                         sources[source_url] = QUALITIES.HIGH
                 
         for source in sources:
             hoster = {'multi-part': False, 'host': self._get_direct_hostname(source), 'class': self, 'quality': sources[source], 'views': None, 'rating': None, 'url': source, 'direct': True}
             hosters.append(hoster)
     return hosters
Developer: c0ns0le, Project: YCBuilds, Lines: 25, Source: firemovies_scraper.py


Example 16: __get_sources

 def __get_sources(self, html, page_url):
     sources = []
     fragment = dom_parser.parse_dom(html, 'div', {'class': 'video-content'})
     if fragment:
         iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
         if iframe_url:
             iframe_url = iframe_url[0]
             if self.base_url in iframe_url:
                 headers = {'Referer': page_url}
                 html = self._http_get(iframe_url, headers=headers, cache_limit=.5)
                 referer = iframe_url
                 iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
                 if iframe_url:
                     iframe_url = iframe_url[0]
                     headers = {'Referer': referer}
                     html = self._http_get(iframe_url, headers=headers, cache_limit=.5)
                     links = self._parse_sources_list(html)
                     for link in links:
                         host = self._get_direct_hostname(link)
                         if host == 'gvideo':
                             quality = scraper_utils.gv_get_quality(link)
                         else:
                             quality = links[link]['quality']
                         source = {'multi-part': False, 'url': link, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
                         sources.append(source)
             else:
                 host = urlparse.urlparse(iframe_url).hostname
                 source = {'multi-part': False, 'url': iframe_url, 'host': host, 'class': self, 'quality': QUALITIES.HD720, 'views': None, 'rating': None, 'direct': False}
                 sources.append(source)
     return sources
Developer: EPiC-APOC, Project: repository.xvbmc, Lines: 30, Source: veocube_scraper.py


Example 17: get_sources

 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     sources = {}
     if source_url and source_url != FORCE_NO_MATCH:
         page_url = urlparse.urljoin(self.base_url, source_url)
         html = self._http_get(page_url, cache_limit=.5)
         fragment = dom_parser.parse_dom(html, 'div', {'class': 'videos'})
         if fragment:
             for match in re.finditer('href="([^"]+)[^>]*>([^<]+)', fragment[0]):
                 page_url, page_label = match.groups()
                 page_label = page_label.lower()
                 if page_label not in ALLOWED_LABELS: continue
                 sources = self.__get_sources(page_url, ALLOWED_LABELS[page_label])
                 for source in sources:
                     host = self._get_direct_hostname(source)
                     if host == 'gvideo':
                         quality = scraper_utils.gv_get_quality(source)
                         direct = True
                         stream_url = source + '|User-Agent=%s' % (scraper_utils.get_ua())
                     elif sources[source]['direct']:
                         quality = sources[source]['quality']
                         direct = True
                         stream_url = source + '|User-Agent=%s' % (scraper_utils.get_ua())
                     else:
                         quality = sources[source]['quality']
                         direct = False
                         host = urlparse.urlparse(source).hostname
                         stream_url = source
                     
                     hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
                     if sources[source]['subs']: hoster['subs'] = 'Turkish Subtitles'
                     hosters.append(hoster)
             
     return hosters
Developer: EPiC-APOC, Project: repository.xvbmc, Lines: 35, Source: maksi_scraper.py


Example 18: __get_gk_links

 def __get_gk_links(self, html, page_url, video_type, episode):
     sources = {}
     phimid = dom_parser.parse_dom(html, 'input', {'name': 'phimid'}, ret='value')
     if phimid and video_type == VIDEO_TYPES.EPISODE:
         url = urlparse.urljoin(self.tv_base_url, '/ajax.php')
         data = {'ipos_server': 1, 'phimid': phimid[0], 'keyurl': episode}
         headers = XHR
         headers['Referer'] = page_url
         html = self._http_get(url, data=data, headers=headers, cache_limit=.5)
         
     for link in dom_parser.parse_dom(html, 'div', {'class': '[^"]*server_line[^"]*'}):
         film_id = dom_parser.parse_dom(link, 'a', ret='data-film')
         name_id = dom_parser.parse_dom(link, 'a', ret='data-name')
         server_id = dom_parser.parse_dom(link, 'a', ret='data-server')
         if film_id and name_id and server_id:
             data = {'ipplugins': 1, 'ip_film': film_id[0], 'ip_server': server_id[0], 'ip_name': name_id[0]}
             headers = XHR
             headers['Referer'] = page_url
             url = urlparse.urljoin(self.__get_base_url(video_type), LINK_URL)
             html = self._http_get(url, data=data, headers=headers, cache_limit=.25)
             js_data = scraper_utils.parse_json(html, url)
             if 's' in js_data:
                 if isinstance(js_data['s'], basestring):
                     sources[js_data['s']] = QUALITIES.HIGH
                 else:
                     for link in js_data['s']:
                         stream_url = link['file']
                         if self._get_direct_hostname(stream_url) == 'gvideo':
                             quality = scraper_utils.gv_get_quality(stream_url)
                         elif 'label' in link:
                             quality = scraper_utils.height_get_quality(link['label'])
                         else:
                             quality = QUALITIES.HIGH
                         sources[stream_url] = quality
     return sources
Developer: kevintone, Project: tdbaddon, Lines: 35, Source: moviesub_scraper.py


Example 19: __get_gk_links

 def __get_gk_links(self, html, page_url):
     sources = {}
     for link in dom_parser.parse_dom(html, 'div', {'class': '[^"]*server_line[^"]*'}):
         film_id = dom_parser.parse_dom(link, 'a', ret='data-film')
         name_id = dom_parser.parse_dom(link, 'a', ret='data-name')
         server_id = dom_parser.parse_dom(link, 'a', ret='data-server')
         if film_id and name_id and server_id:
             data = {'ipplugins': 1, 'ip_film': film_id[0], 'ip_server': server_id[0], 'ip_name': name_id[0]}
             headers = XHR
             headers['Referer'] = page_url
             url = urlparse.urljoin(self.base_url, LINK_URL)
             html = self._http_get(url, data=data, headers=headers, cache_limit=.25)
             js_data = scraper_utils.parse_json(html, url)
             if 's' in js_data:
                 if isinstance(js_data['s'], basestring):
                     sources[js_data['s']] = QUALITIES.HIGH
                 else:
                     for link in js_data['s']:
                         stream_url = link['file']
                         if self._get_direct_hostname(stream_url) == 'gvideo':
                             quality = scraper_utils.gv_get_quality(stream_url)
                         elif 'label' in link:
                             quality = scraper_utils.height_get_quality(link['label'])
                         else:
                             quality = QUALITIES.HIGH
                         sources[stream_url] = quality
     return sources
Developer: ScriptUp, Project: salts, Lines: 27, Source: tunemovie_scraper.py


Example 20: get_sources

 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     page_url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(page_url, cache_limit=.5)
     match = re.search('var\s*video_id="([^"]+)', html)
     if not match: return hosters
     
     video_id = match.group(1)
     data = {'v': video_id}
     headers = {'Referer': page_url}
     headers.update(XHR)
     html = self._http_get(INFO_URL, data=data, headers=headers, cache_limit=.5)
     sources = scraper_utils.parse_json(html, INFO_URL)
     for source in sources:
         match = re.search('url=(.*)', sources[source])
         if not match: continue
         
         stream_url = urllib.unquote(match.group(1))
         host = scraper_utils.get_direct_hostname(self, stream_url)
         if host == 'gvideo':
             quality = scraper_utils.gv_get_quality(stream_url)
         else:
             quality = scraper_utils.height_get_quality(source)
         stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
         hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
         hosters.append(hoster)
     return hosters
Developer: CYBERxNUKE, Project: xbmc-addon, Lines: 29, Source: fmovie_scraper.py



Note: The salts_lib.scraper_utils.gv_get_quality examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Follow each project's license when using or redistributing the code; do not reproduce this compilation without permission.

