本文整理汇总了Python中resources.lib.modules.debrid.status函数的典型用法代码示例。如果您正苦于以下问题:Python status函数的具体用法?Python status怎么用?Python status使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了status函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: sources
def sources(self, url, hostDict, hostprDict):
    """Collect debrid-only sources for the given metadata url.

    Builds a search query from the title/year (or show/season/episode),
    fetches candidate items, then resolves each item on a worker thread.
    Returns the accumulated ``self._sources`` list (empty on any failure).
    """
    try:
        self._sources = []
        self.items = []
        if url is None:
            return self._sources
        # Premium-only scraper: bail out when no debrid service is enabled.
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        # Strip characters the site's search endpoint cannot handle.
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search.format(urllib.quote(query))
        self._get_items(url)

        self.hostDict = hostDict + hostprDict
        threads = [workers.Thread(self._get_sources, i) for i in self.items]
        # Resolve all items concurrently, then wait for every worker.
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        return self._sources
    except Exception:
        # BUGFIX: was ``except BaseException`` which also swallowed
        # KeyboardInterrupt/SystemExit; any scrape failure now degrades
        # to "no sources" without masking interpreter shutdown.
        return self._sources
开发者ID:YourFriendCaspian,项目名称:dotfiles,代码行数:33,代码来源:kickass2.py
示例2: movie
def movie(self, imdb, title, year):
    """Resolve a movie's site-relative page URL by title and year.

    Queries the site's AJAX search endpoint, keeps movie-category hits,
    and returns the utf-8-encoded relative URL of the first entry whose
    cleaned title and year both match, or None on any failure.
    """
    try:
        # Premium-only scraper: skip when no debrid service is enabled.
        if not debrid.status():
            raise Exception()

        t = cleantitle.get(title)

        headers = {'X-Requested-With': 'XMLHttpRequest'}
        query = self.search_link + urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)

        r = client.request(query, headers=headers)
        r = json.loads(r)

        # Keep only movie-category results carrying both a label and a URL.
        r = [i for i in r if 'category' in i and 'movie' in i['category'].lower()]
        r = [(i['url'], i['label']) for i in r if 'label' in i and 'url' in i]
        # Extract "<title> (<year>" from each label.
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        # First entry matching both cleaned title and year (IndexError
        # on no match is handled by the except below).
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        # Reduce an absolute URL to its path component.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        # BUGFIX: was a bare ``except:``; narrowed to Exception.
        return
开发者ID:msousinha,项目名称:myhome-repository,代码行数:26,代码来源:dltube_mv.py
示例3: sources
def sources(self, url, hostDict, hostprDict):
    """Collect debrid sources from the site's comment section.

    Derives the page slug from the metadata url, parses the comment
    list items, and hands each one to ``self._get_sources`` on a worker
    thread. Returns ``self._sources`` (empty on failure).
    """
    try:
        self._sources = []
        if url is None:
            return self._sources
        # Premium-only scraper: bail out when no debrid service is enabled.
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        # Strip characters the site cannot handle, then slugify.
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        query = cleantitle.geturl(query)

        url = urlparse.urljoin(self.base_link, query)
        headers = {'User-Agent': client.agent()}
        r = client.request(url, headers=headers)
        # Release links are posted as comments on the page.
        posts = dom_parser2.parse_dom(r, 'li', {'class': re.compile('.+?'), 'id': re.compile('comment-.+?')})

        self.hostDict = hostDict + hostprDict
        threads = [workers.Thread(self._get_sources, i.content) for i in posts]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        return self._sources
    except Exception:
        return self._sources
开发者ID:YourFriendCaspian,项目名称:dotfiles,代码行数:34,代码来源:scnsrc.py
示例4: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape download-server links from a movie page.

    Posts the page's movie id to the download endpoint, then parses each
    paragraph for a host name, link and quality/size info. Returns a list
    of source dicts (empty on failure); all links are debrid-only.
    """
    try:
        sources = []
        if url is None:
            return sources
        # Premium-only scraper: bail out when no debrid service is enabled.
        if not debrid.status():
            raise Exception()

        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = client.parseDOM(r, 'input', {'id': 'movie_id'}, ret='value')
        if r:
            r = client.request(urlparse.urljoin(self.base_link, self.download_link),
                               post='movie=%s' % r, referer=url)

        links = client.parseDOM(r, 'p')

        hostDict = hostprDict + hostDict
        # Map short host name -> full "host.tld" so names found on the
        # page can be resolved against the valid-host list.
        locDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]

        for link in links:
            try:
                host = re.findall('Downloads-Server(.+?)(?:\'|\")\)', link)[0]
                host = host.strip().lower().split()[-1]
                if host == 'fichier':
                    host = '1fichier'
                host = [x[1] for x in locDict if host == x[0]][0]
                if host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = client.parseDOM(link, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                r = client.parseDOM(link, 'a')[0]
                fmt = r.strip().lower().split()
                if '1080p' in fmt:
                    quality = '1080p'
                elif '720p' in fmt:
                    quality = 'HD'
                else:
                    # BUGFIX: ``quality`` was left unbound for SD releases,
                    # raising NameError and silently dropping the link.
                    quality = 'SD'

                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', r)[-1]
                    div = 1 if size.endswith(' GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    info = '%.2f GB' % size
                except Exception:
                    info = ''

                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except Exception:
                # Skip malformed paragraphs; best-effort scraping.
                pass
        return sources
    except Exception:
        return sources
开发者ID:amadu80,项目名称:repository.xvbmc,代码行数:59,代码来源:dltube.py
示例5: sources
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() is False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['title'].replace(':','').lower()
year = data['year']
query = '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = urlparse.urljoin(self.base_link, self.post_link)
post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(query)
r = client.request(url, post=post)
r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
r = [(dom_parser2.parse_dom(i, 'div', attrs={'class':'news-title'})) for i in r if data['imdb'] in i]
r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r if i]
r = [(i[0].attrs['href'], i[0].content) for i in r if i]
hostDict = hostprDict + hostDict
for item in r:
try:
name = item[1]
y = re.findall('\((\d{4})\)', name)[0]
if not y == year: raise Exception()
s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', name)
s = s[0] if s else '0'
data = client.request(item[0])
data = dom_parser2.parse_dom(data, 'div', attrs={'id': 'r-content'})
data = re.findall('\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
data[0].content, re.DOTALL)
u = [(i[0], i[1], s) for i in data if i]
for name, url, size in u:
try:
if '4K' in name:
quality = '4K'
elif '2160p' in name:
quality = '4K'
elif '1440p' in name:
quality = '4K'
elif '1080p' in name:
quality = '1080p'
elif '720p' in name:
quality = '720p'
elif any(i in ['dvdscr', 'r5', 'r6'] for i in name):
quality = 'SCR'
开发者ID:vphuc81,项目名称:MyRepository,代码行数:57,代码来源:ultrahd.py
示例6: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Return a url-encoded identifier payload for a TV show, or None.

    Bails out early when no debrid service is active — this scraper only
    serves premium links.
    """
    if debrid.status(True) is False:
        return
    try:
        show = {
            'imdb': imdb,
            'tvdb': tvdb,
            'tvshowtitle': tvshowtitle,
            'year': year,
        }
        return urllib.urlencode(show)
    except Exception:
        return
开发者ID:vphuc81,项目名称:MyRepository,代码行数:11,代码来源:zoogle.py
示例7: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Return a url-encoded identifier payload for a TV show, or None.

    Requires an active debrid service; logs a traceback on failure.
    """
    if debrid.status(True) is False:
        return
    try:
        payload = {
            'imdb': imdb,
            'tvdb': tvdb,
            'tvshowtitle': tvshowtitle,
            'year': year,
        }
        return urllib.urlencode(payload)
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('TPB - Exception: \n' + str(failure))
        return
开发者ID:vphuc81,项目名称:MyRepository,代码行数:12,代码来源:fantastic+(29).py
示例8: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Find the site page URL for a movie.

    Tries the "<title-slug>-<year>" page first, then falls back to the
    bare title slug. Returns the resolved URL or None.
    """
    try:
        # Premium-only scraper: skip when no debrid service is enabled.
        if not debrid.status():
            raise Exception()
        url = urlparse.urljoin(self.base_link, '%s-%s' % (cleantitle.geturl(title), year))
        url = client.request(url, output='geturl')
        if url is None:
            # Fall back to a year-less slug for pages without the year suffix.
            url = urlparse.urljoin(self.base_link, '%s' % (cleantitle.geturl(title)))
            url = client.request(url, output='geturl')
        if url is None:
            raise Exception()
        return url
    except Exception:
        # BUGFIX: was a bare ``except:``; narrowed to Exception.
        return
开发者ID:CYBERxNUKE,项目名称:xbmc-addon,代码行数:12,代码来源:allrls.py
示例9: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape embedded iframe links from the page at *url*.

    Returns a list of source dicts; an empty list on failure.
    """
    sources = []
    try:
        if not debrid.status():
            raise Exception()
        r = self.scraper.get(url).content
        r = re.findall('<iframe src="(.+?)"', r)
        for url in r:
            valid, host = source_utils.is_host_valid(url, hostDict)
            quality = source_utils.check_sd_url(url)
            sources.append({'source': host, 'quality': quality, 'language': 'en',
                            'url': url, 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        # BUGFIX: the except path returned None, breaking callers that
        # iterate the result; return the (possibly empty) list instead.
        return sources
开发者ID:YourFriendCaspian,项目名称:dotfiles,代码行数:13,代码来源:123moviesfree.py
示例10: sources
def sources(self, url, hostDict, hostprDict):
    """Collect premium-host episode links from the cached series index.

    Looks the show up in the cached index, gathers per-episode links
    (following "protect-links" redirect pages), and returns a list of
    debrid-only source dicts (empty on failure).
    """
    try:
        sources = []
        if url is None:
            return sources
        # Premium-only scraper: bail out when no debrid service is enabled.
        if not debrid.status():
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle']
        season = '%01d' % int(data['season'])
        episode = '%02d' % int(data['episode'])

        # Cached (url, title, season, quality) tuples for the whole site.
        r = cache.get(self.ddlseries_tvcache, 120)
        r = [(i[0], i[3]) for i in r
             if cleantitle.get(title) == cleantitle.get(i[1]) and season == i[2]]

        links = []
        for url, quality in r:
            try:
                link = client.request(url)
                vidlinks = client.parseDOM(link, 'span', attrs={'class': 'overtr'})[0]
                match = re.compile('href="([^"]+)[^>]*>\s*Episode\s+(\d+)<').findall(vidlinks)
                match = [(i[0], quality) for i in match if episode == i[1]]
                links += match
            except Exception:
                pass

        for url, quality in links:
            try:
                if "protect-links" in url:
                    # Resolve the link-protector page to the real URL.
                    redirect = client.request(url)
                    url = re.findall('<a href="(.*?)" target="_blank">', redirect)
                    url = url[0]
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if host not in hostprDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': True})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
开发者ID:azumimuo,项目名称:family-xbmc-addon,代码行数:50,代码来源:ddls.py
示例11: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Merge episode details into the show's url payload; None on failure.

    Requires an active debrid service (premium-only scraper).
    """
    if debrid.status(True) is False:
        return
    try:
        if url is None:
            return
        params = urlparse.parse_qs(url)
        params = dict([(k, params[k][0]) if params[k] else (k, '') for k in params])
        params['title'] = title
        params['premiered'] = premiered
        params['season'] = season
        params['episode'] = episode
        return urllib.urlencode(params)
    except Exception:
        return
开发者ID:vphuc81,项目名称:MyRepository,代码行数:16,代码来源:zoogle.py
示例12: sources
def sources(self, url, hostDict, hostprDict):
    """Collect debrid-only links from the site's RSS search feed.

    *url* is the metadata dict itself for this scraper. Searches the
    RSS feed, then extracts every hoster link from matching items.
    Returns a list of source dicts (empty on failure).
    """
    sources = []
    try:
        if url is None:
            return sources
        # Premium-only scraper: bail out when no debrid service is enabled.
        if not debrid.status():
            raise Exception()

        data = url  # NOTE: this scraper receives the metadata dict directly

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        r = requests.get(url).text

        posts = re.findall(r'(?s)<item>(.*?)</item>', r)
        hostDict = hostprDict + hostDict
        for post in posts:
            try:
                title = re.findall(r'<title>(.*?)</title>', post)[0]
                if query.lower() not in title.lower():
                    continue
                linksDivs = re.findall(r'(?s)<singlelink></singlelink><br />(.*?)<br />.<strong>', post)
                for div in linksDivs:
                    links = re.findall(r'<a href="(.*?)"', div)
                    for link in links:
                        quality = source_utils.get_quality_simple(link)
                        valid, host = source_utils.is_host_valid(link, hostDict)
                        if valid:
                            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': '', 'direct': False, 'debridonly': True})
            except Exception:
                # Log and keep going with the next feed item.
                traceback.print_exc()
        return sources
    except Exception:
        traceback.print_exc()
        return sources
开发者ID:varunrai,项目名称:repository.magicality,代码行数:47,代码来源:wrzcraft.py
示例13: sources
def sources(self, url, hostDict, hostprDict):
    """Build a list of torrent source dicts for the requested title.

    Picks the TV or movie search endpoint based on the metadata, fetches
    the result items, and converts each one into a debrid-only torrent
    entry. Returns an empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(k, data[k][0]) if data[k] else (k, '') for k in data])

        is_episode = 'tvshowtitle' in data
        self.title = data['tvshowtitle'] if is_episode else data['title']
        if is_episode:
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
        else:
            self.hdlr = data['year']
            query = '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        # Choose the endpoint matching the content type.
        template = self.tvsearch if is_episode else self.moviesearch
        url = urlparse.urljoin(self.base_link, template.format(urllib.quote_plus(query)))

        items = self._get_items(url)
        hostDict = hostDict + hostprDict
        for item in items:
            try:
                name = item[0]
                quality, info = source_utils.get_release_quality(name, name)
                info.append(item[2])
                link = item[1].split('&tr')[0]
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                'url': link, 'info': ' | '.join(info),
                                'direct': False, 'debridonly': True})
            except BaseException:
                pass
        return sources
    except BaseException:
        return sources
开发者ID:YourFriendCaspian,项目名称:dotfiles,代码行数:47,代码来源:glodls.py
示例14: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links for an episode from the site's search results.

    Searches by "<show> SxxExx", opens each matching result page, and
    collects every non-shortener, non-archive, non-subtitle link.
    Returns a list of source dicts (empty on failure).
    """
    sources = []
    try:
        if url is None:
            return sources
        # Premium-only scraper: bail out when no debrid service is enabled.
        if not debrid.status():
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        html = client.request(url)
        url_list = re.compile('<h2><a href="([^"]+)"', re.DOTALL).findall(html)
        hostDict = hostprDict + hostDict
        for url in url_list:
            if cleantitle.get(title) not in cleantitle.get(url):
                continue
            html = client.request(url)
            links = re.compile('href="([^"]+)" rel="nofollow"', re.DOTALL).findall(html)
            for vid_url in links:
                # Skip URL shorteners.
                if 'ouo.io' in vid_url or 'sh.st' in vid_url:
                    continue
                if 'linx' in vid_url:
                    log_utils.log('2DDL - sources - linx: ' + str(vid_url))
                    continue
                # Skip archives and subtitle files.
                if '.rar' in vid_url or '.srt' in vid_url:
                    continue
                # BUGFIX: the original read ``'SD',info = ...`` which is a
                # SyntaxError (assignment to a string literal); unpack
                # properly and keep the hard-coded 'SD' quality below.
                _, info = source_utils.get_release_quality(url, vid_url)
                host = vid_url.split('//')[1].replace('www.', '')
                host = host.split('/')[0].lower()
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': vid_url, 'info': info, 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        # BUGFIX: was py2-only ``except Exception, argument``.
        return sources
开发者ID:vphuc81,项目名称:MyRepository,代码行数:46,代码来源:2ddl.py
示例15: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Find a movie's page URL via the site's carousel search markup.

    Returns the absolute page URL whose cleaned title contains the
    requested title and whose date mentions the year, or None.
    """
    try:
        # Premium-only scraper: skip when no debrid service is enabled.
        if not debrid.status():
            raise Exception()
        query = self.search_link % (self.base_link, urllib.quote_plus(title).replace('+', '-'))
        html = client.request(query, XHR=True)
        results = re.compile('<ul id=first-carousel1(.+?)</ul>', re.DOTALL).findall(html)
        result = re.compile('alt="(.+?)".+?<h2><a href="(.+?)".+?</h2>.+?>(.+?)</p>', re.DOTALL).findall(str(results))
        for found_title, url, date in result:
            new_url = self.base_link + url
            if cleantitle.get(title) in cleantitle.get(found_title) and year in date:
                return new_url
    except Exception:
        # BUGFIX: was a bare ``except:``; narrowed to Exception.
        return
开发者ID:vphuc81,项目名称:MyRepository,代码行数:17,代码来源:dltube.py
示例16: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Merge episode details into the show's url payload; None on failure.

    Requires an active debrid service; logs a traceback on error.
    """
    if debrid.status(True) is False:
        return
    try:
        if url is None:
            return
        params = urlparse.parse_qs(url)
        params = dict([(k, params[k][0]) if params[k] else (k, '') for k in params])
        params.update({'title': title, 'premiered': premiered,
                       'season': season, 'episode': episode})
        return urllib.urlencode(params)
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('TPB - Exception: \n' + str(failure))
        return
开发者ID:vphuc81,项目名称:MyRepository,代码行数:17,代码来源:fantastic+(29).py
示例17: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape episode links (with size info) from the site's RSS feed.

    Searches for "<show> SxxExx", checks the first feed item's title for
    the episode tag, then extracts (name, size, url) triples from the
    item markup. Returns a list of debrid-only source dicts.
    """
    try:
        sources = []
        if url is None:
            return sources
        # Premium-only scraper: bail out when no debrid service is enabled.
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r)
        r = client.parseDOM(r, 'item')
        title = client.parseDOM(r, 'title')[0]
        if hdlr in title:
            r = re.findall('<h3.+?>(.+?)</h3>\s*<h5.+?<strong>(.+?)</strong.+?h3.+?adze.+?href="(.+?)">.+?<h3', r[0], re.DOTALL)
            for name, size, url in r:
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    # Normalize "GiB"/"MiB" to "GB"/"MB", then convert to GB.
                    size = re.sub('i', '', size)
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except Exception:
                    pass
                info = ' | '.join(info)
                valid, host = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                'direct': False, 'debridonly': True})
        return sources
    except Exception:
        # BUGFIX: bare ``except:`` clauses narrowed to Exception.
        return sources
开发者ID:CYBERxNUKE,项目名称:xbmc-addon,代码行数:42,代码来源:seriescr.py
示例18: sources
def sources(self, url, hostDict, hostprDict):
    """Search the site for release posts matching the request and fetch
    each matching post on its own thread via ``self.getPost``.

    NOTE(review): results are accumulated into ``self.sourceList`` by
    the threads; the local ``sources`` list is only used for the early
    return. On exception this returns None (implicitly) — callers must
    tolerate both a list and None.
    """
    try:
        self.validHosts = hostprDict + hostDict
        sources = []
        if url == None: return sources
        print('URL INFO - ' + str(url))
        # Premium-only scraper: abort when no debrid service is enabled.
        if not debrid.status(): raise Exception()
        if 'tvshowtitle' in url:
            # ``url`` is a metadata dict here, not a string.
            url['season'] = '%02d' % int(url['season'])
            url['episode'] = '%02d' % int(url['episode'])
            query = '%s S%sE%s' % (url['tvshowtitle'], url['season'], url['episode'])
        else:
            query = '%s %s' % (url['title'], url['year'])
        # NOTE(review): urlencode() normally expects a mapping, not a
        # string — presumably this module aliases a quoting helper under
        # that name; confirm against the file's imports.
        query = urlencode(query)
        url = self.search_link % query
        url = urlparse.urljoin(self.base_link, url)
        r = self.scraper.get(url)
        r = BeautifulSoup(r.text, 'html.parser')
        posts = r.findAll('h2')
        for post in posts:
            if query.lower() in post.text.lower():
                postLink = post.find('a')['href']
                # One worker per matching post; getPost fills self.sourceList.
                self.threads.append(Thread(target=self.getPost, args=(postLink,)))
        for i in self.threads:
            i.start()
        for i in self.threads:
            i.join()
        return self.sourceList
    except:
        traceback.print_exc()
开发者ID:vphuc81,项目名称:MyRepository,代码行数:41,代码来源:tvrelease.py
示例19: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape iframe embeds from the page at *url*.

    Determines a page-wide quality from the "quality" badge (HD ->
    1080p, otherwise SD), then emits one source dict per iframe.
    Returns the list, or None on failure (preserved behavior).
    """
    try:
        sources = []
        # Premium-only scraper: bail out when no debrid service is enabled.
        if not debrid.status():
            raise Exception()
        scraper = cfscrape.create_scraper()
        r = scraper.get(url).content
        try:
            # BUGFIX: ``quality`` was unbound when the page had no quality
            # badge, raising NameError into the except path and dropping
            # every link; default to SD. (A py2-only ``print qual`` debug
            # statement was removed here.)
            quality = 'SD'
            qual = re.compile('class="quality">(.+?)<').findall(r)
            for i in qual:
                if 'HD' in i:
                    quality = '1080p'
                else:
                    quality = 'SD'
            match = re.compile('<iframe src="(.+?)"').findall(r)
            for url in match:
                valid, host = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
        except Exception:
            return
    except Exception:
        return
    return sources
开发者ID:YourFriendCaspian,项目名称:dotfiles,代码行数:23,代码来源:playmovies.py
示例20: sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links from a list of release pages.

    *url* is a list of page URLs for this scraper. Every anchor on each
    page is checked against the valid-host list; archives are skipped.
    Returns a list of debrid-only source dicts (empty on failure).
    """
    try:
        sources = []
        if url is None:
            return sources
        # Premium-only scraper: bail out when no debrid service is enabled.
        if not debrid.status():
            raise Exception()

        hostDict = hostprDict + hostDict
        pages = url  # NOTE: this scraper is handed a list of page URLs
        for page_url in pages:
            r = client.request(page_url)
            urls = client.parseDOM(r, 'a', ret='href')
            for url in urls:
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        raise Exception()
                    # Skip archive containers.
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        continue
                    # Only the quality is used; the helper's info list is
                    # discarded in favor of the HEVC tag below.
                    quality, _ = source_utils.get_release_quality(url)
                    info = []
                    if any(x in url.upper() for x in ['HEVC', 'X265', 'H265']):
                        info.append('HEVC')
                    info = ' | '.join(info)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except Exception:
                    pass
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('ALLRLS - Exception: \n' + str(failure))
        return sources
开发者ID:varunrai,项目名称:repository.magicality,代码行数:37,代码来源:allrls.py
注:本文中的resources.lib.modules.debrid.status函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论