本文整理汇总了Python中resources.lib.modules.client.parseDOM函数的典型用法代码示例。如果您正苦于以下问题:Python parseDOM函数的具体用法?Python parseDOM怎么用?Python parseDOM使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了parseDOM函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the mirror links listed on an alltube title page.

    Returns a list of source dicts (host, quality, language, url, info);
    an empty list when the url is missing or scraping fails.
    """
    try:
        sources = []
        if url == None:
            return sources

        page_url = urlparse.urljoin(self.base_link, url)
        html = client.request(page_url)

        # One <tr> per mirror: pull the iframe token, the host name (img alt)
        # and the language/quality cell.  A malformed row raises IndexError
        # here, aborting the scrape exactly like the original comprehension.
        rows = []
        for tr in client.parseDOM(html, 'tr'):
            iframe = client.parseDOM(tr, 'a', attrs={'class': 'watch'}, ret='data-iframe')[0]
            host = client.parseDOM(tr, 'img', ret='alt')[0]
            kind = client.parseDOM(tr, 'td', attrs={'class': 'text-center'})[0]
            rows.append((iframe, host, kind))

        for iframe, host, kind in rows:
            try:
                link = ('%s?%s' % (page_url, iframe)).encode('utf-8')
                language, info = self.get_language_by_type(kind)
                sources.append({'source': host.encode('utf-8'), 'quality': 'SD',
                                'language': language, 'url': link, 'info': info,
                                'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
开发者ID:mpie,项目名称:repo,代码行数:28,代码来源:alltube.py
示例2: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve a show page URL to the relative URL of one episode.

    Matches by cleaned episode title + air date first, then by air date
    alone, then by the season-N-episode-M slug.  Returns the UTF-8
    encoded relative path, or None on any failure.
    """
    try:
        if url == None: return
        url = urlparse.urljoin(self.base_link, url)
        # proxy.request strips the page down to the 'main_body' section
        result = proxy.request(url, 'main_body')
        result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})
        title = cleantitle.get(title)
        # Per episode block: (hrefs, episode-name spans, ISO dates found in the markup)
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
        # Flatten the name list to a single string, or None when missing
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        # Flatten the date list likewise
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        # Keep only blocks that actually contained a link
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
        # Match priority: 1) title + air date, 2) air date, 3) slug fallback
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
        url = client.replaceHTMLCodes(url[0][0])
        url = proxy.parse(url)
        # Drop the scheme/host, keep only the path
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
开发者ID:CYBERxNUKE,项目名称:xbmc-addon,代码行数:28,代码来源:primewire.py
示例3: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Find the Einthusan page id for an IMDb title.

    The site is split by language, so the film's primary language is
    first looked up on IMDb and mapped to the site's section slug.
    Returns the numeric movie id as a string, or None on failure.
    """
    try:
        # IMDb primary_language code -> Einthusan section slug
        langMap = {'hi':'hindi', 'ta':'tamil', 'te':'telugu', 'ml':'malayalam', 'kn':'kannada', 'bn':'bengali', 'mr':'marathi', 'pa':'punjabi'}
        lang = 'http://www.imdb.com/title/%s/' % imdb
        lang = client.request(lang)
        # Extract the 'primary_language' query parameter from IMDb's links
        lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
        lang = [i for i in lang if 'primary_language' in i]
        lang = [urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang]
        lang = [i['primary_language'] for i in lang if 'primary_language' in i]
        lang = langMap[lang[0][0]]
        q = self.search_link % (lang, urllib.quote_plus(title))
        q = urlparse.urljoin(self.base_link, q)
        t = cleantitle.get(title)
        r = client.request(q)
        # Each result <li>: (href, title heading, info div carrying the year)
        r = client.parseDOM(r, 'li')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), client.parseDOM(i, 'div', attrs = {'class': 'info'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
        # Keep the numeric id from the link and the 4-digit year from the info
        r = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
        # Exact cleaned-title + year match wins
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        url = str(r)
        return url
    except:
        return
开发者ID:YourFriendCaspian,项目名称:dotfiles,代码行数:30,代码来源:einthusan.py
示例4: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Resolve a TV show to its site slug.

    Tries the JSON autocomplete endpoint first; falls back to scraping
    the HTML search page.  Returns the UTF-8 slug, or None on failure.
    """
    try:
        t = cleantitle.get(tvshowtitle)
        q = urllib.quote_plus(cleantitle.query(tvshowtitle))
        p = urllib.urlencode({'term': q})
        # Autocomplete endpoint returns JSON rows with seo_url/value/label
        r = client.request(self.search_link, post=p, XHR=True)
        try: r = json.loads(r)
        except: r = None
        if r:
            r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
        else:
            # Fallback: scrape the plain HTML search results page
            r = requests.get(self.search_link_2 % q, 'tv shows').text
            r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
        # Keep entries whose label carries a 4-digit year; match title + year
        r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        url = r[0][0]
        url = proxy.parse(url)
        # Keep only the final path segment (the show slug)
        url = url.strip('/').split('/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        return
开发者ID:varunrai,项目名称:repository.magicality,代码行数:31,代码来源:mywatchseries.py
示例5: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Resolve a show on GoGoAnime via a TVMaze name lookup.

    Uses TVMaze to translate the TVDB id into the canonical show name,
    searches the site, and returns the matching relative path (UTF-8),
    or None on failure (the exception is logged).
    """
    try:
        # TVMaze gives the canonical show name for the TVDB id
        tv_maze = tvmaze.tvMaze()
        tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
        tvshowtitle = tvshowtitle['name']
        t = cleantitle.get(tvshowtitle)
        q = urlparse.urljoin(self.base_link, self.search_link)
        q = q % urllib.quote_plus(tvshowtitle)
        r = client.request(q)
        # Result items live in <ul class="items"><li> blocks with a titled link
        r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
        r = client.parseDOM(r, 'li')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in r]
        r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        r = r[0][0]
        # Strip scheme/host; keep the relative path
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('GoGoAnime - Exception: \n' + str(failure))
        return
开发者ID:YourFriendCaspian,项目名称:dotfiles,代码行数:28,代码来源:gogoanime.py
示例6: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Find the page for one episode of a show.

    Searches by the show title + season (retrying with the localised
    title when they differ), then picks the link whose <span> number
    equals the requested episode.  Returns the UTF-8 relative path,
    or None on any failure.
    """
    try:
        if url == None: return
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = self.__search(data['tvshowtitle'], season)
        # FIX: compare titles by value.  The original used `is not`, an
        # identity test that is effectively always true for two distinct
        # string objects, so the fallback search fired even for equal titles.
        if not url and data['tvshowtitle'] != data['localtvshowtitle']:
            url = self.__search(data['localtvshowtitle'], season)
        if not url: return
        r = client.request(urlparse.urljoin(self.base_link, url))
        r = client.parseDOM(r, 'div', attrs={'class': 'keremiya_part'})
        # Each <a>...</a> holds one episode link with its number in a <span>
        r = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(r))
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span')) for i in r]
        r = [(i[0][0], re.findall('(\d+)', i[1][0])) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [i[0] for i in r if len(i[1]) > 0 and int(i[1][0]) == int(episode)][0]
        # Strip scheme/host; keep the relative path
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
开发者ID:vphuc81,项目名称:MyRepository,代码行数:27,代码来源:streamingseries.py
示例7: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve a show page to one episode's relative URL (watchfree).

    Matches by cleaned episode title + human-readable air date, then by
    air date alone, then by the season/episode slug.  Returns the UTF-8
    relative path, or None on any failure.
    """
    try:
        if url == None: return
        url = urlparse.urljoin(self.base_link, url)
        # proxy.request strips the page down to the 'tv_episode_item' section
        result = proxy.request(url, 'tv_episode_item')
        result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})
        title = cleantitle.get(title)
        # Convert the ISO air date into the site's "Month D YYYY" display form
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s %01d %s' % (premiered[1].replace('01','January').replace('02','February').replace('03','March').replace('04','April').replace('05','May').replace('06','June').replace('07','July').replace('08','August').replace('09','September').replace('10','October').replace('11','November').replace('12','December'), int(premiered[2]), premiered[0])
        # Per episode block: (hrefs, name spans, version spans carrying the date)
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs = {'class': 'tv_num_versions'})) for i in result]
        # Flatten the name list to a single string, or None when missing
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        # Flatten the date list likewise
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        # Keep only blocks that actually contained a link
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
        # Match priority: 1) title + air date, 2) air date, 3) slug fallback
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
        url = url[0][0]
        url = proxy.parse(url)
        # Drop the scheme/host, keep only the path
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
开发者ID:vphuc81,项目名称:MyRepository,代码行数:30,代码来源:watchfree.py
示例8: imdb_person_list
def imdb_person_list(self, url):
    """Scrape an IMDb person-listing page into self.list.

    Each entry carries the person's name, a person_link URL built from
    the nm-id, and a 500px-wide headshot.  Rows without a resizable
    image (no ._SX/._SY marker) are skipped.  Returns self.list, or
    None when the page could not be fetched/parsed.
    """
    try:
        result = client.request(url)
        # IMDb serves latin-1; normalise to UTF-8 before parsing
        result = result.decode('iso-8859-1').encode('utf-8')
        items = client.parseDOM(result, 'tr', attrs = {'class': '.+? detailed'})
    except:
        return
    for item in items:
        try:
            name = client.parseDOM(item, 'a', ret='title')[0]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            url = client.parseDOM(item, 'a', ret='href')[0]
            # Extract the nm-id and rebuild a canonical person URL
            url = re.findall('(nm\d*)', url, re.I)[0]
            url = self.person_link % url
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            image = client.parseDOM(item, 'img', ret='src')[0]
            # Placeholder images are not resizable -- skip the row
            if not ('._SX' in image or '._SY' in image): raise Exception()
            # Force a 500px-wide rendition of the headshot
            image = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', image)
            image = client.replaceHTMLCodes(image)
            image = image.encode('utf-8')
            self.list.append({'name': name, 'url': url, 'image': image})
        except:
            pass
    return self.list
开发者ID:freeworldxbmc,项目名称:maximumTv,代码行数:31,代码来源:movies.py
示例9: imdb_user_list
def imdb_user_list(self, url):
    """Scrape an IMDb "your lists" page into self.list.

    Each entry carries the list name and an imdblist_link URL built
    from the list id.  Returns self.list sorted by name, ignoring a
    leading "the"/"a" article.
    """
    try:
        result = client.request(url)
        # IMDb serves latin-1; normalise to UTF-8 before parsing
        result = result.decode('iso-8859-1').encode('utf-8')
        items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
    except:
        # FIX: the original fell through with `pass`, which then raised
        # an uncaught NameError on `items` below; bail out instead
        # (mirrors imdb_person_list's error handling).
        return self.list
    for item in items:
        try:
            name = client.parseDOM(item, 'a')[0]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            url = client.parseDOM(item, 'a', ret='href')[0]
            # '/list/<id>/' -> '<id>'
            url = url.split('/list/', 1)[-1].replace('/', '')
            url = self.imdblist_link % url
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            self.list.append({'name': name, 'url': url, 'context': url})
        except:
            pass
    # Sort case-insensitively, ignoring a leading article
    self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
    return self.list
开发者ID:freeworldxbmc,项目名称:maximumTv,代码行数:26,代码来源:movies.py
示例10: sources
def sources(self, url, hostDict, hostprDict):
    """Collect mirror links from a seriespapaya episode page.

    Returns a list of source dicts; on a mid-scrape failure the rows
    gathered so far are returned (no per-row error handling, as in the
    original).
    """
    sources = []
    try:
        if not url:
            return sources
        html = client.request(url)
        blocks = client.parseDOM(html, 'div', attrs={'class': 'mtos'})
        # The first 'mtos' div is the table header -- skip it.
        for block in blocks[1:]:
            flag = client.parseDOM(block, 'img', ret= 'src')[0]
            # 'in.' marks an unwanted language flag -- skip the row
            if 'in.' in flag:
                continue
            raw_quality = client.parseDOM(block, 'div', attrs={'class': 'dcalidad'})[0]
            server_name = re.findall("src='.+?'\s*/>(.+?)</div>", block)[0]
            lang, info = self.get_lang_by_type(flag)
            quality = self.quality_fixer(raw_quality)
            link = dom_parser.parse_dom(block, 'a', req='href')[0][0]['href']
            # streamcloud only serves SD regardless of the listed quality
            if 'streamcloud' in link:
                quality = 'SD'
            valid, host = source_utils.is_host_valid(server_name, hostDict)
            sources.append({'source': host, 'quality': quality, 'language': lang,
                            'url': link, 'info': info,
                            'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
开发者ID:amadu80,项目名称:repository.xvbmc,代码行数:28,代码来源:seriespapaya.py
示例11: sources
def sources(self, url, hostDict, hostprDict):
    """Extract Google-video streams from a movies14 player page.

    Follows the player iframe, reads the k2/k1 hidden divs and a number
    embedded in the body's style attribute to assemble the /player/ URL,
    then collects every "url"/"src" entry it returns.  Returns a list of
    direct gvideo sources; empty on failure.
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs = {'class': 'player_wraper'})
        r = client.parseDOM(r, 'iframe', ret='src')[0]
        r = urlparse.urljoin(url, r)
        r = client.request(r, referer=url)
        # The player URL is built from two hidden divs (k2, k1) and a
        # number hidden in the <body> style attribute
        a = client.parseDOM(r, 'div', ret='value', attrs = {'id': 'k2'})[-1]
        b = client.parseDOM(r, 'div', ret='value', attrs = {'id': 'k1'})[-1]
        c = client.parseDOM(r, 'body', ret='style')[0]
        c = re.findall('(\d+)', c)[-1]
        r = '/player/%s?s=%s&e=%s' % (a, b, c)
        r = urlparse.urljoin(url, r)
        r = client.request(r, referer=url)
        r = re.findall('"(?:url|src)"\s*:\s*"(.+?)"', r)
        for i in r:
            # googletag() resolves the stream quality; entries it rejects are skipped
            try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
            except: pass
        return sources
    except:
        return sources
开发者ID:CYBERxNUKE,项目名称:xbmc-addon,代码行数:29,代码来源:movies14.py
示例12: sources
def sources(self, url, hostDict, hostprDict):
    """Extract hoster links from a 123fox detail page.

    The primary embed is a base64 blob written via document.write();
    secondary mirrors are listed in 'server_line' rows.  Returns a list
    of source dicts; empty on failure.
    """
    try:
        sources = []
        if url == None: return sources
        html = client.request(url)
        # Primary embed: base64-encoded iframe written by document.write()
        try:
            v = re.findall('document.write\(Base64.decode\("(.+?)"\)', html)[0]
            b64 = base64.b64decode(v)
            url = client.parseDOM(b64, 'iframe', ret='src')[0]
            try:
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                # FIX: the original wrote `'direct': true` -- `true` is an
                # undefined name in Python, so this append always raised
                # NameError and the primary source was silently dropped.
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': True, 'debridonly': False})
            except:
                pass
        except:
            pass
        # Secondary mirrors listed in 'server_line' rows
        parsed = client.parseDOM(html, 'div', {'class': 'server_line'})
        parsed = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in parsed]
        if parsed:
            for i in parsed:
                try:
                    # Row label looks like "Server Link 3 <host>" -- keep the host
                    host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                    url = i[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    if 'other' in host: continue
                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        # FIX: return the list (possibly partial) like every other
        # sources() in this codebase, instead of None.
        return sources
开发者ID:YourFriendCaspian,项目名称:dotfiles,代码行数:35,代码来源:123fox.py
示例13: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, year):
    """Build the tvsearch slug for a show on pelispedia.

    If the English title finds no page, falls back to the Spanish title
    taken from IMDb (Accept-Language: es-ES).  Returns the slug URL, or
    None when the found page's title does not contain the expected year.
    """
    try:
        url = self.tvsearch_link % cleantitle.geturl(tvshowtitle)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r, limit='1')
        r = client.parseDOM(r, 'title')
        if not r:
            # No page for the English title: ask IMDb for the localised
            # (es-ES) title and retry the search with it.
            url = 'http://www.imdb.com/title/%s' % imdb
            url = client.request(url, headers={'Accept-Language':'es-ES'})
            url = client.parseDOM(url, 'title')[0]
            # Drop the "(2012) ..." suffix from the IMDb page title
            url = re.sub('\((?:.+?|)\d{4}.+', '', url).strip()
            url = cleantitle.normalize(url.encode("utf-8"))
            url = self.tvsearch_link % cleantitle.geturl(url)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1')
            r = client.parseDOM(r, 'title')
        # Reject the match when the page title lacks the expected year
        if not year in r[0]: raise Exception()
        return url
    except:
        return
开发者ID:azumimuo,项目名称:family-xbmc-addon,代码行数:25,代码来源:pelispedia.py
示例14: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Search the release site for SxxEyy of a show.

    Returns the list of candidate page links whose cleaned URL contains
    both the show title and the episode tag; failures are logged and the
    (possibly empty) list is returned.
    """
    try:
        pages = []
        params = urlparse.parse_qs(url)
        params = dict((k, params[k][0]) for k in params)
        params.update({'season': season, 'episode': episode, 'title': title, 'premiered': premiered})
        season_base = 'S%02dE%02d' % (int(params['season']), int(params['episode']))

        def _sanitize(text):
            # Strip punctuation the site chokes on, then normalise
            # ampersands/spaces into the '+'-joined search form.
            text = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', text)
            text = text.replace("&", "and")
            text = text.replace("  ", " ")
            return text.replace(" ", "+")

        query = _sanitize(season_base)
        tvshowtitle = _sanitize(params['tvshowtitle'])

        start_url = urlparse.urljoin(self.base_link, self.search_link % (tvshowtitle, query))
        html = client.request(start_url)

        wanted_show = cleantitle.get(params['tvshowtitle'])
        wanted_tag = cleantitle.get(season_base)
        for heading in client.parseDOM(html, 'h2', attrs={'class':'entry-title'}):
            candidate = client.parseDOM(heading, 'a', ret='href')[0]
            if self.base_link not in candidate:
                continue
            cleaned = cleantitle.get(candidate)
            if wanted_show in cleaned and wanted_tag in cleaned:
                pages.append(candidate)
        return pages
    except:
        failure = traceback.format_exc()
        log_utils.log('ALLRLS - Exception: \n' + str(failure))
        return pages
开发者ID:varunrai,项目名称:repository.magicality,代码行数:34,代码来源:allrls.py
示例15: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster rows from a paczamy page (Polish site:
    'Wysoka' in the quality cell means high quality).

    Returns a list of source dicts; rows that fail to parse or whose
    host is not recognised are skipped.
    """
    sources = []
    try:
        if url == None:
            return sources
        page = client.request(url)
        for row in client.parseDOM(page, 'tr', attrs={'data-id':'.*?'}):
            try:
                # The link lives inside the cell's data-bind attribute,
                # quoted with single quotes.
                bind = client.parseDOM(row, 'td', attrs={'class':'name hover'}, ret='data-bind')[0]
                href = re.findall(r"'(.*?)'", bind, re.DOTALL)[0]
                valid, host = source_utils.is_host_valid(href, hostDict)
                if not valid:
                    continue
                quality_cell = client.parseDOM(row, 'td')[1]
                quality = 'HD' if 'Wysoka' in quality_cell else 'SD'
                lang, info = self.get_lang_by_type(client.parseDOM(row, 'font')[0])
                sources.append({'source': host, 'quality': quality, 'language': lang,
                                'url': href, 'info': info,
                                'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
开发者ID:amadu80,项目名称:repository.xvbmc,代码行数:33,代码来源:paczamy.py
示例16: __search
def __search(self, title, season):
    """Search the site for `title` and return the link matching the
    cleaned title and the requested season.

    Entries are labelled "<name> saison <n>", optionally with a
    parenthesised alias at the end.  Returns the UTF-8 relative path,
    or None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(title)))
        query = urlparse.urljoin(self.base_link, query)
        t = cleantitle.get(title)
        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'moviefilm'})
        r = client.parseDOM(r, 'div', attrs={'class': 'movief'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Split "<name> saison <n>"; season defaults to '0' when absent
        r = [(i[0], i[1], re.findall('(.+?)\s+(?:saison)\s+(\d+)', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        # Prefer a parenthesised alias at the end of the name, if present
        r = [(i[0], i[1], re.findall('\((.+?)\)$', i[1]), i[2]) for i in r]
        r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1], i[3]) for i in r]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(i[2]) == int(season)][0]
        # Strip scheme/host; keep the relative path
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
开发者ID:vphuc81,项目名称:MyRepository,代码行数:25,代码来源:streamingseries.py
示例17: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Search afdah by title and return the absolute page URL that
    matches the cleaned title and year.

    For "Title: Subtitle" films the search is done on the prefix only,
    since that is how the site indexes them.  Returns None on failure.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)
        # "Title: Subtitle" -> search on "Title:" only
        if ':' in title:
            title2 = title.split(':')[0] + ':'
            post = 'search=%s&what=title' % title2
        else: post = 'search=%s&what=title' % cleantitle.getsearch(title)
        t = cleantitle.get(title)
        r = client.request(query, post=post)
        r = client.parseDOM(r, 'li')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a',)) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Result labels look like "Name (2014)"
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        url = urlparse.urljoin(self.base_link, re.findall('(?://.+?|)(/.+)', r)[0])
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
开发者ID:CYBERxNUKE,项目名称:xbmc-addon,代码行数:28,代码来源:afdah.py
示例18: sources
def sources(self, url, hostDict, hostprDict):
    """Collect per-paragraph mirror links from a streamingseries page.

    Each <p> inside the 'filmicerik' div lists iframe srcs with host
    labels in <b> tags and a language tag in <span class="lg">.
    Returns a list of French-language source dicts; empty on failure.
    """
    sources = []
    try:
        if url == None: return sources
        # Map the host name without TLD -> full host name for lookup below
        hostDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
        locDict = [i[0] for i in hostDict]
        r = client.request(urlparse.urljoin(self.base_link, url))
        r = client.parseDOM(r, 'div', attrs={'class': 'filmicerik'})
        r = client.parseDOM(r, 'p')
        # FIX: the language span must be parsed from the current paragraph
        # `i`, not from the whole list `r` -- the original gave every row
        # the first language tag found on the page.
        r = [(client.parseDOM(i, 'iframe', ret='src'), client.parseDOM(i, 'b'), client.parseDOM(i, 'span', attrs={'class': 'lg'})) for i in r]
        r = [(i[0], [x.lower().replace('lecteur','').strip() for x in i[1]], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # Resolve each label to the full host name; capture "(...)" info suffix
        r = [(i[0], [[y[1] for y in hostDict if y[0] == x][0] for x in i[1] if x in locDict], i[2], re.findall('\((.+?)\)$', i[2])) for i in r]
        # FIX: guard on the findall result (i[3]) before indexing it; the
        # original tested len(i[2]) and raised IndexError when the language
        # tag had no parenthesised suffix.
        r = [(dict(zip(i[0], i[1])), i[3][0] if len(i[3]) > 0 else i[2]) for i in r]
        for links, lang in r:
            for link, host in links.iteritems():
                sources.append({'source': host, 'quality': 'SD', 'language': 'fr', 'info': lang, 'url': link, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
开发者ID:vphuc81,项目名称:MyRepository,代码行数:27,代码来源:streamingseries.py
示例19: sources
def sources(self, url, hostDict, hostprDict):
    """List the player links from an openkatalog video tab.

    Each <li> in the 'player_ul' list carries a link whose text is a
    description decoded by get_info_from_desc().  Returns a list of
    source dicts; empty on failure.
    """
    sources = []
    try:
        if url == None:
            return sources
        page = client.request(url + self.video_tab)
        player_list = client.parseDOM(page, 'ul', attrs={'class':'player_ul'})[0]
        for item in client.parseDOM(player_list, 'li'):
            try:
                label = client.parseDOM(item, 'a')[0]
                href = client.parseDOM(item, 'a', ret='href')[0]
                # The label text encodes host / language / info / quality
                host, lang, info, quality = self.get_info_from_desc(label)
                sources.append({'source': host, 'quality': quality, 'language': lang,
                                'url': href, 'info': info,
                                'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
开发者ID:YourFriendCaspian,项目名称:dotfiles,代码行数:26,代码来源:openkatalog.py
示例20: movie
def movie(self, imdb, title, year):
    """Search movie25 for a title, tolerating a +/- 1 year mismatch.

    Unwraps redirector links carrying the real target in a ?q= or ?u=
    query parameter and returns the bare UTF-8 path, or None on failure.
    """
    try:
        query = self.search_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)
        result = self.request(query, 'movie_table')
        result = client.parseDOM(result, 'div', attrs = {'class': 'movie_table'})
        title = cleantitle.get(title)
        # Accept the stated year or one either side (release-date skew)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'img', ret='alt')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [i[0] for i in result if title == cleantitle.get(i[1])][0]
        url = client.replaceHTMLCodes(result)
        # Unwrap ?q= / ?u= redirector parameters when present
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
开发者ID:AMOboxTV,项目名称:AMOBox.LegoBuild,代码行数:27,代码来源:movie25_mv.py
注:本文中的resources.lib.modules.client.parseDOM函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论