本文整理汇总了Python中sickbeard.helpers.tryInt函数的典型用法代码示例。如果您正苦于以下问题:Python tryInt函数的具体用法?Python tryInt怎么用?Python tryInt使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了tryInt函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: check_users_watched
def check_users_watched(self, users, media_id):
    """Return the per-user watch state of a Plex media item.

    :param users: iterable of Plex home user names; if it contains 'all',
                  every user with a known token is checked
    :param media_id: Plex ratingKey identifying the media item
    :return: dict mapping user name -> dict(played=<view count>,
             progress=<percent viewed>, date_watched=<lastViewedAt or 0>)
    """
    # lazily fetch per-user access tokens once and cache them on the instance
    if not self.home_user_tokens:
        self.home_user_tokens = self.get_plex_home_user_tokens()

    result = {}
    if 'all' in users:
        users = self.home_user_tokens.keys()

    for user in users:
        user_media_page = self.get_url_pms('/library/metadata/%s' % media_id, token=self.home_user_tokens[user])
        if None is not user_media_page:
            video_node = user_media_page.find('Video')

            progress = 0
            # viewOffset/duration give partial-view progress as a percentage
            # (Python 2 integer division is presumably intended here — confirm)
            if None is not video_node.get('viewOffset') and None is not video_node.get('duration'):
                progress = tryInt(video_node.get('viewOffset')) * 100 / tryInt(video_node.get('duration'))

            played = int(video_node.get('viewCount') or 0)
            if not progress and not played:
                # user never touched this item
                continue

            date_watched = 0
            # watched = fully viewed at least once, or progressed past the
            # configured "count as watched" threshold
            if (0 < tryInt(video_node.get('viewCount'))) or (0 < self.default_progress_as_watched < progress):
                last_viewed_at = video_node.get('lastViewedAt')
                if last_viewed_at and last_viewed_at not in ('', '0'):
                    date_watched = last_viewed_at

            if date_watched:
                result[user] = dict(played=played, progress=progress, date_watched=date_watched)
        else:
            self.log('Do not have the token for %s.' % user)

    return result
开发者ID:JackDandy,项目名称:SickGear,代码行数:34,代码来源:plex.py
示例2: _get_absolute_numbering_for_show
def _get_absolute_numbering_for_show(tbl, indexer, indexer_id):
    """Build the (season, episode) -> scene absolute number map for one show.

    :param tbl: source table name, 'tv_episodes' or a custom-numbering table
    :param indexer: indexer id
    :param indexer_id: show id on that indexer; None yields an empty map
    :return: dict mapping (season, episode) tuples to scene absolute numbers
    """
    result = {}

    if None is not indexer_id:
        if 'tv_episodes' == tbl:
            # refresh xem mapping data before reading episode rows
            xem_refresh(indexer_id, indexer)

        my_db = db.DBConnection()
        # noinspection SqlResolve
        rows = my_db.select(
            'SELECT season, episode, absolute_number, scene_absolute_number'
            ' FROM %s' % tbl +
            # the show-id column name differs between the two tables
            ' WHERE indexer = ? AND %s = ?' % ('indexer_id', 'showid')['tv_episodes' == tbl] +
            ' AND scene_absolute_number != 0'
            ' ORDER BY season, episode',
            [int(indexer), int(indexer_id)])

        for row in rows:
            season, episode, abs_num = map(lambda x: tryInt(row[x], None), ('season', 'episode', 'absolute_number'))
            # absolute-only rows: resolve season/episode via the absolute number
            if None is season and None is episode and None is not abs_num:
                season, episode, _ = _get_sea(indexer, indexer_id, absolute_number=abs_num)

            if None is not season and None is not episode:
                scene_absolute_number = tryInt(row['scene_absolute_number'], None)
                if None is not scene_absolute_number:
                    result[(season, episode)] = scene_absolute_number

    return result
开发者ID:JackDandy,项目名称:SickGear,代码行数:29,代码来源:scene_numbering.py
示例3: parse_date_time
def parse_date_time(d, t, network):
    """Combine an ordinal date, an air-time string and a network into local time.

    :param d: date as a proleptic Gregorian ordinal (int or int-like string)
    :param t: air time string, e.g. '8:00 PM'
    :param network: network name used to look up the origin timezone
    :return: datetime converted to sb_timezone, or the naive foreign-time
             datetime when the conversion raises ValueError
    """
    if network_dict is None:
        load_network_dict()

    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 2:
        try:
            hr = helpers.tryInt(mo.group(1))
            m = helpers.tryInt(mo.group(2))
            ap = mo.group(3)
            # convert am/pm to 24 hour clock
            if ap is not None:
                if ap.lower() == u" pm" and hr != 12:
                    hr += 12
                elif ap.lower() == u" am" and hr == 12:
                    hr -= 12
        except Exception:
            # was a bare except; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed
            hr = 0
            m = 0
    else:
        hr = 0
        m = 0

    # reject out-of-range clock values
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    te = datetime.datetime.fromordinal(helpers.tryInt(d))
    foreign_timezone = get_network_timezone(network, network_dict)
    foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
    try:
        return foreign_naive.astimezone(sb_timezone)
    except ValueError:
        # timezone conversion can fail on ambiguous/invalid local times;
        # fall back to the foreign-localised datetime
        return foreign_naive
示例4: get_aired_in_season
def get_aired_in_season(show, return_sql=False):
    """Count aired episodes per season (and per scene season) for a show.

    :param show: show object providing indexerid/indexer/air_by_date
    :param return_sql: when True, also return the raw episode rows
    :return: (ep_count, ep_count_scene) dicts keyed by season number; when
             return_sql is True the sql rows are appended as a third element
    """
    ep_count = {}
    ep_count_scene = {}
    tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
    my_db = db.DBConnection()

    # air-by-date shows need the shows table joined in to honour the paused flag
    if show.air_by_date:
        sql_string = 'SELECT ep.status, ep.season, ep.scene_season, ep.episode, ep.airdate ' + \
                     'FROM [tv_episodes] AS ep, [tv_shows] AS show ' + \
                     'WHERE season != 0 AND ep.showid = show.indexer_id AND show.paused = 0 ' + \
                     'AND ep.showid = ? AND ep.indexer = ? AND show.air_by_date = 1'
    else:
        sql_string = 'SELECT status, season, scene_season, episode, airdate ' + \
                     'FROM [tv_episodes] ' + \
                     'WHERE showid = ? AND indexer = ? AND season > 0'

    sql_results = my_db.select(sql_string, [show.indexerid, show.indexer])
    for result in sql_results:
        # only count episodes that have actually aired (airdate > 1, up to tomorrow)
        if 1 < helpers.tryInt(result['airdate']) <= tomorrow:
            cur_season = helpers.tryInt(result['season'])
            ep_count[cur_season] = ep_count.setdefault(cur_season, 0) + 1
            cur_scene_season = helpers.tryInt(result['scene_season'], -1)
            if -1 != cur_scene_season:
                # bugfix: previously incremented from ep_count instead of
                # ep_count_scene, which corrupted the per-scene-season tally
                ep_count_scene[cur_scene_season] = ep_count_scene.setdefault(cur_scene_season, 0) + 1

    if return_sql:
        return ep_count, ep_count_scene, sql_results

    return ep_count, ep_count_scene
开发者ID:JackDandy,项目名称:SickGear,代码行数:29,代码来源:search.py
示例5: _doSearch
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
    """Query the provider RSS feed for each search string and collect torrents.

    :param search_strings: dict of mode ('Season'/'Episode'/'RSS') -> list of query strings
    :param search_mode: unused here; kept for the provider interface
    :param epcount: unused here; kept for the provider interface
    :param age: unused here; kept for the provider interface
    :param epObj: unused here; kept for the provider interface
    :return: list of (title, url, seeders, leechers, size, info_hash) tuples
    """
    results = []
    items = {'Season': [], 'Episode': [], 'RSS': []}

    for mode in search_strings.keys():
        for search_string in search_strings[mode]:
            try:
                # 'rss' listing for RSS mode, keyword search otherwise
                self.search_params.update({'type': ('search', 'rss')[mode == 'RSS'], 'search': search_string.strip()})
                data = self.getURL(self.urls['rss'], params=self.search_params)
                if not data:
                    continue

                data = xmltodict.parse(data)
                for item in data['rss']['channel']['item']:
                    title = item['title']
                    info_hash = item['info_hash']
                    url = item['enclosure']['@url']
                    size = int(item['enclosure']['@length'] or item['size'])
                    seeders = helpers.tryInt(item['seeders'],0)
                    leechers = helpers.tryInt(item['leechers'],0)

                    # drop items below the configured seed/leech thresholds
                    if not seeders or seeders < self.minseed or leechers < self.minleech:
                        continue

                    items[mode].append((title, url, seeders, leechers, size, info_hash))
            except Exception:
                # malformed feed / missing keys: log with traceback and move on
                logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)

        results += items[mode]

    return results
开发者ID:Henk2484,项目名称:SickRage,代码行数:33,代码来源:extratorrent.py
示例6: _get_numbering_for_show
def _get_numbering_for_show(tbl, indexer, indexer_id):
    """Map (season, episode) to (scene_season, scene_episode) for one show.

    :param tbl: source table name, 'tv_episodes' or a custom-numbering table
    :param indexer: indexer id
    :param indexer_id: show id on that indexer; None yields an empty mapping
    :return: dict of (season, episode) -> (scene_season, scene_episode)
    """
    mapping = {}
    if None is indexer_id:
        return mapping

    if 'tv_episodes' == tbl:
        # ensure xem mapping data is current before reading episode rows
        xem_refresh(indexer_id, indexer)

    # the show-id column name differs between the two source tables
    id_column = ('indexer_id', 'showid')['tv_episodes' == tbl]
    # noinspection SqlResolve
    db_rows = db.DBConnection().select(
        'SELECT season, episode, scene_season, scene_episode'
        ' FROM %s' % tbl +
        ' WHERE indexer = ? AND %s = ?' % id_column +
        ' AND (scene_season OR scene_episode) != 0'
        ' ORDER BY season, episode',
        [int(indexer), int(indexer_id)])

    for db_row in db_rows:
        ep_key = tuple(tryInt(db_row[name], None) for name in ('season', 'episode'))
        if None in ep_key:
            continue
        scene_pair = tuple(tryInt(db_row[name], None) for name in ('scene_season', 'scene_episode'))
        if None not in scene_pair:
            mapping[ep_key] = scene_pair

    return mapping
开发者ID:JackDandy,项目名称:SickGear,代码行数:26,代码来源:scene_numbering.py
示例7: parse_time
def parse_time(t):
    """Parse an air-time string into a 24-hour (hour, minute) pair.

    :param t: time string, e.g. '8:00 PM' or '20:00'
    :return: (hr, m) tuple of ints; (0, 0) when the string cannot be parsed
             or yields out-of-range values
    """
    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            # 12-hour clock: groups 1/4 carry hour/minute, group 5 the am/pm tag
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except Exception:
                # was a bare except; narrowed so SystemExit/KeyboardInterrupt
                # are no longer swallowed
                hr = 0
                m = 0
        else:
            # 24-hour clock: groups 1/6 carry hour/minute
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(6))
            except Exception:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0

    # reject out-of-range clock values
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    return hr, m
开发者ID:JackDandy,项目名称:SickGear,代码行数:32,代码来源:network_timezones.py
示例8: wanted_episodes
def wanted_episodes(show, from_date, make_dict=False, unaired=False):
    """Collect episode objects of a show that still need searching.

    :param show: show object
    :param from_date: date; only episodes airing after this are considered
    :param make_dict: when True return {season: [ep_obj, ...]} instead of a flat list
    :param unaired: when True also include not-yet-aired episodes (airdate == 1)
    :return: list of episode objects, or dict keyed by (scene) season when make_dict
    """
    ep_count, ep_count_scene, sql_results_org = get_aired_in_season(show, return_sql=True)
    from_date_ord = from_date.toordinal()
    if unaired:
        sql_results = [s for s in sql_results_org if s['airdate'] > from_date_ord or s['airdate'] == 1]
    else:
        sql_results = [s for s in sql_results_org if s['airdate'] > from_date_ord]

    if make_dict:
        wanted = {}
    else:
        wanted = []

    total_wanted = total_replacing = total_unaired = 0

    # prefetch all episode rows when many episodes are missing from the
    # in-memory show, to avoid one query per getEpisode call below
    if 0 < len(sql_results) and 2 < len(sql_results) - len(show.episodes):
        myDB = db.DBConnection()
        show_ep_sql = myDB.select('SELECT * FROM tv_episodes WHERE showid = ? AND indexer = ?',
                                  [show.indexerid, show.indexer])
    else:
        show_ep_sql = None

    for result in sql_results:
        ep_obj = show.getEpisode(int(result['season']), int(result['episode']), ep_sql=show_ep_sql)
        cur_status, cur_quality = common.Quality.splitCompositeStatus(ep_obj.status)
        ep_obj.wantedQuality = get_wanted_qualities(ep_obj, cur_status, cur_quality, unaired=unaired)
        if not ep_obj.wantedQuality:
            # nothing to improve on for this episode
            continue

        # attach season episode-counts used later by search result evaluation
        ep_obj.eps_aired_in_season = ep_count.get(helpers.tryInt(result['season']), 0)
        ep_obj.eps_aired_in_scene_season = ep_count_scene.get(
            helpers.tryInt(result['scene_season']), 0) if result['scene_season'] else ep_obj.eps_aired_in_season

        if make_dict:
            wanted.setdefault(ep_obj.scene_season if ep_obj.show.is_scene else ep_obj.season, []).append(ep_obj)
        else:
            wanted.append(ep_obj)

        # tally what kind of want this is, for the summary log line below
        if cur_status in (common.WANTED, common.FAILED):
            total_wanted += 1
        elif cur_status in (common.UNAIRED, common.SKIPPED, common.IGNORED, common.UNKNOWN):
            total_unaired += 1
        else:
            total_replacing += 1

    if 0 < total_wanted + total_replacing + total_unaired:
        actions = []
        for msg, total in ['%d episode%s', total_wanted], \
                          ['to upgrade %d episode%s', total_replacing], \
                          ['%d unaired episode%s', total_unaired]:
            if 0 < total:
                actions.append(msg % (total, helpers.maybe_plural(total)))
        logger.log(u'We want %s for %s' % (' and '.join(actions), show.name))

    return wanted
开发者ID:JackDandy,项目名称:SickGear,代码行数:56,代码来源:search.py
示例9: __init__
def __init__(self, name, url, cookies='', search_mode='eponly', search_fallback=False,
             enable_recentsearch=False, enable_backlog=False, enable_scheduled_backlog=False):
    """Set up a user-configured torrent RSS provider.

    Flag arguments may arrive from config as ints or strings, so each is
    coerced through tryInt and then bool.
    """
    # backlog flags must be set before the base class is initialised,
    # because supports_backlog is derived from enable_backlog
    self.enable_backlog = bool(tryInt(enable_backlog))
    self.enable_scheduled_backlog = bool(tryInt(enable_scheduled_backlog))
    generic.TorrentProvider.__init__(self, name, supports_backlog=self.enable_backlog, cache_update_freq=15)

    self.url = url.rstrip('/')
    self.url_base = self.url
    self.cookies = cookies
    self.search_mode = search_mode
    self.search_fallback = bool(tryInt(search_fallback))
    # recent search stays enabled whenever backlog is off, so the provider
    # remains useful for new episodes
    self.enable_recentsearch = bool(tryInt(enable_recentsearch)) or not self.enable_backlog
开发者ID:JackDandy,项目名称:SickGear,代码行数:13,代码来源:rsstorrent.py
示例10: parse_date_time
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """
    if not network_dict:
        load_network_dict()

    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            # 12-hour clock: groups 1/4 carry hour/minute, group 5 the am/pm tag
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except Exception:
                hr = 0
                m = 0
        else:
            # 24-hour clock: groups 1/6 carry hour/minute
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(6))
            except Exception:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0

    # reject out-of-range clock values
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    te = datetime.datetime.fromordinal(helpers.tryInt(d))
    try:
        foreign_timezone = get_network_timezone(network, network_dict)
        foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
        return foreign_naive
    except Exception:
        # fall back to the application timezone when the network timezone is unusable
        return datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=sb_timezone)
开发者ID:SerialShadow,项目名称:SickRage,代码行数:50,代码来源:network_timezones.py
示例11: _search_provider
def _search_provider(self, search_params, **kwargs):
    """Search the provider's JSON API and return seed-sorted result tuples.

    :param search_params: dict of mode ('Cache'/'Season'/'Episode'/'Propers')
                          -> list of query strings
    :return: list of (title, download_url, seeders, size) tuples
    """
    results = []
    if not self._authorised():
        return results

    items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

    for mode in search_params.keys():
        for search_string in search_params[mode]:
            search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
            search_url = self.urls['search'] % search_string
            # API uses bearer-token auth and returns JSON
            data_json = self.get_url(search_url, headers=dict(Authorization='Bearer %s' % self._token), json=True)
            if self.should_skip():
                return results

            cnt = len(items[mode])
            if data_json:
                for tr in data_json.get('releases'):
                    # tryInt(n, n) keeps the original value when it is not int-like
                    seeders, leechers, size = (tryInt(n, n) for n in [
                        tr.get(x) for x in ('seeders', 'leechers', 'size')])
                    if not self._reject_item(seeders, leechers):
                        title, download_url = tr.get('releaseName'), self._link(tr.get('shortId'))
                        if title and download_url:
                            items[mode].append((title, download_url, seeders, self._bytesizer(size)))

            self._log_search(mode, len(items[mode]) - cnt, search_url)

        results = self._sort_seeding(mode, results + items[mode])

    return results
开发者ID:JackDandy,项目名称:SickGear,代码行数:32,代码来源:milkie.py
示例12: _xem_get_ids
def _xem_get_ids(indexer_name, xem_origin):
    """Fetch all show ids that have an xem scene mapping for an origin.

    :param indexer_name: readable indexer name, used only for logging
    :param xem_origin: origin key understood by thexem.de, e.g. 'tvdb'
    :return: list of unique int show ids (empty on failure)
    """
    xem_ids = []

    url = 'http://thexem.de/map/havemap?origin=%s' % xem_origin

    task = 'Fetching show ids with%s xem scene mapping%s for origin'
    logger.log(u'%s %s' % (task % ('', 's'), indexer_name))
    parsed_json = helpers.getURL(url, json=True, timeout=90)
    if not parsed_json:
        logger.log(u'Failed %s %s, Unable to get URL: %s'
                   % (task.lower() % ('', 's'), indexer_name, url), logger.ERROR)
    else:
        if 'result' in parsed_json and 'success' == parsed_json['result'] and 'data' in parsed_json:
            try:
                for indexerid in parsed_json['data']:
                    xem_id = helpers.tryInt(indexerid)
                    # keep only valid, non-duplicate ids
                    if xem_id and xem_id not in xem_ids:
                        xem_ids.append(xem_id)
            except Exception:
                # was a bare except; narrowed so SystemExit/KeyboardInterrupt
                # are no longer swallowed (best-effort parse is preserved)
                pass

            if 0 == len(xem_ids):
                logger.log(u'Failed %s %s, no data items parsed from URL: %s'
                           % (task.lower() % ('', 's'), indexer_name, url), logger.WARNING)

    logger.log(u'Finished %s %s' % (task.lower() % (' %s' % len(xem_ids), helpers.maybe_plural(len(xem_ids))),
                                    indexer_name))

    return xem_ids
开发者ID:JackDandy,项目名称:SickGear,代码行数:27,代码来源:scene_exceptions.py
示例13: _change_missing_episodes
def _change_missing_episodes():
    """Promote UNAIRED episodes whose air time has passed to WANTED (or SKIPPED
    when the show is paused), and persist the status changes in one batch."""
    if not network_timezones.network_dict:
        network_timezones.update_network_dict()

    # with timezone data, look ahead a day; without it, stay conservative
    # and only consider episodes at least two days old
    if network_timezones.network_dict:
        cur_date = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
    else:
        cur_date = (datetime.date.today() - datetime.timedelta(days=2)).toordinal()

    cur_time = datetime.datetime.now(network_timezones.sb_timezone)

    my_db = db.DBConnection()
    sql_results = my_db.select(
        'SELECT * FROM tv_episodes'
        ' WHERE status = ? AND season > 0 AND airdate <= ? AND airdate > 1'
        ' ORDER BY showid', [common.UNAIRED, cur_date])

    sql_l = []
    show = None
    wanted = False
    for sqlEp in sql_results:
        try:
            # rows are ordered by showid, so the show object is reused across rows
            if not show or show.indexerid != int(sqlEp['showid']):
                show = helpers.findCertainShow(sickbeard.showList, int(sqlEp['showid']))

            # for when there is orphaned series in the database but not loaded into our showlist
            if not show:
                continue
        except exceptions.MultipleShowObjectsException:
            logger.log(u'ERROR: expected to find a single show matching %s' % sqlEp['showid'])
            continue

        try:
            end_time = (network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network) +
                        datetime.timedelta(minutes=helpers.tryInt(show.runtime, 60)))

            # filter out any episodes that haven't aired yet
            if end_time > cur_time:
                continue
        except (StandardError, Exception):
            # if an error occurred assume the episode hasn't aired yet
            continue

        ep = show.getEpisode(int(sqlEp['season']), int(sqlEp['episode']))
        with ep.lock:
            # Now that it is time, change state of UNAIRED show into expected or skipped
            ep.status = (common.WANTED, common.SKIPPED)[ep.show.paused]
            result = ep.get_sql()
            if None is not result:
                sql_l.append(result)
            wanted |= (False, True)[common.WANTED == ep.status]
    else:
        # NOTE(review): indentation was lost in this excerpt; a for-else with no
        # break always runs, so this else presumably pairs with a condition in
        # the upstream source — confirm against the original file
        logger.log(u'No unaired episodes marked wanted')

    if 0 < len(sql_l):
        my_db = db.DBConnection()
        my_db.mass_action(sql_l)

    if wanted:
        logger.log(u'Found new episodes marked wanted')
示例14: get_media_info
def get_media_info(video_node):
    """Extract file path, media id, play count and view progress from a
    Plex <Video> XML node.

    :param video_node: ElementTree element for a Plex Video item
    :return: dict with path_file/media_id/played/progress for the first
             Media/Part found; implicitly None when the node has no parts
    """
    progress = 0
    # viewOffset/duration give partial-view progress as a percentage
    if None is not video_node.get('viewOffset') and None is not video_node.get('duration'):
        progress = tryInt(video_node.get('viewOffset')) * 100 / tryInt(video_node.get('duration'))

    for media in video_node.findall('Media'):
        for part in media.findall('Part'):
            file_name = part.get('file')
            # if '3' > sys.version: # remove HTML quoted characters, only works in python < 3
            #     file_name = urllib2.unquote(file_name.encode('utf-8', errors='replace'))
            # else:
            file_name = urllib2.unquote(file_name)

            # only the first Part of the first Media is reported
            return {'path_file': file_name, 'media_id': video_node.get('ratingKey'),
                    'played': int(video_node.get('viewCount') or 0), 'progress': progress}
开发者ID:JackDandy,项目名称:SickGear,代码行数:16,代码来源:plex.py
示例15: _search_provider
def _search_provider(self, search_params, **kwargs):
    """Scrape the provider's torrent table for each search string.

    :param search_params: dict of mode ('Cache'/'Season'/'Episode'/'Propers')
                          -> list of query strings
    :return: de-duplicated list of (title, download_url, seeders, size) tuples
    """
    results = []
    if not self._authorised():
        return results

    items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

    # case-insensitive matchers for detail/download links and title cleanup
    rc = dict((k, re.compile('(?i)' + v))
              for (k, v) in {'info': 'view', 'get': 'download', 'title': 'view\s+torrent\s+'}.items())
    for mode in search_params.keys():
        for search_string in search_params[mode]:
            search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string

            # fetch 15 results by default, and up to 100 if allowed in user profile
            search_url = self.urls['search'] % (search_string, self._categories_string(mode, 'filter_cat[%s]=1'))

            html = self.get_url(search_url)

            cnt = len(items[mode])
            try:
                if not html or self._has_no_results(html):
                    raise generic.HaltParseException

                with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                    torrent_table = soup.find('table', attrs={'class': 'torrent_table'})
                    torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                    # first row is the header
                    if 2 > len(torrent_rows):
                        raise generic.HaltParseException

                    for tr in torrent_rows[1:]:
                        try:
                            # tryInt(n, n) keeps the raw text when not int-like
                            seeders, leechers, size = [tryInt(n, n) for n in [
                                tr.find_all('td')[x].get_text().strip() for x in (-2, -1, -4)]]
                            if self._peers_fail(mode, seeders, leechers):
                                continue

                            info = tr.find('a', title=rc['info'])
                            title = 'title' in info.attrs and rc['title'].sub('', info.attrs['title']) \
                                or info.get_text().strip()

                            # NOTE(review): upstream source presumably reads
                            # .replace('&amp;', '&'); the entity was likely lost
                            # when this page was scraped — confirm
                            link = str(tr.find('a', title=rc['get'])['href']).replace('&', '&').lstrip('/')
                            download_url = self.urls['get'] % link
                        except (AttributeError, TypeError, ValueError):
                            continue

                        if title and download_url:
                            items[mode].append((title, download_url, seeders, self._bytesizer(size)))
            except generic.HaltParseException:
                pass
            except Exception:
                logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
            self._log_search(mode, len(items[mode]) - cnt, search_url)

        self._sort_seeders(mode, items)

        results = list(set(results + items[mode]))

    return results
示例16: buildNameCache
def buildNameCache(show=None):
    """Adds all new name exceptions to the namecache memory and flushes any removed name exceptions

    :param show (optional): Only update namecache for this show object
    """
    global nameCache
    with nameCacheLock:
        if show:
            # search for only the requested show id and flush old show entries from namecache
            indexer_ids = [show.indexerid]
            # bugfix: cache values are [indexer_id, season] lists, so comparing
            # the whole value to the id never matched and stale entries were
            # kept; compare the id element instead
            nameCache = dict((k, v) for k, v in nameCache.items() if v[0] != show.indexerid)

            # add standard indexer name to namecache
            nameCache[sickbeard.helpers.full_sanitizeSceneName(show.name)] = [show.indexerid, -1]
        else:
            # generate list of indexer ids to look up in cache.db
            indexer_ids = [x.indexerid for x in sickbeard.showList if x]

            # add all standard show indexer names to namecache
            nameCache = dict(
                (sickbeard.helpers.full_sanitizeSceneName(x.name), [x.indexerid, -1]) for x in sickbeard.showList if x)

        cacheDB = db.DBConnection()

        # pull scene exceptions for just the shows being refreshed
        cache_results = cacheDB.select(
            'SELECT show_name, indexer_id, season FROM scene_exceptions WHERE indexer_id IN (%s)' % ','.join(
                ['?'] * len(indexer_ids)), indexer_ids)

        if cache_results:
            for cache_result in cache_results:
                indexer_id = int(cache_result['indexer_id'])
                season = tryInt(cache_result['season'], -1)
                name = sickbeard.helpers.full_sanitizeSceneName(cache_result['show_name'])
                nameCache[name] = [indexer_id, season]
示例17: html
def html(self, mode, search_string, results):
    """Fallback html search against the provider website (vs its API).

    NOTE(review): this excerpt is truncated by the source page — the try
    block's except/return handling continues beyond what is shown here.

    :param mode: search mode key, e.g. 'Cache'/'Season'/'Episode'/'Propers'
    :param search_string: query string
    :param results: list to append (title, download_url, seeders, size) tuples to
    """
    # the site rejects requests carrying a Content-Type header; scrub session state
    if 'Content-Type' in self.session.headers:
        del (self.session.headers['Content-Type'])
    setattr(self.session, 'reserved', {'headers': {
        'Accept': 'text/html, application/xhtml+xml, */*', 'Accept-Language': 'en-GB',
        'Cache-Control': 'no-cache', 'Referer': 'https://broadcasthe.net/login.php', 'User-Agent': self.ua}})
    self.headers = None

    if self.auth_html or self._authorised_html():
        # drop the login Referer once authorised
        del (self.session.reserved['headers']['Referer'])
        if 'Referer' in self.session.headers:
            del (self.session.headers['Referer'])
        self.auth_html = True

        search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
        search_url = self.urls['search'] % (search_string, self._categories_string(mode, 'filter_cat[%s]=1'))

        html = self.get_url(search_url, use_tmr_limit=False)
        if self.should_skip(log_warning=False, use_tmr_limit=False):
            return results

        cnt = len(results)
        try:
            if not html or self._has_no_results(html):
                raise generic.HaltParseException

            with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                torrent_table = soup.find(id='torrent_table')
                torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                # first row is the header
                if 2 > len(torrent_rows):
                    raise generic.HaltParseException

                # case-insensitive matchers for category and download links
                rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
                    'cats': '(?i)cat\[(?:%s)\]' % self._categories_string(mode, template='', delimiter='|'),
                    'get': 'download'}.items())
                head = None
                for tr in torrent_rows[1:]:
                    cells = tr.find_all('td')
                    if 5 > len(cells):
                        continue
                    try:
                        # resolve column positions from the header row once
                        head = head if None is not head else self._header_row(tr)
                        seeders, leechers, size = [tryInt(n, n) for n in [
                            cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                        # optionally reject m2ts containers flagged in the row text
                        if not tr.find('a', href=rc['cats']) or self._reject_item(
                                seeders, leechers, container=self.reject_m2ts and (
                                    re.search(r'(?i)\[.*?m2?ts.*?\]', tr.get_text('', strip=True)))):
                            continue

                        title = tr.select('td span[title]')[0].attrs.get('title').strip()
                        download_url = self._link(tr.find('a', href=rc['get'])['href'])
                    except (AttributeError, TypeError, ValueError, KeyError, IndexError):
                        continue

                    if title and download_url:
                        results.append((title, download_url, seeders, self._bytesizer(size)))
示例18: _search_provider
def _search_provider(self, search_params, **kwargs):
    """Scrape the provider's bordered torrent table for each search string.

    :param search_params: dict of mode ('Cache'/'Season'/'Episode'/'Propers')
                          -> list of query strings
    :return: de-duplicated list of (title, download_url, seeders, size) tuples
    """
    results = []
    if not self._authorised():
        return results

    items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

    # case-insensitive matchers: detail link, download link, freeleech tag
    rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download', 'fl': '\[\W*F\W?L\W*\]'
                                                         }.items())
    for mode in search_params.keys():
        for search_string in search_params[mode]:
            search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
            search_url = self.urls['search'] % (search_string, self._categories_string())

            html = self.get_url(search_url, timeout=90)

            cnt = len(items[mode])
            try:
                if not html or self._has_no_results(html):
                    raise generic.HaltParseException

                with BS4Parser(html, features=['html5lib', 'permissive'], attr='border="1"') as soup:
                    torrent_table = soup.find('table', attrs={'border': '1'})
                    torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                    # first row is the header
                    if 2 > len(torrent_rows):
                        raise generic.HaltParseException

                    for tr in torrent_rows[1:]:
                        try:
                            info = tr.find('a', href=rc['info'])
                            # tryInt(n, n) keeps the raw text when not int-like
                            seeders, leechers, size = [tryInt(n, n) for n in [
                                tr.find_all('td')[x].get_text().strip() for x in (-2, -1, -4)]]
                            # enforce freeleech-only mode and peer thresholds
                            if self.freeleech and (len(info.contents) < 2 or not rc['fl'].search(info.contents[1].string.strip())) \
                                    or self._peers_fail(mode, seeders, leechers):
                                continue

                            title = 'title' in info.attrs and info.attrs['title'] or info.contents[0]
                            title = (isinstance(title, list) and title[0] or title).strip()
                            download_url = self.urls['get'] % str(tr.find('a', href=rc['get'])['href']).lstrip('/')
                        except (AttributeError, TypeError, ValueError):
                            continue

                        if title and download_url:
                            items[mode].append((title, download_url, seeders, self._bytesizer(size)))
            except generic.HaltParseException:
                pass
            except Exception:
                logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
            self._log_search(mode, len(items[mode]) - cnt, search_url)

        self._sort_seeders(mode, items)

        results = list(set(results + items[mode]))

    return results
开发者ID:Apocrathia,项目名称:SickGear,代码行数:59,代码来源:torrentbytes.py
示例19: _episode_strings
def _episode_strings(self, ep_obj, **kwargs):
    """Build search strings for an episode using this site's naming quirks.

    Delegates to the base implementation with:
    - date_detail: generates several UK-style date spellings (short month with
      a '%' wildcard, 'dd Mon YYYY', and 'dd Month YYYY' when the full month
      name differs from the abbreviation), each stripped of leading zeros
    - ep_detail: the standard episode naming plus an 'Nof' form for
      season-1 episodes
    """
    return super(TVChaosUKProvider, self)._episode_strings(ep_obj, scene=False, prefix='%', date_detail=(
        lambda d: [x.strip('0') for x in (
            ['{0} {1}% {2}'.format(d.strftime('%d')[-1], d.strftime('%b'), d.strftime('%Y'))] +
            [d.strftime('%d %b %Y')] + ([d.strftime('%d %B %Y')], [])[d.strftime('%b') == d.strftime('%B')])]),
        ep_detail=(lambda e: [naming_ep_type[2] % e] + (
            [], ['%(episodenumber)dof' % e])[1 == tryInt(e.get('seasonnumber'))]), **kwargs)
开发者ID:JackDandy,项目名称:SickGear,代码行数:8,代码来源:tvchaosuk.py
示例20: _search_provider
def _search_provider(self, search_params, **kwargs):
    """Scrape the provider's torrentsTable for each search string.

    :param search_params: dict of mode ('Cache'/'Season'/'Episode'/'Propers')
                          -> list of query strings
    :return: de-duplicated list of (title, download_url, seeders, size) tuples
    """
    results = []
    if not self._authorised():
        return results

    items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

    # case-insensitive matchers: detail link, download link, category link
    rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download',
                                                         'cats': 'cat=(?:%s)' % self._categories_string(template='', delimiter='|')
                                                         }.items())
    for mode in search_params.keys():
        for search_string in search_params[mode]:
            search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
            search_url = self.urls['search'] % (self._categories_string(), search_string)

            html = self.get_url(search_url)

            cnt = len(items[mode])
            try:
                if not html or self._has_no_results(html):
                    raise generic.HaltParseException

                with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                    torrent_table = soup.find('table', id='torrentsTable')
                    torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                    # first row is the header
                    if 2 > len(torrent_rows):
                        raise generic.HaltParseException

                    for tr in torrent_rows[1:]:
                        try:
                            # tryInt(n, n) keeps the raw text when not int-like
                            seeders, leechers, size = [tryInt(n, n) for n in [
                                tr.find_all('td')[x].get_text().strip() for x in (-2, -1, -3)]]
                            # skip rows outside the wanted categories or below peer thresholds
                            if None is tr.find('a', href=rc['cats']) or self._peers_fail(mode, seeders, leechers):
                                continue

                            info = tr.find('a', href=rc['info'])
                            title = 'title' in info.attrs and info.attrs['title'] or info.get_text().strip()
                            download_url = self.urls['get'] % tr.find('a', href=rc['get']).get('href')
                        except (AttributeError, TypeError, ValueError):
                            continue

                        if title and download_url:
                            items[mode].append((title, download_url, seeders, self._bytesizer(size)))
            except generic.HaltParseException:
                pass
            except Exception:
                logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
            self._log_search(mode, len(items[mode]) - cnt, search_url)

        self._sort_seeders(mode, items)

        results = list(set(results + items[mode]))

    return results
开发者ID:Apocrathia,项目名称:SickGear,代码行数:58,代码来源:torrenting.py
注:本文中的sickbeard.helpers.tryInt函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论