本文整理汇总了Python中sickrage.helper.common.try_int函数的典型用法代码示例。如果您正苦于以下问题:Python try_int函数的具体用法?Python try_int怎么用?Python try_int使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了try_int函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: search
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
    """
    Search the provider's RSS feed for torrents matching the given strings.

    :param search_strings: dict mapping search mode ('RSS', 'Season', 'Episode') to a list of search strings
    :param age: unused here; kept for the common provider search() signature
    :param ep_obj: unused here; kept for the common provider search() signature
    :return: list of result tuples (title, download_url, size, seeders, leechers)
    """
    results = []
    for mode in search_strings:
        items = []
        logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.log(u"Search string: %s " % search_string, logger.DEBUG)
            # 'rss' request type for RSS mode, plain 'search' otherwise
            self.search_params.update({'type': ('search', 'rss')[mode == 'RSS'], 'search': search_string})
            # Honor a user-configured mirror by swapping the index URL
            url = self.urls['rss'] if not self.custom_url else self.urls['rss'].replace(self.urls['index'], self.custom_url)
            data = self.get_url(url, params=self.search_params)
            if not data:
                logger.log(u"No data returned from provider", logger.DEBUG)
                continue
            if not data.startswith('<?xml'):
                logger.log(u'Expected xml but got something else, is your mirror failing?', logger.INFO)
                continue
            with BS4Parser(data, 'html5lib') as parser:
                for item in parser.findAll('item'):
                    try:
                        # Title arrives wrapped in a CDATA section; strip the markers
                        title = re.sub(r'^<!\[CDATA\[|\]\]>$', '', item.find('title').get_text(strip=True))
                        seeders = try_int(item.find('seeders').get_text(strip=True))
                        leechers = try_int(item.find('leechers').get_text(strip=True))
                        torrent_size = item.find('size').get_text()
                        size = convert_size(torrent_size) or -1
                        if sickbeard.TORRENT_METHOD == 'blackhole':
                            enclosure = item.find('enclosure')  # Backlog doesnt have enclosure
                            download_url = enclosure['url'] if enclosure else item.find('link').next.strip()
                            # Rewrite the details-page URL into a direct .torrent download URL
                            download_url = re.sub(r'(.*)/torrent/(.*).html', r'\1/download/\2.torrent', download_url)
                        else:
                            info_hash = item.find('info_hash').get_text(strip=True)
                            download_url = "magnet:?xt=urn:btih:" + info_hash + "&dn=" + title + self._custom_trackers
                    except (AttributeError, TypeError, KeyError, ValueError):
                        # Malformed feed item: skip it rather than abort the whole search
                        continue
                    if not all([title, download_url]):
                        continue
                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                        continue
                    item = title, download_url, size, seeders, leechers
                    if mode != 'RSS':
                        logger.log(u"Found result: %s " % title, logger.DEBUG)
                    items.append(item)
        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda tup: tup[3], reverse=True)
        results += items
    return results
开发者ID:allan84,项目名称:SickRage,代码行数:60,代码来源:extratorrent.py
示例2: search
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
    """
    Search this provider's HTML result pages for torrents matching the given strings.

    :param search_strings: dict mapping search mode ('RSS', 'Season', 'Episode') to a list of search strings
    :param age: unused here; kept for the common provider search() signature
    :param ep_obj: unused here; kept for the common provider search() signature
    :return: list of result dicts with keys title/link/size/seeders/leechers/hash
    """
    results = []
    for mode in search_strings:
        items = []
        logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
        for search_string in search_strings[mode]:
            if mode == 'Season':
                # The site names season packs in French ("Saison N")
                search_string = re.sub(r'(.*)S0?', r'\1Saison ', search_string)
            if mode != 'RSS':
                logger.log("Search string: {0}".format
                           (search_string.decode("utf-8")), logger.DEBUG)
                # ',trie-seeds-d' asks the site to sort by seeders, descending
                search_url = self.url + '/recherche/' + search_string.replace('.', '-').replace(' ', '-') + '.html,trie-seeds-d'
            else:
                search_url = self.url + '/view_cat.php?categorie=series&trie=date-d'
            data = self.get_url(search_url, returns='text')
            if not data:
                continue
            with BS4Parser(data, 'html5lib') as html:
                # Result rows alternate between CSS classes 'ligne0' and 'ligne1'
                torrent_rows = html(class_=re.compile('ligne[01]'))
                for result in torrent_rows:
                    try:
                        title = result.find(class_="titre").get_text(strip=True).replace("HDTV", "HDTV x264-CPasBien")
                        title = re.sub(r' Saison', ' Season', title, flags=re.I)
                        tmp = result.find("a")['href'].split('/')[-1].replace('.html', '.torrent').strip()
                        download_url = (self.url + '/telechargement/{0}'.format(tmp))
                        if not all([title, download_url]):
                            continue
                        seeders = try_int(result.find(class_="up").get_text(strip=True))
                        leechers = try_int(result.find(class_="down").get_text(strip=True))
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue
                        torrent_size = result.find(class_="poid").get_text(strip=True)
                        # French byte units ('o' = octets) as reported by the site
                        units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                        size = convert_size(torrent_size, units=units) or -1
                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
                        items.append(item)
                    except StandardError:
                        # Skip rows that fail to parse (Python 2 StandardError)
                        continue
        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items
    return results
开发者ID:Row,项目名称:SickRage,代码行数:59,代码来源:cpasbien.py
示例3: search
def search(self, search_strings, age=0, ep_obj=None):
    """
    Search the provider's RSS feed for torrents matching the given strings.

    :param search_strings: dict mapping search mode ('Season', 'Episode', 'RSS') to a list of search strings
    :param age: unused here; kept for the common provider search() signature
    :param ep_obj: unused here; kept for the common provider search() signature
    :return: list of result tuples (title, download_url, size, seeders, leechers)
    """
    results = []
    items = {'Season': [], 'Episode': [], 'RSS': []}
    for mode in search_strings.keys():
        logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.log(u"Search string: %s " % search_string, logger.DEBUG)
            try:
                # 'rss' request type for RSS mode, plain 'search' otherwise
                self.search_params.update({'type': ('search', 'rss')[mode == 'RSS'], 'search': search_string})
                # Honor a user-configured mirror by swapping the index URL
                url = self.urls['rss'] if not self.custom_url else self.urls['rss'].replace(self.urls['index'], self.custom_url)
                data = self.get_url(url, params=self.search_params)
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue
                if not data.startswith('<?xml'):
                    logger.log(u'Expected xml but got something else, is your mirror failing?', logger.INFO)
                    continue
                with BS4Parser(data, 'html5lib') as parser:
                    for item in parser.findAll('item'):
                        # Title arrives wrapped in a CDATA section; strip the markers
                        title = re.sub(r'^<!\[CDATA\[|\]\]>$', '', item.find('title').text)
                        # info_hash = item.get('info_hash', '')
                        size = try_int(item.find('size').text, -1)
                        seeders = try_int(item.find('seeders').text)
                        leechers = try_int(item.find('leechers').text)
                        enclosure = item.find('enclosure')
                        download_url = enclosure['url'] if enclosure else self._magnet_from_details(item.find('link').text)
                        if not all([title, download_url]):
                            continue
                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue
                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s " % title, logger.DEBUG)
                        items[mode].append(item)
            except (AttributeError, TypeError, KeyError, ValueError):
                # One bad search string shouldn't kill the rest of the run
                logger.log(u"Failed parsing provider. Traceback: %r" % traceback.format_exc(), logger.WARNING)
        # For each search mode sort all the items by seeders if available
        items[mode].sort(key=lambda tup: tup[3], reverse=True)
        results += items[mode]
    return results
开发者ID:madtrix74,项目名称:SickRage,代码行数:59,代码来源:extratorrent.py
示例4: search
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
    """
    Search this provider's HTML result pages for torrents matching the given strings.

    :param search_strings: dict mapping search mode ('RSS', 'Season', 'Episode') to a list of search strings
    :param age: unused here; kept for the common provider search() signature
    :param ep_obj: unused here; kept for the common provider search() signature
    :return: list of result tuples (title, download_url, size, seeders, leechers)
    """
    results = []
    for mode in search_strings:
        items = []
        logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.log(u"Search string: %s " % search_string, logger.DEBUG)
                # ',trie-seeds-d' asks the site to sort by seeders, descending
                search_url = self.url + '/recherche/' + search_string.replace('.', '-').replace(' ', '-') + '.html,trie-seeds-d'
            else:
                search_url = self.url + '/view_cat.php?categorie=series&trie=date-d'
            logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
            data = self.get_url(search_url)
            if not data:
                continue
            with BS4Parser(data, 'html5lib') as html:
                # Result rows alternate between CSS classes 'ligne0' and 'ligne1'
                torrent_rows = html.find_all(class_=re.compile('ligne[01]'))
                for result in torrent_rows:
                    try:
                        title = result.find(class_="titre").get_text(strip=True).replace("HDTV", "HDTV x264-CPasBien")
                        title = re.sub(r' Saison', ' Season', title, flags=re.IGNORECASE)
                        tmp = result.find("a")['href'].split('/')[-1].replace('.html', '.torrent').strip()
                        download_url = (self.url + '/telechargement/%s' % tmp)
                        if not all([title, download_url]):
                            continue
                        seeders = try_int(result.find(class_="up").get_text(strip=True))
                        leechers = try_int(result.find(class_="down").get_text(strip=True))
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue
                        torrent_size = result.find(class_="poid").get_text(strip=True)
                        # French byte units ('o' = octets) as reported by the site
                        units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                        size = convert_size(torrent_size, units=units) or -1
                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)
                        items.append(item)
                    except StandardError:
                        # Skip rows that fail to parse (Python 2 StandardError)
                        continue
        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda tup: tup[3], reverse=True)
        results += items
    return results
开发者ID:Hydrog3n,项目名称:SickRage,代码行数:54,代码来源:cpasbien.py
示例5: parse_date_time
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string (an ordinal day number; coerced via try_int)
    :param t: time string, matched against the module-level time_regex
    :param network: network to use as base
    :return: datetime object containing local time
    """
    if not network_dict:
        load_network_dict()
    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        # Group 5 holds the am/pm marker; presence decides which minute group applies
        if mo.group(5) is not None:
            try:
                hr = try_int(mo.group(1))
                m = try_int(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except Exception:
                hr = 0
                m = 0
        else:
            # 24-hour form: minutes live in group 6 instead of group 4
            try:
                hr = try_int(mo.group(1))
                m = try_int(mo.group(6))
            except Exception:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0
    # Clamp anything out of range back to midnight
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0
    # 'or 1' guards against try_int returning 0 (fromordinal rejects 0)
    te = datetime.datetime.fromordinal(try_int(d) or 1)
    try:
        foreign_timezone = get_network_timezone(network, network_dict)
        return datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
    except Exception:
        # Fall back to the application's own timezone if the network's is unusable
        return datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=sb_timezone)
开发者ID:mvarrieur,项目名称:SickRage,代码行数:49,代码来源:network_timezones.py
示例6: fetch_latest_hot_shows
def fetch_latest_hot_shows(self):
    """
    Get popular show information from AniDB.

    Queries AniDB's "hot" list and builds a RecommendedShow for each entry;
    shows that fail to build or cache are skipped on a best-effort basis.

    :return: list of RecommendedShow objects
    """
    # NOTE: the original also assigned shows = [] first, which was immediately
    # overwritten — dead assignment removed.
    result = []
    shows = anidbquery.query(QUERY_HOT)
    for show in shows:
        try:
            recommended_show = RecommendedShow(
                show.id,
                show.titles["x-jat"][0],
                1,
                show.tvdbid,
                cache_subfolder=self.cache_subfolder,
                rating=str(show.ratings["temporary"]["rating"]),
                votes=str(try_int(show.ratings["temporary"]["count"], 0)),
                image_href=show.url,
            )
            # Check cache or get and save image
            recommended_show.cache_image("http://img7.anidb.net/pics/anime/{0}".format(show.image_path))
            result.append(recommended_show)
        except Exception:
            # Was a bare 'except: pass', which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception while keeping the
            # per-show best-effort behavior.
            continue
    return result
开发者ID:Hydrog3n,项目名称:SickRage,代码行数:28,代码来源:anidb.py
示例7: index
def index(self, limit=None):
    """
    Render the download-history page.

    :param limit: max number of history rows to show; when None, falls back to
                  the configured sickbeard.HISTORY_LIMIT or 100
    :return: the rendered history.mako template
    """
    if limit is None:
        if sickbeard.HISTORY_LIMIT:
            limit = int(sickbeard.HISTORY_LIMIT)
        else:
            limit = 100
    else:
        limit = try_int(limit, 100)
    # Persist the chosen limit as the new configured default
    sickbeard.HISTORY_LIMIT = limit
    sickbeard.save_config()
    history = self.history.get(limit)
    t = PageTemplate(rh=self, filename='history.mako')
    submenu = [
        {'title': 'Clear History', 'path': 'history/clearHistory', 'icon': 'ui-icon ui-icon-trash', 'class': 'clearhistory', 'confirm': True},
        {'title': 'Trim History', 'path': 'history/trimHistory', 'icon': 'menu-icon-cut', 'class': 'trimhistory', 'confirm': True},
    ]
    return t.render(historyResults=history.detailed, compactResults=history.compact, limit=limit,
                    submenu=submenu, title='History', header='History',
                    topmenu='history', controller='history', action='index')
开发者ID:Eiber,项目名称:SickRage-Medusa,代码行数:25,代码来源:history.py
示例8: test_try_int
def test_try_int(self):
    """
    Test try_int with its default fallback: non-numeric strings, empty
    strings and None coerce to 0; floats are truncated toward zero.
    """
    # candidate -> expected result
    test_cases = {
        None: 0,
        '': 0,
        '123': 123,
        '-123': -123,
        '12.3': 0,
        '-12.3': 0,
        0: 0,
        123: 123,
        -123: -123,
        12.3: 12,
        -12.3: -12,
    }
    # Same expectations for unicode inputs (Python 2 distinguishes str/unicode)
    unicode_test_cases = {
        u'': 0,
        u'123': 123,
        u'-123': -123,
        u'12.3': 0,
        u'-12.3': 0,
    }
    for test in test_cases, unicode_test_cases:
        for (candidate, result) in test.iteritems():
            self.assertEqual(try_int(candidate), result)
开发者ID:miigotu,项目名称:sickrage-readynas,代码行数:29,代码来源:common_tests.py
示例9: test_try_int_with_default
def test_try_int_with_default(self):
    """
    Test try_int with an explicit default: unparseable inputs return the
    given default instead of 0; parseable inputs behave as usual.
    """
    default_value = 42
    # candidate -> expected result
    test_cases = {
        None: default_value,
        '': default_value,
        '123': 123,
        '-123': -123,
        '12.3': default_value,
        '-12.3': default_value,
        0: 0,
        123: 123,
        -123: -123,
        12.3: 12,
        -12.3: -12,
    }
    # Same expectations for unicode inputs (Python 2 distinguishes str/unicode)
    unicode_test_cases = {
        u'': default_value,
        u'123': 123,
        u'-123': -123,
        u'12.3': default_value,
        u'-12.3': default_value,
    }
    for test in test_cases, unicode_test_cases:
        for (candidate, result) in test.iteritems():
            self.assertEqual(try_int(candidate, default_value), result)
示例10: _verify_added
def _verify_added(self, torrent_hash, attempts=5):
    """
    Poll qBittorrent until the torrent with the given hash is visible.

    :param torrent_hash: info hash of the torrent to look for
    :param attempts: number of polls, two seconds apart
    :return: True once the properties endpoint returns a non-empty body, else False
    """
    self.url = self.host + 'query/propertiesGeneral/' + torrent_hash.lower()
    for _attempt in range(attempts):
        responded = self._request(method='get', cookies=self.session.cookies)
        # A non-empty Content-Length means the torrent's properties exist
        if responded and try_int(self.response.headers.get('Content-Length')) > 0:
            return True
        sleep(2)
    return False
开发者ID:magicseb,项目名称:SickRage,代码行数:8,代码来源:qbittorrent_client.py
示例11: search
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
    """Start searching for anime using the provided search_strings. Used for backlog and daily"""
    results = []
    # Provider only indexes anime; bail out for non-anime shows
    if self.show and not self.show.is_anime:
        return results
    for mode in search_strings:
        items = []
        logger.log('Search mode: {0}'.format(mode), logger.DEBUG)
        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.log('Search string: {search}'.format
                           (search=search_string), logger.DEBUG)
            # RSS mode hits the feed URL; other modes hit the search API
            search_url = (self.urls['rss'], self.urls['api'] + search_string)[mode != 'RSS']
            data = self.get_url(search_url, returns='text')
            if not data:
                logger.log('No data returned from provider', logger.DEBUG)
                continue
            if not data.startswith('<?xml'):
                logger.log('Expected xml but got something else, is your mirror failing?', logger.INFO)
                continue
            with BS4Parser(data, 'html5lib') as html:
                entries = html('item')
                if not entries:
                    logger.log('Returned xml contained no results', logger.INFO)
                    continue
                for item in entries:
                    try:
                        title = item.title.get_text(strip=True)
                        download_url = item.enclosure.get('url').strip()
                        if not (title and download_url):
                            continue
                        # description = item.find('description')
                        size = try_int(item.enclosure.get('length', -1))
                        item = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                        }
                        items.append(item)
                    except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                        # One malformed item shouldn't abort the whole feed
                        logger.log('Failed parsing provider. Traceback: {0!r}'.format
                                   (traceback.format_exc()), logger.ERROR)
                        continue
        results += items
    return results
开发者ID:Eiber,项目名称:SickRage-Medusa,代码行数:57,代码来源:anizb.py
示例12: search
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
    """
    Search the provider's XML feed for torrents matching the given strings.

    :param search_strings: dict mapping search mode ('RSS', 'Season', 'Episode') to a list of search strings
    :param age: unused here; kept for the common provider search() signature
    :param ep_obj: unused here; kept for the common provider search() signature
    :return: list of result dicts with keys title/link/size/seeders/leechers/hash
    """
    results = []
    for mode in search_strings:
        items = []
        logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
        for search_string in search_strings[mode]:
            # Feed verified does not exist on this clone
            # search_url = self.urls['verified'] if self.confirmed else self.urls['feed']
            search_url = self.urls['feed']
            if mode != 'RSS':
                logger.log(u"Search string: {0}".format
                           (search_string.decode("utf-8")), logger.DEBUG)
            data = self.get_url(search_url, params={'f': search_string}, returns='text')
            if not data:
                logger.log(u"No data returned from provider", logger.DEBUG)
                continue
            if not data.startswith("<?xml"):
                logger.log(u"Expected xml but got something else, is your mirror failing?", logger.INFO)
                continue
            try:
                with BS4Parser(data, 'html5lib') as parser:
                    for item in parser('item'):
                        # Skip items explicitly categorized as something other than tv
                        if item.category and 'tv' not in item.category.get_text(strip=True).lower():
                            continue
                        title = item.title.get_text(strip=True)
                        # Info hash is the last path segment of the guid URL
                        t_hash = item.guid.get_text(strip=True).rsplit('/', 1)[-1]
                        if not all([title, t_hash]):
                            continue
                        download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + self._custom_trackers
                        torrent_size, seeders, leechers = self._split_description(item.find('description').text)
                        size = convert_size(torrent_size) or -1
                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue
                        result = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': t_hash}
                        items.append(result)
            except StandardError:
                # Python 2 StandardError: log the parse failure and move on
                logger.log(u"Failed parsing provider. Traceback: {0!r}".format(traceback.format_exc()), logger.ERROR)
        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items
    return results
开发者ID:Arcanemagus,项目名称:SickRage,代码行数:57,代码来源:torrentz.py
示例13: vres
def vres(self):
    """
    The vertical resolution found in the name.

    :returns: the vertical resolution as an int, or None when not found
    """
    match = self._get_match_obj('res')
    if not match:
        return None
    return try_int(match.group('vres'))
开发者ID:Comptezero,项目名称:SickRage,代码行数:9,代码来源:episode.py
示例14: _get_size
def _get_size(self, item):
    """
    Extract the release size from a provider result item.

    :param item: result mapping; the size is expected at item['links'][1]['length']
    :return: the size as an int, or -1 when missing or unparseable
    """
    size = -1
    try:
        links = item.get('links')
        size = links[1].get('length', -1)
    except (AttributeError, IndexError, TypeError):
        size = -1
    if not size:
        logger.log('The size was not found in the provider response', logger.DEBUG)
    return try_int(size, -1)
开发者ID:Elettronik,项目名称:SickRage,代码行数:10,代码来源:NZBProvider.py
示例15: change_SUBTITLES_FINDER_FREQUENCY
def change_SUBTITLES_FINDER_FREQUENCY(subtitles_finder_frequency):
    """
    Change the run frequency of the subtitle finder thread.

    :param subtitles_finder_frequency: new frequency; None or '' falls back to 1
    """
    frequency = 1 if subtitles_finder_frequency in (None, '') else subtitles_finder_frequency
    sickbeard.SUBTITLES_FINDER_FREQUENCY = try_int(frequency, 1)
开发者ID:hernandito,项目名称:SickRage,代码行数:10,代码来源:config.py
示例16: min_max
def min_max(val, default, low, high):
    """
    Return *val* coerced to an int and forced within [low, high].

    :param val: the raw value (coerced with try_int)
    :param default: fallback used when val cannot be coerced
    :param low: lower bound of the allowed range
    :param high: upper bound of the allowed range
    """
    clamped = try_int(val, default)
    if clamped < low:
        return low
    return high if clamped > high else clamped
开发者ID:ArthurGarnier,项目名称:SickRage,代码行数:11,代码来源:config.py
示例17: change_DAILYSEARCH_FREQUENCY
def change_DAILYSEARCH_FREQUENCY(freq):
    """
    Change frequency of daily search thread.

    :param freq: new frequency in minutes; unparseable input falls back to the default
    """
    # Coerce, then never allow the configured frequency below the minimum
    new_frequency = try_int(freq, sickbeard.DEFAULT_DAILYSEARCH_FREQUENCY)
    sickbeard.DAILYSEARCH_FREQUENCY = max(new_frequency, sickbeard.MIN_DAILYSEARCH_FREQUENCY)
    sickbeard.dailySearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.DAILYSEARCH_FREQUENCY)
开发者ID:hernandito,项目名称:SickRage,代码行数:12,代码来源:config.py
示例18: change_UPDATE_FREQUENCY
def change_UPDATE_FREQUENCY(freq):
    """
    Change frequency of the daily updater thread.

    :param freq: new frequency in hours; unparseable input falls back to the default
    """
    # Coerce, then never allow the configured frequency below the minimum
    new_frequency = try_int(freq, sickbeard.DEFAULT_UPDATE_FREQUENCY)
    sickbeard.UPDATE_FREQUENCY = max(new_frequency, sickbeard.MIN_UPDATE_FREQUENCY)
    sickbeard.versionCheckScheduler.cycleTime = datetime.timedelta(hours=sickbeard.UPDATE_FREQUENCY)
开发者ID:hernandito,项目名称:SickRage,代码行数:12,代码来源:config.py
示例19: retrieveShowMetadata
def retrieveShowMetadata(self, folder):
    """
    Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB.

    :param folder: show directory (must be unicode) expected to contain the metadata file
    :return: (indexer_id, name, indexer) on success, (None, None, None) otherwise
    """
    empty_return = (None, None, None)
    assert isinstance(folder, unicode)
    metadata_path = ek(os.path.join, folder, self._show_metadata_filename)
    if not ek(os.path.isdir, folder) or not ek(os.path.isfile, metadata_path):
        logger.log(u"Can't load the metadata file from " + metadata_path + ", it doesn't exist", logger.DEBUG)
        return empty_return
    logger.log(u"Loading show info from metadata file in " + metadata_path, logger.DEBUG)
    try:
        with io.open(metadata_path, 'rb') as xmlFileObj:
            showXML = etree.ElementTree(file=xmlFileObj)
            # A usable NFO needs a title plus at least one of <tvdbid>/<id>
            if showXML.findtext('title') is None or (showXML.findtext('tvdbid') is None and showXML.findtext('id') is None):
                logger.log(u"Invalid info in tvshow.nfo (missing name or id): {0} {1} {2}".format(showXML.findtext('title'), showXML.findtext('tvdbid'), showXML.findtext('id')))
                return empty_return
            name = showXML.findtext('title')
            indexer_id_text = showXML.findtext('tvdbid') or showXML.findtext('id')
            if indexer_id_text:
                # try_int with default None lets us distinguish "not a number"
                indexer_id = try_int(indexer_id_text, None)
                if indexer_id is None or indexer_id < 1:
                    logger.log(u"Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file", logger.DEBUG)
                    return empty_return
            else:
                logger.log(u"Empty <id> or <tvdbid> field in NFO, unable to find a ID, not using metadata file", logger.DEBUG)
                return empty_return
            # Hard-coded indexer 1 — presumably TVDB; confirm against the indexer table
            indexer = 1
            epg_url_text = showXML.findtext('episodeguide/url')
            if epg_url_text:
                epg_url = epg_url_text.lower()
                if str(indexer_id) in epg_url:
                    # Reject NFOs whose episode guide points at the defunct TVRage
                    if 'tvrage' in epg_url:
                        logger.log(u"Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file because it has TVRage info", logger.WARNING)
                        return empty_return
    except Exception as e:
        logger.log(
            u"There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e),
            logger.WARNING)
        return empty_return
    return indexer_id, name, indexer
开发者ID:Dpons039,项目名称:SickRage,代码行数:53,代码来源:generic.py
示例20: __init__
def __init__(self, indexer_id, media_format='normal'):
    """
    :param indexer_id: The indexer id of the show
    :param media_format: The format of the media to get. Must be either 'normal' or 'thumb'
    """
    self.indexer_id = try_int(indexer_id, 0)
    # Anything other than the two known formats silently falls back to 'normal'
    self.media_format = media_format if media_format in ('normal', 'thumb') else 'normal'
开发者ID:hernandito,项目名称:SickRage,代码行数:12,代码来源:GenericMedia.py
注:本文中的sickrage.helper.common.try_int函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论