This article collects typical usage examples of the Python function salts_lib.log_utils.log. If you are unsure what log does, how to call it, or want to see it used in real code, the hand-picked examples below should help.
The article presents 20 code examples of the log function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system surface better Python samples.
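Before the individual examples, here is a minimal sketch of the call pattern they all share: log_utils.log takes a message string and an optional log level. This snippet is illustrative rather than taken from any one project below; it assumes a Kodi add-on environment where salts_lib and xbmc are importable, and the cookie value is a made-up placeholder. The level constants shown are the ones that actually appear in the examples.

from salts_lib import log_utils
import xbmc

# Message only; the level argument is optional in the examples below.
log_utils.log('Service: Playback Stopped')

# Message plus a level constant defined by log_utils itself.
log_utils.log('Setting cookie: %s' % ('session=abc123'), log_utils.LOGDEBUG)  # placeholder cookie value

# Kodi's own xbmc.* log level constants are also passed in several examples.
log_utils.log('Unable to locate PW search key', xbmc.LOGWARNING)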
Example 1: search
def search(self, video_type, title, year):
    search_url = urlparse.urljoin(self.base_url, '/index.php?search_keywords=')
    search_url += urllib.quote_plus(title)
    search_url += '&year=' + urllib.quote_plus(str(year))
    if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE]:
        search_url += '&search_section=2'
    else:
        search_url += '&search_section=1'

    results = []
    html = self._http_get(self.base_url, cache_limit=0)
    match = re.search('input type="hidden" name="key" value="([0-9a-f]*)"', html)
    if match:
        key = match.group(1)
        search_url += '&key=' + key

        html = self._http_get(search_url, cache_limit=.25)
        pattern = r'class="index_item.+?href="(.+?)" title="Watch (.+?)"?\(?([0-9]{4})?\)?"?>'
        for match in re.finditer(pattern, html):
            result = {}
            url, title, year = match.groups('')
            result['url'] = url
            result['title'] = title
            result['year'] = year
            results.append(result)
    else:
        log_utils.log('Unable to locate PW search key', xbmc.LOGWARNING)
    return results
Developer: cyberwarrior, Project: dmind, Lines: 28, Source: pw_scraper.py
Example 2: _set_cookies
def _set_cookies(self, base_url, cookies):
    cookie_file = os.path.join(COOKIEPATH, "%s_cookies.lwp" % (self.get_name()))
    cj = cookielib.LWPCookieJar(cookie_file)
    try:
        cj.load(ignore_discard=True)
    except:
        pass
    if xbmcaddon.Addon().getSetting("cookie_debug") == "true":
        log_utils.log("Before Cookies: %s - %s" % (self, self.cookies_as_str(cj)), xbmc.LOGDEBUG)
    domain = urlparse.urlsplit(base_url).hostname
    for key in cookies:
        c = cookielib.Cookie(
            0,
            key,
            str(cookies[key]),
            port=None,
            port_specified=False,
            domain=domain,
            domain_specified=True,
            domain_initial_dot=False,
            path="/",
            path_specified=True,
            secure=False,
            expires=None,
            discard=False,
            comment=None,
            comment_url=None,
            rest={},
        )
        cj.set_cookie(c)
    cj.save(ignore_discard=True)
    if xbmcaddon.Addon().getSetting("cookie_debug") == "true":
        log_utils.log("After Cookies: %s - %s" % (self, self.cookies_as_str(cj)), xbmc.LOGDEBUG)
    return cj
Developer: rickardrocks, Project: tknorris-beta-repo, Lines: 34, Source: scraper.py
Example 3: onInit
def onInit(self):
    log_utils.log('onInit:', log_utils.LOGDEBUG)
    self.OK = False
    self.radio_buttons = []
    posy = starty
    for label in RADIO_BUTTONS:
        self.radio_buttons.append(self.__get_radio_button(posx, posy, label))
        posy += gap

    try: responses = json.loads(kodi.get_setting('prev_responses'))
    except: responses = [True] * len(self.radio_buttons)
    if len(responses) < len(self.radio_buttons):
        responses += [True] * (len(self.radio_buttons) - len(responses))

    self.addControls(self.radio_buttons)
    last_button = None
    for response, radio_button in zip(responses, self.radio_buttons):
        radio_button.setSelected(response)
        if last_button is not None:
            radio_button.controlUp(last_button)
            radio_button.controlLeft(last_button)
            last_button.controlDown(radio_button)
            last_button.controlRight(radio_button)
        last_button = radio_button

    continue_ctrl = self.getControl(CONTINUE_BUTTON)
    cancel_ctrl = self.getControl(CANCEL_BUTTON)
    self.radio_buttons[0].controlUp(cancel_ctrl)
    self.radio_buttons[0].controlLeft(cancel_ctrl)
    self.radio_buttons[-1].controlDown(continue_ctrl)
    self.radio_buttons[-1].controlRight(continue_ctrl)
    continue_ctrl.controlUp(self.radio_buttons[-1])
    continue_ctrl.controlLeft(self.radio_buttons[-1])
    cancel_ctrl.controlDown(self.radio_buttons[0])
    cancel_ctrl.controlRight(self.radio_buttons[0])
Developer: SQL-MisterMagoo, Project: salts, Lines: 35, Source: gui_utils.py
Example 4: update_all_scrapers
def update_all_scrapers():
    try: last_check = int(kodi.get_setting('last_list_check'))
    except: last_check = 0
    now = int(time.time())
    list_url = kodi.get_setting('scraper_url')
    scraper_password = kodi.get_setting('scraper_password')
    list_path = os.path.join(kodi.translate_path(kodi.get_profile()), 'scraper_list.txt')
    exists = os.path.exists(list_path)
    if list_url and scraper_password and (not exists or last_check < (now - (24 * 60 * 60))):
        scraper_list = utils2.get_and_decrypt(list_url, scraper_password)
        if scraper_list:
            try:
                with open(list_path, 'w') as f:
                    f.write(scraper_list)

                kodi.set_setting('last_list_check', str(now))
                kodi.set_setting('scraper_last_update', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now)))
                for line in scraper_list.split('\n'):
                    line = line.replace(' ', '')
                    if line:
                        scraper_url, filename = line.split(',')
                        if scraper_url.startswith('http'):
                            update_scraper(filename, scraper_url)
            except Exception as e:
                log_utils.log('Exception during scraper update: %s' % (e), log_utils.LOGWARNING)
Developer: monicarero, Project: repository.xvbmc, Lines: 25, Source: __init__.py
Example 5: onPlayBackStopped
def onPlayBackStopped(self):
    log_utils.log('Service: Playback Stopped')
    if self.tracked:
        # clear the playlist if SALTS was playing and it is the only playlist item,
        # so get_sources can use the playlist to determine the playback method
        pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
        plugin_url = 'plugin://%s/' % (kodi.get_id())
        if pl.size() == 1 and pl[0].getfilename().lower().startswith(plugin_url):
            log_utils.log('Service: Clearing Single Item SALTS Playlist', log_utils.LOGDEBUG)
            pl.clear()

        playedTime = float(self._lastPos)
        try: percent_played = int((playedTime / self._totalTime) * 100)
        except: percent_played = 0  # guard against division by zero
        pTime = utils2.format_time(playedTime)
        tTime = utils2.format_time(self._totalTime)
        log_utils.log('Service: Played %s of %s total = %s%%' % (pTime, tTime, percent_played), log_utils.LOGDEBUG)
        if playedTime == 0 and self._totalTime == 999999:
            log_utils.log('Kodi silently failed to start playback', log_utils.LOGWARNING)
        elif playedTime >= 5:
            log_utils.log('Service: Setting bookmark on |%s|%s|%s| to %s seconds' % (self.trakt_id, self.season, self.episode, playedTime), log_utils.LOGDEBUG)
            db_connection.set_bookmark(self.trakt_id, playedTime, self.season, self.episode)

        if percent_played >= 75:
            if xbmc.getCondVisibility('System.HasAddon(script.trakt)'):
                run = 'RunScript(script.trakt, action=sync, silent=True)'
                xbmc.executebuiltin(run)
    self.reset()
Developer: assli100, Project: kodi-openelec, Lines: 27, Source: service.py
Example 6: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        params = urlparse.parse_qs(source_url)
        if video.video_type == VIDEO_TYPES.MOVIE:
            cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"movieid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libMovies"}'
            result_key = 'moviedetails'
        else:
            cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"episodeid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libTvShows"}'
            result_key = 'episodedetails'

        run = cmd % (params['id'][0])
        meta = xbmc.executeJSONRPC(run)
        meta = json.loads(meta)
        log_utils.log('Source Meta: %s' % (meta), log_utils.LOGDEBUG)
        if 'result' in meta and result_key in meta['result']:
            details = meta['result'][result_key]
            def_quality = [item[0] for item in sorted(SORT_KEYS['quality'].items(), key=lambda x: x[1])][self.def_quality]
            host = {'multi-part': False, 'class': self, 'url': details['file'], 'host': 'XBMC Library', 'quality': def_quality, 'views': details['playcount'], 'rating': None, 'direct': True}
            stream_details = details['streamdetails']
            if len(stream_details['video']) > 0 and 'width' in stream_details['video'][0]:
                host['quality'] = self._width_get_quality(stream_details['video'][0]['width'])
            hosters.append(host)
    return hosters
Developer: matt2005, Project: salts, Lines: 25, Source: local_scraper.py
Example 7: search
def search(self, video_type, title, year):
    search_url = urlparse.urljoin(self.base_url, "/index.php?menu=search&query=")
    search_url += urllib.quote_plus(title)
    html = self._http_get(search_url, cache_limit=0.25)
    results = []
    # filter the html down to only tvshow or movie results
    if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE]:
        pattern = 'id="series".*'
        pattern2 = '<a title="Watch (.*?) Online For FREE".*?href="([^"]+)".*\((\d{1,4})\)</a>'
    else:
        pattern = 'id="movies".*id="series"'
        pattern2 = '<a\s+title="([^"]+)\s+\d{4}\.?".*?href="([^"]+)".*?\((\d{4})\.?\)</a>'
    match = re.search(pattern, html, re.DOTALL)
    if match:
        try:
            fragment = match.group(0)
            for match in re.finditer(pattern2, fragment):
                res_title, url, res_year = match.groups("")
                if not year or not res_year or year == res_year:
                    result = {"title": res_title, "url": url.replace(self.base_url, ""), "year": res_year}
                    results.append(result)
        except Exception as e:
            log_utils.log(
                "Failure during %s search: |%s|%s|%s| (%s)" % (self.get_name(), video_type, title, year, str(e)),
                xbmc.LOGWARNING,
            )
    return results
Developer: rickardrocks, Project: tknorris-beta-repo, Lines: 28, Source: uflix_scraper.py
Example 8: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    sources = []
    if source_url:
        params = urlparse.parse_qs(source_url)
        show_url = CONTENT_URL % (params['catalog_id'][0])
        url = urlparse.urljoin(self.base_url, show_url)
        html = self._http_get(url, cache_limit=.5)
        try:
            js_data = json.loads(html)
            if video.video_type == VIDEO_TYPES.EPISODE:
                js_data = self.__get_episode_json(params, js_data)
        except ValueError:
            log_utils.log('Invalid JSON returned for: %s' % (url), xbmc.LOGWARNING)
        else:
            for film in js_data['listvideos']:
                source_url = SOURCE_URL % (film['film_id'], params['catalog_id'][0])
                url = urlparse.urljoin(self.base_url, source_url)
                time.sleep(1.5)
                html = self._http_get(url, cache_limit=.5)
                try:
                    film_js = json.loads(html)
                except ValueError:
                    log_utils.log('Invalid JSON returned for: %s' % (url), xbmc.LOGWARNING)
                else:
                    for film in film_js['videos']:
                        film_link = self.__decrypt(FILM_KEY, base64.b64decode(film['film_link']))
                        for match in re.finditer('(http.*?(?:#(\d+)#)?)(?=http|$)', film_link):
                            link, height = match.groups()
                            source = {'multi-part': False, 'url': link, 'host': self._get_direct_hostname(link), 'class': self, 'quality': self._gv_get_quality(link), 'views': None, 'rating': None, 'direct': True}
                            if height is not None: source['resolution'] = '%sp' % (height)
                            sources.append(source)
    return sources
Developer: rickardrocks, Project: tknorris-beta-repo, Lines: 34, Source: megabox_scraper.py
Example 9: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=.5)
        if re.search('<span[^>]+>\s*Low Quality\s*</span>', html):
            quality = QUALITIES.LOW
        else:
            quality = QUALITIES.HIGH

        for match in re.finditer('gkpluginsphp.*?link\s*:\s*"([^"]+)', html):
            data = {'link': match.group(1)}
            headers = XHR
            headers['Referer'] = url
            gk_url = urlparse.urljoin(self.base_url, GK_URL)
            html = self._http_get(gk_url, data=data, headers=headers, cache_limit=.25)
            if html:
                try:
                    js_result = json.loads(html)
                except ValueError:
                    log_utils.log('Invalid JSON returned: %s: %s' % (url, html), log_utils.LOGWARNING)
                else:
                    if 'link' in js_result and 'func' not in js_result:
                        if isinstance(js_result['link'], list):
                            sources = dict((link['link'], self._height_get_quality(link['label'])) for link in js_result['link'])
                        else:
                            sources = {js_result['link']: quality}

                        for source in sources:
                            host = self._get_direct_hostname(source)
                            hoster = {'multi-part': False, 'url': source, 'class': self, 'quality': sources[source], 'host': host, 'rating': None, 'views': None, 'direct': True}
                            hosters.append(hoster)
    return hosters
Developer: bialagary, Project: mw, Lines: 35, Source: viooz_scraper.py
Example 10: _http_get
def _http_get(self, url, data=None, headers=None, cache_limit=8):
    html = self._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=cache_limit)
    cookie = scraper_utils.get_sucuri_cookie(html)
    if cookie:
        log_utils.log('Setting Pubfilm cookie: %s' % (cookie), log_utils.LOGDEBUG)
        html = self._cached_http_get(url, self.base_url, self.timeout, cookies=cookie, data=data, headers=headers, cache_limit=0)
    return html
Developer: assli100, Project: kodi-openelec, Lines: 7, Source: pubfilm_scraper.py
Example 11: __init__
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
    self.timeout = timeout
    self.__scraper = None
    try:
        self.__scraper = real_scraper(timeout)
    except Exception as e:
        log_utils.log('Failure during %s scraper creation: %s' % (self.get_name(), e), log_utils.LOGDEBUG)
Developer: assli100, Project: kodi-openelec, Lines: 7, Source: ocw_proxy.py
Example 12: _parse_google
def _parse_google(self, link):
    sources = []
    html = self._http_get(link, cache_limit=.5)
    i = link.rfind('#')
    if i > -1:
        link_id = link[i + 1:]
        match = re.search('feedPreload:\s*(.*}]}})},', html, re.DOTALL)
        if match:
            try:
                js = json.loads(match.group(1))
            except ValueError:
                log_utils.log('Invalid JSON returned for: %s' % (link), log_utils.LOGWARNING)
            else:
                for item in js['feed']['entry']:
                    if item['gphoto$id'] == link_id:
                        for media in item['media']['content']:
                            if media['type'].startswith('video'):
                                sources.append(media['url'].replace('%3D', '='))
    else:
        match = re.search('preload\'?:\s*(.*}})},', html, re.DOTALL)
        if match:
            try:
                js = json.loads(match.group(1))
            except ValueError:
                log_utils.log('Invalid JSON returned for: %s' % (link), log_utils.LOGWARNING)
            else:
                for media in js['feed']['media']['content']:
                    if media['type'].startswith('video'):
                        sources.append(media['url'].replace('%3D', '='))
    return sources
Developer: Oggie101, Project: tknorris-beta-repo, Lines: 31, Source: scraper.py
Example 13: _get_episode_url
def _get_episode_url(self, show_url, video):
    params = urlparse.parse_qs(show_url)
    catalog_id = params['catalog_id'][0]
    sid = hashlib.md5('content%scthd' % (catalog_id)).hexdigest()
    source_url = CONTENT_URL % (catalog_id, sid)
    url = urlparse.urljoin(self.base_url, source_url)
    html = self._http_get(url, cache_limit=.5)
    try:
        js_data = json.loads(html)
    except ValueError:
        log_utils.log('Invalid JSON returned for: %s' % (url), xbmc.LOGWARNING)
    else:
        force_title = self._force_title(video)
        if not force_title:
            for episode in js_data['listvideos']:
                if ' S%02dE%02d ' % (int(video.season), int(video.episode)) in episode['film_name']:
                    return EPISODE_URL % (video.video_type, params['catalog_id'][0], video.season, video.episode)

        if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title:
            norm_title = self._normalize_title(video.ep_title)
            for episode in js_data['listvideos']:
                match = re.search('-\s*S(\d+)E(\d+)\s*-\s*(.*)', episode['film_name'])
                if match:
                    season, episode, title = match.groups()
                    if title and norm_title == self._normalize_title(title):
                        return EPISODE_URL % (video.video_type, params['catalog_id'][0], int(season), int(episode))
Developer: rickardrocks, Project: tknorris-beta-repo, Lines: 26, Source: gvcenter_scraper.py
Example 14: search
def search(self, video_type, title, year):
    search_url = urlparse.urljoin(self.base_url, '/search/?criteria=title&search_query=')
    search_url += urllib.quote_plus(title)
    html = self._http_get(search_url, cache_limit=.25)
    results = []
    # filter the html down to only tvshow or movie results
    if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE]:
        pattern = '<h1>Tv Shows</h1>.*'
    else:
        pattern = '<div class="filmDiv".*(<h1>Tv Shows</h1>)*'
    match = re.search(pattern, html, re.DOTALL)
    try:
        if match:
            fragment = match.group(0)
            pattern = 'href="([^"]+)" class="filmname">(.*?)\s*</a>.*?/all/byViews/(\d+)/'
            for match in re.finditer(pattern, fragment, re.DOTALL):
                result = {}
                url, res_title, res_year = match.groups('')
                if not year or year == res_year:
                    result['title'] = res_title
                    result['url'] = url.replace(self.base_url, '')
                    result['year'] = res_year
                    results.append(result)
    except Exception as e:
        log_utils.log('Failure during %s search: |%s|%s|%s| (%s)' % (self.get_name(), video_type, title, year, str(e)), xbmc.LOGWARNING)
    return results
Developer: gerritvt, Project: tknorris-beta-repo, Lines: 28, Source: 2movies_scraper.py
Example 15: get_url
def get_url(self, video):
    url = None
    result = self.db_connection.get_related_url(video.video_type, video.title, video.year, self.get_name(), video.season, video.episode)
    if result:
        url = result[0][0]
        log_utils.log('Got local related url: |%s|%s|%s|%s|%s|' % (video.video_type, video.title, video.year, self.get_name(), url))
    else:
        date_match = False
        search_title = '%s S%02dE%02d' % (video.title, int(video.season), int(video.episode))
        results = self.search(video.video_type, search_title, '')
        if not results and video.ep_airdate is not None:
            search_title = '%s %s' % (video.title, video.ep_airdate.strftime('%Y.%m.%d'))
            results = self.search(video.video_type, search_title, '')
            date_match = True

        best_q_index = -1
        for result in results:
            if date_match and video.ep_airdate.strftime('%Y.%m.%d') not in result['title']:
                continue

            if Q_DICT[result['quality']] > best_q_index:
                best_q_index = Q_DICT[result['quality']]
                url = result['url']
        self.db_connection.set_related_url(video.video_type, video.title, video.year, self.get_name(), url)
    return url
Developer: djbijo, Project: salts, Lines: 25, Source: directdl_scraper.py
Example 16: search
def search(self, video_type, title, year, season=''):
    search_url = urlparse.urljoin(self.base_url, '/search.php?q=%s&limit=20&timestamp=%s' % (urllib.quote_plus(title), int(time.time())))
    html = self._http_get(search_url, cache_limit=.25)
    results = []
    items = dom_parser.parse_dom(html, 'li')
    if len(items) >= 2:
        items = items[1:]
    for item in items:
        match_url = dom_parser.parse_dom(item, 'a', ret='href')
        match_title_year = dom_parser.parse_dom(item, 'strong')
        if match_url and match_title_year:
            match_url = match_url[0]
            match_title_year = re.sub('</?strong>', '', match_title_year[0])
            is_season = re.search('S(?:eason\s+)?(\d+)$', match_title_year, re.I)
            if not is_season and video_type == VIDEO_TYPES.MOVIE or is_season and video_type == VIDEO_TYPES.SEASON:
                if video_type == VIDEO_TYPES.MOVIE:
                    match = re.search('(.*?)(?:\s+\(?(\d{4})\)?)', match_title_year)
                    if match:
                        match_title, match_year = match.groups()
                    else:
                        match_title = match_title_year
                        match_year = ''
                else:
                    log_utils.log(is_season.group(1))
                    if season and int(is_season.group(1)) != int(season):
                        continue
                    match_title = match_title_year
                    match_year = ''

                result = {'title': match_title, 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}
                results.append(result)
    return results
Developer: henry73, Project: salts, Lines: 32, Source: view47_scraper.py
Example 17: search
def search(self, video_type, title, year):
    filter_str = '{"field": "title", "operator": "contains", "value": "%s"}' % (title)
    if year: filter_str = '{"and": [%s, {"field": "year", "operator": "is", "value": "%s"}]}' % (filter_str, year)
    if video_type == VIDEO_TYPES.MOVIE:
        cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year", "file", "streamdetails"], \
        "sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libMovies"}'
        result_key = 'movies'
        id_key = 'movieid'
    else:
        cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year"], \
        "sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libTvShows"}'
        result_key = 'tvshows'
        id_key = 'tvshowid'

    results = []
    cmd = cmd % (filter_str)
    meta = xbmc.executeJSONRPC(cmd)
    meta = json.loads(meta)
    log_utils.log('Search Meta: %s' % (meta), log_utils.LOGDEBUG)
    if 'result' in meta and result_key in meta['result']:
        for item in meta['result'][result_key]:
            if video_type == VIDEO_TYPES.MOVIE and item['file'].endswith('.strm'):
                continue

            result = {'title': item['title'], 'year': item['year'], 'url': 'video_type=%s&id=%s' % (video_type, item[id_key])}
            results.append(result)
    return results
Developer: matt2005, Project: salts, Lines: 27, Source: local_scraper.py
Example 18: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=.5)
        fragment = dom_parser.parse_dom(html, 'ul', {'class': 'css_server_new'})
        if fragment:
            for match in re.finditer('href="([^"]+)[^>]*>(.*?)(?:-\d+)?</a>', fragment[0]):
                url, host = match.groups()
                host = host.lower()
                host = re.sub('<img.*?/>', '', host)
                host = HOSTS.get(host, host)
                log_utils.log('%s - %s' % (url, host))
                if host in GVIDEO_NAMES:
                    sources = self.__get_links(urlparse.urljoin(self.base_url, url))
                    direct = True
                else:
                    sources = {url: host}
                    direct = False

                for source in sources:
                    if self._get_direct_hostname(source) == 'gvideo':
                        quality = scraper_utils.gv_get_quality(source)
                        source = source + '|User-Agent=%s' % (scraper_utils.get_ua())
                    else:
                        quality = scraper_utils.get_quality(video, source, QUALITIES.HIGH)
                    hoster = {'multi-part': False, 'host': sources[source], 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': source, 'direct': direct}
                    hosters.append(hoster)
    return hosters
Developer: henry73, Project: salts, Lines: 31, Source: view47_scraper.py
Example 19: _default_get_url
def _default_get_url(self, video):
    temp_video_type = video.video_type
    if video.video_type == VIDEO_TYPES.EPISODE: temp_video_type = VIDEO_TYPES.TVSHOW
    url = None

    self.create_db_connection()
    result = self.db_connection.get_related_url(temp_video_type, video.title, video.year, self.get_name())
    if result:
        url = result[0][0]
        log_utils.log('Got local related url: |%s|%s|%s|%s|%s|' % (temp_video_type, video.title, video.year, self.get_name(), url))
    else:
        results = self.search(temp_video_type, video.title, video.year)
        if results:
            url = results[0]['url']
            self.db_connection.set_related_url(temp_video_type, video.title, video.year, self.get_name(), url)

    if url and video.video_type == VIDEO_TYPES.EPISODE:
        result = self.db_connection.get_related_url(VIDEO_TYPES.EPISODE, video.title, video.year, self.get_name(), video.season, video.episode)
        if result:
            url = result[0][0]
            log_utils.log('Got local related url: |%s|%s|%s|' % (video, self.get_name(), url))
        else:
            show_url = url
            url = self._get_episode_url(show_url, video)
            if url:
                self.db_connection.set_related_url(VIDEO_TYPES.EPISODE, video.title, video.year, self.get_name(), url, video.season, video.episode)

    return url
Developer: ayadmustafa, Project: tknorris-beta-repo, Lines: 28, Source: scraper.py
Example 20: update_settings
def update_settings():
    path = xbmcaddon.Addon().getAddonInfo('path')
    full_path = os.path.join(path, 'resources', 'settings.xml')

    try:
        with open(full_path, 'r') as f:
            xml = f.read()
    except:
        raise

    new_settings = []
    cat_count = 1
    old_xml = xml
    classes = scraper.Scraper.__class__.__subclasses__(scraper.Scraper)
    for cls in sorted(classes, key=lambda x: x.get_name().upper()):
        new_settings += cls.get_settings()
        if len(new_settings) > 90:
            xml = update_xml(xml, new_settings, cat_count)
            new_settings = []
            cat_count += 1

    if new_settings:
        xml = update_xml(xml, new_settings, cat_count)

    if xml != old_xml:
        try:
            with open(full_path, 'w') as f:
                f.write(xml)
        except:
            raise
    else:
        log_utils.log('No Settings Update Needed', xbmc.LOGDEBUG)
Developer: watsa1, Project: tknorris-beta-repo, Lines: 32, Source: __init__.py
Note: The salts_lib.log_utils.log examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's License before redistributing or reusing the code. Do not reproduce without permission.