
Python common.random_agent Function Code Examples


This article collects typical usage examples of the Python function resources.lib.modules.common.random_agent. If you are wondering what random_agent does, how to call it, or what it looks like in real code, the hand-picked examples below should help.



The following section presents 20 code examples of the random_agent function, sorted by popularity by default.
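
The random_agent implementation itself is not shown on this page; in every example it simply supplies a randomly chosen browser User-Agent string that is passed to requests. As a rough, hypothetical sketch (the agent list and module layout below are assumptions, not the actual resources.lib.modules.common code):

    import random

    # Hypothetical sketch of a random_agent helper; the real implementation
    # in resources.lib.modules.common may keep a different list of agents.
    def random_agent():
        user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0 Safari/537.36',
            'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',
        ]
        return random.choice(user_agents)

    # Typical usage, as seen throughout the examples below:
    headers = {'User-Agent': random_agent()}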

Example 1: episode

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            headers = {'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            data['season'], data['episode'] = season, episode
            year = data['year']
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            seasoncheck = "s%02d" % int(season)
            html = requests.get(query, headers=headers, timeout=15).content
            containers = re.compile('<span class="year">(.+?)</span><a class="play" href="(.+?)" title="(.+?)">').findall(html)
            for r_year, r_href, r_title in containers:
                if cleanmovie in cleantitle.get(r_title):
                    if seasoncheck in cleantitle.get(r_title):
                        if year == r_year:
                            url = r_href.encode('utf-8') + "?p=" + episode + "&s=11"
                            print("SOCKSHARE PASSED EPISODE", url)
                            return url
        except:
            pass
Developer: azumimuo | Project: family-xbmc-addon | Lines: 25 | Source: sockshare_mv_tv.py


Example 2: movie

    def movie(self, imdb, title, year):
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie + year
            # search by title and year
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            html = BeautifulSoup(requests.get(query, headers=headers, timeout=10).content)
            containers = html.findAll('h2', attrs={'class': 'title'})
            for result in containers:
                r_title = result.findAll('a')[0]
                r_title = r_title.string
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                c_title = cleantitle_get_2(r_title)
                if titlecheck in c_title:
                    self.elysium_url.append([r_href, r_title])
            return self.elysium_url
        except:
            return
Developer: azumimuo | Project: family-xbmc-addon | Lines: 30 | Source: tinydl.py


Example 3: sources

    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            headers = {'User-Agent': random_agent()}
            if url == None: return sources
            # print("ANIMETOON SOURCES", url)
            r = BeautifulSoup(requests.get(url, headers=headers).content)
            r = r.findAll('iframe')
            # print ("ANIMETOON s1",  r)
            for u in r:
                try:
                    u = u['src'].encode('utf-8')
                    # print ("ANIMETOON s2",  u)
                    
                    html = requests.get(u, headers=headers).text
                    r_src = re.compile("url:\s*'(.+?)'").findall(html)
                    for src in r_src:
                        # print ("ANIMETOON s3",  src)
                        vid_url = src.encode('utf-8')
                        sources.append({'source': 'cdn', 'quality': 'SD', 'provider': 'Animetoon', 'url': vid_url, 'direct': True, 'debridonly': False})
                       
                except:
                    pass

            return sources
        except:
            return sources
Developer: azumimuo | Project: family-xbmc-addon | Lines: 27 | Source: animetoon.py


Example 4: movie

    def movie(self, imdb, title, year):
        self.zen_url = []
        try:
            headers = {'User-Agent': random_agent()}
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            cleaned_title = cleantitle.get(title)
            html = BeautifulSoup(requests.get(query, headers=headers, timeout=30).content)
            containers = html.findAll('div', attrs={'class': 'name'})
            for container in containers:
                # print ("MOVIEXK r1", container)
                r_href = container.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                # print ("MOVIEXK r2", r_href)
                r_title = re.findall('</span>(.*?)</a>', str(container))[0]
                # print ("MOVIEXK r3", r_title)
                r_title = r_title.encode('utf-8')
                # print ("MOVIEXK RESULTS", r_title, r_href)
                if year in r_title:
                    if cleaned_title == cleantitle.get(r_title):
                        redirect = requests.get(r_href, headers=headers, timeout=30).text
                        try:
                            r_url_trailer = re.search('<dd>[Tt]railer</dd>', redirect)
                            if r_url_trailer: continue
                        except:
                            pass
                        r_url = re.findall('<a href="(.*?)" class="btn-watch"', redirect)[0]
                        r_url = r_url.encode('utf-8')
                        print("MOVIEXK PLAY URL", r_url)
                        self.zen_url.append(r_url)
            return self.zen_url
        except:
            return
Developer: noobsandnerds | Project: noobsandnerds | Lines: 35 | Source: moviexk_mv_tv.py


Example 5: sources

    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}
            for movielink, title in self.genesisreborn_url:
                quality = quality_tag(title)
                html = BeautifulSoup(requests.get(movielink, headers=headers, timeout=10).content)
                containers = html.findAll('div', attrs={'class': 'txt-block'})
                for result in containers:
                    print("THREEMOVIES LINKS ", result)
                    links = result.findAll('a')
                    for r_href in links:
                        url = r_href['href']
                        myurl = str(url)
                        if any(value in myurl for value in hostprDict):
                            try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                            except: host = 'Threemovies'
                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'provider': 'Threemovies', 'url': url, 'direct': False, 'debridonly': True})
            return sources
        except:
            return sources
Developer: azumimuo | Project: family-xbmc-addon | Lines: 33 | Source: threemovies.py


Example 6: episode

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            self.elysium_url = []
            if not debridstatus == 'true': raise Exception()
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            year = data['year']
            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            ep_search = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            episodecheck = str(ep_search).lower()
            titlecheck = cleanmovie + episodecheck
            query = self.search_link % (urllib.quote_plus(title), ep_search)
            query = urlparse.urljoin(self.base_link, query)
            print("HEVC query", query)
            html = BeautifulSoup(rq.get(query, headers=headers, timeout=10).content)
            containers = html.findAll('div', attrs={'class': 'postcontent'})
            for result in containers:
                print("HEVC containers", result)
                r_title = result.findAll('a')[0]["title"]
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                check = cleantitle.get(r_title)
                if titlecheck in check:
                    self.elysium_url.append([r_href, r_title])
                    print("HEVC PASSED MOVIE ", r_title, r_href)
            return self.elysium_url
        except:
            return
Developer: azumimuo | Project: family-xbmc-addon | Lines: 34 | Source: hevcfilm.py


Example 7: episode

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []
        try:
            headers = {'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            data['season'], data['episode'] = season, episode
            title = cleantitle.getsearch(title)
            title = title.replace(' ', '-')
            query = title + "-" + season + "x" + episode
            query = self.ep_link % query
            query = urlparse.urljoin(self.base_link, query)
            r = BeautifulSoup(requests.get(query, headers=headers, timeout=10).content)
            r = r.findAll('iframe')
            for u in r:
                u = u['src'].encode('utf-8')
                if u.startswith("//"): u = "http:" + u
                self.zen_url.append(u)
            return self.zen_url
        except:
            return
Developer: azumimuo | Project: family-xbmc-addon | Lines: 25 | Source: bcinema_mv_tv.py


Example 8: sources

    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            headers = {'User-Agent': random_agent()}
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            r = BeautifulSoup(requests.get(url, headers=headers).content)
            r = r.findAll('iframe')
            # print ("GOGOANIME s1",  r)
            for u in r:
                try:
                    u = u['src'].encode('utf-8')
                    # print ("GOGOANIME s2",  u)
                    if not  'vidstreaming' in u: raise Exception()
                    html = BeautifulSoup(requests.get(u, headers=headers).content)
                    r_src = html.findAll('source')
                    for src in r_src:
                        vid_url = src['src'].encode('utf-8')
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(vid_url)[0]['quality'], 'provider': 'Gogoanime', 'url': vid_url, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            return sources
        except:
            return sources
Developer: azumimuo | Project: family-xbmc-addon | Lines: 26 | Source: gogoanime.py


Example 9: sources

    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources
            referer = url
            headers = {'User-Agent': random_agent(), 'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}
            url_plugin = urlparse.urljoin(self.base_link, '/ip.file/swf/plugins/ipplugins.php')
            html = BeautifulSoup(requests.get(referer, headers=headers, timeout=15).content)
            # print ("SOCKSHARE NEW SOURCES", html)
            r = html.findAll('div', attrs={'class': 'new_player'})
            for container in r:
                block = container.findAll('a')
                for items in block:
                    p1 = items['data-film'].encode('utf-8')
                    p2 = items['data-name'].encode('utf-8')
                    p3 = items['data-server'].encode('utf-8')
                    post = {'ipplugins': '1', 'ip_film': p1, 'ip_name': p2, 'ip_server': p3}
                    req = requests.post(url_plugin, data=post, headers=headers).json()
                    token = req['s'].encode('utf-8')
                    server = req['v'].encode('utf-8')
                    url = urlparse.urljoin(self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')
                    post = {'u': token, 'w': '100%', 'h': '360', 's': server, 'n': '0'}
                    req_player = requests.post(url, data=post, headers=headers).json()
                    # print ("SOCKSHARE SOURCES", req_player)
                    result = req_player['data']
                    result = [i['files'] for i in result]
                    for i in result:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Sockshare', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
            return sources
        except:
            return sources
Developer: azumimuo | Project: family-xbmc-addon | Lines: 34 | Source: sockshare_mv_tv.py


Example 10: movie

    def movie(self, imdb, title, year):
        try:
            self.elysium_url = []
            if not debridstatus == 'true': raise Exception()
            headers = {'User-Agent': random_agent()}
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie + year
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            print("HEVC query", query)
            html = BeautifulSoup(rq.get(query, headers=headers, timeout=10).content)
            containers = html.findAll('div', attrs={'class': 'postcontent'})
            for result in containers:
                print("HEVC containers", result)
                r_title = result.findAll('a')[0]["title"]
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                c_title = cleantitle.get(r_title)
                if year in r_title and cleanmovie in c_title:
                    self.elysium_url.append([r_href, r_title])
                    print("HEVC PASSED MOVIE ", r_title, r_href)
            return self.elysium_url
        except:
            return
Developer: azumimuo | Project: family-xbmc-addon | Lines: 29 | Source: hevcfilm.py


Example 11: sources

    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            for movielink, title in self.elysium_url:
                headers = {'User-Agent': random_agent()}
                html = BeautifulSoup(requests.get(movielink, headers=headers, timeout=15).content)
                result = html.findAll('div', attrs={'class': 'comm_content'})[:3]
                for r in result:
                    r_href = r.findAll('a')
                    for items in r_href:
                        url = items['href'].encode('utf-8')
                        if "1080" in url: quality = "1080p"
                        elif "720" in url: quality = "HD"
                        else: quality = "SD"
                        info = ''
                        if "hevc" in url.lower(): info = "HEVC"
                        if not any(value in url for value in ['sample', 'uploadkadeh', 'wordpress', 'crazy4tv', 'imdb.com', 'youtube', 'userboard', 'kumpulbagi', 'mexashare', 'myvideolink.xyz', 'myvideolinks.xyz', 'costaction', 'crazydl', '.rar', '.RAR', 'ul.to', 'safelinking', 'linx.2ddl.ag', 'upload.so', '.zip', 'go4up', 'adf.ly', '.jpg', '.jpeg']):
                            if any(value in url for value in hostprDict):
                                try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                except: host = 'Videomega'
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                sources.append({'source': host, 'quality': quality, 'provider': 'Scnsrc', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            return sources
        except:
            return sources
Developer: azumimuo | Project: family-xbmc-addon | Lines: 27 | Source: scnsrc.py


Example 12: sources

	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			headers = {'User-Agent': random_agent()}
			for url in self.genesisreborn_url:
				if url == None: return
				
				html = requests.get(url, headers=headers, timeout=10).text
				
				match = re.compile('<a href="[^"]+go.php\?url=([^"]+)" target="_blank">').findall(html)
				for url in match:
					try:
						# print("SOLAR SOURCE", url)
						host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
						host = host.encode('utf-8')			
						if not host in hostDict: raise Exception()
						quality = "SD"
							# print("OpenMovies SOURCE", stream_url, label)
						sources.append({'source': host, 'quality':quality, 'provider': 'Solar', 'url': url, 'direct': False, 'debridonly': False})
					except:
						pass


			return sources
		except:
			return sources
Developer: azumimuo | Project: family-xbmc-addon | Lines: 26 | Source: solar.py


Example 13: movie

    def movie(self, imdb, title, year):
        self.elysium_url = []
        try:
            if not alluc_status == 'true': raise Exception()
            print("ALLUC STARTED", alluc_user, alluc_pw, max_items)
            headers = {'User-Agent': random_agent()}
            search_title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title) + year
            query = "%s+%s" % (urllib.quote_plus(search_title), year)
            print("ALLUC r1", query)
            query = self.api_link % (alluc_user, alluc_pw, query)
            if alluc_debrid == 'true': query = query + max_result_string
            else: query = query + '+%23newlinks' + max_result_string
            print("ALLUC r2", query)
            html = requests.get(query, headers=headers, timeout=15).json()
            for result in html['result']:
                if len(result['hosterurls']) > 1: continue
                if result['extension'] == 'rar': continue
                stream_url = result['hosterurls'][0]['url'].encode('utf-8')
                stream_title = result['title'].encode('utf-8')
                stream_title = cleantitle.getsearch(stream_title)
                if cleanmovie in cleantitle.get(stream_title):
                    self.elysium_url.append([stream_url, stream_title])
                    print("ALLUC r3", self.elysium_url)
            return self.elysium_url
        except:
            return
Developer: azumimuo | Project: family-xbmc-addon | Lines: 27 | Source: alluc.py


Example 14: movie

    def movie(self, imdb, title, year):
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie + year
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            query = query + "&x=0&y=0"
            headers = {'User-Agent': random_agent()}
            html = BeautifulSoup(requests.get(query, headers=headers, timeout=30).content)
            result = html.findAll('div', attrs={'class': 'post'})
            for r in result:
                r_href = r.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r.findAll('a')[0]["title"]
                r_title = r_title.encode('utf-8')
                c_title = cleantitle_get_2(r_title)
                if year in r_title:
                    if titlecheck in c_title:
                        self.elysium_url.append([r_href, r_title])
                        # print "SCNSRC MOVIES %s %s" % (r_title, r_href)
            return self.elysium_url
        except:
            return
Developer: azumimuo | Project: family-xbmc-addon | Lines: 33 | Source: scnsrc.py


Example 15: episode

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not debridstatus == 'true': raise Exception()
            self.zen_url = []
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            ep_search = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            episodecheck = str(ep_search).lower()
            query = self.search_link % (urllib.quote_plus(title), ep_search)
            query = urlparse.urljoin(self.base_link, query)
            titlecheck = cleanmovie + episodecheck
            html = BeautifulSoup(OPEN_URL(query).content)
            containers = html.findAll('h1', attrs={'class': 'entry-title'})
            for result in containers:
                r_title = result.findAll('a')[0]
                r_title = r_title.string
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                r_title = cleantitle.get(r_title)
                if titlecheck in r_title:
                    self.zen_url.append([r_href, r_title])
            return self.zen_url
        except:
            return
Developer: vphuc81 | Project: MyRepository | Lines: 34 | Source: bmoviez.py


Example 16: episode

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            query = (urllib.quote_plus(title)) + "+season+" + season
            q = self.search_link % (query)
            r = urlparse.urljoin(self.base_link, q)

            print ("ONEMOVIES EPISODES", r)
            checkseason = cleantitle.get(title) + "season" + season
            headers = {'User-Agent': random_agent()}
            html = BeautifulSoup(requests.get(r, headers=headers, timeout=20).content)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:
               
                links = result.findAll('a')

                for link in links:
                    link_title = str(link['title'])
                    href = str(link['href'])
                    href = client.replaceHTMLCodes(href)
                    if cleantitle.get(link_title) == checkseason:
                        ep_id = '?episode=%01d' % int(episode)
                        url = href + ep_id
                        # print("ONEMOVIES Passed", href)
                        return url

        except:
            return
Developer: azumimuo | Project: family-xbmc-addon | Lines: 33 | Source: onemovies.py


Example 17: episode

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []
        try:
            # print ("MOVIEXK")
            headers = {'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            year = data['year']
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            cleaned_title = cleantitle.get(title)
            ep_id = int(episode)
            season_id = int(season)
            season_check = "%02d" % (int(data['season']))
            ep_check = season_id + ep_id
            # print("MOVIEXK EPISODE CHECK", ep_check)
            html = BeautifulSoup(OPEN_URL(query, mobile=True).content)
            containers = html.findAll('div', attrs={'class': 'name'})
            for container in containers:
                # print ("MOVIEXK r1", container)
                r_href = container.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                # print ("MOVIEXK r2", r_href)
                r_title = re.findall('</span>(.*?)</a>', str(container))[0]
                # print ("MOVIEXK r3", r_title)
                r_title = r_title.encode('utf-8')
                r_title = re.sub('^(watch movies)|(watch movie)|(watch)', '', r_title.lower())
                # print ("MOVIEXK RESULTS", r_title, r_href)
                if cleaned_title in cleantitle.get(r_title):
                    redirect = OPEN_URL(r_href, mobile=True).text
                    try:
                        r_url_trailer = re.search('<dd>[Tt]railer</dd>', redirect)
                        if r_url_trailer: continue
                    except:
                        pass
                    try:
                        p = client.parseDOM(redirect, 'div', attrs={'class': 'btn-groups.+?'})
                        r_url = client.parseDOM(p, 'a', ret='href')[0]
                        print("MOVIEXK PLAY BUTTON 1", r_url)
                        url = '%s?season=%01d&episode=%01d' % (r_url.encode('utf-8'), int(season), int(episode))
                        return url
                    except:
                        p = client.parseDOM(redirect, 'div', attrs={'id': 'servers'})
                        r = client.parseDOM(p, 'li')
                        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                        r = [i[0] for i in r]
                        r = r[0]
                        r_url = r.encode('utf-8')
                        print("MOVIEXK PLAY BUTTON 2", r)
                        url = '%s?season=%01d&episode=%01d' % (r_url, int(season), int(episode))
                        return url
        except:
            return
Developer: vphuc81 | Project: MyRepository | Lines: 58 | Source: moviexk.py


Example 18: movie

	def movie(self, imdb, title, year):
		self.zen_url = []
		try:
			headers = {'User-Agent': random_agent()}
			
			title = cleantitle_geturl(title)
			title = title + "-" + year
			query = self.movie_link % title
			u = urlparse.urljoin(self.base_link, query)
			self.zen_url.append(u)
			return self.zen_url
		except:
			return
Developer: vphuc81 | Project: MyRepository | Lines: 13 | Source: solar.py


Example 19: movie

	def movie(self, imdb, title, year):
		self.genesisreborn_url = []
		try:
			headers = {'User-Agent': random_agent()}
			
			title = cleantitle.getsearch(title)
			title = title.replace(' ','-')
			title = title + "-" + year
			query = self.movie_link % title
			u = urlparse.urljoin(self.base_link, query)
			self.genesisreborn_url.append(u)
			return self.genesisreborn_url
		except:
			return
Developer: azumimuo | Project: family-xbmc-addon | Lines: 14 | Source: solar.py


Example 20: tvshow

    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            headers = {'User-Agent': random_agent()}
            # print("WATCHCARTOON")
            title = cleantitle.get(tvshowtitle)
            for url in self.cartoon_link:
                r = requests.get(url, headers=headers).text
                match = re.compile('<a href="(.+?)" title=".+?">(.+?)</a>').findall(r)
                for url, name in match:
                    if title == cleantitle.get(name):
                        print("WATCHCARTOON PASSED", url)
                        return url
        except:
            return
Developer: azumimuo | Project: family-xbmc-addon | Lines: 14 | Source: watchcartoon.py



Note: The resources.lib.modules.common.random_agent examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar platforms. The code snippets remain the copyright of their original authors; consult each project's license before using or redistributing them, and do not republish without permission.

