本文整理汇总了Python中resources.lib.modules.cleantitle.getsearch函数的典型用法代码示例。如果您正苦于以下问题:Python getsearch函数的具体用法?Python getsearch怎么用?Python getsearch使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了getsearch函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: movie
def movie(self, imdb, title, year):
    """Search the alluc API for movie stream links.

    Returns a list of [stream_url, stream_title] pairs collected in
    self.elysium_url, or None when disabled or on any error.
    """
    self.elysium_url = []
    try:
        # Scraper disabled in settings: abort via the except path.
        if not alluc_status == 'true':
            raise Exception()
        print ("ALLUC STARTED", alluc_user, alluc_pw, max_items)
        headers = {'User-Agent': random_agent()}
        search_title = cleantitle.getsearch(title)
        # Normalized "<title><year>" token used to validate candidates.
        cleanmovie = cleantitle.get(title) + year
        query = "%s+%s" % (urllib.quote_plus(search_title), year)
        print ("ALLUC r1", query)
        query = self.api_link % (alluc_user, alluc_pw, query)
        if alluc_debrid == 'true':
            query = query + max_result_string
        else:
            # Non-debrid accounts are restricted to the #newlinks feed.
            query = query + '+%23newlinks' + max_result_string
        print ("ALLUC r2", query)
        html = requests.get(query, headers=headers, timeout=15).json()
        for result in html['result']:
            # Skip multi-host entries and rar archives.
            if len(result['hosterurls']) > 1:
                continue
            if result['extension'] == 'rar':
                continue
            stream_url = result['hosterurls'][0]['url'].encode('utf-8')
            stream_title = result['title'].encode('utf-8')
            stream_title = cleantitle.getsearch(stream_title)
            if cleanmovie in cleantitle.get(stream_title):
                self.elysium_url.append([stream_url, stream_title])
        print ("ALLUC r3", self.elysium_url)
        return self.elysium_url
    except Exception:
        # Scraper failures are deliberately non-fatal: yield no sources.
        return
开发者ID:azumimuo,项目名称:family-xbmc-addon,代码行数:27,代码来源:alluc.py
示例2: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Scrape dailyrls for links matching an episode (SxxExx).

    *url* is a query string produced by the tvshow scraper; returns a
    list of [href, title] pairs or None on failure/disabled.
    """
    self.genesisreborn_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        data['season'], data['episode'] = season, episode
        # e.g. "s01e05": lowercase tag matched against cleaned titles.
        episodecheck = ('S%02dE%02d' % (int(data['season']), int(data['episode']))).lower()
        titlecheck = cleanmovie + episodecheck
        query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = self.search_link % (urllib.quote_plus(title), query)
        mylink = urlparse.urljoin(self.base_link, query)
        link = client.request(mylink)
        r = client.parseDOM(link, 'div', attrs={'id': 'post-.+?'})
        for item in r:
            href = client.parseDOM(item, 'a', ret='href')[0]
            item_title = client.parseDOM(item, 'a', ret='title')[0]
            href = href.encode('utf-8')
            item_title = item_title.encode('utf-8')
            item_title = cleantitle.get(item_title)
            if titlecheck in item_title:
                # "=episode" suffix marks the link type for downstream code.
                item_title = item_title + "=episode"
                self.genesisreborn_url.append([href, item_title])
        return self.genesisreborn_url
    except Exception:
        return
开发者ID:azumimuo,项目名称:family-xbmc-addon,代码行数:32,代码来源:dailyrls.py
示例3: movie
def movie(self, imdb, title, year):
    """Resolve a movie page URL on the site by title and year.

    Returns an urlencoded {'url': ..., 'type': 'movie'} string, or None
    when no result matches.
    """
    try:
        self.elysium = []
        cleaned_title = cleantitle.get(title)
        title = cleantitle.getsearch(title)
        q = self.search_link % (urllib.quote_plus(title))
        r = urlparse.urljoin(self.base_link, q)
        html = BeautifulSoup(OPEN_URL(r).content)
        print ("ONEMOVIES EPISODES", html)
        containers = html.findAll('div', attrs={'class': 'ml-item'})
        for link in containers:
            link_title = link('a')[0]['title'].encode('utf-8')
            href = link('a')[0]['href'].encode('utf-8')
            info = link('a')[0]['data-url'].encode('utf-8')
            if cleantitle.get(link_title) == cleaned_title:
                # Fetch the detail page to confirm the release year.
                # NOTE: this rebinds `html` (the search-results soup) to a
                # raw string; harmless because the soup is no longer used.
                info = urlparse.urljoin(self.base_link, info)
                html = OPEN_URL(info).content
                pattern = '<div class="jt-info">%s</div>' % year
                match = re.findall(pattern, html)
                if match:
                    url = client.replaceHTMLCodes(href)
                    url = {'url': url, 'type': 'movie'}
                    url = urllib.urlencode(url)
                    print("SOLARMOVIE PASSED", url)
                    return url
    except Exception:
        return
开发者ID:azumimuo,项目名称:family-xbmc-addon,代码行数:29,代码来源:solarmovie.py
示例4: movie
def movie(self, imdb, title, year):
    """POST a title search and return the matching movie path.

    Matches on cleaned title plus exact year; returns a utf-8 encoded
    site-relative URL joined onto base_link, or None.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)
        if ':' in title:
            # Site search works better on the pre-colon part of the title.
            title2 = title.split(':')[0] + ':'
            post = 'search=%s&what=title' % title2
        else:
            post = 'search=%s&what=title' % cleantitle.getsearch(title)
        # NOTE(review): the POST body is not URL-encoded; spaces and
        # special characters are sent raw — confirm the site tolerates it.
        t = cleantitle.get(title)
        r = client.request(query, post=post)
        r = client.parseDOM(r, 'li')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a',)) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Extract "(YYYY" year from each anchor text.
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        url = urlparse.urljoin(self.base_link, re.findall('(?://.+?|)(/.+)', r)[0])
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        return
开发者ID:CYBERxNUKE,项目名称:xbmc-addon,代码行数:28,代码来源:afdah.py
示例5: __search
def __search(self, titles, year):
    """Search the site for *titles[0]* released in *year*.

    Returns a single stripped-domain path when an exact title+year hit
    contains 'season' in its link, otherwise a list of candidate paths;
    None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))
        query = urlparse.urljoin(self.base_link, query)
        t = cleantitle.get(titles[0])
        scraper = cfscrape.create_scraper()
        data = scraper.get(query).content
        data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
        r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
        r = zip(dom_parser.parse_dom(r, 'a'), dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))
        url = []
        for i in range(len(r)):
            title = cleantitle.get(r[i][0][1])
            # Strip quality/season noise before comparing titles.
            title = re.sub('(\d+p|4k|3d|hd|season\d+)', '', title)
            y = r[i][1][1]
            link = r[i][0][0]['href']
            if 'season' in title:
                continue
            if t == title and y == year:
                if 'season' in link:
                    url.append(source_utils.strip_domain(link))
                    print(url[0])
                    return url[0]
                else:
                    url.append(source_utils.strip_domain(link))
        return url
    except Exception:
        return
开发者ID:CYBERxNUKE,项目名称:xbmc-addon,代码行数:31,代码来源:mzmovies.py
示例6: __search
def __search(self, titles, year):
    """Search a Spanish-language site for any of *titles* in *year*.

    Compares both the listing title and the original title shown on the
    detail page; returns a stripped-domain path or None.
    """
    try:
        query = self.search_link % (cleantitle.getsearch(titles[0].replace(' ', '%20')))
        query = urlparse.urljoin(self.base_link, query)
        # First non-empty cleaned title is the comparison key.
        t = [cleantitle.get(i) for i in set(titles) if i][0]
        r = client.request(query)
        r = client.parseDOM(r, 'li', attrs={'class': 'item everyone-item over_online haveTooltip'})
        for i in r:
            title = client.parseDOM(i, 'a', ret='title')[0]
            url = client.parseDOM(i, 'a', ret='href')[0]
            data = client.request(url)
            # "Año:" block on the detail page carries the release year.
            y = re.findall('<p><span>Año:</span>(\d{4})', data)[0]
            original_t = re.findall('movie-text">.+?h2.+?">\((.+?)\)</h2>', data, re.DOTALL)[0]
            original_t, title = cleantitle.get(original_t), cleantitle.get(title)
            if (t in title or t in original_t) and y == year:
                x = dom_parser.parse_dom(i, 'a', req='href')
                return source_utils.strip_domain(x[0][0]['href'])
        return
    except Exception:
        return
开发者ID:mpie,项目名称:repo,代码行数:27,代码来源:pelisplustv.py
示例7: __search
def __search(self, titles, year, content):
    """Search the site, scoped to *content* ('movies' or series).

    Matches figcaption titles against the cleaned *titles*; for
    non-matching captions, falls back to checking the detail page's
    "<title> (year)" heading. Returns a stripped-domain path or None.
    """
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))
        query = urlparse.urljoin(self.base_link, query)
        # First non-empty cleaned title is the comparison key.
        t = [cleantitle.get(i) for i in set(titles) if i][0]
        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'tab-content clearfix'})
        if content == 'movies':
            r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
        else:
            r = client.parseDOM(r, 'div', attrs={'id': 'series'})
        data = dom_parser.parse_dom(r, 'figcaption')
        for i in data:
            title = i[0]['title']
            title = cleantitle.get(title)
            if title in t:
                x = dom_parser.parse_dom(i, 'a', req='href')
                return source_utils.strip_domain(x[0][0]['href'])
            else:
                url = dom_parser.parse_dom(i, 'a', req='href')
                data = client.request(url[0][0]['href'])
                data = re.findall('<h1><a.+?">(.+?)\((\d{4})\).*?</a></h1>', data, re.DOTALL)[0]
                if titles[0] in data[0] and year == data[1]:
                    return source_utils.strip_domain(url[0][0]['href'])
        return
    except Exception:
        return
开发者ID:mpie,项目名称:repo,代码行数:35,代码来源:tainiesonline.py
示例8: movie
def movie(self, imdb, title, year):
    """Scrape entry-title headings for links matching title and year.

    Returns a list of hrefs in self.zen_url, or None when debrid is
    disabled or on any error.
    """
    self.zen_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        query = self.search_link % (urllib.quote_plus(title), year)
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        r = client.parseDOM(link, 'h1', attrs={'class': 'entry-title'})
        for items in r:
            href = client.parseDOM(items, 'a', ret='href')[0]
            item_title = client.parseDOM(items, 'a', ret='title')[0]
            href = href.encode('utf-8')
            item_title = item_title.encode('utf-8')
            # Require both the year and the cleaned title in the heading.
            if year in item_title:
                if cleanmovie in cleantitle.get(item_title):
                    self.zen_url.append(href)
        return self.zen_url
    except Exception:
        return
开发者ID:noobsandnerds,项目名称:noobsandnerds,代码行数:25,代码来源:crazy_mv_tv.py
示例9: movie
def movie(self, imdb, title, year):
    """Scrape an RSS-style feed for posts matching title and year.

    Returns a list of (post_title, link) pairs in
    self.genesisreborn_url, or None on failure/disabled.
    """
    self.genesisreborn_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        titlecheck = cleanmovie + year
        query = self.search_link % (urllib.quote_plus(title), year)
        query = self.base_link + query
        r = client.request(query)
        posts = client.parseDOM(r, 'item')
        items = []
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                t = t.encode('utf-8')
                # BUGFIX: original read `if not A and B: continue`, which
                # parses as `(not A) and B` — skip unless BOTH the cleaned
                # title matches AND the year appears in the post title.
                if not (cleanmovie in cleantitle.get(t) and year in t.lower()):
                    continue
                c = client.parseDOM(post, 'content.+?')[0]
                u = client.parseDOM(post, 'a', ret='href')
                if not u:
                    raise Exception()
                u = [(t, i) for i in u]
                self.genesisreborn_url += u
            except Exception:
                pass
        print ("RLSLOG PASSED", self.genesisreborn_url)
        return self.genesisreborn_url
    except Exception:
        return
开发者ID:azumimuo,项目名称:family-xbmc-addon,代码行数:33,代码来源:rlslog.py
示例10: movie
def movie(self, imdb, title, year):
    """Scrape HEVC post listings for links matching title and year.

    Returns a list of [href, title] pairs in self.elysium_url, or None
    on failure/disabled.
    """
    try:
        self.elysium_url = []
        if not debridstatus == 'true':
            raise Exception()
        headers = {'User-Agent': random_agent()}
        cleanmovie = cleantitle.get(title)
        title = cleantitle.getsearch(title)
        query = self.search_link % (urllib.quote_plus(title), year)
        query = urlparse.urljoin(self.base_link, query)
        print("HEVC query", query)
        html = BeautifulSoup(rq.get(query, headers=headers, timeout=10).content)
        containers = html.findAll('div', attrs={'class': 'postcontent'})
        for result in containers:
            print("HEVC containers", result)
            r_title = result.findAll('a')[0]["title"]
            r_href = result.findAll('a')[0]["href"]
            r_href = r_href.encode('utf-8')
            r_title = r_title.encode('utf-8')
            c_title = cleantitle.get(r_title)
            # Require both the year and the cleaned title in the post title.
            if year in r_title and cleanmovie in c_title:
                self.elysium_url.append([r_href, r_title])
                print("HEVC PASSED MOVIE ", r_title, r_href)
        return self.elysium_url
    except Exception:
        return
开发者ID:azumimuo,项目名称:family-xbmc-addon,代码行数:29,代码来源:hevcfilm.py
示例11: movie
def movie(self, imdb, title, year):
    """Query the bobbyhd web app for a movie alias id.

    Returns a list of [alias_id, 'type_movies', '0'] triples in
    self.genesisreborn_url, or None on failure.
    """
    self.genesisreborn_url = []
    try:
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        # iOS Safari headers: the mobile web app serves the alias markup.
        headers = {'Host': 'webapp.bobbyhd.com',
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                   'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69',
                   'Accept-Language': 'en-gb',
                   'Accept-Encoding': 'gzip, deflate',
                   'Connection': 'keep-alive'}
        query = self.search_link % (urllib.quote_plus(title), year)
        query = urlparse.urljoin(self.base_link, query)
        r = session.get(query, headers=headers).content
        match = re.compile('alias=(.+?)\'">(.+?)</a>').findall(r)
        # Renamed from `id`/`type` to avoid shadowing builtins.
        for alias_id, name in match:
            name = cleantitle.get(name)
            if cleanmovie == name:
                media_type = 'type_movies'
                ep = '0'
                self.genesisreborn_url.append([alias_id, media_type, ep])
        return self.genesisreborn_url
    except Exception:
        return
开发者ID:azumimuo,项目名称:family-xbmc-addon,代码行数:30,代码来源:bobby.py
示例12: movie
def movie(self, imdb, title, year):
    """Use the site's JSON autocomplete to find an exact title+year hit.

    Returns the absolute page URL (utf-8 str) of the first suggestion
    whose cleaned value equals cleaned-title+year, or None.
    """
    self.zen_url = []
    try:
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        checktitle = cleanmovie + year
        query = self.search_link % (urllib.quote_plus(title), year)
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        result = json.loads(link)
        items = result['suggestions']
        for item in items:
            href = item['data']['href']
            value = item['value']
            url = href.encode('utf-8')
            value = value.encode('utf-8')
            if checktitle == cleantitle.get(value):
                # Suggestions may carry relative hrefs; absolutize them.
                if not self.base_link in url:
                    url = urlparse.urljoin(self.base_link, url)
                print ("MVGEE PASSED", url)
                return url
    except Exception:
        return
开发者ID:noobsandnerds,项目名称:noobsandnerds,代码行数:26,代码来源:moviegee_mv.py
示例13: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Scrape scnsrc post listings for an episode (SxxExx).

    Returns a list of [url, title] pairs in self.zen_url, or None on
    failure/disabled.
    """
    self.zen_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        data['season'], data['episode'] = season, episode
        episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        episodecheck = str(episodecheck).lower()
        query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = self.search_link % (urllib.quote_plus(title), query)
        query = urlparse.urljoin(self.base_link, query)
        # Site search form appends submit-button coordinates.
        query = query + "&x=0&y=0"
        link = client.request(query)
        r = client.parseDOM(link, 'div', attrs={'class': 'post'})
        for item in r:
            match = re.compile('<a href="(.*?)" rel="bookmark" title="(.*?)">').findall(item)
            # Loop vars renamed so they no longer shadow the url/title params.
            for item_url, item_title in match:
                item_title = cleantitle.get(item_title)
                if cleanmovie in item_title:
                    if episodecheck in item_title:
                        self.zen_url.append([item_url, item_title])
        return self.zen_url
    except Exception:
        return
开发者ID:noobsandnerds,项目名称:noobsandnerds,代码行数:32,代码来源:scnsrc_mv_tv.py
示例14: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Scrape ddlvalley h2 headings for an episode (SxxExx).

    Returns a list of [link, cleaned_title] pairs in self.zen_url, or
    None on failure/disabled.
    """
    self.zen_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        data['season'], data['episode'] = season, episode
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        episodecheck = episodecheck.lower()
        titlecheck = cleanmovie + episodecheck
        query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = self.search_link % (urllib.quote_plus(title), query)
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        match = re.compile('<h2><a href="(.+?)" rel=".+?" title=".+?" data-wpel-link="internal">(.+?)</a></h2>').findall(link)
        for movielink, title2 in match:
            r_title = cleantitle.get(title2)
            if titlecheck in r_title:
                self.zen_url.append([movielink, r_title])
        return self.zen_url
    except Exception:
        return
开发者ID:vphuc81,项目名称:MyRepository,代码行数:26,代码来源:ddlvalley.py
示例15: movie
def movie(self, imdb, title, year):
    """Scrape ddlvalley h2 headings for a movie matching title+year.

    Returns a list of [link, title] pairs in self.zen_url, or None on
    failure/disabled.
    """
    self.zen_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        query = self.search_link % (urllib.quote_plus(title), year)
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        titlecheck = cleanmovie + year
        match = re.compile('<h2><a href="(.+?)" rel=".+?" title=".+?" data-wpel-link="internal">(.+?)</a></h2>').findall(link)
        for movielink, r_title in match:
            # Cheap year filter first, then the stricter cleaned-title check.
            if year in r_title:
                r_title = cleantitle_get_2(r_title)
                if titlecheck in r_title:
                    self.zen_url.append([movielink, r_title])
        return self.zen_url
    except Exception:
        return
开发者ID:vphuc81,项目名称:MyRepository,代码行数:26,代码来源:ddlvalley.py
示例16: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Scrape scnlog.eu bookmarks for an episode (SxxExx).

    Returns a list of [link, cleaned_title] pairs in self.elysium_url,
    or None on failure/disabled.
    """
    self.elysium_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        data['season'], data['episode'] = season, episode
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        episodecheck = ('S%02dE%02d' % (int(data['season']), int(data['episode']))).lower()
        titlecheck = cleanmovie + episodecheck
        query = '%s+S%02dE%02d' % (urllib.quote_plus(title), int(data['season']), int(data['episode']))
        movielink = "http://scnlog.eu/tv-shows/?s=" + str(query)
        link = client.request(movielink)
        match = re.compile('<a href="(.+?)" rel="bookmark" title="(.+?)">').findall(link)
        # Loop var renamed so it no longer shadows the title parameter.
        for movielink, raw_title in match:
            post_title = cleantitle.get(raw_title)
            if titlecheck in post_title:
                self.elysium_url.append([movielink, post_title])
        return self.elysium_url
    except Exception:
        return
开发者ID:azumimuo,项目名称:family-xbmc-addon,代码行数:26,代码来源:scnlog.py
示例17: movie
def movie(self, imdb, title, year):
    """Scrape tvrls for 720p/1080p releases matching title+year.

    Collects at most 6 links; returns a list of [link, title] pairs in
    self.elysium_url, or None on failure/disabled.
    """
    self.elysium_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        count = 0
        cleanmovie = cleantitle.get(title)
        title = cleantitle.getsearch(title)
        titlecheck = cleanmovie + year
        query = self.search_link % (urllib.quote_plus(title), year)
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        r = client.parseDOM(link, 'div', attrs={'id': 'content'})
        for containers in r:
            print ("TVRLS containers", containers)
            match = re.compile("<a href='(.+?)'>(.+?)</a>").findall(containers)
            for movielink, title2 in match:
                title3 = cleantitle_get_2(title2)
                if titlecheck in title3:
                    # Only HD releases count toward the 6-link cap.
                    if "1080" in title2 or "720" in title2:
                        count += 1
                        if not count > 6:
                            self.elysium_url.append([movielink, title2])
        return self.elysium_url
    except Exception:
        return
开发者ID:azumimuo,项目名称:family-xbmc-addon,代码行数:27,代码来源:tvrls.py
示例18: movie
def movie(self, imdb, title, year):
    """Scrape rlsbbonline postTitle headings matching title+year.

    Returns a list of [href, title] pairs in self.zen_url, or None on
    failure/disabled.
    """
    self.zen_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        query = self.search_link % (urllib.quote_plus(title), year)
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        r = client.parseDOM(link, 'h2', attrs={'class': 'postTitle'})
        for item in r:
            href = client.parseDOM(item, 'a', ret='href')[0]
            item_title = client.parseDOM(item, 'a', ret='title')[0]
            href = href.encode('utf-8')
            item_title = item_title.encode('utf-8')
            # Require both the year and the cleaned title in the heading.
            if year in item_title:
                if cleanmovie in cleantitle.get(item_title):
                    self.zen_url.append([href, item_title])
        return self.zen_url
    except Exception:
        return
开发者ID:noobsandnerds,项目名称:noobsandnerds,代码行数:26,代码来源:rlsbbonline_mv_tv.py
示例19: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Scrape entry-title headings for an episode (SxxExx).

    Returns a list of hrefs in self.zen_url, or None on
    failure/disabled.
    """
    self.zen_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        data['season'], data['episode'] = season, episode
        ep_query = "S%02dE%02d" % (int(data['season']), int(data['episode']))
        query = self.search_link % (urllib.quote_plus(title), ep_query)
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        r = client.parseDOM(link, 'h1', attrs={'class': 'entry-title'})
        for items in r:
            href = client.parseDOM(items, 'a', ret='href')[0]
            item_title = client.parseDOM(items, 'a', ret='title')[0]
            href = href.encode('utf-8')
            item_title = item_title.encode('utf-8')
            # Require both the episode tag and the cleaned show title.
            if ep_query.lower() in cleantitle.get(item_title):
                if cleanmovie in cleantitle.get(item_title):
                    self.zen_url.append(href)
        return self.zen_url
    except Exception:
        return
开发者ID:noobsandnerds,项目名称:noobsandnerds,代码行数:31,代码来源:crazy_mv_tv.py
示例20: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Scrape rlsbbonline postTitle headings for an episode (SxxExx).

    Returns a list of [href, title] pairs in self.zen_url, or None on
    failure/disabled.
    """
    self.zen_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        cleanmovie = cleantitle.get(title)
        title = cleantitle.getsearch(title)
        data['season'], data['episode'] = season, episode
        episodecheck = ('S%02dE%02d' % (int(data['season']), int(data['episode']))).lower()
        query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = self.search_link % (urllib.quote_plus(title), query)
        mylink = urlparse.urljoin(self.base_link, query)
        link = client.request(mylink)
        r = client.parseDOM(link, 'h2', attrs={'class': 'postTitle'})
        for item in r:
            href = client.parseDOM(item, 'a', ret='href')[0]
            item_title = client.parseDOM(item, 'a', ret='title')[0]
            href = href.encode('utf-8')
            item_title = item_title.encode('utf-8')
            # Require both the cleaned show title and the episode tag.
            if cleanmovie in cleantitle.get(item_title):
                if episodecheck in cleantitle.get(item_title):
                    self.zen_url.append([href, item_title])
        return self.zen_url
    except Exception:
        return
开发者ID:noobsandnerds,项目名称:noobsandnerds,代码行数:31,代码来源:rlsbbonline_mv_tv.py
注:本文中的resources.lib.modules.cleantitle.getsearch函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论