• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python wikipedia.search函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中wikipedia.search函数的典型用法代码示例。如果您正苦于以下问题:Python search函数的具体用法?Python search怎么用?Python search使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了search函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: predict_movie_filter

def predict_movie_filter(movie, year):
    """Predict the Wikipedia title for *movie* released in *year*.

    Searches Wikipedia for both "<movie> (<year> film)" and
    "<movie> (film)", filters the combined results, and returns the
    best-matching title, or None when nothing plausible is found.
    """
    # Union of both searches, deduplicated via a set.
    my_searches = set(wikipedia.search('%s (%i film)' % (movie, year)) +
                      wikipedia.search('%s (film)' % movie))

    # Keep results that contain the movie title and are not
    # "List of accolades ..." pages.  (Logical `and` replaces the
    # original bitwise `&`, which only worked by accident on bools.)
    searches = [element for element in my_searches
                if movie.lower() in element.lower()
                and 'List of accolades' not in element]

    if not searches:
        return None
    # Compare case-insensitively from here on; the wikipedia package
    # results are lowercased per the original author's note.
    searches = [search.lower() for search in searches]

    # Preference order: "(year film)" > "(film)" > bare title,
    # falling back to the first surviving result.
    lmovie = movie.lower()
    first = '%s (%i film)' % (lmovie, year)
    second = '%s (film)' % lmovie

    if first in searches:
        return '%s (%i film)' % (movie, year)
    if second in searches:
        return '%s (film)' % movie  # fixed: closing parenthesis was missing
    if lmovie in searches:
        return movie
    return searches[0]
开发者ID:jennyzsun,项目名称:wiki_film,代码行数:34,代码来源:wiki_film.py


示例2: info

def info():
    """Render an HTML snippet for a random Wikipedia page matching the query.

    Falls back to `default_query` (module-level) when the primary search
    returns no hits.  Returns the formatted HTML string.
    """
    query = "USA"
    #query = None
    query = query if query is not None else default_query

    print("Searching Wikipedia")
    results = wikipedia.search(query, results=5)
    if not results:
        # fixed: wikipedia.search returns a (possibly empty) list, never
        # None, so the old "is None" check could not trigger.  The old
        # fallback also wrapped the (results, suggestion) tuple returned
        # by suggestion=True in a list, which broke random.choice below.
        results = wikipedia.search(default_query, results=3)
    print("Found searches: {}".format(results))

    wiki_page = random.choice(results)
    wiki = wikipedia.page(wiki_page, auto_suggest=True, redirect=True)

    print("Loading wikipedia page: " + wiki_page)
    html = u"""<center><b>{}</b></center>
               <hr>
               <center>{}</center>
               <br>
               {}"""
    # Up to ten of the page's images, in random order.
    images = wiki.images
    random.shuffle(images)
    images_html = ["<img src={} width='200px'/>".format(image)
                   for image in images[:10]]
    images_html = "".join(images_html)
    #html = html.format(wiki.title, wiki.html())
    html = html.format(wiki.title, images_html, wiki.summary)
    return html
开发者ID:MrValdez,项目名称:SingingEarth,代码行数:28,代码来源:main.py


示例3: search

 def search(self, args):
     """::search::
     used to search for a certain word.
     example:
     1. search wikipedia -> return words refer to 'wikipedia'
     2. search wiki pedia -> return words refer to 'wiki' & 'pedia'
     3. search wiki pedia 10 -> return 10 of the results refer to 'wiki'
             & 'pedia' """
     res, key_words = [], []
     num = 0
     if len(args) < 1:
         raise AssertionError(colored("function search needs at least one argument!",
                 "red", attrs=["bold"]))
     # there may be more than 2 arguments, for example: search wiki pedia 10
     elif len(args) >= 2:
         try:
             num = int(args[-1])
         except ValueError:
             raise AssertionError("num should be a decimal number")
         # fixed: these two lines sat inside the except block *after* the
         # raise, so they never executed and res/key_words stayed empty.
         res = wikipedia.search(args[0: len(args) - 1], results=num)
         key_words = args[0: len(args) - 1]
     else:
         res = wikipedia.search(args[0])
         # fixed: this assignment was mis-indented outside the else branch,
         # clobbering key_words for the multi-argument case as well.
         key_words = args[0]
     self.w_print(res, "white", key_words, "green")
开发者ID:JASON0916,项目名称:wiki_terminal,代码行数:25,代码来源:functions.py


示例4: wiki_search

def wiki_search(option, opt_str, value, parser):
    """optparse callback for -S: search Wikipedia for the given word(s).

    Reads its arguments from parser.rargs.  With two or more remaining
    args the last one is taken as the number of results to fetch.
    Prints the results via w_print and returns the first keyword.
    """
    res = []
    key_words = []
    length = len(parser.rargs)
    if length < 1:
        parser.error(colored("option -S needs at least one argument!", "red", attrs=["bold"]))
    # there may be more than 2 arguments, for example: wiki -S wiki pedia 10
    elif length >= 2:
        try:
            # num is shared at module level with other functions
            global num
            num = int(parser.rargs[-1])
        except ValueError:
            # NOTE(review): OptionParser.error() already exits the
            # program, so the exit(1) below looks unreachable — confirm.
            parser.error("num should be a decimal number")
            exit(1)
        # everything except the trailing count is the search keyword list
        res = search(parser.rargs[0 : length - 1], results=num)
        key_words = parser.rargs[0 : length - 1]
    else:
        res = search(parser.rargs[0])
        key_words = parser.rargs[0]

    w_print(res, "white", key_words, "green")
    return parser.rargs[0]
开发者ID:forblackking,项目名称:wiki_terminal,代码行数:25,代码来源:wiki_functions.py


示例5: get_text

    def get_text(self):
        """Accumulate Wikipedia content about self.topic into self.content.

        Pulls content from several randomly chosen search results for the
        full topic, for its first and last words (when the topic has more
        than one word), and for the first half of the topic string.
        Disambiguation / missing-page errors append self.uncertain /
        self.unknown instead.  Returns the capitalized accumulated text.
        """

        def _random_page_content(results):
            # Content of a page picked at random from the search results.
            return wikipedia.page(results[rand(0, len(results) - 1)]).content

        def _append_unique(results, grabs):
            # Append up to *grabs* randomly chosen page contents,
            # skipping any already present in self.content.
            self.content += _random_page_content(results)
            for _ in range(grabs - 1):
                more = _random_page_content(results)
                if more not in self.content:
                    self.content += more

        try:
            # The original fetched a fourth page and discarded the result;
            # that dead network call is dropped here.
            _append_unique(wikipedia.search(self.topic), 3)
        except wikipedia.exceptions.DisambiguationError:
            self.content += self.topic + self.uncertain
        except wikipedia.exceptions.PageError:
            self.content += self.topic + self.unknown

        # if there are more than one word in the topic try to get some
        # more results with the first and last word
        if len(self.topic.split()) > 1:
            # NOTE(review): wikipedia.search is handed a one-element list
            # here rather than a string — preserved as-is; confirm intent.
            try:
                _append_unique(wikipedia.search(self.topic.split()[:1]), 2)
            except wikipedia.exceptions.DisambiguationError:
                self.content += self.topic + self.uncertain
            except wikipedia.exceptions.PageError:
                self.content += self.topic + self.unknown
            try:
                _append_unique(wikipedia.search(self.topic.split()[-1:]), 2)
            except wikipedia.exceptions.DisambiguationError:
                self.content += self.topic + self.uncertain
            except wikipedia.exceptions.PageError:
                self.content += self.topic + self.unknown

        try:
            # fixed: // — under Python 3 the original "/" produced a float
            # slice index and raised TypeError.
            self.content += _random_page_content(
                wikipedia.search(self.topic[:len(self.topic) // 2]))
        except wikipedia.exceptions.DisambiguationError:
            self.content += self.topic + self.uncertain
        except wikipedia.exceptions.PageError:
            self.content += self.topic + self.unknown

        return self.content.capitalize()
开发者ID:pflammertsma,项目名称:Trollette,代码行数:58,代码来源:content_troll.py


示例6: search_page

def search_page(searchterm):
    """Return the wikipedia page object for the top hit of *searchterm*.

    Assumes the user knows the specific page, so the first search result
    is taken.
    """
    try:
        result = str(search(searchterm)[0])
    except UnicodeEncodeError:
        # str() on a non-ASCII unicode title fails under Python 2; retry
        # with the term transliterated to ascii.  (Narrowed from the
        # original bare except, whose comment showed this intent.)
        searchterm = convert_unicode(searchterm)
        result = search(searchterm)[0]
    return page(result)
开发者ID:vsoch,项目名称:repofish,代码行数:9,代码来源:wikipedia.py


示例7: get_text

    def get_text(self):
        """Accumulate Wikipedia content about self.topic into self.content.

        Pulls content from randomly chosen search results for the full
        topic, for its first and last words (when the topic has several
        words), and for the first half of the topic string.  On
        disambiguation / missing-page errors a stock phrase is appended
        instead.  Returns the accumulated content.
        """

        def _random_page_content(results):
            # Content of a page picked at random from the search results.
            return wikipedia.page(results[rand(0, len(results) - 1)]).content

        def _append_unique(results, grabs):
            # Append up to *grabs* randomly chosen page contents, skipping
            # duplicates already present in self.content.
            self.content += _random_page_content(results)
            for _ in range(grabs - 1):
                more = _random_page_content(results)
                if more not in self.content:
                    self.content += more

        try:
            # The original fetched one extra page and discarded it; that
            # dead network call is dropped here.
            _append_unique(wikipedia.search(self.topic), 3)
        except wikipedia.exceptions.DisambiguationError:
            self.content += self.topic + ' can mean many things but to me it is'
        except wikipedia.exceptions.PageError:
            self.content += self.topic + ' is sometimes hard to find'

        # if there are more than one word in the topic try to get some
        # more results with the first and last word
        if len(self.topic.split()) > 1:
            # NOTE(review): wikipedia.search is handed a one-element list
            # here rather than a string — preserved as-is; confirm intent.
            try:
                _append_unique(wikipedia.search(self.topic.split()[:1]), 2)
            except wikipedia.exceptions.DisambiguationError:
                self.content += self.topic + ' can mean many things but to me it is'
            except wikipedia.exceptions.PageError:
                self.content += self.topic + ' is sometimes hard to find'
            try:
                _append_unique(wikipedia.search(self.topic.split()[-1:]), 2)
            except wikipedia.exceptions.DisambiguationError:
                self.content += self.topic + ' can mean many things but to me it is'
            except wikipedia.exceptions.PageError:
                self.content += self.topic + ' is sometimes hard to find'

        try:
            # fixed: // — under Python 3 the original "/" produced a float
            # slice index and raised TypeError.
            self.content += _random_page_content(
                wikipedia.search(self.topic[:len(self.topic) // 2]))
        except wikipedia.exceptions.DisambiguationError:
            self.content += self.topic + ' can mean many things but to me it is'
        except wikipedia.exceptions.PageError:
            self.content += self.topic + ' is sometimes hard to find'
        return self.content
开发者ID:wartortell,项目名称:Trollette,代码行数:57,代码来源:content_troll.py


示例8: clean_leader

def clean_leader(data):
    """Normalize a scraped wiki leader-infobox string into a plain name.

    Python 2 code (print statements).  Resolves "incumbent pope" and
    "current X" placeholders via live Wikipedia lookups, strips wiki
    template/markup残留, urls and trailing clauses, and returns the
    cleaned string.
    """
    # resolve "incumbent pope" to the current pope's actual name
    pope = wikipedia.search('current pope')
    data = data.replace('incumbent pope', pope[0])
    # strip footnote markers [..] and doubled-paren asides
    data = re.sub(r'\[.+?\]\s?', '', data)
    data = re.sub(r'\(.+?\)\)s?', '', data).strip()
    if '{{' in data and '|' in data:
        # print data
        try:
            # template with >= 3 fields: join fields 1 and 2 with "and"
            data = data.split('|')[1]+' and '+data.split('|')[2].replace('}}', '').strip()
        except IndexError:
            try:
                # fall back to the single second field
                if '{{' in data and '|' in data:
                    data = data.split('|')[1].replace('}}', '').strip()
            except IndexError:
                print data+'cacca22'
                pass

    # drop trailing urls and "holds ..." clauses
    if 'http' in data:
        data = data.split('http')[0].strip()

    if 'holds' in data:
        data = data.split('holds')[0].strip()

    # if 'current' in data:
    if 'current' in data and 'pope' not in data or 'unbulleted' in data:
        # unresolved "current X" template: search Wikipedia and pull the
        # incumbent's name out of the article's bold (<b>) links
        missing = wikipedia.search(data.replace('{{', '').replace('}}', '').replace('current', '').replace('unbulleted list', 'president of switzerland').replace('\n', '').strip())
        try:
            missing = wikipedia.page(missing[0])
            missing = missing.html()
            missing = BeautifulSoup(missing)
            for b in missing.find_all('b'):
                if 'incumbent' in b.get_text().lower():
                    missing_president = b.find('a').get_text()
                    data = data.replace(data, missing_president)
        except IndexError:
            print data+'cacca3'
            pass
        except AttributeError:
            # print data+'cacca5'
            pass

    # final trims: template separators, party names, "X of Y" -> "X"
    if '|' in data:
        data = data.split('|')[0].strip()
    if 'party' in data or 'congress' in data or 'front' in data or 'rally' in data:
         data = data.split('(')[0].strip()
    if ' of ' in data:
        data = data.split(' of ')[0].strip()

    # entity / brace / digit cleanup; digits are kept when part of a
    # "born ..." clause
    data = re.sub('&nbsp;', ' ', data).replace('{{', '').replace('}}', '').strip().replace('(n)', '').replace('  ', ' ').replace('(independent ', '')
    if 'born' not in data:
        data = re.sub('([0-9])', '', data).replace(u"\u2013", '').replace('()', '').strip()
    return data
开发者ID:StefanoLoi,项目名称:http---gitrepo.jeeves.ask.info-cgi-bin-cgit-sa-ke,代码行数:52,代码来源:world_leaders.py


示例9: getWiki

def getWiki(activity):
	"""Look up *activity* on Wikipedia and return the result.

	Strips common filler/command words from the query, then either
	returns a two-sentence summary (for "summarize"/"summary"), a full
	summary (for "wiki"/"wikipedia"), or a plain search result list,
	retrying the search with suggestion=True on failure.
	"""
	pattern = re.compile("\\b(of|the|in|for|at|check|find|how|how\'s|is|tell|me|check|out|about|wiki|wikipedia|summarize)\\W", re.I)
	string = re.sub(pattern, "", activity)

	if "summarize" in activity or "summary" in activity:
		result = wikipedia.summary(string[len('summarize'):], sentences=2)
	elif "wiki" in activity or "wikipedia" in activity:
		result = wikipedia.summary(activity[len('wiki'):])
	else:
		try:
			result = wikipedia.search(string, results=10, suggestion=False)
		except Exception:
			result = wikipedia.search(string, results=10, suggestion=True)
	# fixed: the computed result was never returned by the original
	return result
开发者ID:cphayash,项目名称:Beeker,代码行数:13,代码来源:searchWiki.py


示例10: searchwiki

def searchwiki(question):
	"""Return a one-sentence Wikipedia answer for *question*, or False.

	Searches for the question and summarizes the top hit; when that
	summary fails (e.g. disambiguation), retries via the top hit's own
	first search result.
	"""
	key = wikipedia.search(question)
	app.logger.info(repr("searching wikipedia for" + str(question)))
	if key:
		try:
			answer = wikipedia.summary(key[0], sentences=1)
		except Exception:
			# fixed: wikipedia.page() was handed the whole result list;
			# it expects a single title, so pass the first element.
			m = wikipedia.page(wikipedia.search(key[0])[0])
			answer = wikipedia.summary(m.title, sentences=1)
	else:
		answer = False

	return answer
开发者ID:gokulnathgm,项目名称:natlan,代码行数:14,代码来源:app.py


示例11: ne

    def ne(self, ne):
        """Return the cached record of Wikipedia article ids for *ne*,
        downloading and caching it when not yet present.

        The record has the shape {'_id': <ne>, 'articles': [ids...]}.
        """
        ne = ne.lower()
        cached = self.db.nes.find_one({'_id': ne})
        if cached is not None:
            return cached  # already processed

        # Not found -> search Wikipedia; treat API failures as no hits.
        try:
            search = wikipedia.search(ne)
        except wikipedia.exceptions.WikipediaException:
            search = []

        # Collect ids of articles we can actually fetch.
        # (Removed the original's unused `found = False` flag.)
        pages = []
        for title in search:
            article = self.article(title=title)
            if article is None:
                continue
            pages.append(article['_id'])

        record = {'_id': ne, 'articles': pages}
        self.db.nes.insert_one(record)
        return record
开发者ID:aparafita,项目名称:news-similarity,代码行数:34,代码来源:wikipedia.py


示例12: searchWiki

def searchWiki(page):
    """Return the French-Wikipedia URL for *page*, or '' when not found.

    Python 2 code.  Only returns a link when a search result matches the
    query exactly, or when the raw title turns out to be a redirect.
    """
    wikipedia.set_lang("fr")
    link = ''
    try:
#        p = wikipedia.page(page)
#        link = p.url
        propos = wikipedia.search(page,results=5,suggestion=False)
        for choice in propos:
            # exact match (compared as utf-8 bytes) -> take that page's url
            if choice.encode('utf-8') == page.encode('utf-8'):
                p = wikipedia.page(page)
                link = p.url
                break
            elif page in choice:
                #TODO
                print 'There is a proposition containing the keyWord '
                print choice
            else:
                # no direct match: probe the raw title for a redirect
                try:
                    wikipedia.page(page,redirect=False,auto_suggest=False)
                except wikipedia.exceptions.RedirectError:
                    # the title redirects somewhere -> accept that target
                    p = wikipedia.page(page)
                    link = p.url
                    break
                except:
                    # any other failure: keep looking at later candidates
                    link =''
    except:
        link = ""
    return link#.encode('utf-8')
开发者ID:Droxef,项目名称:DigitalHumanities,代码行数:28,代码来源:wikipediaSearch.py


示例13: page

def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):
    '''
    Get a WikipediaPage object for the page with title `title` or the
    pageid `pageid` (mutually exclusive).

    Keyword arguments:

    * title - the title of the page to load
    * pageid - the numeric pageid of the page to load
    * auto_suggest - let Wikipedia find a valid page title for the query
    * redirect - allow redirection without raising RedirectError
    * preload - load content, summary, images, references, and links during initialization
    '''

    if title is None:
        # No title: fall back to the pageid, or complain.
        if pageid is None:
            raise ValueError("Either a title or a pageid must be specified")
        return WikipediaPage(pageid=pageid, preload=preload)

    if auto_suggest:
        results, suggestion = search(title, results=1, suggestion=True)
        if suggestion:
            title = suggestion
        elif results:
            title = results[0]
        else:
            # Neither a suggestion nor any search hit: the page
            # does not exist.
            raise PageError(title)
    return WikipediaPage(title, redirect=redirect, preload=preload)
开发者ID:weblyzard,项目名称:ewrt,代码行数:28,代码来源:wikipedia_wl.py


示例14: wiki_func

def wiki_func(paras, infos):
    """Query the Chinese Wikipedia for *paras* and return a text answer.

    Returns {'text': <summary + candidate keywords>} on success, or
    {'text': 'not found'} when there are no candidates or no candidate
    yields a summary.
    """
    wikipedia.set_lang("zh")
    candidates = wikipedia.search(paras)

    if not candidates:
        return {'text': 'not found'}

    summary = None
    for keyword in candidates:
        try:
            summary = wikipedia.summary(keyword, sentences=1)
        except Exception:
            # a candidate may raise (e.g. disambiguation, per the
            # wikipedia docs) — move on to the next one
            continue
        break

    if not summary:
        return {'text': 'not found'}

    answer = decode_to_unicode(summary) + \
             u'\n候选关键词: %s' % u', '.join(candidates)
    return {'text': answer}
开发者ID:Linusp,项目名称:bbot,代码行数:27,代码来源:component.py


示例15: index

def index(request):
    """Django view: search Twitter and Wikipedia for ?query= and render index.html.

    Python 2 code.  Prints wall-clock timings around each external call.
    """
    start = current_milli_time()
    print 'START: ', start
    
    # authenticate against the Twitter API with app credentials
    auth = tweepy.OAuthHandler(
        settings.TWITTER_CONSUMER_KEY,
        settings.TWITTER_CONSUMER_SECRET
    )
    auth.set_access_token(
        settings.TWITTER_ACCESS_TOKEN,
        settings.TWITTER_ACCESS_SECRET_TOKEN
    )
    twitter = tweepy.API(auth)

    if 'query' in request.GET:
        query = request.GET['query']
        
        # NOTE(review): 'en' is passed positionally; confirm tweepy's
        # search signature treats it as the language parameter.
        twitterdata = twitter.search(query, 'en')
        print 'AFTER TWITTER CALL: ', current_milli_time() - start
        wikidata = wikipedia.search(query)
        print 'AFTER WIKIPEDIA CALL: ', current_milli_time() - start
    else: 
        # no query supplied: render the page with empty result sets
        twitterdata = None
        wikidata = None
    
    
    return render(request, 'index.html', {'twitter': twitterdata, 'wikipedia': wikidata
        })
开发者ID:suchoX,项目名称:brassica,代码行数:28,代码来源:views.py


示例16: readTweet

def readTweet(): #takes on top 10  hashtags and @ tags
 """Read xiaomi.csv, count #/@ tags in the tweet column, and build a
 graph linking "Xiaomi" to the ten most frequent tags and each tag to
 its top two Wikipedia search results.

 Python 2 code.  NOTE(review): the graph g is a local that is never
 returned or saved — confirm whether it should be returned.
 """
 fl = open("xiaomi.csv","r")
 x=object()
 tweet = ''
 lst = []
 for line in fl:
   x=line.split(",")
   # column -3 holds the tweet text when the row has enough columns
   if len(x) > 3:
     tweet = x[-3]
     tweet = tweet.split(" ")
   for items in tweet:
     if items.startswith("#") or items.startswith("@"):
        print items  
        lst = lst + [items]
 # frequency distribution over all collected tags
 fdist=nltk.FreqDist(lst)
 print fdist
 g=nx.Graph()
 g.add_node("Xiaomi")
 g.add_nodes_from(fdist.keys()[:10])
 nodes = []
 for node in fdist.keys()[:10] :
  print node
  g.add_edge("Xiaomi",node)
  try :
   # the tag's top two Wikipedia hits become neighbour nodes
   nodes = wikipedia.search(node)[:2]
  except Exception,e :
    print str(e)
  if len(nodes) >= 2 :
   g.add_nodes_from(nodes)
   g.add_edge(node,nodes[0])
   g.add_edge(node,nodes[1])
开发者ID:chaitu4068,项目名称:project-till-now,代码行数:31,代码来源:extraction.py


示例17: wiki

def wiki():
    '''
    Search Anything in wikipedia

    Prompts for a search term, lists the candidate titles, and prints
    either the full page or a short summary depending on user choice.
    NOTE(review): mixes raw_input/input with print() — reads as
    Python 2 where input() evaluates to an int.
    '''

    word=raw_input("Wikipedia Search : ")
    results=wk.search(word)
    # show numbered candidates so the user can pick one by index
    for i in enumerate(results):
        print(i)
    try:    
        key=input("Enter the number : ")    
    except AssertionError:
        # NOTE(review): input() does not raise AssertionError, so this
        # retry branch looks unreachable — confirm the intended exception.
        key=input("Please enter corresponding article number : ")    
    
    page=wk.page(results[key])
    url=page.url
    #originalTitle=page.original_title
    pageId=page.pageid
    #references=page.references
    title=page.title
    #soup=BeautifulSoup(page.content,'lxml')
    pageLength=input('''Wiki Page Type : 1.Full 2.Summary : ''')
    if pageLength==1:
        soup=fullPage(page)
        print(soup)
    else:    
        print(title)
        print("Page Id = ",pageId)
        print(page.summary)
        print("Page Link = ",url)
    #print "References : ",references
    
    
    pass
开发者ID:geekcomputers,项目名称:Python,代码行数:34,代码来源:WikipediaModule.py


示例18: wikiwatch

def wikiwatch():
    """
    Search wikipedia for the term in the module-level `text` message
    (triggered by !wiki / !wikipedia / !w) and send the title, url and
    a trimmed summary via sendmsg; on any failure fall back to a plain
    search link.
    """
    try:
        if text.lower().find("!wiki") != -1 or text.lower().find("!wikipedia") != -1 or text.lower().find("!w ") != -1:
            # everything after the last "!w..." trigger is the search term
            string = text[text.lower().rfind("!w") :]
            search = string[string.find(" ") + 1 :].rstrip("\n")
            search_list = text.split()[4:]
            # search = " ".join(search_list)
            summary = wikipedia.summary(search, sentences=2)
            # keep the outgoing message under the length limit
            if len(summary) > 430:
                summary = summary[0:430] + "..."
            # print(summary)
            page = wikipedia.page(search)
            title = page.title
            url = page.url
            sendmsg(title + " | " + url)
            try:
                sendmsg(summary.encode("utf-8", "ignore"))
            except:
                sendmsg("fix this when you have the time, probably to do with non-unicode characters.")
    except:
        # lookup failed: offer the first plain-search hit as a guess
        try:
            search_list = text.split()[4:]
            search = " ".join(search_list)
            lookup = wikipedia.search(search, results=5)
            # page = wikipedia.page(lookup[0])
            # title = page.title
            url = "https://en.wikipedia.org/wiki/" + lookup[0].replace(" ", "+")
            sendmsg("I'm not sure what you mean, this is what I could find. | " + url)
        except:
            sendmsg(error)
开发者ID:Cha0zz,项目名称:Jimmy,代码行数:33,代码来源:Cha0zzB0t.py


示例19: crawlAndLearn

def crawlAndLearn(topic):
    """Fetch Wikipedia pages about *topic*, feed their cleaned text to
    iterateInput, and record the topic in meta['educatedOn'] so it is
    not re-learned.  Python 2 code; uses module-level meta and config.
    """
    print 'topic: ' + str(topic)
    if not 'educatedOn' in meta:
        meta['educatedOn'] = []

    if topic in meta['educatedOn']:
        print "Already Learned: " + topic
    else:
        # config['readMoreLimit'] caps how many search hits are read
        search = wikipedia.search(topic, results=int(config['readMoreLimit']))
        for page in search:
            print "Learning about: " + page
            try:
                article = wikipedia.page(page)
                # drop the "See also" tail, section headings, and
                # parenthesised asides before learning from the text
                content = re.sub(r'=+\sSee also\s=+.+$', ' ', article.content, flags=re.M | re.S)
                content = re.sub(r'=+\s.+\s=+', ' ', content)
                content = re.sub(r'\(.+\)', ' ', content, flags=re.M | re.S)
                #print content
                iterateInput(content)
            except wikipedia.exceptions.DisambiguationError:
                content = ""
            except wikipedia.exceptions.PageError:
                content = ""
        if not 'educatedOn' in meta:
            meta['educatedOn'] = []
        meta['educatedOn'].append(topic)
开发者ID:ajn0592,项目名称:robowriter,代码行数:25,代码来源:roboreader.py


示例20: findRelevantArticles

def findRelevantArticles(term,data_path='.'):
    """Search Wikipedia for *term* and save the content of every article
    whose category keywords overlap the module-level `relevant_categories`
    set to a text file under *data_path*.

    Python 2 code (print>> syntax).  NOTE(review): articleList collects
    the saved titles but is never returned — confirm whether a
    `return articleList` is missing.
    """
    articleList = []
    articles = wikipedia.search(term) #Setting suggestion = False (default value); No clear use for it now

    for article in articles:
        try: 
            article = wikipedia.page(article)
            # flatten the article's category names into a lowercase keyword set
            category_keywords = set(list(itertools.chain.from_iterable([category.lower().split() for category in article.categories])))
            if len(category_keywords & relevant_categories) > 0:
                articlefilename = "content_"+str(article.title.lower())+".txt"
                # avoid clobbering an existing file: append term + timestamp
                if os.path.isfile(articlefilename):
                     articlefilename = "content_"+ str(article.title.lower())+'%s.txt' % str(term+time.strftime("%Y%m%d-%H%M%S"))
                with codecs.open(os.path.join(data_path,articlefilename),'wb', 'utf-8') as outfile:
                    content = wikipedia.page(article).content
                    print>>outfile,content
                articleList.append(str(article.title))
        except wikipedia.exceptions.PageError as e:
            pass
        except wikipedia.exceptions.DisambiguationError as e:
            # ambiguous title: try each disambiguation option instead
            for article in e.options:
                try:
                    article = wikipedia.page(article)
                    category_keywords = set(list(itertools.chain.from_iterable([category.lower().split() for category in article.categories])))
                    if len(category_keywords & relevant_categories) > 0:
                        articlefilename = "content_"+str(article.title.lower())+".txt"
                        if os.path.isfile(articlefilename):
                            articlefilename = "content_"+ str(article.title.lower())+'%s.txt' % str(term+time.strftime("%Y%m%d-%H%M%S"))
                        with codecs.open(os.path.join(data_path,articlefilename),'wb','utf-8') as outfile:
                            print>>outfile,article.content
                        articleList.append(str(article.title))
                except wikipedia.exceptions.DisambiguationError as f:
                    pass
开发者ID:mac389,项目名称:computational-medical-knowledge,代码行数:32,代码来源:wikipedia_fetch.py



注:本文中的wikipedia.search函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python wikipedia.setAction函数代码示例发布时间:2022-05-26
下一篇:
Python wikipedia.replaceLanguageLinks函数代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap