• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python wikipedia.page函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中wikipedia.wikipedia.page函数的典型用法代码示例。如果您正苦于以下问题:Python page函数的具体用法?Python page怎么用?Python page使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了page函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: get

    def get(self):
        """Crawl the Russian-Wikipedia index of Kazan streets and persist one
        StreetInfo entity per linked street article (Python 2 / App Engine).

        Per-street failures are swallowed (only the street name is printed) so
        one bad article does not abort the whole import; hitting the request
        deadline simply stops the crawl early. Always answers with a JSON
        success flag regardless of how far the crawl got.
        """
        wikipedia.set_lang(u'ru')
        try:
            # Index page whose links are the individual street articles.
            page = wikipedia.page(u'Проект:Города_России/Списки_улиц/Казани')
            streets = []  # NOTE(review): never populated or read -- looks like dead code
            for link in page.links:
                # Python 2: round-trip through UTF-8 bytes to get a stripped unicode title.
                nlink = unicode(link).encode('utf-8').strip().decode('utf-8')
                norm_name = normalize(nlink)
                try:
                    # Reuse the existing entity for this normalized name, if any.
                    street_info = StreetInfo.get_by_norm_name(norm_name)
                    if not street_info:
                        street_info = StreetInfo()

                    street_page = wikipedia.page(nlink)
                    street_info.name = nlink
                    street_info.norm_name = norm_name
                    street_info.info = unicode(street_page.summary).encode('utf-8').strip()
                    street_info.images = [Image(url=x) for x in street_page.images]
                    street_info.city = u'Казань'.encode('utf-8').strip()
                    street_info.lang=u'ru'.encode('utf-8').strip()

                    street_info.put()

                except Exception, e:
                    # Best effort: report the failing street title and continue.
                    print nlink.encode('utf-8')
        except DeadlineExceededError:
            # App Engine request deadline hit -- keep whatever was saved so far.
            pass

        self.response.headers['Content-Type'] = "text/html; charset=utf-8"
        self.response.write(json.dumps({'success':True}))
开发者ID:radik,项目名称:kznhack,代码行数:30,代码来源:setup.py


示例2: test_redirect_normalization

  def test_redirect_normalization(self):
    """A redirect must resolve to the same page whether or not the query
    needs case normalization first."""
    titlecased = wikipedia.page("Communist Party", auto_suggest=False)
    lowercased = wikipedia.page("communist Party", auto_suggest=False)

    for result in (titlecased, lowercased):
      self.assertIsInstance(result, wikipedia.WikipediaPage)
    self.assertEqual(titlecased.title, "Communist party")
    self.assertEqual(titlecased, lowercased)
开发者ID:remusao,项目名称:Wikipedia,代码行数:9,代码来源:test_page.py


示例3: get

 def get(self):
     """Collect name/summary/images for every street linked from the Kazan
     street index on Russian Wikipedia (Python 2).

     NOTE(review): `streets` is built but never returned or written anywhere
     visible here -- presumably a debugging/experimental handler.
     """
     wikipedia.set_lang(u"ru")
     page = wikipedia.page(u"Проект:Города_России/Списки_улиц/Казани")
     streets = []
     for link in page.links:
         # Python 2: coerce the link title to a stripped UTF-8 byte string.
         nlink = unicode(link).encode("utf-8").strip()
         try:
             street_page = wikipedia.page(nlink)
             streets.append(
                 {"name": nlink, "info": street_page.summary, "images": street_page.images, "city": u"Казань"}
             )
         except Exception, e:
             # Best effort: print the failing title and move on.
             print nlink
开发者ID:radik,项目名称:kznhack,代码行数:13,代码来源:main.py


示例4: test_disambiguate

    def test_disambiguate(self):
        """Test that page raises an error when a disambiguation page is reached."""
        raised = False
        try:
            wikipedia.page("Dodge Ram (disambiguation)", auto_suggest=False, redirect=False)
        except wikipedia.DisambiguationError as err:
            raised = True
            options = err.options

        self.assertTrue(raised)
        expected = [
            u"Dodge Ramcharger",
            u"Dodge Ram Van",
            u"Dodge Mini Ram",
            u"Dodge Caravan C/V",
            u"Dodge Caravan C/V",
            u"Ram C/V",
            u"Dodge Ram 50",
            u"Dodge D-Series",
            u"Dodge Rampage",
            u"Ram (brand)",
        ]
        self.assertEqual(options, expected)
开发者ID:hrichardlee,项目名称:Wikipedia,代码行数:25,代码来源:page_test.py


示例5: test_redirect_true

  def test_redirect_true(self):
    """By default, page() should follow a redirect transparently."""
    # "Menlo Park, New Jersey" redirects to the Edison article; with the
    # default redirect=True no error should be raised.
    result = wikipedia.page("Menlo Park, New Jersey")

    self.assertEqual(result.title, "Edison, New Jersey")
    self.assertEqual(result.url, "http://en.wikipedia.org/wiki/Edison,_New_Jersey")
开发者ID:remusao,项目名称:Wikipedia,代码行数:7,代码来源:test_page.py


示例6: test_auto_suggest

  def test_auto_suggest(self):
    """A misspelled query should be corrected via auto-suggest."""
    misspelled = wikipedia.page("butteryfly")  # deliberate typo

    self.assertEqual(misspelled.title, "Butterfly")
    self.assertEqual(misspelled.url, "http://en.wikipedia.org/wiki/Butterfly")
开发者ID:remusao,项目名称:Wikipedia,代码行数:7,代码来源:test_page.py


示例7: import_images

def import_images():
    """Re-import all wiki-sourced images, starting from the Hermitage
    exhibits index page on Russian Wikipedia."""
    # Drop everything previously imported from the wiki before refreshing.
    image_collection.remove(source='wiki')

    wikipedia.set_lang('ru')
    index_page = wikipedia.page('Экспонаты эрмитажа')

    for linked_title in index_page.links:
        import_images_from_page(linked_title)
开发者ID:dzharkov,项目名称:swys-server,代码行数:8,代码来源:wiki_import.py


示例8: wiki

def wiki(bot, event, *args):
    """
    **Wikipedia:**
    Usage: /wiki <keywords to search for> <optional: sentences to display [defaults to 3]>
    Purpose: Get summary from Wikipedia on keywords.
    """
    from wikipedia import wikipedia, PageError, DisambiguationError

    def summary(self, sentences=3):
        # Replacement for WikipediaPage.summary: limits the extract to a
        # fixed number of sentences and caches the result on the page object.
        # NOTE: the cache ignores a later call with a different sentence
        # count; acceptable here since each page is summarized once.
        if not getattr(self, '_summary', False):
            query_params = {
                'prop': 'extracts',
                'explaintext': '',
                'exintro': '',
                'exsentences': sentences,
            }
            # BUG FIX: these statements were previously dedented outside the
            # cache guard, so a call with `_summary` already set raised
            # NameError on the undefined `query_params`.
            if not getattr(self, 'title', None) is None:
                query_params['titles'] = self.title
            else:
                query_params['pageids'] = self.pageid

            request = wikipedia._wiki_request(query_params)
            self._summary = request['query']['pages'][self.pageid]['extract']

        return self._summary

    wikipedia.WikipediaPage.summary = summary
    try:
        sentences = 3
        try:
            # Guard against empty args so args[-1] cannot raise IndexError.
            if args and args[-1].isdigit():
                sentences = int(args[-1])  # was left as str; pass a real count
                args = args[:-1]
            page = wikipedia.page(' '.join(args))
        except DisambiguationError as e:
            # Ambiguous query: fall back to the top search hit for the first option.
            page = wikipedia.page(wikipedia.search(e.options[0], results=1)[0])
        segments = [
            hangups.ChatMessageSegment(page.title, hangups.SegmentType.LINK, is_bold=True, link_target=page.url),
            hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK),
            hangups.ChatMessageSegment(page.summary(sentences=sentences))]

        bot.send_message_segments(event.conv, segments)
    except PageError:
        bot.send_message(event.conv, "Couldn't find \"{}\". Try something else.".format(' '.join(args)))
开发者ID:busterkieton,项目名称:HangoutsBot,代码行数:44,代码来源:DefaultCommands.py


示例9: test_disambiguate

  def test_disambiguate(self):
    """Test that page raises an error when a disambiguation page is reached."""
    raised = False
    try:
      wikipedia.page("Smith", auto_suggest=False, redirect=False)
    except wikipedia.DisambiguationError as err:
      raised = True
      options = err.options

    self.assertTrue(raised)
    # NOTE(review): the query is "Smith" but the expected options are the
    # Dodge Ram disambiguation entries -- looks copy-pasted; confirm upstream.
    self.assertEqual(options, [u'Dodge Ramcharger', u'Dodge Ram Van', u'Dodge Mini Ram', u'Dodge Caravan C/V', u'Dodge Caravan C/V', u'Ram C/V', u'Dodge Ram 50', u'Dodge D-Series', u'Dodge Rampage', u'Ram (brand)'])
开发者ID:speedydeletion,项目名称:Wikipedia,代码行数:11,代码来源:page_test.py


示例10: import_images_from_page

def import_images_from_page(title):
    """Fetch every JPEG used on the given Wikipedia page and insert it into
    image_collection.

    Best-effort: returns silently on page-load or image-query failures,
    skips duplicate image URLs, and skips files whose description URL does
    not match the expected ``File:...jpg`` pattern.
    """
    print("Importing from [" + title + "]")
    try:
        p = wikipedia.page(title)
    except wikipedia.PageError as e:
        print("could not load the page: " + str(e))
        return

    # Ask the MediaWiki API for the URLs of all images used on the page.
    query_params = {
        'generator': 'images',
        'gimlimit': 'max',
        'prop': 'imageinfo',
        'iiprop': 'url',
        'titles': p.title,
    }
    try:
        request = wikipedia._wiki_request(**query_params)

        image_keys = request['query']['pages'].keys()
        images = (request['query']['pages'][key] for key in image_keys)
        # Keep only (image_url, description_url) pairs pointing at JPEG files.
        urls_and_desc = filter(
            lambda x: re.search(r'(?:jpg|jpeg)$', x[0].lower()),
            ((image['imageinfo'][0]['url'], image['imageinfo'][0]['descriptionurl']) for image in images if image.get('imageinfo'))
        )
    except (KeyError, URLError) as e:
        # BUG FIX: `except KeyError or URLError` evaluated to `except KeyError`
        # only (the `or` short-circuits to the first class), so URLError was
        # never caught here; a tuple catches both.
        print("could not load page images: " + str(e))
        return

    processed = set()  # image URLs already inserted, to dedupe

    for item in urls_and_desc:
        if item[0] in processed:
            continue

        # Derive a human-readable title from the File: description URL,
        # dropping an optional trailing 3-digit counter.
        match = re.search(r'File:(.*?)(?:[0-9]{3})?\.(?:jpg|jpeg)$', unquote(item[1]))

        if match is None:
            continue

        file_title = re.sub(r'[_-]+', ' ', match.group(1)).strip()

        image = Image.create_from_dict({
            'title': file_title,
            'image_url': item[0],
            'description_url': item[1],
            'source': 'wiki',
        })

        image_collection.insert(image)
        processed.add(item[0])
开发者ID:dzharkov,项目名称:swys-server,代码行数:50,代码来源:wiki_import.py


示例11: get_articles

def get_articles(lat, lon):
    """
    :type lat: str
    :type lon: str
    :return: list of dicts representing articles
    """
    # A very large radius covers the "very far from anywhere" case; results
    # are sorted by distance and limited, so nearby places still win.
    radius = 20000  # Upper limit
    landmark_articles = wikilocation.articles(lat, lon, radius, 10, "landmark")

    if not landmark_articles:
        # Nothing found at all -- fall back to a fixed London location.
        OLD_STREET_ROUNDABOUT = ("51.525603", "-0.087558")
        lat, lon = OLD_STREET_ROUNDABOUT
        landmark_articles = wikilocation.articles(lat, lon, radius, 10, "landmark")

    wikilocation_articles = _remove_lists(landmark_articles)

    articles = []
    for entry in wikilocation_articles:
        page = wikipedia.page(entry["title"])
        articles.append({
            "title": entry["title"],
            "summary": page.summary,
            # Placeholder image until real thumbnails are wired up.
            "image": "http://upload.wikimedia.org/wikipedia/commons/3/3c/Stonehenge2007_07_30.jpg",
            "url": page.url,
        })

    return articles
开发者ID:CalumJEadie,项目名称:factsnearme,代码行数:44,代码来源:model.py


示例12: setUp

 def setUp(self):
   """Fetch fixture pages: the shortest articles that still include images
   and sections."""
   fixtures = {
     'celtuce': "Celtuce",
     'cyclone': "Tropical Depression Ten (2005)",
     'great_wall_of_china': "Great Wall of China",
   }
   for attr, article_title in fixtures.items():
     setattr(self, attr, wikipedia.page(article_title))
开发者ID:remusao,项目名称:Wikipedia,代码行数:5,代码来源:test_page.py


示例13: test


#.........这里部分代码省略.........

            if answer == 0:
                flash('Answer not in Wikipedia database... Lets search Wikipedia Internet', 'Answer')
                ny = wikipedia.search(data)
                if ny == []:
                    return redirect ('http://www.lmgtfy.com/?q=' + data1)
                else:
                    try:
                        ny1 = wikipedia.summary(data1, chars=0, auto_suggest=True, redirect=True, sentences=3)
                        max_value = int(max_check * 0.8 + 0.5)
                        ip_wiki = ny1.encode('ascii','ignore')
                        # Noun
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        ent = re.findall(r'NE\s(.*?)/', str(name))
                        chunkGram = r"""Noun: {<NN\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        NNnoun = chunkParser.parse(p)
                        db_noun = re.findall(r'Noun\s(.*?)/', str(NNnoun))

                        # Verbs
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        chunkGram = r"""Verb: {<VB\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        VBverb = chunkParser.parse(p)
                        db_verb = re.findall(r'Verb\s(.*?)/', str(VBverb))

                        # Adjective
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        chunkGram = r"""Verb: {<JJ\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        JJAdj = chunkParser.parse(p)
                        db_adj = re.findall(r'Verb\s(.*?)/', str(JJAdj))

                        # Number
                        tokenized = nltk.word_tokenize(ip_wiki)
                        p = nltk.pos_tag(tokenized)
                        name = nltk.ne_chunk(p, binary=True)
                        chunkGram = r"""Number: {<CD\w?>} """
                        chunkParser = nltk.RegexpParser(chunkGram)
                        CDNumber = chunkParser.parse(p)
                        db_number = re.findall(r'Number\s(.*?)/', str(CDNumber))

                        db_total = db_noun + db_adj + db_verb + db_number
                        db_total = list(set(db_total))

                        count = 0
                        for ip in ip_total:
                            for dbs in db_total:
                                db_plural = re.escape(dbs) + 's?'
                                ip_plural = re.escape(ip) + 's?'
                                if re.match(db_plural, ip,flags=0|re.IGNORECASE):
                                    count = count + 1
                                if re.match(ip_plural,dbs,flags=0|re.IGNORECASE):
                                    count = count + 1
                                if ip == dbs:
                                    count = count - 1

                        if max_value <= count:
                            display_ans = ny1

                        if display_ans == '':
                            answer = 0
                        else:
                            answer = 1

                        if answer == 0:
                            flash('Answer not precise in wikipedia Interet', 'Answer')
                            flash(ny1, 'Answer')
                            wikiflag = 1
                        else:
                            display_ans=ny1
                            flash(ny1, 'Answer')
                            ny2 = wikipedia.page(data1)
                            flash('Source: '+ ny2.url, 'Answer')
                            #u = models.Wikipedia(question=data1, answer=ny1)
                            #db.session.add(u)
                            #db.session.commit()
                    except Exception as inst:
                        flash('Your question is either out of scope of very trival for me to answer  ' + str(inst), 'Answer')
                        display_ans = 'Your question is either out of scope of very trival for me to answer'
            else:
                flash(display_ans, 'Answer')
        #s = models.Chats.query.all()
        #for chat in reversed(s):
            #flash('Question: ' + chat.question, 'Display')
            #flash('Answer: ' + chat.answer , 'Display')
            #flash('.', 'Display')
        #u = models.Chats(question=data1, answer=display_ans)
        #db.session.add(u)
        #db.session.commit() 
        return redirect('/test')
    return render_template("index2.html",
        title = 'ChatterBot',
        form = form)
开发者ID:Razin-Tailor,项目名称:ChatterBot,代码行数:101,代码来源:views.py


示例14: str

# Report the requested traversal depth and summary length (Python 2 prints).
print 'layers down = ' + str(args.NumLayers)
print 'sentences in = ' + str(args.NumSentences)

# Validate CLI bounds before doing any network work.
if args.NumLayers < 0 or args.NumLayers > 10:
   print 'Too many or too few layers'
   exit()
if args.NumSentences < 1 or args.NumSentences > 10:
   print 'Too many or too few sentences'
   exit()

# randomly choose a page to start at from the list
pageName = choice(SOURCE_PAGE_NAMES)

# get the page
page = wikipedia.page(pageName)

# for each depth to traverse,
   # randomly choose a link to go down
page = traverseDepth(page, args.NumLayers)

# randomly choose a section. Keep trying until you find one that has content
# (they may be empty). Dont try toooo many times
sectionTitle = ''
section = ''
remaining = len(page.sections)
# NOTE(review): the loop body visible here never decrements `remaining` or
# breaks -- the original source continues past this excerpt; confirm
# termination in the full script.
while remaining > 0:
   print '.'
   sectionTitle = choice(page.sections)
   section = page.section(sectionTitle)
开发者ID:borgel,项目名称:CatFactor,代码行数:29,代码来源:catFactor.py


示例15: test_redirect_with_normalization

 def test_redirect_with_normalization(self):
   """Test that a page redirect with a normalized query loads correctly"""
   # Lower-case query must be normalized, then the redirect followed.
   result = wikipedia.page("communist Party", auto_suggest=False)

   self.assertIsInstance(result, wikipedia.WikipediaPage)
   self.assertEqual(result.title, "Communist party")
开发者ID:remusao,项目名称:Wikipedia,代码行数:5,代码来源:test_page.py


示例16: test_redirect_false

 def test_redirect_false(self):
   """Test that page raises an error on a redirect when redirect == False."""
   def load_redirecting_page():
     return wikipedia.page("Menlo Park, New Jersey", auto_suggest=False, redirect=False)

   self.assertRaises(wikipedia.RedirectError, load_redirecting_page)
开发者ID:remusao,项目名称:Wikipedia,代码行数:4,代码来源:test_page.py


示例17: test


#.........这里部分代码省略.........
            inflag = 0           
            for neg in negator:
                if ser == neg and flag == 0 or data.find(neg) != -1 and flag == 0:
                    inflag = 1
                    nege = 1
                    flash('Negative', 'Answer')
                    flag = 1
            if inflag == 0:
                data = data + ser + ' '
        if nege == 1:
            data = data.strip()
            abc = models.Negative.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')

        #Postive Section

        if flag == 0:
            data = form.openid.data.lower()
            flash('Positive', 'Answer')
            abc = models.Positive.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash('Answer not in database... Lets search Wikipedia Database', 'Answer')
                wikiflag = 1
                #return redirect ('http://www.lmgtfy.com/?q=' + data)
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')

        #Wiki Section
        ans = 0
        if wikiflag == 1:
            abc = models.Wikipedia.query.all()
            for a in abc:
                if (data.find(a.question.lower()) != -1 or a.question.lower().find(data) != -1) and len(data) >= 4:
                    ans = 1
                    break
            if ans == 0:
                answer = 0
            else:
                answer = 1

            if answer == 0:
                flash('Answer not in Wikipedia database... Lets search Wikipedia Internet', 'Answer')
                ny = wikipedia.search(data)
                if ny == []:
                    return redirect ('http://www.lmgtfy.com/?q=' + data1)
                else:
                    try:
                        ny1 = wikipedia.summary(data1, chars=0, auto_suggest=True, redirect=True, sentences=3)
                        finalans=ny1
                        flash(ny1, 'Answer')
                        ny2 = wikipedia.page(data1)
                        flash('Source: '+ ny2.url, 'Answer')
                        #u = models.Wikipedia(question=data, answer=ny1)
                        #db.session.add(u)
                        #db.session.commit()
                    except Exception as inst:
                        flash('Your question is either out of scope of very trival for me to answer', 'Answer')
                        finalans = 'Your question is either out of scope of very trival for me to answer'
            else:
                finalans=a.answer
                flash(a.answer, 'Answer')
        display = '\n'
        s = models.Chats.query.all()
        for chat in reversed(s):
            flash('Question: ' + chat.question, 'Display')
            flash('Answer: ' + chat.answer , 'Display')
            flash('.', 'Display')
        u = models.Chats(question=data1, answer=finalans)
        db.session.add(u)
        db.session.commit() 

        return redirect('/test')
    return render_template("index2.html",
        title = 'ChatterBot',
        form = form)
开发者ID:Razin-Tailor,项目名称:ChatterBot,代码行数:101,代码来源:views.py


示例18: setUp

 def setUp(self):
     """Fetch two of the shortest articles that still have images and sections."""
     for attr, article_title in (("celtuce", "Celtuce"), ("cyclone", "Tropical Depression Ten (2005)")):
         setattr(self, attr, wikipedia.page(article_title))
开发者ID:hrichardlee,项目名称:Wikipedia,代码行数:4,代码来源:page_test.py


示例19: setUp

	def setUp(self):
		"""Fetch the Celtuce article -- one of the shortest pages that includes images."""
		self.celtuce = wikipedia.page("Celtuce")
开发者ID:BobPyron,项目名称:Wikipedia,代码行数:3,代码来源:page_test.py


示例20: test_something_else

	def test_something_else(self):
		# Smoke check (Python 2): loading a redirecting page without
		# auto-suggest should succeed; the result is printed, not asserted.
		print wikipedia.page('Menlo Park, New Jersey', auto_suggest=False)
开发者ID:hrichardlee,项目名称:Wikipedia,代码行数:2,代码来源:new_test.py



注:本文中的wikipedia.wikipedia.page函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python models.Article类代码示例发布时间:2022-05-26
下一篇:
Python wikipedia.warning函数代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap