Python pywikibot.getSite Function Code Examples


This article collects typical usage examples of the pywikibot.getSite function in Python. If you are wondering what getSite does, how to call it, or what real code that uses it looks like, the selected examples below should help.



A total of 20 getSite code examples are shown below, sorted by popularity by default.
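Before the examples, here is a minimal sketch of the call pattern that recurs throughout them: getSite takes a language code and a family name (or no arguments, for the default site from user-config.py) and returns a Site object that is then passed to Page, Category, and similar constructors. This sketch assumes a compat-era pywikibot release where getSite() is still available (later releases expose the same functionality as pywikibot.Site()); the page title used is purely illustrative.

import pywikibot

# A Site object is obtained from a language code and a family name;
# called with no arguments, getSite() returns the default site configured
# in user-config.py.
site = pywikibot.getSite('en', 'wikipedia')
default_site = pywikibot.getSite()

# The Site object is then passed to Page, Category, etc.
page = pywikibot.Page(site, 'Python (programming language)')  # illustrative title
print(page.title())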

Example 1: main

def main():
    startpage = 'Anarana iombonana amin\'ny teny malagasy'
    pages = pagegenerators.CategorizedPageGenerator(catlib.Category(pywikibot.getSite('mg','wiktionary'), startpage))
    for page in pages:
        pagename = page.title()
        try:
            t_p = page.get()
        except wikipedia.NoPage:
            print '  Tsy misy pejy.'
            t_p = ''
        except wikipedia.IsRedirectPage:
            print '  Pejy fihodinana.'
            continue
        except wikipedia.Error:
            print '  Hadisoana.'
            continue
        f_pages = traite(pagename) # mamerina tuple [1s, 2s, 3s, 1pi, 1pp, 2pp, 3pp] ho lohatenimpejy
        c_pages = tupleur(pagename) # mamerina tuple [1s, 2s, 3s, 1pi, 1pp, 2pp, 3pp] ho votoatimpejy
        cont = 0
        b = 0
        while cont <= 6:
            try:
                wikipedia.output((wikipedia.Page(wikipedia.getSite('mg','wiktionary'), f_pages[cont]).get()))
                b += 1
                print b
                cont += 6

            except wikipedia.NoPage:
                try:
                    wikipedia.Page(wikipedia.getSite('mg','wiktionary'), f_pages[cont]).put(c_pages[cont])
                    cont = cont + 1
                except UnicodeDecodeError :
                    cont = cont + 1
                    continue
            if cont >= 6: break
Developer: Webysther, Project: botjagwar, Lines: 35, Source: malagasy_declinaison.py


Example 2: __iter__

 def __iter__(self):
     """Yield page objects until the entire XML dump has been read."""
     from pywikibot import xmlreader
     mysite = pywikibot.getSite()
     dump = xmlreader.XmlDump(self.xmlfilename)
     # regular expression to find the original template.
     # {{vfd}} does the same thing as {{Vfd}}, so both will be found.
     # The old syntax, {{msg:vfd}}, will also be found.
     # TODO: check site.nocapitalize()
     templatePatterns = []
     for template in self.templates:
         templatePattern = template.titleWithoutNamespace()
         if not pywikibot.getSite().nocapitalize:
             templatePattern = '[%s%s]%s' % (templatePattern[0].upper(),
                                             templatePattern[0].lower(),
                                             templatePattern[1:])
         templatePattern = re.sub(' ', '[_ ]', templatePattern)
         templatePatterns.append(templatePattern)
     templateRegex = re.compile(
         r'\{\{ *([mM][sS][gG]:)?(?:%s) *(?P<parameters>\|[^}]+|) *}}'
                                % '|'.join(templatePatterns))
     for entry in dump.parse():
         if templateRegex.search(entry.text):
             page = pywikibot.Page(mysite, entry.title)
             yield page
Developer: reza1615, Project: pywikipedia-rewrite, Lines: 25, Source: template.py


Example 3: addCats

	def addCats(self):
		text = u"""
[[Categorie:Filme românești]]
[[Categorie:Filme în limba română]]
"""
		if self._year:
			text += u"[[Categorie:Filme din %d]]\n" % self._year
		if self._director:
			directors = self._director.split(",")
			for director in directors:
				cat = u"Categorie:Filme regizate de %s" % director.strip()
				cat = pywikibot.Category(pywikibot.getSite(), cat)
				if cat.exists():
					text += u"[[Categorie:Filme regizate de %s]]\n" % director.strip()
		for t in self._types:
			cat = u"Filme de %s" % t.lower()
			catp = None
			if cat in categories:
				catp = pywikibot.Category(pywikibot.getSite(), categories[cat])
			if not catp or not catp.exists():
				catp = pywikibot.Category(pywikibot.getSite(), cat)

			if catp.exists():
				for p in catp.templatesWithParams():
					if p[0].title() == "Format:Redirect categorie":
						break
				else:
					text += u"[[%s]]\n" % catp.title()
		self._text += text
Developer: rowiki, Project: wikiro, Lines: 29, Source: filme_ro.py


Example 4: getTranslatedStringForUser

	def getTranslatedStringForUser(self):
		"""
		Gets the local namespace name for User pages. e.g. Bruker on no.

		Uses pywikibot.

		API method:
			https://no.wikipedia.org/w/api.php?action=query&meta=siteinfo
				 &siprop=namespaces&format=json
		"""
		try:
			logging.info("Fetching User Namespace Name")
			format_language = self.language
			if '_' in format_language:
				wikiSite = pywikibot.getSite(format_language.replace('_','-'))
			else:
				wikiSite = pywikibot.getSite(self.language)
			#print wikiSite
			r = pywikibot.data.api.Request(
				site=wikiSite, action="query", meta="siteinfo")
			r['siprop'] = u'namespaces'
			data = r.submit()
			if self.language == 'pt':
				localized_user = data['query']['namespaces']['2']['*']
				return localized_user.split('(')[0]
			else:
				return data['query']['namespaces']['2']['*']
		except pywikibot.exceptions.NoSuchSite, e:
			logging.error(e)		
Developer: uduwage, Project: Multilingual-Wikipedian-Research, Lines: 29, Source: CollectUsersWithTemplates.py


Example 5: __init__

 def __init__(self, pageToUnlink, namespaces, always):
     self.pageToUnlink = pageToUnlink
     gen = pagegenerators.ReferringPageGenerator(pageToUnlink)
     if namespaces != []:
         gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
     self.generator = pagegenerators.PreloadingGenerator(gen)
     linktrail = pywikibot.getSite().linktrail()
     # The regular expression which finds links. Results consist of four
     # groups:
     #
     # group title is the target page title, that is, everything
     # before | or ].
     #
     # group section is the page section.
     # It'll include the # to make life easier for us.
     #
     # group label is the alternative link title, that's everything
     # between | and ].
     #
     # group linktrail is the link trail, that's letters after ]] which are
     # part of the word.
     # note that the definition of 'letter' varies from language to language.
     self.linkR = re.compile(r'\[\[(?P<title>[^\]\|#]*)(?P<section>#[^\]\|]*)?(\|(?P<label>[^\]]*))?\]\](?P<linktrail>%s)'
                             % linktrail)
     self.always = always
     self.done = False
     self.comment = i18n.twtranslate(pywikibot.getSite(), 'unlink-unlinking',
                                     self.pageToUnlink.title())
Developer: blueprintmrk, Project: pywikibot-core, Lines: 28, Source: unlink.py


Example 6: __init__

 def __init__(self, page, filename, summary, dry, always):
     self.page = pywikibot.Page( pywikibot.getSite(), page )
     self.filename = filename
     self.summary = summary
     if not self.summary:
         self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
     pywikibot.setAction( self.summary )
Developer: masao, Project: savemlak, Lines: 7, Source: put.py


Example 7: save_translation_from_bridge_language

 def save_translation_from_bridge_language(self, infos):
     summary = "Dikan-teny avy amin'ny dikan-teny avy amin'i %(olang)s.wiktionary"%infos
     wikipage = self.output.wikipage(infos)
     try: 
         mg_Page = wikipedia.Page(wikipedia.getSite('mg','wiktionary'), infos['entry'])
     except UnicodeDecodeError: 
         mg_Page = wikipedia.Page(wikipedia.getSite('mg','wiktionary'), infos['entry'].decode('utf8'))
         
     try:
         if mg_Page.exists():
             pagecontent = mg_Page.get()
             if pagecontent.find('{{=%s=}}'%infos['lang'])!=-1:
                 if verbose: print "Efa misy ilay teny iditra."
                 self.output.db(infos)
                 return
             else:
                 wikipage += pagecontent
                 summary= u"+"+summary 
     except wikipedia.exceptions.IsRedirectPage:                
         infos['entry'] = mg_Page.getRedirectTarget().title()
         save_translation_from_bridge_language(self, infos, summary)
         return
     
     except wikipedia.exceptions.InvalidTitle:
         if verbose: print "lohateny tsy mety ho lohatenim-pejy"
         return
     
     except Exception:
         return
     
 
     if verbose: 
         wikipedia.output("\n \03{red}%(entry)s\03{default} : %(lang)s "%infos)
         wikipedia.output("\03{white}%s\03{default}"%wikipage)
     mg_Page.put_async(wikipage, summary)
Developer: Webysther, Project: botjagwar, Lines: 35, Source: dikantenyvaovao+-+Copie.py


Example 8: MakeAppendix

def MakeAppendix(mot):
    verb = mot.title()
    form = """{{subst:-e-mat-vo|%s}}
[[sokajy:Volapoky/Matoanteny|%s]]"""%(verb[:-2], verb[0])
    
    wikipedia.Page(wikipedia.getSite('mg','wiktionary'), "Wiktionary:Raki-bolana volapoky/matoanteny/%s"%verb).put("#FIHODINANA [[Rakibolana:volapoky/matoanteny/%s]]"%verb)
    wikipedia.Page(wikipedia.getSite('mg','wiktionary'), "Rakibolana:volapoky/matoanteny/%s"%verb).put(form,'Matoanteny %s' %verb)
Developer: Webysther, Project: botjagwar, Lines: 7, Source: voverb.py


Example 9: main

def main():
    featured = False
    gen = None

    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine on which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()

    for arg in pywikibot.handleArgs():
        if arg == '-featured':
            featured = True
        else:
            genFactory.handleArg(arg)

    mysite = pywikibot.getSite()
    if mysite.sitename() == 'wikipedia:nl':
        pywikibot.output(
            u'\03{lightred}There is consensus on the Dutch Wikipedia that bots should not be used to fix redirects.\03{default}')
        sys.exit()

    if featured:
        featuredList = i18n.translate(mysite, featured_articles)
        ref = pywikibot.Page(pywikibot.getSite(), featuredList)
        gen = pagegenerators.ReferringPageGenerator(ref)
        gen = pagegenerators.NamespaceFilterPageGenerator(gen, [0])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if gen:
        for page in pagegenerators.PreloadingGenerator(gen):
            workon(page)
    else:
        pywikibot.showHelp('fixing_redirects')
Developer: octobertech, Project: pywikibot-core, Lines: 33, Source: fixing_redirects.py


Example 10: __init__

    def __init__(self, generator, dry, always):
        """
        Constructor. Parameters:
            * generator - The page generator that determines on which pages
                          to work on.
            * dry       - If True, doesn't do any real changes, but only shows
                          what would have been changed.
            * always    - If True, don't prompt for each redirect page.
        """
        self.generator = generator
        self.dry = dry
        self.always = always
        self.lang = pywikibot.getSite().lang
        
        # Set the edit summary message
        self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
        self.templates = pywikibot.translate(pywikibot.getSite(), self.taxoboxTemplates)
        self.templateParameters = pywikibot.translate(pywikibot.getSite(), self.sciNameParameters)

        # Initialize the cache
        try:
            self.cache = pickle.load(file(self.cacheFilename, 'rb'))
        except:
            self.cache = {}
        if not self.lang in self.cache:
            self.cache[self.lang] = {}
Developer: silvonen, Project: pywikipedia-fi, Lines: 26, Source: sciname.py


Example 11: checkWait

def checkWait():
        newlist = ""  # blank variable for later
        site = pywikibot.getSite()
        pagename = localconfig.waitlist
        page = pywikibot.Page(site, pagename)
        waiters = page.get()
        waiters = waiters.replace("}}", "")
        waiters = waiters.replace("*{{User|", "")
        waiters = waiters.split("\n")
        for waiter in waiters:
                if waiter == "":continue  # Non-existant user
                if checkRegisterTime(waiter, 7, False):continue
                if checkBlocked(waiter):continue  # If user is blocked, skip putting them back on the list.
                if getEditCount(waiter) == True:  # If edited, send them to UAA
                        checkUser(waiter, False, False)
                        continue
                if waiter in newlist:continue  # If user already in the list, in case duplicates run over
                # Continue if none of the other checks have issues with the conditions for staying on the waitlist
                newlist = newlist + "\n*{{User|" + waiter + "}}"
                # print "\n*{{User|" + waiter + "}}"
        summary = localconfig.editsumwait
        site = pywikibot.getSite()
        pagename = localconfig.waitlist
        page = pywikibot.Page(site, pagename)
        pagetxt = page.get()
        newlist = newlist.replace("\n*{{User|}}", "")
        page.put(newlist, comment=summary)
Developer: dqwiki, Project: UAA, Lines: 27, Source: globalfunc.py


Example 12: main

def main():
    index = None
    djvu = None
    pages = None
    # what would have been changed.
    ask = False
    overwrite = 'ask'

    # Parse command line arguments
    for arg in pywikibot.handleArgs():
        if arg.startswith("-ask"):
            ask = True
        elif arg.startswith("-overwrite:"):
            overwrite = arg[11:12]
            if overwrite != 'y' and overwrite != 'n':
                pywikibot.output(
                    u"Unknown argument %s; will ask before overwriting" % arg)
                overwrite = 'ask'
        elif arg.startswith("-djvu:"):
            djvu = arg[6:]
        elif arg.startswith("-index:"):
            index = arg[7:]
        elif arg.startswith("-pages:"):
            pages = arg[7:]
        else:
            pywikibot.output(u"Unknown argument %s" % arg)

    # Check the djvu file exists
    if djvu:
        os.stat(djvu)

        if not index:
            import os.path
            index = os.path.basename(djvu)

    if djvu and index:
        site = pywikibot.getSite()
        index_page = pywikibot.Page(site, index)

        if site.family.name != 'wikisource':
            raise pywikibot.PageNotFound(
                u"Found family '%s'; Wikisource required." % site.family.name)

        if not index_page.exists() and index_page.namespace() == 0:
            index_namespace = site.mediawiki_message(
                'Proofreadpage index namespace')

            index_page = pywikibot.Page(pywikibot.getSite(),
                                        u"%s:%s" % (index_namespace, index))
        if not index_page.exists():
            raise pywikibot.NoPage(u"Page '%s' does not exist" % index)
        pywikibot.output(u"uploading text from %s to %s"
                         % (djvu, index_page.title(asLink=True)))
        bot = DjVuTextBot(djvu, index, pages, ask, overwrite)
        if not bot.has_text():
            raise ValueError("No text layer in djvu file")
        bot.run()
    else:
        pywikibot.showHelp()
Developer: Rodehi, Project: GFROS, Lines: 59, Source: djvutext.py


Example 13: main

def main():
    #page generator
    gen = None
    # This temporary array is used to read the page title if one single
    # page to work on is specified by the arguments.
    pageTitle = []
    # Which namespaces should be processed?
    # default to [] which means all namespaces will be processed
    namespaces = []
    # Never ask before changing a page
    always = False
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine on which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()

    for arg in pywikibot.handleArgs():
        if arg.startswith('-xml'):
            if len(arg) == 4:
                xmlFilename = i18n.input('pywikibot-enter-xml-filename')
            else:
                xmlFilename = arg[5:]
            gen = XmlDumpNoReferencesPageGenerator(xmlFilename)
        elif arg.startswith('-namespace:'):
            try:
                namespaces.append(int(arg[11:]))
            except ValueError:
                namespaces.append(arg[11:])
        elif arg == '-always':
            always = True
        else:
            if not genFactory.handleArg(arg):
                pageTitle.append(arg)

    if pageTitle:
        page = pywikibot.Page(pywikibot.getSite(), ' '.join(pageTitle))
        gen = iter([page])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if not gen:
        site = pywikibot.getSite()
        try:
            cat = maintenance_category[site.family.name][site.lang]
        except:
            pass
        else:
            if not namespaces:
                namespaces = [0]
            cat = pywikibot.Category(site, "%s:%s" % (
                site.category_namespace(), cat))
            gen = pagegenerators.CategorizedPageGenerator(cat)
    if not gen:
        pywikibot.showHelp('noreferences')
    else:
        if namespaces:
            gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
        preloadingGen = pagegenerators.PreloadingGenerator(gen)
        bot = NoReferencesBot(preloadingGen, always)
        bot.run()
Developer: bjonesin, Project: pywikibot-core, Lines: 59, Source: noreferences.py


Example 14: __init__

 def __init__(self, myscraper, testing=False):
     self.myscraper = myscraper
     self.testing = testing
     if testing:
         self.destination_site = pywikibot.getSite("test", "test")
     else:
         self.destination_site = pywikibot.getSite("commons", "commons")
     print self.destination_site
Developer: gameguy43, Project: usable_image_scraper, Lines: 8, Source: wikiuploader.py


Example 15: __init__

 def __init__(self, page, filename, summary, overwrite):
     self.page = pywikibot.Page( pywikibot.getSite(), page )
     self.filename = filename
     self.summary = summary
     self.overwrite = overwrite
     if not self.summary:
         self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
     pywikibot.setAction( self.summary )
Developer: masao, Project: savemlak, Lines: 8, Source: createpage.py


Example 16: Link

def Link(cat):
    site = wikipedia.getSite('mg','wiktionary')
    print cat
    pages = pagegenerators.CategorizedPageGenerator(catlib.Category(site, cat))
    checkpages = list()


    for page in pages:
        pagename = page.title()
        page_c = content = page.get()
        wikipedia.output('  >>> %s <<< '%pagename)
        try:
            defs = page.get().split('#')
            if len(defs)>2:
                continue
            else:
                defs = defs[1]
        except Exception:
            continue
        g = defs.find('\n')

        if g != -1:
            defs = defs[:g]
        else:
            pass
        if len(defs) < 2: continue
        for char in u'[]':
            defs = defs.replace(char, u'')
        wikipedia.output('\03{red}%s\03{default}'%defs)
        defs = defs.strip()
        linked_def = dolink(defs)
        wikipedia.output('\03{blue}%s\03{default}'%linked_def)
        if len(linked_def) < 1: continue

        page_c = page_c.replace(defs, linked_def)

        for link in re.findall('\[\[\[A-Za-z][\]|\[]?\]\]', page_c):
            wikipedia.output('\03{green}%s\03{default}'%link)
            page_c = page_c.replace(delink(link), linkfix(link))



        wikipedia.showDiff(page.get(), page_c)
        if (len(content) != len(page_c)):
            if checkbracks(page_c):
                pass#page.put_async(page_c, 'fanisiana rohy')
				
            else:
                print "Ilainao ahitsiana ny rohy eo amin'io pejy io"
                checkpages.append(page.title())
                pass
    c = "== Lisitry ny pejy mila tsaraina ny {{subst:CURRENTDAY}} {{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}} ==\n"
    for page in checkpages:
        wikipedia.output('\03{orange}%s\03'%page)
        c+= "[[%s]]\n"%page
    p = wikipedia.Page(wikipedia.getSite('mg','wiktionary'),
                   "Mpikambana:Jagwar/pejy mila tsaraina")
    pass#p.put_async(c)
Developer: Webysther, Project: botjagwar, Lines: 58, Source: Catmaker.py


Example 17: categories

    def categories(self):
        for page in self.generator:
            try:
                pywikibot.output(u'\n>>>> %s <<<<' % page.title())
                commons = pywikibot.getSite().image_repository()
                commonsCategory = pywikibot.Category(commons,
                                                     'Category:%s' % page.title())
                try:
                    getcommonscat = commonsCategory.get(get_redirect=True)
                    commonsCategoryTitle = commonsCategory.title()
                    categoryname = commonsCategoryTitle.split('Category:', 1)[1]
                    if page.title() == categoryname:
                        oldText = page.get()
                        text = oldText

                        # for commonscat template
                        findTemplate = re.compile(ur'\{\{[Cc]ommons')
                        s = findTemplate.search(text)
                        findTemplate2 = re.compile(ur'\{\{[Ss]isterlinks')
                        s2 = findTemplate2.search(text)
                        if s or s2:
                            pywikibot.output(u'** Already done.')
                        else:
                            text = pywikibot.replaceCategoryLinks(
                                text + u'{{commonscat|%s}}' % categoryname,
                                list(page.categories()))
                            if oldText != text:
                                pywikibot.showDiff(oldText, text)
                                if not self.acceptall:
                                    choice = pywikibot.inputChoice(
                                        u'Do you want to accept these changes?',
                                        ['Yes', 'No', 'All'], ['y', 'N', 'a'],
                                        'N')
                                    if choice == 'a':
                                        self.acceptall = True
                                if self.acceptall or choice == 'y':
                                    try:
                                        msg = i18n.twtranslate(
                                            pywikibot.getSite(), 'commons_link-cat-template-added')
                                        page.put(text, msg)
                                    except pywikibot.EditConflict:
                                        pywikibot.output(
                                            u'Skipping %s because of edit '
                                            u'conflict'
                                            % (page.title()))

                except pywikibot.NoPage:
                    pywikibot.output(u'Category does not exist in Commons!')

            except pywikibot.NoPage:
                pywikibot.output(u'Page %s does not exist?!' % page.title())
            except pywikibot.IsRedirectPage:
                pywikibot.output(u'Page %s is a redirect; skipping.'
                                 % page.title())
            except pywikibot.LockedPage:
                pywikibot.output(u'Page %s is locked?!' % page.title())
Developer: bjonesin, Project: pywikibot-core, Lines: 56, Source: commons_link.py


Example 18: addCoords

def addCoords(countryconfig, monument, coordconfig):
    '''
    Add the coordinates to article.
    '''
    countrycode = countryconfig.get('country')
    lang = countryconfig.get('lang')
    if (countrycode and lang):
        coordTemplate = coordconfig.get('coordTemplate')
        coordTemplateSyntax = coordconfig.get('coordTemplateSyntax')
        site = pywikibot.getSite(lang, 'wikipedia')

        page = pywikibot.Page(site, monument.article)
        try:
            text = page.get()
        except pywikibot.NoPage:  # First except, prevent empty pages
            return False
        except pywikibot.IsRedirectPage:  # second except, prevent redirect
            pywikibot.output(u'%s is a redirect!' % monument.article)
            return False
        except pywikibot.Error:  # third exception, take the problem and print
            pywikibot.output(u"Some error, skipping..")
            return False

        if coordTemplate in page.templates():
            return False

        newtext = text
        replCount = 1
        coordText = coordTemplateSyntax % (monument.lat, monument.lon,
                                           countrycode.upper())
        localCatName = pywikibot.getSite().namespace(WP_CATEGORY_NS)
        catStart = r'\[\[(' + localCatName + '|Category):'
        catStartPlain = u'[[' + localCatName + ':'
        replacementText = u''
        replacementText = coordText + '\n\n' + catStartPlain

        # insert coordinate template before categories
        newtext = re.sub(catStart, replacementText, newtext, replCount, flags=re.IGNORECASE)

        if text != newtext:
            try:
                source_link = common.get_source_link(
                    monument.source,
                    countryconfig.get('type'))
            except ValueError:
                source_link = ''
            comment = u'Adding template %s based on %s, # %s' % (coordTemplate, source_link, monument.id)
            pywikibot.showDiff(text, newtext)
            modPage = pywikibot.input(u'Modify page: %s ([y]/n) ?' % (monument.article))
            if (modPage.lower == 'y' or modPage == ''):
                page.put(newtext, comment)
            return True
        else:
            return False
    else:
        return False
Developer: wikimedia, Project: labs-tools-heritage, Lines: 56, Source: add_coord_to_articles.py


Example 19: getLanguageLinks

def getLanguageLinks(text, insite=None, pageLink="[[]]",
                     template_subpage=False):
    """
    Return a dict of interlanguage links found in text.

    Dict uses language codes as keys and Page objects as values.
    Do not call this routine directly, use Page.interwiki() method
    instead.

    """
    if insite is None:
        insite = pywikibot.getSite()
    fam = insite.family
    # when interwiki links forward to another family, retrieve pages & other
    # infos there
    if fam.interwiki_forward:
        fam = pywikibot.site.Family(fam.interwiki_forward)
    result = {}
    # Ignore interwiki links within nowiki tags, includeonly tags, pre tags,
    # and HTML comments
    tags = ['comments', 'nowiki', 'pre', 'source']
    if not template_subpage:
        tags += ['includeonly']
    text = removeDisabledParts(text, tags)

    # This regular expression will find every link that is possibly an
    # interwiki link.
    # NOTE: language codes are case-insensitive and only consist of basic latin
    # letters and hyphens.
    # TODO: currently, we do not have any, but BCP 47 allows digits, and
    #       underscores.
    # TODO: There is no semantic difference between hyphens and
    #       underscores -> fold them.
    interwikiR = re.compile(r'\[\[([a-zA-Z\-]+)\s?:([^\[\]\n]*)\]\]')
    for lang, pagetitle in interwikiR.findall(text):
        lang = lang.lower()
        # Check if it really is in fact an interwiki link to a known
        # language, or if it's e.g. a category tag or an internal link
        if lang in fam.obsolete:
            lang = fam.obsolete[lang]
        if lang in list(fam.langs.keys()):
            if '|' in pagetitle:
                # ignore text after the pipe
                pagetitle = pagetitle[:pagetitle.index('|')]
            # we want the actual page objects rather than the titles
            site = pywikibot.getSite(code=lang, fam=fam)
            try:
                result[site] = pywikibot.Page(site, pagetitle, insite=insite)
            except pywikibot.InvalidTitle:
                pywikibot.output(u'[getLanguageLinks] Text contains invalid '
                                 u'interwiki link [[%s:%s]].'
                                 % (lang, pagetitle))
                continue
    return result
Developer: bjonesin, Project: pywikibot-core, Lines: 54, Source: textlib.py


Example 20: main

def main():
    text = """{{Proiect:Aniversările zilei/Antet}}
Tabelul de mai jos conține informații despre erorile găsite în datele de naștere și deces ale personalităților menționate în paginile zilelor și ale anilor. Comparația se face între listele de pe Wikipedia și elementele Wikidata ale personalităților respective.

Legendă:
* liniile cu fundal <span style="background-color:#ff8888">roșu</span> reprezintă nepotriviri certe (datele sunt complete în ambele părți, dar nu se potrivesc)
* liniile cu fundal <span style="background-color:#ffff88">galben</span> reprezintă intrări unde Wikidata nu are date complete
* liniile cu fundal <span style="background-color:#88ffff">albastru</span> reprezintă intrări unde Wikidata are mai multe date posibile, toate cu același rank
* liniile cu fundal <span style="background-color:#88ff88">verde</span> reprezintă diferențe de calendar (gregorian vs. julian) 

Scorul este alocat automat pe baza numărului de posibile date de naștere de la wikidata (%d/dată) și pe baza numărului de surse ce susțin data aleasă de algoritm (+%d/sursă, 0 dacă nu este aleasă nicio dată). Scorul are rolul de a prioritiza rezolvarea problemelor ușoare. '''Scor mai mare înseamnă încredere mai mare în datele de la Wikidata'''.
{| class=\"wikitable sortable\"
! Secțiune
! Articol
! Pagină aniversări
! Dată Wikipedia
! Item Wikidata
! Dată Wikidata
! Scor
""" % (MULTIPLE_DATE_PENALTY, MULTIPLE_SOURCES_BONUS)
    #day = 4
    #month = "octombrie"
    #event = "Nașteri"
    #page = pywikibot.Page(pywikibot.getSite(),  "%d %s" % (day, month))
    #import pdb
    #pdb.set_trace()
    #treat(page, day, month, event)
    #return
    for year in range(1901, time.localtime().tm_year):
        for suffix in ["", " î.Hr."]:
            page = pywikibot.Page(pywikibot.getSite(),  "%d%s" % (year, suffix))
            if not page.exists():
                continue
            if page.isRedirectPage():
                page = page.getRedirectTarget()
            if suffix != "":
                year = -year
            for event in sections.keys():
                text += treat_year(page, year, suffix, event)
    for month in months:
        for day in range(1,32):
            page = pywikibot.Page(pywikibot.getSite(),  "%d %s" % (day, month))
            if not page.exists():
                continue
            if page.isRedirectPage():
                page = page.getRedirectTarget()
            for event in sections.keys():
                text += treat_date(page, day, month, event)


    page = pywikibot.Page(pywikibot.getSite(), "Proiect:Aniversări/Erori")
    page.put(text + "|}", "Actualizare nepotriviri")
Developer: rowiki, Project: wikiro, Lines: 52, Source: aniversari.py



Note: The pywikibot.getSite examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow the license of the corresponding project. Do not reproduce without permission.

