
Python textlib.removeDisabledParts Function Code Examples


This article collects typical usage examples of the Python function pywikibot.textlib.removeDisabledParts. If you are wondering how removeDisabledParts is used in practice, or what it actually does, the curated code samples below should help.



The following presents 18 code examples of the removeDisabledParts function, ordered by popularity.
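For orientation, here is a minimal sketch of what removeDisabledParts does. The sample text is made up, and the exact default tag set ('comments', 'nowiki', 'pre', etc.) varies slightly between pywikibot versions:

from pywikibot import textlib

sample = (
    'Visible text <!-- an HTML comment -->\n'
    '<nowiki>[[not a real link]]</nowiki>\n'
    '<pre>wiki markup disabled here</pre>'
)

# By default, HTML comments and nowiki/pre-style sections are stripped,
# so later regex passes only see "live" wikitext.
cleaned = textlib.removeDisabledParts(sample)

# The tags argument restricts removal to specific kinds of disabled parts,
# as several examples below do (e.g. tags=['comments']):
no_comments = textlib.removeDisabledParts(sample, tags=['comments'])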

Example 1: weblinksIn

def weblinksIn(text, withoutBracketed=False, onlyBracketed=False):
    text = textlib.removeDisabledParts(text)

    # MediaWiki parses templates before parsing external links. Thus, there
    # might be a | or a } directly after a URL which does not belong to
    # the URL itself.

    # First, remove the curly braces of inner templates:
    nestedTemplateR = re.compile(r'{{([^}]*?){{(.*?)}}(.*?)}}')
    while nestedTemplateR.search(text):
        text = nestedTemplateR.sub(r'{{\1 \2 \3}}', text)

    # Then blow up the templates with spaces so that the | and }} will not
    # be regarded as part of the link.
    templateWithParamsR = re.compile(r'{{([^}]*?[^ ])\|([^ ][^}]*?)}}',
                                     re.DOTALL)
    while templateWithParamsR.search(text):
        text = templateWithParamsR.sub(r'{{ \1 | \2 }}', text)

    # Append a space before '}}': a URL as the last parameter of a
    # multiline template would otherwise absorb the closing braces
    text = text.replace('}}', ' }}')

    # Remove HTML comments in URLs as well as URLs in HTML comments.
    # Also remove text inside nowiki links etc.
    text = textlib.removeDisabledParts(text)
    linkR = textlib.compileLinkR(withoutBracketed, onlyBracketed)
    for m in linkR.finditer(text):
        if m.group('url'):
            yield m.group('url')
        else:
            yield m.group('urlb')
Author: anrao91 | Project: pywikibot-core | Lines: 32 | Source: weblinkchecker.py
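A hypothetical way to drive this generator (the wikitext below is made up for illustration):

text = ('See [http://example.org Example] and http://example.com/page '
        '<!-- http://hidden.example/ sits inside a comment -->')
for url in weblinksIn(text):
    print(url)
# The commented-out URL is never yielded: removeDisabledParts drops it
# before the compiled link pattern runs.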


Example 2: find_discussion

    def find_discussion(self, category):
        """Find the section with the relevant discussion."""
        if self.section():
            return self.title(as_link=True)
        text = removeDisabledParts(self.text, site=self.site)
        wikicode = mwparserfromhell.parse(text, skip_style_tags=True)
        for section in wikicode.get_sections(levels=[4]):
            heading = section.filter(forcetype=Heading)[0]
            section_title = str(heading.title).strip()
            discussion = '[[{}#{}]]'.format(self.title(), section_title)
            if category.title() == section_title:
                return discussion
            # Split approximately into close, nom, and others
            parts = str(section).split('(UTC)')
            if len(parts) < 3:
                continue
            # Parse the nom for links
            for wikilink in pywikibot.link_regex.finditer(parts[1]):
                title = wikilink.group('title').strip().split('#')[0]
                if not title:
                    continue
                title = pywikibot.Page(self.site, title).title()
                if category.title() == title:
                    return discussion
        return self.title(as_link=True)
Author: JJMC89 | Project: JJMC89_bot | Lines: 25 | Source: cfdw.py


Example 3: __iter__

 def __iter__(self):
     import xmlreader
     dump = xmlreader.XmlDump(self.xmlFilename)
     for entry in dump.parse():
         text = textlib.removeDisabledParts(entry.text)
         if self.refR.search(text) and not self.referencesR.search(text):
             yield pywikibot.Page(pywikibot.Site(), entry.title)
Author: gitter-badger | Project: pywikibot | Lines: 7 | Source: noreferences.py


Example 4: getWordCount

	def getWordCount(self, text):
		text = textlib.removeDisabledParts(text)
		text = textlib.removeHTMLParts(text)
		text = textlib.removeLanguageLinks(text)
		text = textlib.removeCategoryLinks(text)
		word_list = re.findall(r"[\w']+", text)

		return len(word_list)
Author: edgarskos | Project: wikiro | Lines: 8 | Source: ceespring_count.py


Example 5: check

def check(text, languages):
    tags = ['comments', 'nowiki', 'pre', 'source']
    text = textlib.removeDisabledParts(text, tags)
    interwikiR = re.compile(r'\[\[([a-zA-Z\-]+)\s?:([^\[\]\n]*)\]\]')
    for lang, pagetitle in interwikiR.findall(text):
        lang = lang.lower()
        # Check if it really is in fact an interwiki link to a known
        # language, or if it's e.g. a category tag or an internal link
        if lang in languages:
            return True
    return False
Author: HazardSJ | Project: wikidata | Lines: 11 | Source: xmlscanner.py
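A hypothetical call illustrating the behaviour (the language list and wikitext are made up):

# True: a genuine interwiki link whose language code is known
print(check('Intro text [[de:Beispiel]]', ['de', 'en']))

# False: 'category' is not a known language code, and the link hidden in
# an HTML comment is removed before the regex runs
print(check('[[Category:Foo]] <!-- [[de:Versteckt]] -->', ['de', 'en']))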


Example 6: processArticle

def processArticle(page):
	text = page.get()
	text = textlib.removeDisabledParts(text)
	# pywikibot.output(u'Working on "%s"' % title)
	global codeRegexp
	global templateRegexp
	result  = re.findall(codeRegexp, text)
	template = re.findall(templateRegexp, text)
	if len(result) > 0 and len(template) == 0:
		msg = u"* [[%s]]: " % page.title()
		for res in result:
			msg += str(res)
		log(msg)
		pywikibot.output(msg)
Author: rowiki | Project: wikiro | Lines: 14 | Source: search_monument_articles.py


Example 7: treat_property_and_talk

    def treat_property_and_talk(self, prop, page):
        self.current_talk_page = page
        # todo: skip sandbox properties
        # todo: removeDisabledParts now?
        code = mwparserfromhell.parse(page.text, skip_style_tags=True)
        for template in code.ifilter_templates():
            if not template.name.matches(self.template_metadata):
                continue
            params = OrderedDict()
            for param in template.params:
                params[str(param.name).strip()] = str(param.value).strip()
            break
        else:
            pywikibot.output('Template "{}" not found'.format(
                self.template_metadata))
            return

        keys = set(self.func_dict.keys()) & set(params.keys())
        # formatter URL must go before example
        if {'formatter URL', 'example'} <= keys:
            keys.remove('formatter URL')
            keys = ['formatter URL'] + list(keys)

        clear_params = []
        for key in keys:
            param = textlib.removeDisabledParts(params[key])
            if param == '-':
                continue
            if param != '':
                pywikibot.output('Found param "{}"'.format(key))
                try:
                    remove = self.func_dict[key](param)
                except pywikibot.data.api.APIError as exc:
                    remove = False
                if remove:
                    clear_params.append(key)
        if self.getOption('importonly'):
            return

        for par in clear_params:
            template.remove(par, keep_field=True)
        for par in set(params.keys()) & set(self.obsolete_params):
            template.remove(par)

        self.current_page = self.current_talk_page
        self.put_current(str(code), show_diff=True,
                         summary='removing migrated/obsolete parameters')
Author: matejsuchanek | Project: pywikibot-scripts | Lines: 47 | Source: metabot.py


Example 8: parse_page

def parse_page(page):
    """Parse a CFD working page."""
    text = removeDisabledParts(page.text, site=page.site)
    wikicode = mwparserfromhell.parse(text, skip_style_tags=True)
    for section in wikicode.get_sections(flat=True, include_lead=False):
        heading = section.filter(forcetype=Heading)[0]
        section_title = str(heading.title).lower()
        print(section_title)
        if 'move' in section_title:
            mode = 'move'
            edit_summary = 'Moving {old_cat} to {new_cats} per {cfd}'
        elif 'empty' in section_title:
            mode = 'empty'
            edit_summary = 'Removing {old_cat} per {cfd}'
        else:
            continue
        parse_section(section, page.site, mode, edit_summary)
Author: JJMC89 | Project: JJMC89_bot | Lines: 17 | Source: cfdw.py


Example 9: lacksReferences

    def lacksReferences(self, text):
        """Check whether or not the page is lacking a references tag."""
        oldTextCleaned = textlib.removeDisabledParts(text)
        if self.referencesR.search(oldTextCleaned) or \
           self.referencesTagR.search(oldTextCleaned):
            if self.getOption('verbose'):
                pywikibot.output(u'No changes necessary: references tag found.')
            return False
        elif self.referencesTemplates:
            templateR = u'{{(' + u'|'.join(self.referencesTemplates) + ')'
            if re.search(templateR, oldTextCleaned, re.IGNORECASE | re.UNICODE):
                if self.getOption('verbose'):
                    pywikibot.output(
                        u'No changes necessary: references template found.')
                return False
        if not self.refR.search(oldTextCleaned):
            if self.getOption('verbose'):
                pywikibot.output(u'No changes necessary: no ref tags found.')
            return False
        else:
            if self.getOption('verbose'):
                pywikibot.output(u'Found ref without references.')
            return True
Author: gitter-badger | Project: pywikibot | Lines: 23 | Source: noreferences.py


Example 10: run

    def run(self):
        """Run the Bot."""
        try:
            deadLinks = codecs.open(listof404pages, 'r', 'latin_1').read()
        except IOError:
            pywikibot.output(
                'You need to download '
                'http://www.twoevils.org/files/wikipedia/404-links.txt.gz '
                'and to ungzip it in the same directory')
            raise
        socket.setdefaulttimeout(30)
        editedpages = 0
        for page in self.generator:
            try:
                # Load the page's text from the wiki
                new_text = page.get()
                if not page.canBeEdited():
                    pywikibot.output(u"You can't edit page %s"
                                      % page.title(asLink=True))
                    continue
            except pywikibot.NoPage:
                pywikibot.output(u'Page %s not found' % page.title(asLink=True))
                continue
            except pywikibot.IsRedirectPage:
                pywikibot.output(u'Page %s is a redirect'
                                 % page.title(asLink=True))
                continue

            # for each link to change
            for match in linksInRef.finditer(
                    textlib.removeDisabledParts(page.get())):

                link = match.group(u'url')
                # debugging purpose
                # print link
                if u'jstor.org' in link:
                    # TODO: Clean URL blacklist
                    continue

                ref = RefLink(link, match.group('name'))
                f = None
                try:
                    socket.setdefaulttimeout(20)
                    try:
                        f = urlopen(ref.url.decode("utf8"))
                    except UnicodeError:
                        ref.url = quote(ref.url.encode("utf8"), "://")
                        f = urlopen(ref.url)
                    # Try to get Content-Type from server
                    headers = f.info()
                    contentType = headers.getheader('Content-Type')
                    if contentType and not self.MIME.search(contentType):
                        if ref.link.lower().endswith('.pdf') and \
                           not self.getOption('ignorepdf'):
                            # If file has a PDF suffix
                            self.getPDFTitle(ref, f)
                        else:
                            pywikibot.output(
                                u'\03{lightyellow}WARNING\03{default} : '
                                u'media : %s ' % ref.link)
                        if ref.title:
                            if not re.match(
                                    u'(?i) *microsoft (word|excel|visio)',
                                    ref.title):
                                ref.transform(ispdf=True)
                                repl = ref.refTitle()
                            else:
                                pywikibot.output(
                                    u'\03{lightyellow}WARNING\03{default} : '
                                    u'PDF title blacklisted : %s ' % ref.title)
                                repl = ref.refLink()
                        else:
                            repl = ref.refLink()
                        new_text = new_text.replace(match.group(), repl)
                        continue
                    # Get the real url where we end (http redirects !)
                    redir = f.geturl()
                    if redir != ref.link and \
                       domain.findall(redir) == domain.findall(link):
                        if soft404.search(redir) and \
                           not soft404.search(ref.link):
                            pywikibot.output(
                                u'\03{lightyellow}WARNING\03{default} : '
                                u'Redirect 404 : %s ' % ref.link)
                            continue
                        if dirIndex.match(redir) and \
                           not dirIndex.match(ref.link):
                            pywikibot.output(
                                u'\03{lightyellow}WARNING\03{default} : '
                                u'Redirect to root : %s ' % ref.link)
                            continue

                    # uncompress if necessary
                    if headers.get('Content-Encoding') in ('gzip', 'x-gzip'):
                        # XXX: small issue here: the whole page is downloaded
                        # through f.read(). It might fetch big files/pages.
                        # However, truncating an encoded gzipped stream is not
                        # an option, or unzipping will fail.
                        compressed = io.BytesIO(f.read())
                        f = gzip.GzipFile(fileobj=compressed)
# ......... remaining code omitted .........
Author: skamithi | Project: pywikibot-core | Lines: 101 | Source: reflinks.py


Example 11: _match_xml_page_text

def _match_xml_page_text(text):
    """Match page text."""
    text = textlib.removeDisabledParts(text)
    return _ref_regex.search(text) and not _references_regex.search(text)
Author: KaiCode2 | Project: pywikibot-core | Lines: 4 | Source: noreferences.py


Example 12: standardizePageFooter

    def standardizePageFooter(self, text):
        """
        Standardize page footer.

        Makes sure that interwiki links, categories and star templates are
        put to the correct position and into the right order. This combines the
        old instances standardizeInterwiki and standardizeCategories.
        The page footer has the following sections in that sequence:
        1. categories
        2. ## TODO: template beyond categories ##
        3. additional information depending on local site policy
        4. stars templates for featured and good articles
        5. interwiki links

        """
        starsList = [
            u'bueno',
            u'bom interwiki',
            u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed',
            u'destacado', u'destaca[tu]',
            u'enllaç[ _]ad',
            u'enllaz[ _]ad',
            u'leam[ _]vdc',
            u'legătură[ _]a[bcf]',
            u'liamm[ _]pub',
            u'lien[ _]adq',
            u'lien[ _]ba',
            u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt',
            u'liên[ _]kết[ _]chọn[ _]lọc',
            u'ligam[ _]adq',
            u'ligazón[ _]a[bd]',
            u'ligoelstara',
            u'ligoleginda',
            u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km',
            u'link[ _]sm', u'linkfa',
            u'na[ _]lotura',
            u'nasc[ _]ar',
            u'tengill[ _][úg]g',
            u'ua',
            u'yüm yg',
            u'רא',
            u'وصلة مقالة جيدة',
            u'وصلة مقالة مختارة',
        ]

        categories = None
        interwikiLinks = None
        allstars = []

        # The PyWikipediaBot is no longer allowed to touch categories on the
        # German Wikipedia. See
        # https://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/1#Position_der_Personendaten_am_.22Artikelende.22
        # ignoring nn-wiki because of the comment line above the iw section
        if not self.template and '{{Personendaten' not in text and \
           '{{SORTIERUNG' not in text and '{{DEFAULTSORT' not in text and \
           self.site.code not in ('et', 'it', 'bg', 'ru'):
            categories = textlib.getCategoryLinks(text, site=self.site)

        if not self.talkpage:  # and pywikibot.calledModuleName() <> 'interwiki':
            subpage = False
            if self.template:
                loc = None
                try:
                    tmpl, loc = moved_links[self.site.code]
                    del tmpl
                except KeyError:
                    pass
                if loc is not None and loc in self.title:
                    subpage = True
            interwikiLinks = textlib.getLanguageLinks(
                text, insite=self.site, template_subpage=subpage)

            # Removing the interwiki
            text = textlib.removeLanguageLinks(text, site=self.site)
            # Removing the stars' issue
            starstext = textlib.removeDisabledParts(text)
            for star in starsList:
                regex = re.compile(r'(\{\{(?:template:|)%s\|.*?\}\}[\s]*)'
                                   % star, re.I)
                found = regex.findall(starstext)
                if found != []:
                    text = regex.sub('', text)
                    allstars += found

        # Adding categories
        if categories:
            # TODO: Sorting categories in alphabetic order.
            # e.g. using categories.sort()

            # TODO: Taking main cats to top
            #   for name in categories:
            #       if re.search(u"(.+?)\|(.{,1}?)",name.title()) or name.title()==name.title().split(":")[0]+title:
            #            categories.remove(name)
            #            categories.insert(0, name)
            text = textlib.replaceCategoryLinks(text, categories,
                                                site=self.site)
        # Adding stars templates
        if allstars:
            text = text.strip() + self.site.family.interwiki_text_separator
            allstars.sort()
# ......... remaining code omitted .........
Author: skamithi | Project: pywikibot-core | Lines: 101 | Source: cosmetic_changes.py


Example 13: find_and_replace

    def find_and_replace(self, text, init):
        new_params = []
        old_params = []
        unknown_params = []
        removed_params = []
        changed = False
        for template, fielddict in textlib.extract_templates_and_params(
                text, remove_disabled_parts=False, strip=False):
            if self.normalize(template) not in (self.template,
                                                self.new_template):
                continue

            changed = self.normalize(template) != self.new_template
            start_match = re.search(r'\{\{\s*((%s)\s*:\s*)?%s\s*' % (
                '|'.join(self.site.namespaces[10]), re.escape(template)), text)
            if not start_match:
                if not init:
                    pywikibot.error("Couldn't find the template")
                return text, 0

            start = start_match.start()
            if len(fielddict) > 0:
                end = text.index('|', start)
            else:
                end = text.index('}}', start)

            unnamed = {}
            for name, value in chain(fielddict.items(), IterUnnamed(unnamed)):
                end += len('|%s=%s' % (name, value))

                name = name.strip()
                value = (value
                         .replace('\n<!-- Zastaralé parametry -->', '')
                         .replace('\n<!-- Neznámé parametry -->', '')
                         .strip())

                try:
                    new_name = self.handle_param(name)
                except OldParamException:
                    if textlib.removeDisabledParts(value, ['comments']).strip():
                        old_params.append(
                            (name, value)
                        )
                except RemoveParamException:
                    changed = True
                    if textlib.removeDisabledParts(value, ['comments']).strip():
                        removed_params.append(
                            (name, value)
                        )
                except UnknownParamException:
                    if textlib.removeDisabledParts(value, ['comments']).strip():
                        unknown_params.append(
                            (name, value)
                        )
                except AssertionError:
                    pywikibot.error('Couldn\'t handle parameter "%s"' % name)
                    return text, 0
                except UnnamedParamException:
                    unnamed[value] = ''
                else:
                    new_params.append(
                        (new_name, value)
                    )
                    if new_name != name:
                        changed = True

            end += len('}}')

            while text[start:end].count('{{') < text[start:end].count('}}'):
                end = text[:end].rindex('}}') + len('}}')

            if text[start:end].count('{{') > text[start:end].count('}}'):
                balance = 1
                end = start
                while balance > 0:
                    next_close = text.index('}}', end)
                    balance += text[end:next_close].count('{{') - 1
                    end = next_close + len('}}')

            if not text[start:end].endswith('}}'): # elif?
                end = text[:end].rindex('}}') + len('}}')

            if (end < start or not text[start:end].endswith('}}') or
                    text[start:end].count('{{') != text[start:end].count('}}')):
                pywikibot.error("Couldn't parse the template")
                return text, 0
            break

        else:
            pywikibot.error("Couldn't parse the template")
            return text, 0

        if not changed:
            pywikibot.output('No parameters changed')
            return text, 0

        while end < len(text) and text[end].isspace(): # todo: also before
            end += 1

        lines = []
# ......... remaining code omitted .........
Author: matejsuchanek | Project: pywikibot-scripts | Lines: 101 | Source: migrate_infobox.py


Example 14: add_text

def add_text(page, addText, summary=None, regexSkip=None,
             regexSkipUrl=None, always=False, up=False, putText=True,
             oldTextGiven=None, reorderEnabled=True, create=False):
    """
    Add text to a page.

    @rtype: tuple of (text, newtext, always)
    """
    site = page.site
    if not summary:
        summary = i18n.twtranslate(site, 'add_text-adding',
                                   {'adding': addText[:200]})

    # When a page is tagged as "really well written" it has a star in the
    # interwiki links. This is a list of all the templates used (in regex
    # format) to make the stars appear.

    errorCount = 0

    if putText:
        pywikibot.output(u'Loading %s...' % page.title())
    if oldTextGiven is None:
        try:
            text = page.get()
        except pywikibot.NoPage:
            if create:
                pywikibot.output(u"%s doesn't exist, creating it!"
                                 % page.title())
                text = u''
            else:
                pywikibot.output(u"%s doesn't exist, skip!" % page.title())
                return (False, False, always)
        except pywikibot.IsRedirectPage:
            pywikibot.output(u"%s is a redirect, skip!" % page.title())
            return (False, False, always)
    else:
        text = oldTextGiven
    # Understand if the bot has to skip the page or not
    # In this way you can use both -except and -excepturl
    if regexSkipUrl is not None:
        url = page.full_url()
        result = re.findall(regexSkipUrl, site.getUrl(url))
        if result != []:
            pywikibot.output(
                'Exception! regex (or word) used with -exceptUrl '
                'is in the page. Skip!\n'
                'Match was: %s' % result)
            return (False, False, always)
    if regexSkip is not None:
        result = re.findall(regexSkip, text)
        if result != []:
            pywikibot.output(
                'Exception! regex (or word) used with -except '
                'is in the page. Skip!\n'
                'Match was: %s' % result)
            return (False, False, always)
    # If not up, text put below
    if not up:
        newtext = text
        # Translating the \\n into binary \n
        addText = addText.replace('\\n', config.line_separator)
        if (reorderEnabled):
            # Getting the categories
            categoriesInside = textlib.getCategoryLinks(newtext, site)
            # Deleting the categories
            newtext = textlib.removeCategoryLinks(newtext, site)
            # Getting the interwiki
            interwikiInside = textlib.getLanguageLinks(newtext, site)
            # Removing the interwiki
            newtext = textlib.removeLanguageLinks(newtext, site)

            # Adding the text
            newtext += u"%s%s" % (config.line_separator, addText)
            # Reputting the categories
            newtext = textlib.replaceCategoryLinks(newtext,
                                                   categoriesInside, site,
                                                   True)
            # Dealing the stars' issue
            allstars = []
            starstext = textlib.removeDisabledParts(text)
            for star in starsList:
                regex = re.compile(r'(\{\{(?:template:|)%s\|.*?\}\}[\s]*)'
                                   % star, re.I)
                found = regex.findall(starstext)
                if found != []:
                    newtext = regex.sub('', newtext)
                    allstars += found
            if allstars != []:
                newtext = newtext.strip() + config.line_separator * 2
                allstars.sort()
                for element in allstars:
                    newtext += '%s%s' % (element.strip(), config.LS)
            # Adding the interwiki
            newtext = textlib.replaceLanguageLinks(newtext, interwikiInside,
                                                   site)
        else:
            newtext += u"%s%s" % (config.line_separator, addText)
    else:
        newtext = addText + config.line_separator + text
    if putText and text != newtext:
# ......... remaining code omitted .........
Author: happy5214 | Project: pywikibot-core | Lines: 101 | Source: add_text.py


Example 15: linkedImages

def linkedImages(page):
    """Return a list of image Pages that this Page links to.

    Only returns images from "normal" internal links. Category links are
    omitted unless prefixed with ":". Embedded templates are omitted (but
    links within them are returned). All interwiki and external links are
    omitted.

    @param page: the Page whose wikitext is scanned
    @return: a list of Page objects.
    """

    Rlink = re.compile(r'\[\[(?P<title>[^\]\|\[]*)(\|[^\]]*)?\]\]')
    result = []
    try:
        thistxt = textlib.removeLanguageLinks(page.get(get_redirect=True),
                                              page.site)
    except pywikibot.NoPage:
        raise
    except pywikibot.IsRedirectPage:
        raise
    except pywikibot.SectionError:
        return []
    thistxt = textlib.removeCategoryLinks(thistxt, page.site)

    # remove HTML comments, pre, nowiki, and includeonly sections
    # from text before processing
    thistxt = textlib.removeDisabledParts(thistxt)

    # resolve {{ns:-1}} or {{ns:Help}}
    # thistxt = page.site.resolvemagicwords(thistxt)

    for match in Rlink.finditer(thistxt):
        try:
            #print(match.group(0))
            title = match.group('title')
            title = title.replace("_", " ").strip(" ")
            # print title
            if title == "":
                # empty link - problem in the page
                continue
            # convert relative link to absolute link
            if title.startswith(".."):
                parts = page.title().split('/')
                parts.pop()
                title = '/'.join(parts) + title[2:]
            elif title.startswith("/"):
                title = '%s/%s' % (page.title(), title[1:])
            if title.startswith("#"):
                # this is an internal section link
                continue
            if not page.site.isInterwikiLink(title):
                page2 = pywikibot.Page(page.site, title)
                try:
                    hash(str(page2))
                except Exception:
                    pywikibot.output("Page %s contains invalid link to [[%s]]."
                                 % (page.title(), title))
                    continue
                if not page2.isImage():
                    continue
                if page2.title(withSection=False) and page2 not in result:
                    result.append(page2)
        except pywikibot.NoUsername:
            continue
        except:
            raise
    return result
Author: rowiki | Project: wikiro | Lines: 68 | Source: strainu_functions.py


Example 16: processRE

def processRE(param, rx):
    cleaned_text = textlib.removeDisabledParts(unicode(param.value.strip()))
    relist = re.findall(rx, cleaned_text)
    return relist
Author: notconfusing | Project: harvest_infobox_book | Lines: 4 | Source: harvest_books.py


Example 17: remove_cats_and_comments

    def remove_cats_and_comments(self, text):
        """Remove categories, comments and trailing spaces from wikitext."""
        text = textlib.removeCategoryLinks(text, site=self.site)
        text = textlib.removeDisabledParts(text, tags=['comments'])
        return text.strip()
Author: lokal-profil | Project: LSH | Lines: 5 | Source: replace_descriptions.py


Example 18: run

    def run(self):
        """Run the Bot."""
        try:
            deadLinks = codecs.open(listof404pages, 'r', 'latin_1').read()
        except IOError:
            raise NotImplementedError(
                '404-links.txt is required for reflinks.py\n'
                'You need to download\n'
                'http://www.twoevils.org/files/wikipedia/404-links.txt.gz\n'
                'and to unzip it in the same directory')

        editedpages = 0
        for page in self.generator:
            try:
                # Load the page's text from the wiki
                new_text = page.get()
                if not page.canBeEdited():
                    pywikibot.output(u"You can't edit page %s"
                                     % page.title(asLink=True))
                    continue
            except pywikibot.NoPage:
                pywikibot.output(u'Page %s not found' % page.title(asLink=True))
                continue
            except pywikibot.IsRedirectPage:
                pywikibot.output(u'Page %s is a redirect'
                                 % page.title(asLink=True))
                continue

            # for each link to change
            for match in linksInRef.finditer(
                    textlib.removeDisabledParts(page.get())):

                link = match.group(u'url')
                # debugging purpose
                # print link
                if u'jstor.org' in link:
                    # TODO: Clean URL blacklist
                    continue

                ref = RefLink(link, match.group('name'))
                f = None

                try:
                    f = comms.http.fetch(
                        ref.url, use_fake_user_agent=self._use_fake_user_agent)

                    # Try to get Content-Type from server
                    contentType = f.response_headers.get('content-type')
                    if contentType and not self.MIME.search(contentType):
                        if ref.link.lower().endswith('.pdf') and \
                           not self.getOption('ignorepdf'):
                            # If file has a PDF suffix
                            self.getPDFTitle(ref, f)
                        else:
                            pywikibot.output(color_format(
                                '{lightyellow}WARNING{default} : '
                                'media : {0} ', ref.link))
                        if ref.title:
                            if not re.match(
                                    u'(?i) *microsoft (word|excel|visio)',
                                    ref.title):
                                ref.transform(ispdf=True)
                                repl = ref.refTitle()
                            else:
                                pywikibot.output(color_format(
                                    '{lightyellow}WARNING{default} : '
                                    'PDF title blacklisted : {0} ', ref.title))
                                repl = ref.refLink()
                        else:
                            repl = ref.refLink()
                        new_text = new_text.replace(match.group(), repl)
                        continue

                    # Get the real url where we end (http redirects !)
                    redir = f.data.url
                    if redir != ref.link and \
                       domain.findall(redir) == domain.findall(link):
                        if soft404.search(redir) and \
                           not soft404.search(ref.link):
                            pywikibot.output(color_format(
                                '{lightyellow}WARNING{default} : '
                                'Redirect 404 : {0} ', ref.link))
                            continue
                        if dirIndex.match(redir) and \
                           not dirIndex.match(ref.link):
                            pywikibot.output(color_format(
                                u'{lightyellow}WARNING{default} : '
                                u'Redirect to root : {0} ', ref.link))
                            continue

                    if f.status != requests.codes.ok:
                        pywikibot.output(u'HTTP error (%s) for %s on %s'
                                         % (f.status, ref.url,
                                            page.title(asLink=True)),
                                         toStdout=True)
                        # 410 Gone, indicates that the resource has been purposely
                        # removed
                        if f.status == 410 or \
                           (f.status == 404 and (u'\t%s\t' % ref.url in deadLinks)):
                            repl = ref.refDead()
# ......... remaining code omitted .........
Author: magul | Project: pywikibot-core | Lines: 101 | Source: reflinks.py



Note: The pywikibot.textlib.removeDisabledParts examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, refer to each project's license; do not reproduce without permission.

