本文整理汇总了Python中wikipedia.removeDisabledParts函数的典型用法代码示例。如果您正苦于以下问题:Python removeDisabledParts函数的具体用法?Python removeDisabledParts怎么用?Python removeDisabledParts使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了removeDisabledParts函数的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: weblinksIn
def weblinksIn(text, withoutBracketed=False, onlyBracketed=False):
    """Yield the URL of every external link found in *text*.

    *withoutBracketed* / *onlyBracketed* are passed straight through to
    pywikibot.compileLinkR to select which link syntax is matched.
    """
    # Drop HTML comments, <nowiki> sections etc. before scanning.
    text = pywikibot.removeDisabledParts(text)
    # MediaWiki parses templates before external links, so a | or } that
    # directly follows a URL may belong to a surrounding template rather
    # than to the URL itself.
    # Step 1: flatten the curly braces of templates nested inside templates.
    nestedTmplPat = re.compile(r'{{([^}]*?){{(.*?)}}(.*?)}}')
    while nestedTmplPat.search(text):
        text = nestedTmplPat.sub(r'{{\1 \2 \3}}', text)
    # Step 2: pad template delimiters with spaces so that | and }} are no
    # longer adjacent to the link and cannot be mistaken for part of it.
    paramTmplPat = re.compile(r'{{([^}]*?[^ ])\|([^ ][^}]*?)}}', re.DOTALL)
    while paramTmplPat.search(text):
        text = paramTmplPat.sub(r'{{ \1 | \2 }}', text)
    linkR = pywikibot.compileLinkR(withoutBracketed, onlyBracketed)
    # NOTE(review): disabled parts were already removed above; this second
    # pass looks redundant but is kept to preserve the original behaviour.
    text = pywikibot.removeDisabledParts(text)
    for hit in linkR.finditer(text):
        yield hit.group('url')
开发者ID:dbow,项目名称:Project-OPEN,代码行数:25,代码来源:weblinkchecker.py
示例2: lacksReferences
def lacksReferences(self, text, verbose = True):
"""
Checks whether or not the page is lacking a references tag.
"""
oldTextCleaned = pywikibot.removeDisabledParts(text)
if self.referencesR.search(oldTextCleaned) or \
self.referencesTagR.search(oldTextCleaned):
if verbose:
pywikibot.output(u'No changes necessary: references tag found.')
return False
elif self.referencesTemplates:
templateR = u'{{(' + u'|'.join(self.referencesTemplates) + ')'
if re.search(templateR, oldTextCleaned, re.IGNORECASE|re.UNICODE):
if verbose:
pywikibot.output(
u'No changes necessary: references template found.')
return False
if not self.refR.search(oldTextCleaned):
if verbose:
pywikibot.output(u'No changes necessary: no ref tags found.')
return False
else:
if verbose:
pywikibot.output(u'Found ref without references.')
return True
开发者ID:Protonk,项目名称:pywikipedia2,代码行数:25,代码来源:noreferences.py
示例3: __iter__
def __iter__(self):
import xmlreader
dump = xmlreader.XmlDump(self.xmlFilename)
for entry in dump.parse():
text = pywikibot.removeDisabledParts(entry.text)
if self.refR.search(text) and not self.referencesR.search(text):
yield pywikibot.Page(pywikibot.getSite(), entry.title)
开发者ID:Protonk,项目名称:pywikipedia2,代码行数:7,代码来源:noreferences.py
示例4: procesPage
    def procesPage(self, page):
        """
        Proces a single page

        Reads the configured template from *page* and, for every field
        listed in self.fields, adds the corresponding claim to the page's
        Wikidata item unless a claim for that property already exists.
        """
        item = pywikibot.DataPage(page)
        pywikibot.output('Processing %s' % page)
        if not item.exists():
            pywikibot.output('%s doesn\'t have a wikidata item :(' % page)
            #TODO FIXME: We should provide an option to create the page
        else:
            pagetext = page.get()
            # Ignore template uses inside comments / <nowiki> sections.
            pagetext = pywikibot.removeDisabledParts(pagetext)
            templates = pywikibot.extract_templates_and_params(pagetext)
            for (template, fielddict) in templates:
                # We found the template we were looking for
                if template.replace(u'_', u' ') == self.templateTitle:
                    for field, value in fielddict.items():
                        # This field contains something useful for us
                        if field in self.fields:
                            # Check if the property isn't already set
                            claim = self.fields[field]
                            if claim in item.get().get('claims'):
                                pywikibot.output(
                                    u'A claim for %s already exists. Skipping'
                                    % (claim,))
                                # TODO FIXME: This is a very crude way of dupe
                                # checking
                            else:
                                # Try to extract a valid page
                                # (a wikilink, optionally with a |label part)
                                match = re.search(re.compile(
                                    r'\[\[(?P<title>[^\]|[#<>{}]*)(\|.*?)?\]\]'),
                                    value)
                                if match:
                                    try:
                                        # group(1) is the link target (same
                                        # span as the named group 'title').
                                        link = match.group(1)
                                        linkedPage = pywikibot.Page(self.site,
                                                                    link)
                                        # Follow redirects so the claim points
                                        # at the real target item.
                                        if linkedPage.isRedirectPage():
                                            linkedPage = linkedPage.getRedirectTarget()
                                        linkedItem = pywikibot.DataPage(linkedPage)
                                        pywikibot.output('Adding %s --> %s'
                                                         % (claim,
                                                            linkedItem.getID()))
                                        # NOTE(review): self.site is used as an
                                        # attribute above (pywikibot.Page) but
                                        # called as self.site() here — confirm
                                        # which form is correct.
                                        if self.setSource(self.site().language()):
                                            item.editclaim(
                                                str(claim),
                                                linkedItem.getID(),
                                                refs={self.setSource(
                                                    self.site().language())})
                                        else:
                                            item.editclaim(str(claim),
                                                           linkedItem.getID())
                                    except pywikibot.NoPage:
                                        pywikibot.output(
                                            "[[%s]] doesn't exist so I can't link to it"
                                            % linkedItem.title())
开发者ID:Botomatik,项目名称:JackBot,代码行数:56,代码来源:harvest_template.py
示例5: add_text
#.........这里部分代码省略.........
if not up:
newtext = text
# Translating the \\n into binary \n
addText = addText.replace('\\n', '\n')
if (reorderEnabled):
# Getting the categories
categoriesInside = pywikibot.getCategoryLinks(newtext, site)
# Deleting the categories
newtext = pywikibot.removeCategoryLinks(newtext, site)
# Getting the interwiki
interwikiInside = pywikibot.getLanguageLinks(newtext, site)
# Removing the interwiki
newtext = pywikibot.removeLanguageLinks(newtext, site)
# nn got a message between the categories and the iw's
# and they want to keep it there, first remove it
hasCommentLine = False
if (site.language()==u'nn'):
regex = re.compile('(<!-- ?interwiki \(no(?:/nb)?, ?sv, ?da first; then other languages alphabetically by name\) ?-->)')
found = regex.findall(newtext)
if found:
hasCommentLine = True
newtext = regex.sub('', newtext)
# Adding the text
newtext += u"\n%s" % addText
# Reputting the categories
newtext = pywikibot.replaceCategoryLinks(newtext,
categoriesInside, site, True)
#Put the nn iw message back
if site.language()==u'nn' and (interwikiInside or hasCommentLine):
newtext = newtext + u'\r\n\r\n' + nn_iw_msg
# Dealing the stars' issue
allstars = []
starstext = pywikibot.removeDisabledParts(text)
for star in starsList:
regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)' % star,
re.I)
found = regex.findall(starstext)
if found != []:
newtext = regex.sub('', newtext)
allstars += found
if allstars != []:
newtext = newtext.strip()+'\r\n\r\n'
allstars.sort()
for element in allstars:
newtext += '%s\r\n' % element.strip()
# Adding the interwiki
newtext = pywikibot.replaceLanguageLinks(newtext, interwikiInside, site)
else:
# Adding the text
newtext += u"\n%s" % addText
# If instead the text must be added above...
else:
newtext = addText + '\n' + text
if putText and text != newtext:
pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
% page.title())
pywikibot.showDiff(text, newtext)
# Let's put the changes.
while True:
# If someone load it as module, maybe it's not so useful to put the
# text in the page
if putText:
if not always:
choice = pywikibot.inputChoice(
u'Do you want to accept these changes?',
开发者ID:RileyHuntley,项目名称:Italic-title-bot,代码行数:67,代码来源:Italic+title.py
示例6: run
def run(self):
"""
Runs the Bot
"""
pywikibot.setAction(pywikibot.translate(self.site, msg))
try:
deadLinks = codecs.open(listof404pages, 'r', 'latin_1').read()
except IOError:
pywikibot.output(
'You need to download http://www.twoevils.org/files/wikipedia/404-links.txt.gz and to ungzip it in the same directory')
raise
socket.setdefaulttimeout(30)
editedpages = 0
for page in self.generator:
try:
# Load the page's text from the wiki
new_text = page.get()
if not page.canBeEdited():
pywikibot.output(u"You can't edit page %s"
% page.title(asLink=True))
continue
except pywikibot.NoPage:
pywikibot.output(u'Page %s not found' % page.title(asLink=True))
continue
except pywikibot.IsRedirectPage:
pywikibot.output(u'Page %s is a redirect'
% page.title(asLink=True))
continue
for match in linksInRef.finditer(pywikibot.removeDisabledParts(page.get())):
#for each link to change
link = match.group(u'url')
#debugging purpose
#print link
if u'jstor.org' in link:
#TODO: Clean URL blacklist
continue
ref = RefLink(link, match.group('name'))
f = None
try:
socket.setdefaulttimeout(20)
try:
f = urllib2.urlopen(ref.url.decode("utf8"))
except UnicodeError:
ref.url = urllib2.quote(ref.url.encode("utf8"),"://")
f = urllib2.urlopen(ref.url)
#Try to get Content-Type from server
headers = f.info()
contentType = headers.getheader('Content-Type')
if contentType and not self.MIME.search(contentType):
if ref.link.lower().endswith('.pdf') and \
not self.ignorepdf:
# If file has a PDF suffix
self.getPDFTitle(ref, f)
else:
pywikibot.output(
u'\03{lightyellow}WARNING\03{default} : media : %s '
% ref.link)
if ref.title:
if not re.match(
'(?i) *microsoft (word|excel|visio)',
ref.title):
ref.transform(ispdf=True)
repl = ref.refTitle()
else:
pywikibot.output(
'\03{lightyellow}WARNING\03{default} : PDF title blacklisted : %s '
% ref.title)
repl = ref.refLink()
else:
repl = ref.refLink()
new_text = new_text.replace(match.group(), repl)
continue
# Get the real url where we end (http redirects !)
redir = f.geturl()
if redir != ref.link and \
domain.findall(redir) == domain.findall(link):
if soft404.search(redir) and \
not soft404.search(ref.link):
pywikibot.output(
u'\03{lightyellow}WARNING\03{default} : Redirect 404 : %s '
% ref.link)
continue
if dirIndex.match(redir) and \
not dirIndex.match(ref.link):
pywikibot.output(
u'\03{lightyellow}WARNING\03{default} : Redirect to root : %s '
% ref.link)
continue
# uncompress if necessary
if headers.get('Content-Encoding') in ('gzip', 'x-gzip'):
# XXX: small issue here: the whole page is downloaded
# through f.read(). It might fetch big files/pages.
# However, truncating an encoded gzipped stream is not
# an option, for unzipping will fail.
compressed = StringIO.StringIO(f.read())
f = gzip.GzipFile(fileobj=compressed)
#.........这里部分代码省略.........
开发者ID:edgarskos,项目名称:pywikipedia-git,代码行数:101,代码来源:reflinks.py
示例7: add_text
def add_text(page=None, addText=None, summary=None, regexSkip=None,
regexSkipUrl=None, always=False, up=False, putText=True,
oldTextGiven=None, reorderEnabled=True, create=False):
# When a page is tagged as "really well written" it has a star in the
# interwiki links. This is a list of all the templates used (in regex
# format) to make the stars appear.
starsList = [
u'bueno',
u'bom interwiki',
u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed',
u'destacado', u'destaca[tu]',
u'enllaç[ _]ad',
u'enllaz[ _]ad',
u'leam[ _]vdc',
u'legătură[ _]a[bcf]',
u'liamm[ _]pub',
u'lien[ _]adq',
u'lien[ _]ba',
u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt',
u'liên[ _]kết[ _]chọn[ _]lọc',
u'ligam[ _]adq',
u'ligoelstara',
u'ligoleginda',
u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km',
u'link[ _]sm', u'linkfa',
u'na[ _]lotura',
u'nasc[ _]ar',
u'tengill[ _][úg]g',
u'ua',
u'yüm yg',
u'רא',
u'وصلة مقالة جيدة',
u'وصلة مقالة مختارة',
]
errorCount = 0
site = pywikibot.getSite()
pathWiki = site.family.nicepath(site.lang)
site = pywikibot.getSite()
if oldTextGiven is None:
try:
text = page.get()
except pywikibot.NoPage:
if create:
pywikibot.output(u"%s doesn't exist, creating it!"
% page.title())
text = u''
else:
pywikibot.output(u"%s doesn't exist, skip!" % page.title())
return (False, False, always)
except pywikibot.IsRedirectPage:
pywikibot.output(u"%s is a redirect, skip!" % page.title())
return (False, False, always)
else:
text = oldTextGiven
# If not up, text put below
if not up:
newtext = text
# Translating the \\n into binary \n
addText = addText.replace('\\n', '\n')
if (reorderEnabled):
# Getting the categories
categoriesInside = pywikibot.getCategoryLinks(newtext, site)
# Deleting the categories
newtext = pywikibot.removeCategoryLinks(newtext, site)
# Getting the interwiki
interwikiInside = pywikibot.getLanguageLinks(newtext, site)
# Removing the interwiki
newtext = pywikibot.removeLanguageLinks(newtext, site)
# Adding the text
newtext += u"\n%s" % addText
# Reputting the categories
newtext = pywikibot.replaceCategoryLinks(newtext,
categoriesInside, site,
True)
# Dealing the stars' issue
allstars = []
starstext = pywikibot.removeDisabledParts(text)
for star in starsList:
regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)'
% star, re.I)
found = regex.findall(starstext)
if found != []:
newtext = regex.sub('', newtext)
allstars += found
if allstars != []:
newtext = newtext.strip() + '\r\n\r\n'
allstars.sort()
for element in allstars:
newtext += '%s\r\n' % element.strip()
# Adding the interwiki
newtext = pywikibot.replaceLanguageLinks(newtext, interwikiInside,
site)
else:
newtext += u"\n%s" % addText
else:
newtext = addText + '\n' + text
if putText and text != newtext:
pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
#.........这里部分代码省略.........
开发者ID:Technical-13,项目名称:g13bot_tools,代码行数:101,代码来源:g13_nom_bot.py
示例8: add_text
#.........这里部分代码省略.........
# In this way you can use both -except and -excepturl
if regexSkipUrl != None:
url = "%s%s" % (pathWiki, page.urlname())
result = re.findall(regexSkipUrl, site.getUrl(url))
if result != []:
wikipedia.output(u"Exception! regex (or word) used with -exceptUrl is in the page. Skip!")
return (False, False, always) # continue
if regexSkip != None:
result = re.findall(regexSkip, text)
if result != []:
wikipedia.output(u"Exception! regex (or word) used with -except is in the page. Skip!")
return (False, False, always) # continue
# If not up, text put below
if not up:
newtext = text
# Getting the categories
categoriesInside = wikipedia.getCategoryLinks(newtext, site)
# Deleting the categories
newtext = wikipedia.removeCategoryLinks(newtext, site)
# Getting the interwiki
interwikiInside = wikipedia.getLanguageLinks(newtext, site)
# Removing the interwiki
newtext = wikipedia.removeLanguageLinks(newtext, site)
# nn got a message between the categories and the iw's and they want to keep it there, first remove it
if site.language() == u"nn":
newtext = newtext.replace(nn_iw_msg, "")
# Translating the \\n into binary \n
addText = addText.replace("\\n", "\n")
# Adding the text
newtext += u"\n%s" % addText
# Reputting the categories
newtext = wikipedia.replaceCategoryLinks(newtext, categoriesInside, site, True)
# Put the nn iw message back
if site.language() == u"nn":
newtext = newtext + u"\n" + nn_iw_msg
# Dealing the stars' issue
allstars = []
starstext = wikipedia.removeDisabledParts(text)
for star in starsList:
regex = re.compile("(\{\{(?:template:|)%s\|.*?\}\}[\s]*)" % star, re.I)
found = regex.findall(starstext)
if found != []:
newtext = regex.sub("", newtext)
allstars += found
if allstars != []:
newtext = newtext.strip() + "\r\n\r\n"
allstars.sort()
for element in allstars:
newtext += "%s\r\n" % element.strip()
# Adding the interwiki
newtext = wikipedia.replaceLanguageLinks(newtext, interwikiInside, site)
# If instead the text must be added above...
else:
newtext = addText + "\n" + text
if putText and text != newtext:
wikipedia.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title())
wikipedia.showDiff(text, newtext)
# Let's put the changes.
while True:
# If someone load it as module, maybe it's not so useful to put the
# text in the page
if putText:
if not always:
choice = wikipedia.inputChoice(
u"Do you want to accept these changes?", ["Yes", "No", "All"], ["y", "N", "a"], "N"
)
if choice == "a":
always = True
elif choice == "n":
return (False, False, always)
if always or choice == "y":
try:
if always:
page.put(newtext, summary)
else:
page.put_async(newtext, summary)
except wikipedia.EditConflict:
wikipedia.output(u"Edit conflict! skip!")
return (False, False, always)
except wikipedia.ServerError:
errorCount += 1
if errorCount < 5:
wikipedia.output(u"Server Error! Wait..")
time.sleep(5)
continue
else:
raise wikipedia.ServerError(u"Fifth Server Error!")
except wikipedia.SpamfilterError, e:
wikipedia.output(u"Cannot change %s because of blacklist entry %s" % (page.title(), e.url))
return (False, False, always)
except wikipedia.PageNotSaved, error:
wikipedia.output(u"Error putting page: %s" % error.args)
return (False, False, always)
except wikipedia.LockedPage:
wikipedia.output(u"Skipping %s (locked page)" % page.title())
return (False, False, always)
else:
# Break only if the errors are one after the other...
errorCount = 0
return (True, True, always)
开发者ID:dantman,项目名称:pywikia,代码行数:101,代码来源:add_text.py
示例9: standardizePageFooter
def standardizePageFooter(self, text):
"""
Makes sure that interwiki links, categories and star templates are
put to the correct position and into the right order. This combines the
old instances standardizeInterwiki and standardizeCategories
The page footer has the following section in that sequence:
1. categories
2. ## TODO: template beyond categories ##
3. additional information depending on local site policy
4. stars templates for featured and good articles
5. interwiki links
"""
starsList = [
u'bueno',
u'bom interwiki',
u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed',
u'destacado', u'destaca[tu]',
u'enllaç[ _]ad',
u'enllaz[ _]ad',
u'leam[ _]vdc',
u'legătură[ _]a[bcf]',
u'liamm[ _]pub',
u'lien[ _]adq',
u'lien[ _]ba',
u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt',
u'liên[ _]kết[ _]chọn[ _]lọc',
u'ligam[ _]adq',
u'ligazón[ _]a[bd]',
u'ligoelstara',
u'ligoleginda',
u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km',
u'link[ _]sm', u'linkfa',
u'na[ _]lotura',
u'nasc[ _]ar',
u'tengill[ _][úg]g',
u'ua',
u'yüm yg',
u'רא',
u'وصلة مقالة جيدة',
u'وصلة مقالة مختارة',
]
categories = None
interwikiLinks = None
allstars = []
# The PyWikipediaBot is no longer allowed to touch categories on the
# German Wikipedia. See
# http://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/1#Position_der_Personendaten_am_.22Artikelende.22
# ignoring nn-wiki of cause of the comment line above iw section
if not self.template and '{{Personendaten' not in text and \
'{{SORTIERUNG' not in text and '{{DEFAULTSORT' not in text and \
self.site.lang not in ('et', 'it', 'bg', 'ru'):
try:
categories = pywikibot.getCategoryLinks(text, site=self.site)
# there are categories like [[categoy:Foo {{#time:Y...}}]]
except pywikibot.InvalidTitle:
pass
if not self.talkpage: # and pywikibot.calledModuleName() <> 'interwiki':
subpage = False
if self.template:
loc = None
try:
tmpl, loc = moved_links[self.site.lang]
del tmpl
except KeyError:
pass
if loc is not None and loc in self.title:
subpage = True
interwikiLinks = pywikibot.getLanguageLinks(
text, insite=self.site, template_subpage=subpage)
# Removing the interwiki
text = pywikibot.removeLanguageLinks(text, site=self.site)
# Removing the stars' issue
starstext = pywikibot.removeDisabledParts(text)
for star in starsList:
regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)'
% star, re.I)
found = regex.findall(starstext)
if found != []:
text = regex.sub('', text)
allstars += found
# Adding categories
if categories:
##Sorting categories in alphabetic order. beta test only on Persian Wikipedia, TODO fix bug for sorting
#if self.site.language() == 'fa':
# categories.sort()
##Taking main cats to top
# for name in categories:
# if re.search(u"(.+?)\|(.{,1}?)",name.title()) or name.title()==name.title().split(":")[0]+title:
# categories.remove(name)
# categories.insert(0, name)
text = pywikibot.replaceCategoryLinks(text, categories,
site=self.site)
# Adding stars templates
if allstars:
#.........这里部分代码省略.........
开发者ID:Rodehi,项目名称:GFROS,代码行数:101,代码来源:cosmetic_changes.py
示例10: standardizePageFooter
def standardizePageFooter(self, text):
"""
Makes sure that interwiki links, categories and star templates are
put to the correct position and into the right order.
This combines the old instances standardizeInterwiki and standardizeCategories
The page footer has the following section in that sequence:
1. categories
2. additional information depending on local site policy
3. stars templates for featured and good articles
4. interwiki links
"""
starsList = [
u"bueno",
u"cyswllt[ _]erthygl[ _]ddethol",
u"dolen[ _]ed",
u"destacado",
u"destaca[tu]",
u"enllaç[ _]ad",
u"enllaz[ _]ad",
u"leam[ _]vdc",
u"legătură[ _]a[bcf]",
u"liamm[ _]pub",
u"lien[ _]adq",
u"lien[ _]ba",
u"liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt",
u"liên[ _]kết[ _]chọn[ _]lọc",
u"ligam[ _]adq",
u"ligoelstara",
u"ligoleginda",
u"link[ _][afgu]a",
u"link[ _]adq",
u"link[ _]f[lm]",
u"link[ _]km",
u"link[ _]sm",
u"linkfa",
u"na[ _]lotura",
u"nasc[ _]ar",
u"tengill[ _][úg]g",
u"ua",
u"yüm yg",
u"רא",
u"وصلة مقالة جيدة",
u"وصلة مقالة مختارة",
]
categories = None
interwikiLinks = None
allstars = []
hasCommentLine = False
# The PyWikipediaBot is no longer allowed to touch categories on the German Wikipedia.
# See http://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/bis_2006#Position_der_Personendaten_am_.22Artikelende.22
# ignoring nn-wiki of cause of the comment line above iw section
if not self.template and not "{{Personendaten" in text:
categories = pywikibot.getCategoryLinks(text, site=self.site)
if not self.talkpage: # and pywikibot.calledModuleName() <> 'interwiki':
subpage = False
if self.template:
loc = None
try:
tmpl, loc = moved_links[self.site.lang]
del tmpl
except KeyError:
pass
if loc != None and loc in self.title:
subpage = True
interwikiLinks = pywikibot.getLanguageLinks(text, insite=self.site, template_subpage=subpage)
# Removing the interwiki
text = pywikibot.removeLanguageLinks(text, site=self.site)
# Removing the stars' issue
starstext = pywikibot.removeDisabledParts(text)
for star in starsList:
regex = re.compile("(\{\{(?:template:|)%s\|.*?\}\}[\s]*)" % star, re.I)
found = regex.findall(starstext)
if found != []:
if pywikibot.verbose:
print found
text = regex.sub("", text)
allstars += found
# nn got a message between the categories and the iw's
# and they want to keep it there, first remove it
if self.site.language() == "nn":
regex = re.compile(
"(<!-- ?interwiki \(no(?:/nb)?, ?sv, ?da first; then other languages alphabetically by name\) ?-->)"
)
found = regex.findall(text)
if found:
if pywikibot.verbose:
print found
hasCommentLine = True
text = regex.sub("", text)
# Adding categories
if categories:
text = pywikibot.replaceCategoryLinks(text, categories, site=self.site)
# Put the nn iw message back
if self.site.language() == "nn" and not self.talkpage and (interwikiLinks or hasCommentLine):
#.........这里部分代码省略.........
开发者ID:swertschak,项目名称:wikijournals-api,代码行数:101,代码来源:cosmetic_changes.py
示例11: standardizePageFooter
def standardizePageFooter(self, text):
"""
Makes sure that interwiki links, categories and star templates are
put to the correct position and into the right order. This combines the
old instances standardizeInterwiki and standardizeCategories
The page footer has the following section in that sequence:
1. categories
2. additional information depending on local site policy
3. stars templates for featured and good articles
4. interwiki links
"""
starsList = [
u'bueno',
u'bom interwiki',
u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed',
u'destacado', u'destaca[tu]',
u'enllaç[ _]ad',
u'enllaz[ _]ad',
u'leam[ _]vdc',
u'legătură[ _]a[bcf]',
u'liamm[ _]pub',
u'lien[ _]adq',
u'lien[ _]ba',
u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt',
u'liên[ _]kết[ _]chọn[ _]lọc',
u'ligam[ _]adq',
u'ligoelstara',
u'ligoleginda',
u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km',
u'link[ _]sm', u'linkfa',
u'na[ _]lotura',
u'nasc[ _]ar',
u'tengill[ _][úg]g',
u'ua',
u'yüm yg',
u'רא',
u'وصلة مقالة جيدة',
u'وصلة مقالة مختارة',
]
categories = None
interwikiLinks = None
allstars = []
hasCommentLine = False
# The PyWikipediaBot is no longer allowed to touch categories on the
# German Wikipedia. See
# http://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/1#Position_der_Personendaten_am_.22Artikelende.22
# ignoring nn-wiki of cause of the comment line above iw section
if not self.template and not '{{Personendaten' in text:
categories = pywikibot.getCategoryLinks(text, site = self.site)
if not self.talkpage:# and pywikibot.calledModuleName() <> 'interwiki':
subpage = False
if self.template:
loc = None
try:
tmpl, loc = moved_links[self.site.lang]
del tmpl
except KeyError:
pass
if loc != None and loc in self.title:
subpage = True
interwikiLinks = pywikibot.getLanguageLinks(
text, insite=self.site, template_subpage=subpage)
# Removing the interwiki
text = pywikibot.removeLanguageLinks(text, site = self.site)
# Removing the stars' issue
starstext = pywikibot.removeDisabledParts(text)
for star in starsList:
regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)'
% star, re.I)
found = regex.findall(starstext)
if found != []:
if pywikibot.verbose:
print found
text = regex.sub('', text)
allstars += found
# nn got a message between the categories and the iw's
# and they want to keep it there, first remove it
if self.site.lang in msg_interwiki:
iw_msg = msg_interwiki[self.site.lang]
if isinstance(iw_msg, tuple):
iw_reg = iw_msg[1]
iw_msg = iw_msg[0]
else:
iw_reg = u'(%s)' % iw_msg
regex = re.compile(iw_reg)
found = regex.findall(text)
if found:
if pywikibot.verbose:
print found
hasCommentLine = True
text = regex.sub('', text)
# Adding categories
if categories:
text = pywikibot.replaceCategoryLinks(text, categories,
#.........这里部分代码省略.........
开发者ID:Protonk,项目名称:pywikipedia2,代码行数:101,代码来源:cosmetic_changes.py
示例12: add_text
def add_text(
page=None,
addText=None,
summary=None,
regexSkip=None,
regexSkipUrl=None,
always=False,
up=False,
putText=True,
oldTextGiven=None,
reorderEnabled=True,
create=False,
):
if not addText:
raise NoEnoughData("You have to specify what text you want to add!")
if not summary:
summary = i18n.twtranslate(pywikibot.getSite(), "add_text-adding", {"adding": addText[:200]})
# When a page is tagged as "really well written" it has a star in the
# interwiki links. This is a list of all the templates used (in regex
# format) to make the stars appear.
errorCount = 0
site = pywikibot.getSite()
pathWiki = site.family.nicepath(site.lang)
if putText:
pywikibot.output(u"Loading %s..." % page.title())
if oldTextGiven is None:
try:
text = page.get()
except pywikibot.NoPage:
if create:
pywikibot.output(u"%s doesn't exist, creating it!" % page.title())
text = u""
else:
pywikibot.output(u"%s doesn't exist, skip!" % page.title())
return (False, False, always)
except pywikibot.IsRedirectPage:
pywikibot.output(u"%s is a redirect, skip!" % page.title())
return (False, False, always)
else:
text = oldTextGiven
# Understand if the bot has to skip the page or not
# In this way you can use both -except and -excepturl
if regexSkipUrl is not None:
url = "%s%s" % (pathWiki, page.urlname())
result = re.findall(regexSkipUrl, site.getUrl(url))
if result != []:
pywikibot.output(
u"""Exception! regex (or word) used with -exceptUrl is in the page. Skip!
Match was: %s"""
% result
)
return (False, False, always)
if regexSkip is not None:
result = re.findall(regexSkip, text)
if result != []:
pywikibot.output(
u"""Exception! regex (or word) used with -except is in the page. Skip!
Match was: %s"""
% result
)
return (False, False, always)
# If not up, text put below
if not up:
newtext = text
# Translating the \\n into binary \n
addText = addText.replace("\\n", config.line_separator)
if reorderEnabled:
# Getting the categories
categoriesInside = pywikibot.getCategoryLinks(newtext, site)
# Deleting the categories
newtext = pywikibot.removeCategoryLinks(newtext, site)
# Getting the interwiki
interwikiInside = pywikibot.getLanguageLinks(newtext, site)
# Removing the interwiki
newtext = pywikibot.removeLanguageLinks(newtext, site)
# Adding the text
newtext += u"%s%s" % (config.line_separator, addText)
# Reputting the categories
newtext = pywikibot.replaceCategoryLinks(newtext, categoriesInside, site, True)
# Dealing the stars' issue
allstars = []
starstext = pywikibot.removeDisabledParts(text)
for star in starsList:
regex = re.compile("(\{\{(?:template:|)%s\|.*?\}\}[\s]*)" % star, re.I)
found = regex.findall(starstext)
if found != []:
newtext = regex.sub("", newtext)
allstars += found
if allstars != []:
newtext = newtext.strip() + config.line_separator * 2
allstars.sort()
for element in allstars:
newtext += "%s%s" % (element.strip(), config.LS)
# Adding the interwiki
newtext = pywikibot.replaceLanguageLinks(newtext, interwikiInside, site)
else:
#.........这里部分代码省略.........
开发者ID:hroest,项目名称:pywikibot-compat,代码行数:101,代码来源:add_text.py
注:本文中的wikipedia.removeDisabledParts函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论