This article collects typical code examples of the Python wikipedia.output function. If you are wondering exactly how to use output, what it does, or what real calls look like, the curated examples below should help. Note that wikipedia.py is the core module of the legacy "compat" pywikipedia framework; in later versions the module was renamed, so many of the examples call the same function as pywikibot.output.
The following shows 20 code examples of the output function, ordered by popularity.
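Before the examples: in this framework, wikipedia.output() (later pywikibot.output()) is the standard way to print a unicode message through the bot's console UI, which handles terminal encoding and supports a toStdout flag. A minimal sketch of the call, assuming the legacy compat framework is importable as wikipedia (modern Pywikibot exposes the equivalent as pywikibot.output):

import wikipedia

# Log a plain unicode message through the bot's console UI.
wikipedia.output(u'Processing [[Example page]]...')

# toStdout=True sends the line to stdout; the examples below use this
# for report-style output that may be piped elsewhere.
wikipedia.output(u'TOTAL     : %5d' % 42, toStdout=True)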
Example 1: main
def main():
    # If debug is True, don't edit pages, but only show what would have been
    # changed.
    debug = False
    # The AfD log that should be treated.
    date = None
    # Whether to confirm edits.
    always = False
    # Parse command line arguments
    for arg in wikipedia.handleArgs():
        if arg.startswith('-debug'):
            wikipedia.output(u'Debug mode.')
            debug = True
        elif arg.startswith('-date'):
            if len(arg) == 5:
                date = wikipedia.input(u'Please enter the date of the log that should be treated (yyyymmdd):')
            else:
                date = arg[6:]
        elif arg.startswith('-always'):
            always = True
    if date:
        page_title = u'Wikipedia:Te verwijderen pagina\'s/Toegevoegd %s' % date
    else:
        page_title = u'Wikipedia:Te verwijderen pagina\'s/Toegevoegd %s' % time.strftime("%Y%m%d", time.localtime(time.time() - 60 * 60 * 24))
    wikipedia.output(u'Checking: %s.' % page_title)
    page = wikipedia.Page(wikipedia.getSite(code='nl', fam='wikipedia'), page_title)
    bot = AfDBot(page, always, debug)
    bot.run()
Developer: nlwikibots, Project: nlwikibots, Lines: 31, Source: tvpmelder.py
Example 2: in_list
def in_list(self, pagelist, title, lazyload=True):
    if pywikibot.verbose:
        pywikibot.output(u'Checking whitelist for: %s' % title)
    # quick check for exact match
    if title in pagelist:
        return title
    # quick check for wildcard
    if '' in pagelist:
        if pywikibot.verbose:
            pywikibot.output(u"wildcarded")
        return '.*'
    for item in pagelist:
        if pywikibot.verbose:
            pywikibot.output(u"checking against whitelist item = %s" % item)
        if isinstance(item, PatrolRule):
            if pywikibot.verbose:
                pywikibot.output(u"invoking programmed rule")
            if item.match(title):
                return item
        elif title_match(item, title):
            return item
    if pywikibot.verbose:
        pywikibot.output(u'not found')
Developer: Protonk, Project: pywikipedia2, Lines: 29, Source: patrol.py
Example 3: countTemplates
def countTemplates(self, templates, namespaces):
    mysite = pywikibot.getSite()
    total = 0
    # The names of the templates are the keys, and the numbers of
    # transclusions are the values.
    templateDict = {}
    pg = pagegenerators
    getall = templates
    mytpl = mysite.template_namespace() + ':'
    for template in getall:
        gen = pg.ReferringPageGenerator(pywikibot.Page(mysite,
                                                       mytpl + template),
                                        onlyTemplateInclusion=True)
        if namespaces:
            gen = pg.NamespaceFilterPageGenerator(gen, namespaces)
        count = 0
        for page in gen:
            count += 1
        templateDict[template] = count
        total += count
    pywikibot.output(u'\nNumber of transclusions per template',
                     toStdout=True)
    pywikibot.output(u'-' * 36, toStdout=True)
    for key in templateDict.keys():
        pywikibot.output(u'%-10s: %5d' % (key, templateDict[key]),
                         toStdout=True)
    pywikibot.output(u'TOTAL     : %5d' % total, toStdout=True)
    pywikibot.output(u'Report generated on %s'
                     % datetime.datetime.utcnow().isoformat(),
                     toStdout=True)
    return templateDict
Developer: edgarskos, Project: pywikipedia-git, Lines: 32, Source: templatecount.py
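A hypothetical invocation of the method above; the class name TemplateCountRobot and the template names are assumptions for illustration, so check templatecount.py for the actual entry point:

# Count transclusions of two templates across all namespaces (None).
bot = TemplateCountRobot()  # assumed class name, for illustration only
counts = bot.countTemplates([u'Citation needed', u'Fact'], None)
# counts maps each template name to its transclusion count.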
Example 4: MySQLPageGenerator
def MySQLPageGenerator(query, site=None):
    import MySQLdb as mysqldb
    if site is None:
        site = pywikibot.getSite()
    conn = mysqldb.connect(config.db_hostname, db=site.dbName(),
                           user=config.db_username,
                           passwd=config.db_password)
    cursor = conn.cursor()
    pywikibot.output(u'Executing query:\n%s' % query)
    query = query.encode(site.encoding())
    cursor.execute(query)
    while True:
        try:
            namespaceNumber, pageName = cursor.fetchone()
            print namespaceNumber, pageName
        except TypeError:
            # Limit reached or no more results
            break
        if pageName:
            namespace = site.namespace(namespaceNumber)
            pageName = unicode(pageName, site.encoding())
            if namespace:
                pageTitle = '%s:%s' % (namespace, pageName)
            else:
                pageTitle = pageName
            page = pywikibot.Page(site, pageTitle)
            yield page
Developer: dbow, Project: Project-OPEN, Lines: 28, Source: pagegenerators.py
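Since the generator unpacks exactly two columns per row, any query passed in must select page_namespace followed by page_title (both real columns of MediaWiki's page table). A hypothetical call, with the WHERE clause invented for illustration; credentials come from config as shown above:

query = u"""SELECT page_namespace, page_title
            FROM page
            WHERE page_is_redirect = 0
            LIMIT 10"""
for page in MySQLPageGenerator(query):
    pywikibot.output(page.title())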
Example 5: processImage
def processImage(self, page):
    '''
    Work on a single image
    '''
    if page.exists() and (page.namespace() == 6) and \
       (not page.isRedirectPage()):
        imagepage = pywikibot.ImagePage(page.site(), page.title())
        # First do autoskip.
        if self.doiskip(imagepage):
            pywikibot.output(
                u'Skipping %s : Got a template on the skip list.'
                % page.title())
            return False
        text = imagepage.get()
        foundMatch = False
        for (regex, replacement) in licenseTemplates[page.site().language()]:
            match = re.search(regex, text, flags=re.IGNORECASE)
            if match:
                foundMatch = True
        if not foundMatch:
            pywikibot.output(
                u'Skipping %s : No suitable license template was found.'
                % page.title())
            return False
        self.prefetchQueue.put(self.getNewFields(imagepage))
Developer: moleculea, Project: ess, Lines: 27, Source: imagecopy_self.py
Example 6: _catlib_Category__parseCategory
def _catlib_Category__parseCategory(self, recurse=False, purge=False, startFrom=None):
    if not startFrom:
        startFrom = 0
    ns = self.site().category_namespaces()
    catsdone = []
    catstodo = [(self, recurse)]
    # Get subcats and articles
    for (cat, recurselevel) in catstodo:
        if type(recurselevel) == type(1):
            newrecurselevel = recurselevel - 1
        else:
            newrecurselevel = recurselevel
        catsdone.append(cat)
        wikipedia.output("Getting [[%s]] from %s..." % (cat.title(), cat.site().dbName()))
        for page in toolserver.Generators.getCategoryMembers(cat, startFrom):
            if type(page) == catlib.Category:
                if recurselevel and page not in catsdone:
                    catstodo.append((page, newrecurselevel))
                yield catlib.SUBCATEGORY, page.title()
            else:
                yield catlib.ARTICLE, page.title()
    # Get supercats
    for supercat in toolserver.Generators.getCategories(self):
        yield catlib.SUPERCATEGORY, supercat.title()
Developer: valhallasw, Project: valhallasw-toolserver-libs, Lines: 26, Source: catlib_ts.py
Example 7: PageTitleFilterPageGenerator
def PageTitleFilterPageGenerator(generator, ignoreList):
    """
    Wraps around another generator. Yields only those pages that are not
    listed in the ignore list.

    The ignoreList is a dictionary. Family names are mapped to
    dictionaries in which language codes are mapped to lists of
    page titles.
    """
    def isIgnored(page):
        if not (page.site().family.name in ignoreList and
                page.site().lang in ignoreList[page.site().family.name]):
            return False
        for ig in ignoreList[page.site().family.name][page.site().lang]:
            if re.match(ig, page.title()):
                return True
        return False

    for page in generator:
        if isIgnored(page):
            if pywikibot.verbose:
                pywikibot.output('Ignoring page %s' % page.title())
        else:
            yield page
Developer: dbow, Project: Project-OPEN, Lines: 25, Source: pagegenerators.py
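The nested shape of ignoreList described in the docstring is easy to get wrong, so here is a hypothetical example; the family/language keys and title regexes are invented, and some_generator stands for any page generator:

# Hypothetical ignore list: family name -> language code -> title regexes.
ignoreList = {
    'wikipedia': {
        'en': [u'Main Page', u'Talk:.*'],  # matched with re.match
        'nl': [u'Hoofdpagina'],
    },
}
for page in PageTitleFilterPageGenerator(some_generator, ignoreList):
    # Only pages whose titles match none of the regexes arrive here.
    pywikibot.output(page.title())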
Example 8: dump
def dump(self, filename='category.dump.bz2'):
    '''Saves the contents of the dictionaries superclassDB and catContentDB
    to disk.
    '''
    if not os.path.isabs(filename):
        filename = pywikibot.config.datafilepath(filename)
    if self.catContentDB or self.superclassDB:
        pywikibot.output(u'Dumping to %s, please wait...'
                         % pywikibot.config.shortpath(filename))
        f = bz2.BZ2File(filename, 'w')
        databases = {
            'catContentDB': self.catContentDB,
            'superclassDB': self.superclassDB
        }
        # store dump to disk in binary format
        try:
            pickle.dump(databases, f, protocol=pickle.HIGHEST_PROTOCOL)
        except pickle.PicklingError:
            pass
        f.close()
    else:
        try:
            os.remove(filename)
        except EnvironmentError:
            pass
        else:
            pywikibot.output(u'Database is empty. %s removed'
                             % pywikibot.config.shortpath(filename))
Developer: MrTweek, Project: lanigiro, Lines: 29, Source: category.py
Example 9: writeMainFreeUploads
def writeMainFreeUploads(subpages):
    site = wikipedia.getSite(u'en', u'wikipedia')
    page = wikipedia.Page(site, u'User:Multichill/Free_uploads')
    oldtext = page.get()
    text = u'__TOC__\n'
    #text = text + u'== Links to day pages ==\n'
    #text = text + u'{{Special:PrefixIndex/User:Multichill/Free uploads/20}}\n'
    text = text + u'== This week ==\n'
    i = 0
    limit = 7
    # From new to old
    subpages.reverse()
    for subpage in subpages:
        date = subpage.replace(u'User:Multichill/Free uploads/', u'')
        if i < limit:
            text = text + u'===[[%s|%s]]===\n' % (subpage, date)
            text = text + u'{{%s}}\n' % (subpage,)
        elif i == limit:
            text = text + u'== Older ==\n'
            text = text + u'* [[%s|%s]]\n' % (subpage, date)
        else:
            text = text + u'* [[%s|%s]]\n' % (subpage, date)
        i = i + 1
    comment = u'Updating list, %d subpages contain images' % (len(subpages),)
    wikipedia.showDiff(oldtext, text)
    wikipedia.output(comment)
    page.put(text, comment)
Developer: multichill, Project: toollabs, Lines: 32, Source: free_uploads_enwp.py
Example 10: getExtendedFindNearby
def getExtendedFindNearby(lat, lng):
    '''
    Get the result from http://ws.geonames.org/extendedFindNearby
    and put it in a list of dictionaries to play around with
    '''
    result = []
    gotInfo = False
    parameters = urllib.urlencode({'lat': lat, 'lng': lng})
    while not gotInfo:
        try:
            page = urllib.urlopen("http://ws.geonames.org/extendedFindNearby?%s" % parameters)
            et = xml.etree.ElementTree.parse(page)
            gotInfo = True
        except IOError:
            wikipedia.output(u'Got an IOError, let\'s try again')
            time.sleep(30)
        except socket.timeout:
            wikipedia.output(u'Got a timeout, let\'s try again')
            time.sleep(30)
    for geoname in et.getroot().getchildren():
        geonamedict = {}
        if geoname.tag == 'geoname':
            for element in geoname.getchildren():
                geonamedict[element.tag] = element.text
            result.append(geonamedict)
    #print result
    return result
Developer: multichill, Project: toollabs, Lines: 28, Source: description_restoration.py
Example 11: main
def main():
    pywikibot.warning("this script should not be run manually/directly, but automatically by maintainer.py")
    if len(sys.argv) == 1:
        pywikibot.output("Usage: censure.py <article title>")
        sys.exit(1)
    del sys.argv[0]
    checkPage(" ".join(sys.argv).decode("utf-8"))
Developer: NaturalSolutions, Project: ecoReleve-Concepts, Lines: 7, Source: censure.py
Example 12: get
def get(site=None):
    if site is None:
        site = pywikibot.getSite()
    if site in cache:
        # Use cached copy if it exists.
        watchlist = cache[site]
    else:
        fn = pywikibot.config.datafilepath('watchlists',
                                           'watchlist-%s-%s.dat' % (site.family.name, site.lang))
        try:
            # find out how old our saved dump is (in seconds)
            file_age = time.time() - os.path.getmtime(fn)
            # if it's older than 1 month, reload it
            if file_age > 30 * 24 * 60 * 60:
                pywikibot.output(
                    u'Copy of watchlist is one month old, reloading')
                refresh(site)
        except OSError:
            # no saved watchlist exists yet, retrieve one
            refresh(site)
        f = open(fn, 'r')
        watchlist = pickle.load(f)
        f.close()
        # create cached copy
        cache[site] = watchlist
    return watchlist
Developer: legoktm, Project: pywikipedia-original, Lines: 26, Source: watchlist.py
Example 13: _refreshOld
def _refreshOld(site, sysop=False):
    # get watchlist special page's URL
    path = site.watchlist_address()
    pywikibot.output(u'Retrieving watchlist for %s' % repr(site))
    #pywikibot.put_throttle() # It actually is a get, but a heavy one.
    watchlistHTML = site.getUrl(path, sysop=sysop)
    pywikibot.output(u'Parsing watchlist')
    watchlist = []
    for itemR in [re.compile(r'<li><input type="checkbox" name="id\[\]" value="(.+?)" />'),
                  re.compile(r'<li><input name="titles\[\]" type="checkbox" value="(.+?)" />')]:
        for m in itemR.finditer(watchlistHTML):
            pageName = m.group(1)
            watchlist.append(pageName)
    # Save the watchlist to disk
    # The file is stored in the watchlists subdir. Create if necessary.
    if sysop:
        f = open(pywikibot.config.datafilepath('watchlists',
                                               'watchlist-%s-%s-sysop.dat'
                                               % (site.family.name, site.lang)),
                 'w')
    else:
        f = open(pywikibot.config.datafilepath('watchlists',
                                               'watchlist-%s-%s.dat'
                                               % (site.family.name, site.lang)),
                 'w')
    pickle.dump(watchlist, f)
    f.close()
Developer: legoktm, Project: pywikipedia-original, Lines: 29, Source: watchlist.py
Example 14: output
def output(message, toStdout=True):
    message = time.strftime('[%Y-%m-%d %H:%M:%S] ') + message
    wikipedia.output(message, toStdout=toStdout)
    if toStdout:
        sys.stdout.flush()
    else:
        sys.stderr.flush()
Developer: legoktm, Project: pywikipedia-original, Lines: 7, Source: delinker.py
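This wrapper just prepends a timestamp and forces a flush, so long-running jobs produce immediately visible, timestamped log lines. A hypothetical call (the message text is invented):

# Prints something like: [2010-05-01 14:03:59] Delinking removed image
output(u'Delinking removed image')

# With toStdout=False the wrapper flushes sys.stderr instead.
output(u'API request failed, retrying', toStdout=False)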
Example 15: categoryAllElementsAPI
def categoryAllElementsAPI(CatName, cmlimit=5000, categories_parsed=[], site=None):
    """ Category to load all the elements in a category using the APIs.
    Limit: 5000 elements.
    """
    pywikibot.output("Loading %s..." % CatName)
    # action=query&list=categorymembers&cmlimit=500&cmtitle=Category:License_tags
    params = {"action": "query", "list": "categorymembers", "cmlimit": cmlimit, "cmtitle": CatName}
    data = query.GetData(params, site)
    categories_parsed.append(CatName)
    try:
        members = data["query"]["categorymembers"]
    except KeyError:
        if int(cmlimit) != 500:
            pywikibot.output(u"An error occurred, trying to reload the category.")
            return categoryAllElementsAPI(CatName, cmlimit=500)
        else:
            raise pywikibot.Error(data)
    if len(members) == int(cmlimit):
        raise pywikibot.Error(u"The category selected has >= %s elements, limit reached." % cmlimit)
    allmembers = members
    results = list()
    for subcat in members:
        ns = subcat["ns"]
        title = subcat["title"]
        if ns == 14:
            if title not in categories_parsed:
                categories_parsed.append(title)
                (results_part, categories_parsed) = categoryAllElementsAPI(title, 5000, categories_parsed)
                allmembers.extend(results_part)
    for member in allmembers:
        results.append(member)
    return (results, categories_parsed)
Developer: hasteur, Project: UAABOT, Lines: 35, Source: catlib.py
Example 16: getPhotos
def getPhotos(photoset=u'', start_id='', end_id='', interval=100):
    ''' Loop over a set of Panoramio photos. '''
    i = 0
    has_more = True
    url = u'http://www.panoramio.com/map/get_panoramas.php?set=%s&from=%s&to=%s&size=original'
    while has_more:
        gotInfo = False
        maxtries = 10
        tries = 0
        while not gotInfo:
            try:
                if tries < maxtries:
                    tries += 1
                    panoramioApiPage = urllib2.urlopen(url % (photoset, i,
                                                              i + interval))
                    contents = panoramioApiPage.read().decode('utf-8')
                    gotInfo = True
                    i += interval
                else:
                    break
            except IOError:
                pywikibot.output(u'Got an IOError, let\'s try again')
            except socket.timeout:
                pywikibot.output(u'Got a timeout, let\'s try again')
        if not gotInfo:
            # All retries failed; stop instead of reading an undefined
            # 'contents' below.
            break
        metadata = json.loads(contents)
        count = metadata.get(u'count')  # Useless?
        photos = metadata.get(u'photos')
        for photo in photos:
            yield photo
        has_more = metadata.get(u'has_more')
    return
Developer: Rodehi, Project: GFROS, Lines: 32, Source: panoramiopicker.py
Example 17: load_word_function
def load_word_function(raw):
    """ This is a function used to load the badword and the whitelist."""
    page = re.compile(r"(?:\"|\')(.*?)(?:\"|\')(?:, |\))", re.UNICODE)
    list_loaded = page.findall(raw)
    if len(list_loaded) == 0:
        pywikibot.output(u'There was no input on the real-time page.')
    return list_loaded
Developer: swertschak, Project: wikijournals-api, Lines: 7, Source: welcome.py
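The regex pulls out every quoted string that is followed by ", " or ")", i.e. the elements of a Python-style tuple literal stored on the wiki page. A hypothetical round trip (the page content below is invented):

raw = u"badwords = ('spam', 'casino', 'pills')"
print load_word_function(raw)
# -> [u'spam', u'casino', u'pills']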
Example 18: makeStatistics
def makeStatistics(mconfig, totals):
    text = u'{| class="wikitable sortable"\n'
    text = text + u'! country !! lang !! total !! page !! row template !! Commons template\n'
    totalImages = 0
    for ((countrycode, lang), countryconfig) in sorted(mconfig.countries.items()):
        if countryconfig.get('unusedImagesPage') and countryconfig.get('commonsTemplate'):
            text = text + u'|-\n'
            text = text + u'| %s ' % countrycode
            text = text + u'|| %s ' % lang
            text = text + u'|| %s ' % totals.get((countrycode, lang))
            totalImages = totalImages + totals.get((countrycode, lang))
            text = text + u'|| [[:%s:%s|%s]] ' % (lang, countryconfig.get('unusedImagesPage'), countryconfig.get('unusedImagesPage'))
            text = text + u'|| [[:%s:Template:%s|%s]] ' % (lang, countryconfig.get('rowTemplate'), countryconfig.get('rowTemplate'))
            text = text + u'|| {{tl|%s}}\n' % countryconfig.get('commonsTemplate')
    text = text + u'|-\n'
    text = text + u'| || || %s \n' % totalImages
    text = text + u'|}\n'
    site = wikipedia.getSite('commons', 'commons')
    page = wikipedia.Page(site, u'Commons:Monuments database/Unused images/Statistics')
    comment = u'Updating unused image statistics. Total unused images: %s' % totalImages
    wikipedia.output(text)
    page.put(newtext=text, comment=comment)
Developer: ranjithsiji, Project: wikimedia-wlm-api, Lines: 25, Source: unused_monument_images.py
Example 19: save
def save(self, text, page, comment, minorEdit=False, botflag=False):
    # only save if something was changed
    if text != page.get():
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                         % page.title())
        # show what was changed
        pywikibot.showDiff(page.get(), text)
        pywikibot.output(u'Comment: %s' % comment)
        choice = pywikibot.inputChoice(
            u'Do you want to accept these changes?',
            ['Yes', 'No'], ['y', 'N'], 'N')
        if choice == 'y':
            try:
                # Save the page
                page.put(text, comment=comment,
                         minorEdit=minorEdit, botflag=botflag)
            except pywikibot.LockedPage:
                pywikibot.output(u"Page %s is locked; skipping."
                                 % page.title(asLink=True))
            except pywikibot.EditConflict:
                pywikibot.output(
                    u'Skipping %s because of edit conflict'
                    % (page.title()))
            except pywikibot.SpamfilterError, error:
                pywikibot.output(
                    u'Cannot change %s because of spam blacklist entry %s'
                    % (page.title(), error.url))
            else:
                return True
Developer: peppage, Project: Film-bot, Lines: 32, Source: createInfobox.py
Example 20: UserEditFilterGenerator
def UserEditFilterGenerator(generator, username, timestamp=None, skip=False):
    """
    Generator which will yield Pages depending on whether username is an
    author of that page (only looks at the last 100 editors).
    If timestamp is set, in MediaWiki format YYYYMMDDhhmmss, older edits
    are ignored.
    If skip is set, pages edited by the given user are ignored; otherwise
    only pages edited by this user are yielded.
    """
    if timestamp:
        ts = pywikibot.Timestamp.fromtimestampformat(timestamp)
    for page in generator:
        editors = page.getLatestEditors(limit=100)
        found = False
        for ed in editors:
            uts = pywikibot.Timestamp.fromISOformat(ed['timestamp'])
            if not timestamp or uts >= ts:
                if username == ed['user']:
                    found = True
                    break
            else:
                break
        if found and not skip or not found and skip:
            yield page
        else:
            pywikibot.output(u'Skipping %s' % page.title(asLink=True))
Developer: Botomatik, Project: JackBot, Lines: 27, Source: template.py
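A hypothetical way to wire the filter in front of another generator; some_generator and the username are placeholders:

# Keep only pages that ExampleBot edited among the last 100 editors.
gen = UserEditFilterGenerator(some_generator, u'ExampleBot')

# Invert it: skip pages ExampleBot has edited since 1 January 2012.
gen = UserEditFilterGenerator(some_generator, u'ExampleBot',
                              timestamp='20120101000000', skip=True)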
Note: The wikipedia.output examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not republish without permission.