本文整理汇总了Python中r2.lib.utils.timeago函数的典型用法代码示例。如果您正苦于以下问题:Python timeago函数的具体用法?Python timeago怎么用?Python timeago使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了timeago函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: get_hot
def get_hot(srs, only_fullnames=False):
    """Get the (fullname, hotness, epoch_seconds) for the hottest
    links in a subreddit. Use the query-cache to avoid some lookups
    if we can.

    srs: sequence of Subreddit things.
    only_fullnames: when True, each result row is just the fullname
        string instead of the (fullname, hot, date) tuple.
    Returns a list (parallel to srs) of per-subreddit result lists.
    """
    from r2.lib.db.thing import Query
    from r2.lib.db.queries import CachedResults

    ret = []
    queries = [sr.get_links("hot", "all") for sr in srs]

    # fetch these all in one go
    cachedresults = filter(lambda q: isinstance(q, CachedResults), queries)
    CachedResults.fetch_multi(cachedresults)

    # loop-invariant: anything older than this is dropped below
    age_limit = epoch_seconds(utils.timeago("%d days" % g.HOT_PAGE_AGE))

    # BUGFIX: the original iterated `for q in queries` and then called
    # cached_query(q, sr), where `sr` was the variable leaked from the
    # list comprehension above -- i.e. always the LAST subreddit.
    # Pair each query with its own subreddit instead.
    for sr, q in zip(srs, queries):
        if isinstance(q, Query):
            links = cached_query(q, sr)
            res = [(link._fullname, link._hot, epoch_seconds(link._date))
                   for link in links]
        elif isinstance(q, CachedResults):
            # we're relying on an implementation detail of
            # CachedResults here, where it's storing tuples that look
            # exactly like the return-type we want, to make our
            # sorting a bit cheaper
            res = list(q.data)

        # remove any that are too old
        res = [(fname if only_fullnames else (fname, hot, date))
               for (fname, hot, date) in res if date > age_limit]
        ret.append(res)

    return ret
开发者ID:eerock,项目名称:reddit,代码行数:31,代码来源:normalized_hot.py
示例2: user_vote_change_links
def user_vote_change_links(period = '1 day'):
    """Sum up each account's link-vote karma change over `period`,
    weighting each vote by its subreddit's karma multiplier.

    Returns a list of (account_id, karma_sum) tuples.
    """
    rel = Vote.rel(Account, Link)
    type = tdb.rel_types_id[rel._type_id]
    # rel_table unpacks to: relation table, account thing table,
    # link thing table, relation data table
    rt, account_tt, link_tt, dt = type.rel_table

    aliases = tdb.alias_generator()
    author_dt = dt.alias(aliases.next())
    link_dt = tdb.types_id[Link._type_id].data_table[0].alias(aliases.next())

    # SQL CASE arms: one per subreddit with a custom multiplier,
    # then a catch-all default arm.
    cases = [(sa.cast(link_dt.c.value, sa.Integer) == subreddit._id,
              subreddit.post_karma_multiplier)
             for subreddit in subreddits_with_custom_karma_multiplier()]
    cases.append((True, g.post_karma_multiplier))  # The default article multiplier

    cutoff = utils.timeago(period)
    s = sa.select(
        [author_dt.c.value,
         sa.func.sum(sa.cast(rt.c.name, sa.Integer) * sa.case(cases))],
        sa.and_(rt.c.date >= cutoff,
                author_dt.c.thing_id == rt.c.rel_id,
                author_dt.c.key == 'author_id',
                link_tt.c.thing_id == rt.c.thing2_id,
                link_tt.c.date >= cutoff,
                link_dt.c.key == 'sr_id',
                link_dt.c.thing_id == rt.c.thing2_id),
        group_by = author_dt.c.value)

    return [(int(row.value), row.sum) for row in s.execute().fetchall()]
开发者ID:Craigus,项目名称:lesswrong,代码行数:34,代码来源:user_stats.py
示例3: time_listings
def time_listings(times = ('year','month','week','day','hour', 'all')):
    """Map-reduce job emitting per-author time listings (top,
    controversial, and -- for 'all' -- new and hot) for links."""
    # cutoff timestamp per listing period; 'all' has no cutoff
    oldests = dict((t, epoch_seconds(timeago('1 %s' % t)))
                   for t in times if t != 'all')
    if 'all' in times:
        oldests['all'] = 0

    @mr_tools.dataspec_m_thing(('author_id', int),)
    def process(link):
        assert link.thing_type == 'link'

        # skip anything removed or spammed
        if link.spam or link.deleted:
            return

        timestamp = link.timestamp
        fname = make_fullname(Link, link.thing_id)
        author_id = link.author_id
        ups, downs = link.ups, link.downs

        sc = score(ups, downs)
        contr = controversy(ups, downs)
        h = _hot(ups, downs, timestamp)

        for tkey, oldest in oldests.iteritems():
            if timestamp <= oldest:
                continue
            yield ('user-top-%s-%d' % (tkey, author_id),
                   sc, timestamp, fname)
            yield ('user-controversial-%s-%d' % (tkey, author_id),
                   contr, timestamp, fname)
            # 'new' and 'hot' only make sense for the all-time listing
            if tkey == 'all':
                yield ('user-new-%s-%d' % (tkey, author_id),
                       timestamp, timestamp, fname)
                yield ('user-hot-%s-%d' % (tkey, author_id),
                       h, timestamp, fname)

    mr_tools.mr_map(process)
开发者ID:constantAmateur,项目名称:sciteit,代码行数:35,代码来源:mr_gold.py
示例4: normalized_hot_cached
def normalized_hot_cached(sr_ids):
    """Interleave the hot pages of several subreddits by normalizing
    each link's hotness against its own subreddit's top score.

    The two hottest links per subreddit get a randomized normal score
    so the front page doesn't always open with the same ordering.
    Returns a list of link fullnames, hottest-first.
    """
    scored = []
    for sr in Subreddit._byID(sr_ids, data = True, return_dict = False):
        #items = get_hot(sr)
        fresh = filter(lambda l: l._date > utils.timeago('%d day' % g.HOT_PAGE_AGE),
                       get_hot(sr))
        if not fresh:
            continue

        # avoid dividing by zero on a dead subreddit
        top_score = max(fresh[0]._hot, 1)

        leaders, remainder = fresh[:2], fresh[2:]
        if leaders:
            normals = [l._hot / top_score for l in leaders]
            scored.extend((l, random.choice(normals)) for l in leaders)
            #random.shuffle(normals)
            #results.extend((l, normals.pop()) for l in top)
        if remainder:
            scored.extend((l, l._hot / top_score) for l in remainder)

    scored.sort(key = lambda x: (x[1], x[0]._hot), reverse = True)
    return [pair[0]._fullname for pair in scored]
开发者ID:cmak,项目名称:reddit,代码行数:26,代码来源:normalized_hot.py
示例5: time_listings
def time_listings(times = ('year','month','week','day','hour')):
    """Map-reduce job emitting per-subreddit and per-domain top and
    controversial listings for links."""
    # cutoff timestamp for each listing period
    oldests = dict((t, epoch_seconds(timeago('1 %s' % t)))
                   for t in times)

    @mr_tools.dataspec_m_thing(("url", str),('sr_id', int),)
    def process(link):
        assert link.thing_type == 'link'

        # skip removed or spammed links entirely
        if link.spam or link.deleted:
            return

        timestamp = link.timestamp
        fname = make_fullname(Link, link.thing_id)
        sr_id = link.sr_id
        # self-posts have no URL and therefore no domain listings
        domains = UrlParser(link.url).domain_permutations() if link.url else []
        ups, downs = link.ups, link.downs

        for tkey, oldest in oldests.iteritems():
            if timestamp <= oldest:
                continue
            sc = score(ups, downs)
            contr = controversy(ups, downs)
            yield ('sr-top-%s-%d' % (tkey, sr_id),
                   sc, timestamp, fname)
            yield ('sr-controversial-%s-%d' % (tkey, sr_id),
                   contr, timestamp, fname)
            for domain in domains:
                yield ('domain/top/%s/%s' % (tkey, domain),
                       sc, timestamp, fname)
                yield ('domain/controversial/%s/%s' % (tkey, domain),
                       contr, timestamp, fname)

    mr_tools.mr_map(process)
开发者ID:3river,项目名称:reddit,代码行数:34,代码来源:mr_top.py
示例6: add_props
def add_props(cls, user, wrapped):
    """Decorate each wrapped link with presentation attributes:
    thumbnail, displayed score, domain, permalink, saved/hidden/clicked
    flags, score-hiding, and nofollow status.

    user: the viewing Account (or None when logged out).
    wrapped: sequence of wrapped Link items, mutated in place.
    """
    from r2.lib.count import incr_counts
    from r2.lib.media import thumbnail_url
    from r2.lib.utils import timeago
    # per-user relations; empty for logged-out viewers
    saved = Link._saved(user, wrapped) if user else {}
    hidden = Link._hidden(user, wrapped) if user else {}
    #clicked = Link._clicked(user, wrapped) if user else {}
    clicked = {}
    for item in wrapped:
        # media preference: always on, promoted-with-thumb unless off,
        # or per-subreddit opt-in
        show_media = (c.user.pref_media == 'on' or
                      (item.promoted and item.has_thumbnail
                       and c.user.pref_media != 'off') or
                      (c.user.pref_media == 'subreddit' and
                       item.subreddit.show_media))
        if not show_media:
            item.thumbnail = ""
        elif item.has_thumbnail:
            item.thumbnail = thumbnail_url(item)
        else:
            item.thumbnail = g.default_thumb
        # never display a negative score
        item.score = max(0, item.score)
        # self-posts show "self.<subreddit>" instead of a real domain
        item.domain = (domain(item.url) if not item.is_self
                       else 'self.' + item.subreddit.name)
        if not hasattr(item,'top_link'):
            item.top_link = False
        item.urlprefix = ''
        item.saved = bool(saved.get((user, item, 'save')))
        item.hidden = bool(hidden.get((user, item, 'hide')))
        item.clicked = bool(clicked.get((user, item, 'click')))
        item.num = None
        item.score_fmt = Score.number_only
        item.permalink = item.make_permalink(item.subreddit)
        if item.is_self:
            # self-post URLs point back at their own comments page
            item.url = item.make_permalink(item.subreddit, force_domain = True)
        # score hiding, in priority order: admins always see scores;
        # promoted links never show them; authors see their own;
        # everyone else waits two hours for scores to settle
        if c.user_is_admin:
            item.hide_score = False
        elif item.promoted:
            item.hide_score = True
        elif c.user == item.author:
            item.hide_score = False
        elif item._date > timeago("2 hours"):
            item.hide_score = True
        else:
            item.hide_score = False
        # rel=nofollow unless the author is viewing their own link, or
        # the link has some score and neither link nor author is spam
        if c.user_is_loggedin and item.author._id == c.user._id:
            item.nofollow = False
        elif item.score <= 1 or item._spam or item.author._spam:
            item.nofollow = True
        else:
            item.nofollow = False
    # note: runs once per call, outside the per-item loop
    if c.user_is_loggedin:
        incr_counts(wrapped)
开发者ID:vin,项目名称:reddit,代码行数:59,代码来源:link.py
示例7: share
def share(link, emails, from_name = "", reply_to = "", body = ""):
    """Queues a 'share link' email."""
    now = datetime.datetime.now(g.tz)
    # delay sharing of very new links by g.new_link_share_delay
    delay = now - timeago(g.new_link_share_delay)
    send_date = max(now, link._date + delay)
    Email.handler.add_to_queue(c.user, link, emails, from_name, g.share_reply,
                               send_date, request.ip, Email.Kind.SHARE,
                               body = body, reply_to = reply_to)
开发者ID:AndrewHay,项目名称:lesswrong,代码行数:8,代码来源:emailer.py
示例8: _get_cutoffs
def _get_cutoffs(intervals):
    """Return {interval: oldest-allowed epoch seconds} for each named
    interval; 'all' means no cutoff (0.0)."""
    return dict((interval,
                 0.0 if interval == "all"
                 else epoch_seconds(timeago("1 %s" % interval)))
                for interval in intervals)
开发者ID:Sheesha1992,项目名称:reddit,代码行数:9,代码来源:mr_top.py
示例9: fix_all_broken_things
def fix_all_broken_things(delete=False):
    """Scan Links and Comments for broken required attributes, from the
    first known breakage up to one minute ago. Pass delete=True to
    remove the broken things instead of just reporting them."""
    from r2.models import Link, Comment

    # 2009-07-21 is the first broken thing at the time of writing.
    from_time = datetime.datetime(2009, 7, 21, tzinfo=g.tz)
    to_time = utils.timeago("60 seconds")

    checks = ((Link, ("author_id", "sr_id")),
              (Comment, ("author_id", "sr_id", "body", "link_id")))
    for cls, attrs in checks:
        utils.find_broken_things(cls, attrs, from_time, to_time, delete=delete)
开发者ID:brendanlong,项目名称:lesswrong,代码行数:9,代码来源:fix_broken_things.py
示例10: simplified_timesince
def simplified_timesince(date, include_tense=True):
    """Human-readable age of `date`; anything under a minute old is
    rendered as "just now"."""
    if date > timeago("1 minute"):
        return _("just now")
    elapsed = timesince(date)
    # include_tense appends the localized "ago" suffix
    return _("%s ago") % elapsed if include_tense else elapsed
开发者ID:DreamRivulet,项目名称:reddit,代码行数:9,代码来源:template_helpers.py
示例11: simplified_timesince
def simplified_timesince(date, include_tense=True):
    """Human-readable age of `date`; anything under a minute old is
    rendered as "just now"."""
    if date > timeago("1 minute"):
        return _("just now")
    # "ago" is translated as its own word here, joined onto the elapsed time
    parts = [timesince(date)]
    if include_tense:
        parts.append(_("ago"))
    return " ".join(parts)
开发者ID:SRITANU,项目名称:reddit,代码行数:9,代码来源:template_helpers.py
示例12: keep_fn
def keep_fn(self):
    """For merged time-listings, don't show items that are too old
    (this can happen when mr_top hasn't run in a while)"""
    if self.time == 'all' or not c.default_sr:
        # no extra age filtering needed; defer to the base class
        return ListingController.keep_fn(self)

    cutoff = timeago('1 %s' % (str(self.time),))

    def keep(item):
        return item._date > cutoff and item.keep_item(item)

    return keep
开发者ID:donslice,项目名称:reddit,代码行数:10,代码来源:listingcontroller.py
示例13: port_cassahides
def port_cassahides():
    """One-off migration: copy the last week of 'hide' SaveHide rows
    into Cassandra via CassandraHide."""
    from r2.models import SaveHide, CassandraHide
    from r2.lib.db.tdb_cassandra import CL
    from r2.lib.db.operators import desc
    from r2.lib.utils import fetch_things2, timeago, progress

    query = SaveHide._query(SaveHide.c._date > timeago("1 week"),
                            SaveHide.c._name == "hide",
                            sort=desc("_date"))
    # stream + progress reporting over the estimated row count
    rows = progress(fetch_things2(query), estimate=1953374)
    for row in rows:
        CassandraHide._hide(row._thing1, row._thing2,
                            write_consistency_level=CL.ONE)
开发者ID:ketralnis,项目名称:reddit,代码行数:12,代码来源:migrate.py
示例14: all_comments
def all_comments():
    """Generator over recent well-scored comments (score > 2, not in
    sr 6, newer than a week), paging through 200 at a time."""
    q = Comment._query(Comment.c._score > 2,
                       Comment.c.sr_id != 6,
                       Comment.c._date > timeago('1 weeks'),
                       sort = desc('_date'),
                       limit = 200,
                       data = True)
    batch = list(q)
    while batch:
        for comment in batch:
            yield comment
        # `comment` is the last item of the batch just yielded;
        # fetch the next page after it
        batch = list(q._after(comment))
开发者ID:ArslanRafique,项目名称:reddit,代码行数:12,代码来源:update_karmas.py
示例15: vote_stats
def vote_stats(config):
    """Count link and comment votes cast in the past day, plus their
    total, returned as a stats dict."""
    stats = {}
    rels = (('link', Vote.rel(Account, Link)),
            ('comment', Vote.rel(Account, Comment)))
    for name, rel in rels:
        table = get_rel_table(rel._type_id)[0]
        count_q = table.count(table.c.date > timeago('1 day'))
        stats[name + '_vote_count_past_day'] = count_q.execute().fetchone()[0]
    stats['vote_count_past_day'] = (stats['link_vote_count_past_day'] +
                                    stats['comment_vote_count_past_day'])
    return stats
开发者ID:ajmint,项目名称:reddit-plugin-about,代码行数:13,代码来源:generate_stats.py
示例16: append_date_clause
def append_date_clause(self, table, select, all_time=None):
    """Create the date portion of a where clause based on the time
    period specified."""
    if all_time:
        # no date restriction at all
        return select
    if self.period and not self.date:
        # relative window, e.g. "1 day" ago until now
        select.append_whereclause(table.c.date > timeago(self.period))
    if self.date:
        # a specific calendar day: [date, date + 1 day)
        day_start = dt.datetime.strptime(self.date, "%Y%m%d")
        select.append_whereclause(table.c.date >= day_start)
        select.append_whereclause(table.c.date < day_start + dt.timedelta(days=1))
    return select
开发者ID:brendanlong,项目名称:lesswrong,代码行数:13,代码来源:reporting.py
示例17: reindex_all
def reindex_all(types = None, delete_all_first=False):
    """
    Called from `paster run` to totally re-index everything in the
    database. Spawns a thread to connect to Solr, and sends it
    tokenised Things
    """
    global indexed_types
    start_t = datetime.now()
    if not types:
        types = indexed_types
    # We don't want the default thread-local cache (which is just a
    # dict) to grow un-bounded (normally, we'd use
    # utils.set_emptying_cache, except that that preserves memcached,
    # and we don't even want to get memcached for total indexing,
    # because it would dump out more recent stuff)
    g.cache.caches = (SelfEmptyingCache(),) # + g.cache.caches[1:]
    count = 0
    # bounded queue: producer blocks when the indexer thread falls behind
    q=Queue(100)
    indexer=Thread(target=indexer_worker,
                   args=(q,delete_all_first))
    indexer.start()
    try:
        for cls in types:
            # "50 years" ago effectively means "everything ever"
            for batch in fetch_batches(cls,1000,
                                       timeago("50 years"),
                                       start_t):
                r = tokenize_things([ x for x in batch
                                      if not x._spam and not x._deleted ])
                count += len(r)
                # NOTE(review): r[0] raises IndexError if an entire batch
                # was spam/deleted -- presumably rare enough in practice
                print ("Processing %s #%d(%s): %s"
                       % (cls.__name__, count, q.qsize(), r[0]['contents']))
                if indexer.isAlive():
                    q.put(r)
                else:
                    # indexer died; abort rather than fill the queue forever
                    raise Exception("'tis a shame that I have but one thread to give")
        # sentinel telling indexer_worker to finish up
        q.put("done")
        indexer.join()
    # NOTE(review): Python 2 syntax; `except object` looks intended as a
    # catch-all (including the Exception raised above) -- confirm it
    # actually catches here, `except Exception` would be conventional
    except object,e:
        if indexer.isAlive():
            # forward the failure to the worker so it can shut down
            q.put(e,timeout=30)
        raise e
开发者ID:ProfNandaa,项目名称:reddit,代码行数:49,代码来源:solrsearch.py
示例18: time_listings
def time_listings(times = ('all',)):
    """Map-reduce job emitting per-domain top/controversial listings
    (and hot/new for the all-time listing) for links."""
    # cutoff per period; 'all' uses a 10-year horizon instead of 0
    oldests = dict((t, epoch_seconds(timeago('1 %s' % t)))
                   for t in times if t != "all")
    oldests['all'] = epoch_seconds(timeago('10 years'))

    @mr_tools.dataspec_m_thing(("url", str),)
    def process(link):
        assert link.thing_type == 'link'

        # removed and spammed links are excluded from all listings
        if link.spam or link.deleted:
            return

        timestamp = link.timestamp
        fname = make_fullname(Link, link.thing_id)
        # links without a URL (self posts) have no domains to index
        domains = UrlParser(link.url).domain_permutations() if link.url else []
        ups, downs = link.ups, link.downs

        for tkey, oldest in oldests.iteritems():
            if timestamp <= oldest:
                continue
            sc = score(ups, downs)
            contr = controversy(ups, downs)
            h = _hot(ups, downs, timestamp)
            for domain in domains:
                yield ('domain/top/%s/%s' % (tkey, domain),
                       sc, timestamp, fname)
                yield ('domain/controversial/%s/%s' % (tkey, domain),
                       contr, timestamp, fname)
                if tkey == "all":
                    yield ('domain/hot/%s/%s' % (tkey, domain),
                           h, timestamp, fname)
                    yield ('domain/new/%s/%s' % (tkey, domain),
                           timestamp, timestamp, fname)

    mr_tools.mr_map(process)
开发者ID:3river,项目名称:reddit,代码行数:36,代码来源:mr_domains.py
示例19: delete_old
def delete_old(cls, age="3 days", limit=10000):
    """Delete up to `limit` jury relations older than `age`, then
    refresh the cached jury lookups for every affected account and
    defendant."""
    cutoff = timeago(age)
    old = cls._query(cls.c._date < cutoff)
    old._limit = limit

    # collect affected parties while deleting
    touched_accounts = set()
    touched_defendants = set()
    for rel in old:
        touched_accounts.add(rel._thing1)
        touched_defendants.add(rel._thing2)
        rel._delete()

    # invalidate caches only once per account/defendant
    for account in touched_accounts:
        Jury.by_account(account, _update=True)
    for defendant in touched_defendants:
        Jury.by_defendant(defendant, _update=True)
开发者ID:XieConnect,项目名称:reddit,代码行数:17,代码来源:jury.py
示例20: keep
def keep(item):
    """Avoid showing links that are too young, to give time
    for things like the spam filter and thumbnail fetcher to
    act on them before releasing them into the wild"""
    wouldkeep = item.keep_item(item)
    is_privileged = c.user_is_loggedin and (
        c.user_is_admin or item.subreddit.is_moderator(c.user))

    # let admins and moderators see them regardless
    if is_privileged:
        return wouldkeep
    # also let the author of the link see them
    if wouldkeep and c.user_is_loggedin and c.user._id == item.author_id:
        return True
    # it's too young to show yet
    if item._date > timeago(g.new_incubation):
        return False
    # otherwise, fall back to the regular logic (don't
    # show hidden links, etc)
    return wouldkeep
开发者ID:rajbot,项目名称:tikical,代码行数:18,代码来源:listingcontroller.py
注:本文中的r2.lib.utils.timeago函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论