本文整理汇总了Python中r2.lib.db.operators.desc函数的典型用法代码示例。如果您正苦于以下问题:Python desc函数的具体用法?Python desc怎么用?Python desc使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了desc函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: operator
def operator(self, sort):
    """Map a sort name to the matching relationship ordering operator.

    Returns None for unrecognized sort names.
    """
    ordering = {
        'new': (operators.desc, '_t1_date'),
        'old': (operators.asc, '_t1_date'),
        'top': (operators.desc, '_t1_score'),
    }
    if sort in ordering:
        direction, attr = ordering[sort]
        return direction(attr)
开发者ID:JoshuaDavid,项目名称:lesswrong-1,代码行数:7,代码来源:menus.py
示例2: GET_editreddit
def GET_editreddit(self, location, num, after, reverse, count):
    """Edit reddit form."""
    if isinstance(c.site, FakeSubreddit):
        return self.abort404()

    # moderator is either reddit's moderator or an admin
    is_moderator = c.user_is_loggedin and c.site.is_moderator(c.user) or c.user_is_admin

    def merged_listing(links, comments):
        # Merge the link and comment queries into one date-sorted listing
        # restricted to the current subreddit.
        merged = thing.Merge((links, comments),
                             Link.c.sr_id == c.site._id,
                             sort = desc('_date'),
                             data = True)
        builder = QueryBuilder(merged, num = num, after = after,
                               count = count, reverse = reverse,
                               wrap = ListingController.builder_wrapper)
        return LinkListing(builder).listing()

    if is_moderator and location == 'edit':
        pane = CreateSubreddit(site = c.site,
                               listings = ListingController.listing_names())
    elif location == 'moderators':
        pane = ModList(editable = is_moderator)
    elif is_moderator and location == 'banned':
        pane = BannedList(editable = is_moderator)
    elif location == 'contributors' and c.site.type != 'public':
        pane = ContributorList(editable = is_moderator)
    elif (location == 'stylesheet'
          and c.site.can_change_stylesheet(c.user)
          and not g.css_killswitch):
        # Prefer the user's draft stylesheet over the live one.
        if hasattr(c.site, 'stylesheet_contents_user') and c.site.stylesheet_contents_user:
            stylesheet_contents = c.site.stylesheet_contents_user
        elif hasattr(c.site, 'stylesheet_contents') and c.site.stylesheet_contents:
            stylesheet_contents = c.site.stylesheet_contents
        else:
            stylesheet_contents = ''
        pane = SubredditStylesheet(site = c.site,
                                   stylesheet_contents = stylesheet_contents)
    elif is_moderator and location == 'reports':
        pane = merged_listing(Link._query(Link.c.reported != 0,
                                          Link.c._spam == False),
                              Comment._query(Comment.c.reported != 0,
                                             Comment.c._spam == False))
    elif is_moderator and location == 'spam':
        pane = merged_listing(Link._query(Link.c._spam == True),
                              Comment._query(Comment.c._spam == True))
    else:
        return self.abort404()

    return EditReddit(content = pane).render()
开发者ID:MattFisher,项目名称:lesswrong,代码行数:60,代码来源:front.py
示例3: upgrade_messages
def upgrade_messages(update_comments=True, update_messages=True, update_trees=True):
from r2.lib.db import queries
from r2.lib import comment_tree, cache
from r2.models import Account
from pylons import app_globals as g
accounts = set()
def batch_fn(items):
g.reset_caches()
return items
if update_messages or update_trees:
q = Message._query(Message.c.new == True, sort=desc("_date"), data=True)
for m in fetch_things2(q, batch_fn=batch_fn):
print m, m._date
if update_messages:
accounts = accounts | queries.set_unread(m, m.new)
else:
accounts.add(m.to_id)
if update_comments:
q = Comment._query(Comment.c.new == True, sort=desc("_date"))
q._filter(Comment.c._id < 26152162676)
for m in fetch_things2(q, batch_fn=batch_fn):
print m, m._date
queries.set_unread(m, True)
print "Precomputing comment trees for %d accounts" % len(accounts)
for i, a in enumerate(accounts):
if not isinstance(a, Account):
a = Account._byID(a)
print i, a
comment_tree.user_messages(a)
开发者ID:Shilohtd,项目名称:reddit,代码行数:35,代码来源:migrate.py
示例4: gen_keys
def gen_keys():
    """Yield every cache key that should be rewritten, in a fixed order."""
    yield promoted_memo_key

    # just let this one do its own writing
    load_all_reddits()

    yield queries.get_all_comments().iden

    link_q = Link._query(Link.c._spam == (True, False),
                         Link.c._deleted == (True, False),
                         sort=desc('_date'),
                         data=True,
                         )
    for link in fetch_things2(link_q, verbosity):
        yield comments_key(link._id)
        yield last_modified_key(link, 'comments')

    account_q = Account._query(Account.c._spam == (True, False),
                               sort=desc('_date'),
                               )
    for account in fetch_things2(account_q, verbosity):
        yield messages_key(account._id)
        for fragment in ('overview', 'commented', 'submitted',
                         'liked', 'disliked'):
            yield last_modified_key(account, fragment)
        yield queries.get_comments(account, 'new', 'all').iden
        yield queries.get_submitted(account, 'new', 'all').iden
        for getter in (queries.get_liked,
                       queries.get_disliked,
                       queries.get_hidden,
                       queries.get_saved,
                       queries.get_inbox_messages,
                       queries.get_unread_messages,
                       queries.get_inbox_comments,
                       queries.get_unread_comments,
                       queries.get_inbox_selfreply,
                       queries.get_unread_selfreply,
                       queries.get_sent):
            yield getter(account).iden

    sr_q = Subreddit._query(Subreddit.c._spam == (True, False),
                            sort=desc('_date'),
                            )
    for sr in fetch_things2(sr_q, verbosity):
        yield last_modified_key(sr, 'stylesheet_contents')
        yield queries.get_links(sr, 'hot', 'all').iden
        yield queries.get_links(sr, 'new', 'all').iden
        for sort in ('top', 'controversial'):
            for time in ('hour', 'day', 'week', 'month', 'year', 'all'):
                yield queries.get_links(sr, sort, time,
                                        merge_batched=False).iden
        for getter in (queries.get_spam_links,
                       queries.get_spam_comments,
                       queries.get_reported_links,
                       queries.get_reported_comments,
                       queries.get_subreddit_messages,
                       queries.get_unread_subreddit_messages):
            yield getter(sr).iden
开发者ID:MatsT,项目名称:reddit,代码行数:59,代码来源:migrate.py
示例5: GET_random
def GET_random(self):
    """The Serendipity button"""
    roll = rand.randint(0, 9)
    q = Link._query(*c.site.query_rules())
    # Roughly 4 times out of 10 sort by recency, otherwise by hotness.
    if roll > 5:
        q._sort = desc('_date')
    else:
        q._sort = desc('_hot')
    q._limit = 50
    candidates = list(q)
    link = candidates[rand.randint(0, len(candidates) - 1)]
    link._load()
    return self.redirect(link.url)
开发者ID:cmak,项目名称:reddit,代码行数:10,代码来源:front.py
示例6: operator
def operator(self, sort):
    """Translate a sort name into the matching ordering operator.

    Returns None for unrecognized sort names.
    """
    ordering = {
        "hot": (operators.desc, "_hot"),
        "new": (operators.desc, "_date"),
        "old": (operators.asc, "_date"),
        "top": (operators.desc, "_score"),
        "controversial": (operators.desc, "_controversy"),
    }
    if sort in ordering:
        direction, attr = ordering[sort]
        return direction(attr)
开发者ID:bogdanb,项目名称:lesswrong,代码行数:11,代码来源:menus.py
示例7: special_reddits_cache
def special_reddits_cache(cls, user_id, query_param):
    """Return up to 100 subreddit (thing1) ids for the given relationship
    name, ordered by ups then date."""
    rels = SRMember._query(SRMember.c._name == query_param,
                           SRMember.c._thing2_id == user_id,
                           # hack to prevent the query from
                           # adding it's own date
                           sort = (desc('_t1_ups'), desc('_t1_date')),
                           eager_load = True,
                           thing_data = True,
                           limit = 100)
    return [rel._thing1_id for rel in rels]
开发者ID:barneyfoxuk,项目名称:reddit,代码行数:11,代码来源:subreddit.py
示例8: gen_keys
def gen_keys():
    """Yield every cache key that should be rewritten, in a fixed order."""
    yield promoted_memo_key

    # just let this one do its own writing
    load_all_reddits()

    yield queries.get_all_comments().iden

    link_q = Link._query(Link.c._spam == (True, False),
                         Link.c._deleted == (True, False),
                         sort=desc("_date"),
                         data=True)
    for link in fetch_things2(link_q, verbosity):
        yield comments_key(link._id)
        yield last_modified_key(link, "comments")

    account_q = Account._query(Account.c._spam == (True, False),
                               sort=desc("_date"))
    for account in fetch_things2(account_q, verbosity):
        yield messages_key(account._id)
        for fragment in ("overview", "commented", "submitted",
                         "liked", "disliked"):
            yield last_modified_key(account, fragment)
        yield queries.get_comments(account, "new", "all").iden
        yield queries.get_submitted(account, "new", "all").iden
        for getter in (queries.get_liked,
                       queries.get_disliked,
                       queries.get_hidden,
                       queries.get_saved,
                       queries.get_inbox_messages,
                       queries.get_unread_messages,
                       queries.get_inbox_comments,
                       queries.get_unread_comments,
                       queries.get_inbox_selfreply,
                       queries.get_unread_selfreply,
                       queries.get_sent):
            yield getter(account).iden

    sr_q = Subreddit._query(Subreddit.c._spam == (True, False),
                            sort=desc("_date"))
    for sr in fetch_things2(sr_q, verbosity):
        yield last_modified_key(sr, "stylesheet_contents")
        yield queries.get_links(sr, "hot", "all").iden
        yield queries.get_links(sr, "new", "all").iden
        for sort in ("top", "controversial"):
            for time in ("hour", "day", "week", "month", "year", "all"):
                yield queries.get_links(sr, sort, time,
                                        merge_batched=False).iden
        for getter in (queries.get_spam_links,
                       queries.get_spam_comments,
                       queries.get_reported_links,
                       queries.get_reported_comments,
                       queries.get_subreddit_messages,
                       queries.get_unread_subreddit_messages):
            yield getter(sr).iden
开发者ID:Shilohtd,项目名称:reddit,代码行数:52,代码来源:migrate.py
示例9: operator
def operator(self, sort):
    """Translate a comment sort name into the matching ordering operator.

    Returns None for unrecognized sort names.
    """
    ordering = {
        'hot': (operators.desc, '_hot'),
        'new': (operators.desc, '_date'),
        'old': (operators.asc, '_date'),
        'top': (operators.desc, '_score'),
        'controversial': (operators.desc, '_controversy'),
        'confidence': (operators.desc, '_confidence'),
        'random': (operators.shuffled, '_confidence'),
    }
    if sort in ordering:
        direction, attr = ordering[sort]
        return direction(attr)
开发者ID:89sos98,项目名称:reddit,代码行数:15,代码来源:menus.py
示例10: rebuild_link_index
def rebuild_link_index(start_at=None, sleeptime=1, cls=Link,
uploader=LinkUploader, doc_api='CLOUDSEARCH_DOC_API',
estimate=50000000, chunk_size=1000):
doc_api = getattr(g, doc_api)
uploader = uploader(doc_api)
q = cls._query(cls.c._deleted == (True, False), sort=desc('_date'))
if start_at:
after = cls._by_fullname(start_at)
assert isinstance(after, cls)
q._after(after)
q = r2utils.fetch_things2(q, chunk_size=chunk_size)
q = r2utils.progress(q, verbosity=1000, estimate=estimate, persec=True,
key=_progress_key)
for chunk in r2utils.in_chunks(q, size=chunk_size):
uploader.things = chunk
for x in range(5):
try:
uploader.inject()
except httplib.HTTPException as err:
print "Got %s, sleeping %s secs" % (err, x)
time.sleep(x)
continue
else:
break
else:
raise err
last_update = chunk[-1]
print "last updated %s" % last_update._fullname
time.sleep(sleeptime)
开发者ID:KeyserSosa,项目名称:reddit,代码行数:32,代码来源:cloudsearch.py
示例11: get_hot
def get_hot(sr):
    """Return the 150 hottest links for *sr*, managing the query cache's
    expiry/access bookkeeping along the way."""
    hot_q = Link._query(Link.c.sr_id == sr._id,
                        sort = desc('_hot'),
                        write_cache = True,
                        limit = 150)
    iden = hot_q._iden()

    read_cache = True
    if cache.get(iden) is None:
        # if the query isn't in the cache, set read_cache to false so we
        # record the access time
        read_cache = False
    elif cache.get(expire_key(sr)):
        # if query is in the cache, the expire flag is true, and the access
        # time is old, set read_cache = False
        access_time = cache.get(access_key(sr))
        if not access_time or datetime.now() > access_time + expire_delta:
            cache.delete(expire_key(sr))
            read_cache = False

    if not read_cache:
        cache.set(access_key(sr), datetime.now())

    hot_q._read_cache = read_cache
    links = list(hot_q)

    # set the #1 link so we can ignore it later. expire after TOP_CACHE
    # just in case something happens and that sr doesn't update
    if links:
        cache.set(top_key(sr), links[0]._fullname, TOP_CACHE)

    return links
开发者ID:cmak,项目名称:reddit,代码行数:34,代码来源:normalized_hot.py
示例12: GET_editreddit
def GET_editreddit(self, location, num, after, reverse, count):
    """Edit reddit form. """
    if isinstance(c.site, FakeSubreddit):
        return self.abort404()

    # moderator is either reddit's moderator or an admin
    is_moderator = c.user_is_loggedin and c.site.is_moderator(c.user) or c.user_is_admin

    if is_moderator and location == 'edit':
        pane = CreateSubreddit(site = c.site)
    elif location == 'moderators':
        pane = ModList(editable = is_moderator)
    elif is_moderator and location == 'banned':
        pane = BannedList(editable = is_moderator)
    elif location == 'contributors' and c.site.type != 'public':
        pane = ContributorList(editable = is_moderator)
    elif is_moderator and location == 'spam':
        # Merge spammed links and comments into one date-sorted listing.
        spam_links = Link._query(Link.c._spam == True)
        spam_comments = Comment._query(Comment.c._spam == True)
        merged = thing.Merge((spam_links, spam_comments),
                             sort = desc('_date'),
                             data = True,
                             *c.site.query_rules())
        builder = QueryBuilder(merged, num = num, after = after,
                               count = count, reverse = reverse,
                               wrap = ListingController.builder_wrapper)
        pane = LinkListing(builder).listing()
    else:
        return self.abort404()

    return EditReddit(content = pane).render()
开发者ID:cmak,项目名称:reddit,代码行数:33,代码来源:front.py
示例13: test_comment_order_invalid_sticky
def test_comment_order_invalid_sticky(self):
    """Check the comment order produced when the link's sticky comment id
    is set to 101."""
    self.link.sticky_comment_id = 101
    confidence_sort = operators.desc("_confidence")

    builder = CommentBuilder(self.link, confidence_sort, num=1500)
    builder._get_comments()

    expected_order = [100, 101, 102, 104, 105, 106, 103, 107, 108, 110, 109]
    self.assertEqual(builder.comment_order, expected_order)
开发者ID:nanderson94,项目名称:reddit,代码行数:7,代码来源:commentbuilder_test.py
示例14: top_lang_srs
def top_lang_srs(cls, lang, limit, filter_allow_top = False, over18 = True,
                 over18_only = False):
    """Returns the default list of subreddits for a given language, sorted
    by popularity"""
    pop_reddits = Subreddit._query(Subreddit.c.type == ('public',
                                                        'restricted'),
                                   sort=desc('_downs'),
                                   limit = limit,
                                   data = True,
                                   read_cache = True,
                                   write_cache = True,
                                   cache_time = 5 * 60)
    if lang != 'all':
        pop_reddits._filter(Subreddit.c.lang == lang)

    if not over18:
        pop_reddits._filter(Subreddit.c.over_18 == False)
    elif over18_only:
        pop_reddits._filter(Subreddit.c.over_18 == True)

    if filter_allow_top:
        # Over-fetch, then keep only subreddits that allow_top.
        pop_reddits._limit = 2 * limit
        pop_reddits = [sr for sr in pop_reddits
                       if sr.allow_top == True][:limit]

    # reddits with negative author_id are system reddits and shouldn't be displayed
    displayable = []
    for sr in pop_reddits:
        author_id = getattr(sr, "author_id", 0)
        if author_id is None or author_id >= 0:
            displayable.append(sr)
    return displayable
开发者ID:codyro,项目名称:reddit,代码行数:28,代码来源:subreddit.py
示例15: get_query
def get_query(after_user_id):
    """Build a query for "subscriber" SRMember rels whose thing2 id is
    below *after_user_id*, descending by thing2 id."""
    return SRMember._query(
        SRMember.c._name == "subscriber",
        SRMember.c._thing2_id < after_user_id,
        sort=desc("_thing2_id"),
    )
开发者ID:pra85,项目名称:reddit,代码行数:7,代码来源:srmember_to_cassandra.py
示例16: run
def run(verbose=True, sleep_time = 60, num_items = 1):
key = "indextank_cursor"
cursor = g.cache.get(key)
if cursor is None:
raise ValueError("%s is not set!" % key)
cursor = int(cursor)
while True:
if verbose:
print "Looking for %d items with _id < %d" % (num_items, cursor)
q = Link._query(sort = desc('_id'),
limit = num_items)
q._after(Link._byID(cursor))
last_date = None
for item in q:
cursor = item._id
last_date = item._date
amqp.add_item('indextank_changes', item._fullname,
message_id = item._fullname,
delivery_mode = amqp.DELIVERY_TRANSIENT)
g.cache.set(key, cursor)
if verbose:
if last_date:
last_date = last_date.strftime("%Y-%m-%d")
print ("Just enqueued %d items. New cursor=%s (%s). Sleeping %d seconds."
% (num_items, cursor, last_date, sleep_time))
sleep(sleep_time)
开发者ID:constantAmateur,项目名称:sciteit,代码行数:29,代码来源:indextank_backfill.py
示例17: rebuild_index
def rebuild_index(start_at=None, sleeptime=1, cls=Link, estimate=50000000,
chunk_size=1000):
if start_at is _REBUILD_INDEX_CACHE_KEY:
start_at = g.cache.get(start_at)
if not start_at:
raise ValueError("Told me to use '%s' key, but it's not set" %
_REBUILD_INDEX_CACHE_KEY)
q = cls._query(cls.c._deleted == (True, False),
sort=desc('_date'), data=True)
if start_at:
after = cls._by_fullname(start_at)
assert isinstance(after, cls)
q._after(after)
q = r2utils.fetch_things2(q, chunk_size=chunk_size)
q = r2utils.progress(q, verbosity=1000, estimate=estimate, persec=True,
key=_progress_key)
for chunk in r2utils.in_chunks(q, size=chunk_size):
for x in range(5):
try:
inject(chunk)
except httplib.HTTPException as err:
print "Got %s, sleeping %s secs" % (err, x)
time.sleep(x)
continue
else:
break
else:
raise err
last_update = chunk[-1]
g.cache.set(_REBUILD_INDEX_CACHE_KEY, last_update._fullname)
time.sleep(sleeptime)
开发者ID:ProfNandaa,项目名称:reddit,代码行数:32,代码来源:cloudsearch.py
示例18: subreddit_stats
def subreddit_stats(config, ranges):
    """Count yesterday's non-spam links and comments per subreddit and
    report how many subreddits had more than five."""
    def get_id(*args, **kwargs):
        # First matching thing id for the query, or None when empty.
        # Note: closes over the loop variable `kind` below.
        kwargs.setdefault('limit', 1)
        results = list(kind._query(*args, **kwargs))
        if not results:
            return None
        else:
            return results[0]._id

    sr_counts = defaultdict(int)
    for kind in (Link, Comment):
        thing_table, data_table = get_thing_table(kind._type_id)

        # Bound the scan by the first and last thing created yesterday.
        first_id = get_id(kind.c._date > ranges['yesterday'][0], sort=asc('_date'))
        last_id = get_id(kind.c._date < ranges['yesterday'][1], sort=desc('_date'))
        if not first_id or not last_id:
            continue

        stmt = sa.select([data_table.c.value, sa.func.count(data_table.c.value)],
                         (data_table.c.thing_id > first_id)
                         & (data_table.c.thing_id < last_id)
                         & (data_table.c.key == 'sr_id')
                         & (thing_table.c.thing_id == data_table.c.thing_id)
                         & (thing_table.c.spam == False),
                         group_by=data_table.c.value)
        for sr_id, count in stmt.execute():
            sr_counts[sr_id] += count

    active = sum(1 for count in sr_counts.itervalues() if count > 5)
    return {'subreddits_active_yesterday': active}
开发者ID:binarycoder,项目名称:reddit-plugin-about,代码行数:29,代码来源:generate_stats.py
示例19: port_cassavotes
def port_cassavotes():
    """Copy all link and comment votes from the relational store into
    their Cassandra equivalents."""
    from r2.models import Vote, Account, Link, Comment
    from r2.models.vote import CassandraVote, CassandraLinkVote, CassandraCommentVote
    from r2.lib.db.tdb_cassandra import CL
    from r2.lib.utils import fetch_things2, to36, progress

    rel_pairs = [(Vote.rel(Account, Link), CassandraLinkVote),
                 (Vote.rel(Account, Comment), CassandraCommentVote)]

    # Only these data attributes are carried over to the new rows.
    dataattrs = set(['valid_user', 'valid_thing', 'ip', 'organic'])

    for prel, crel in rel_pairs:
        vote_q = prel._query(sort=desc('_date'),
                             data=True,
                             eager_load=False)
        vote_q = fetch_things2(vote_q)
        vote_q = progress(vote_q, persec=True)
        for vote in vote_q:
            new_vote = crel(thing1_id = to36(vote._thing1_id),
                            thing2_id = to36(vote._thing2_id),
                            date=vote._date,
                            name=vote._name)
            for dkey, dval in vote._t.iteritems():
                if dkey in dataattrs:
                    setattr(new_vote, dkey, dval)
            new_vote._commit(write_consistency_level=CL.ONE)
开发者ID:MatsT,项目名称:reddit,代码行数:29,代码来源:migrate.py
示例20: by_award
def by_award(cls, award):
    """Return up to 500 trophies for *award*, newest first."""
    trophy_q = Trophy._query(Trophy.c._thing2_id == award._id,
                             eager_load = True, thing_data = True,
                             data = True,
                             sort = desc('_date'))
    trophy_q._limit = 500
    return list(trophy_q)
开发者ID:kevinrose,项目名称:diggit,代码行数:7,代码来源:award.py
注:本文中的r2.lib.db.operators.desc函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论