本文整理汇总了Python中r2.lib.db.queries.get_domain_links函数的典型用法代码示例。如果您正苦于以下问题:Python get_domain_links函数的具体用法?Python get_domain_links怎么用?Python get_domain_links使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_domain_links函数的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: store_keys
def store_keys(cls, key, listing):
    """Look up query based on key, and update with provided listing.

    :param str key: key generated by :py:method:`make_key`
    :param list listing: sorted listing generated by
        `mr_reduce_max_per_key`, generally by :py:method:`write_permacache`
    """
    category, thing_cls, sort, time, uid = cls.split_key(key)

    # Resolve the precomputed query matching this (category, thing class).
    query = None
    if category == "user" and thing_cls == "link":
        query = queries._get_submitted(int(uid), sort, time)
    elif category == "user" and thing_cls == "comment":
        query = queries._get_comments(int(uid), sort, time)
    elif category == "sr" and thing_cls == "link":
        query = queries._get_links(int(uid), sort, time)
    elif category == "domain" and thing_cls == "link":
        query = queries.get_domain_links(uid, sort, time)
    assert query, 'unknown query type for {}'.format(key)

    # Reorder each (value, timestamp, fullname) entry into the
    # (fullname, value, timestamp) tuples the query store expects.
    item_tuples = [
        (fullname, float(value), float(timestamp))
        for value, timestamp, fullname in listing
    ]

    # we only need locking updates for non-time-based listings, since for
    # time-based ones we're the only ones that ever update it
    lock = time == 'all'
    query._replace(item_tuples, lock=lock)
开发者ID:zeantsoi,项目名称:reddit,代码行数:33,代码来源:mr_top.py
示例2: store_keys
def store_keys(key, maxes):
    """Parse a listing *key* and replace its cached items with *maxes*.

    Keys look like ``category/thing_cls/sort/time/id``; *maxes* holds
    ``(score..., thing fullname)`` rows emitted by the mapreduce job.
    """
    category, thing_cls, sort, time, thing_id = key.split("/")

    # Map (category, thing class) onto the matching precomputed query.
    query = None
    if thing_cls == "link":
        if category == "user":
            query = queries._get_submitted(int(thing_id), sort, time)
        elif category == "sr":
            query = queries._get_links(int(thing_id), sort, time)
        elif category == "domain":
            query = queries.get_domain_links(thing_id, sort, time)
    elif thing_cls == "comment" and category == "user":
        query = queries._get_comments(int(thing_id), sort, time)
    assert query, 'unknown query type for %s' % (key,)

    # Rotate each row so the fullname leads, with all scores as floats.
    item_tuples = []
    for item in maxes:
        scores = [float(x) for x in item[:-1]]
        item_tuples.append(tuple([item[-1]] + scores))

    # we only need locking updates for non-time-based listings, since for time-
    # based ones we're the only ones that ever update it
    lock = time == 'all'
    query._replace(item_tuples, lock=lock)
开发者ID:AHAMED750,项目名称:reddit,代码行数:26,代码来源:mr_top.py
示例3: process_message
def process_message(msgs, chan):
    """Update get_domain_links(), the Links by domain precomputed query.

    get_domain_links() is a CachedResult which is stored in permacache. To
    update these objects we need to do a read-modify-write which requires
    obtaining a lock. Sharding these updates by domain allows us to run
    multiple consumers (but ideally just one per shard) to avoid lock
    contention.

    :param msgs: batch of queue messages; each ``msg.body`` is presumably a
        Link fullname (passed straight to ``Link._by_fullname``)
    :param chan: queue channel (unused here; part of the consumer signature)
    """
    from r2.lib.db.queries import add_queries, get_domain_links
    # Set comprehension deduplicates fullnames repeated within the batch.
    link_names = {msg.body for msg in msgs}
    links = Link._by_fullname(link_names, return_dict=False)
    print 'Processing %r' % (links,)
    # Bucket each link under every domain permutation so all domain-level
    # listings that should include it get updated.
    links_by_domain = defaultdict(list)
    for link in links:
        parsed = UrlParser(link.url)
        # update the listings for all permutations of the link's domain
        for domain in parsed.domain_permutations():
            links_by_domain[domain].append(link)
    # NOTE(review): the loop variable rebinds the outer `links`; harmless
    # since the original list is no longer needed past this point.
    for d, links in links_by_domain.iteritems():
        with g.stats.get_timer("link_vote_processor.domain_queries"):
            add_queries(
                queries=[
                    get_domain_links(d, sort, "all") for sort in SORTS],
                insert_items=links,
            )
开发者ID:13steinj,项目名称:reddit,代码行数:32,代码来源:voting.py
示例4: store_keys
def store_keys(key, maxes):
    """Rebuild one precomputed listing named by *key* from mapreduce output.

    :param str key: listing identifier; one of the ``user-``, ``sr-``,
        ``domain/`` or userrel (``liked-`` etc.) formats handled below
    :param list maxes: per-key maxima from the mapreduce job; each item is
        ``(score..., thing fullname)`` except for the ``user-`` branch,
        which receives ``(timestamp, fullname)`` pairs
    """
    # we're building queries using queries.py, but we could make the
    # queries ourselves if we wanted to avoid the individual lookups
    # for accounts and subreddits.
    # Note that we're only generating the 'sr-' type queries here, but
    # we're also able to process the other listings generated by the
    # old migrate.mr_permacache for convenience

    def _score_tuples(items):
        # (score..., fullname) -> (fullname, float(score), ...). Using a
        # list comprehension instead of map() keeps the list concatenation
        # valid on Python 3 too (map() returns an iterator there), and
        # replaces three copies of the same expression.
        return [tuple([item[-1]] + [float(x) for x in item[:-1]])
                for item in items]

    userrel_fns = dict(liked=queries.get_liked,
                       disliked=queries.get_disliked,
                       saved=queries.get_saved,
                       hidden=queries.get_hidden)
    if key.startswith('user-'):
        acc_str, keytype, account_id = key.split('-')
        account_id = int(account_id)
        fn = queries.get_submitted if keytype == 'submitted' else queries.get_comments
        q = fn(Account._byID(account_id), 'new', 'all')
        # user listings carry (timestamp, fullname) pairs, not score rows
        q._insert_tuples([(fname, float(timestamp))
                          for (timestamp, fname)
                          in maxes])
    elif key.startswith('sr-'):
        sr_str, sort, time, sr_id = key.split('-')
        sr_id = int(sr_id)
        if sort == 'controversy':
            # I screwed this up in the mapper and it's too late to fix
            # it
            sort = 'controversial'
        q = queries.get_links(Subreddit._byID(sr_id), sort, time)
        q._insert_tuples(_score_tuples(maxes))
    elif key.startswith('domain/'):
        d_str, sort, time, domain = key.split('/')
        q = queries.get_domain_links(domain, sort, time)
        q._insert_tuples(_score_tuples(maxes))
    elif key.split('-')[0] in userrel_fns:
        key_type, account_id = key.split('-')
        account_id = int(account_id)
        fn = userrel_fns[key_type]
        q = fn(Account._byID(account_id))
        q._insert_tuples(_score_tuples(maxes))
开发者ID:3river,项目名称:reddit,代码行数:49,代码来源:mr_domains.py
示例5: store_keys
def store_keys(key, maxes):
    """Parse a listing *key* and replace its cached items with *maxes*.

    Keys look like ``category/thing_cls/sort/time/id``; *maxes* holds
    ``(score..., thing fullname)`` rows.
    """
    category, thing_cls, sort, time, thing_id = key.split("/")

    # Map (category, thing class) onto the matching precomputed query.
    query = None
    if thing_cls == "link":
        if category == "user":
            query = queries._get_submitted(int(thing_id), sort, time)
        elif category == "sr":
            query = queries._get_links(int(thing_id), sort, time)
        elif category == "domain":
            query = queries.get_domain_links(thing_id, sort, time)
    elif thing_cls == "comment" and category == "user":
        query = queries._get_comments(int(thing_id), sort, time)
    assert query

    # Rotate each row so the fullname leads, with all scores as floats.
    replacement = []
    for item in maxes:
        scores = [float(x) for x in item[:-1]]
        replacement.append(tuple([item[-1]] + scores))
    query._replace(replacement)
开发者ID:Sheesha1992,项目名称:reddit,代码行数:20,代码来源:mr_top.py
示例6: get_links
def get_links(self, sort, time):
    """Return the precomputed links listing for this domain.

    Thin wrapper that delegates to ``queries.get_domain_links`` with
    ``self.domain``; *sort* and *time* are passed through unchanged.
    """
    from r2.lib.db import queries
    return queries.get_domain_links(self.domain, sort, time)
开发者ID:nborwankar,项目名称:reddit,代码行数:4,代码来源:subreddit.py
注:本文中的r2.lib.db.queries.get_domain_links函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论