Python utils.to36 function code examples


This article collects typical usage examples of the r2.lib.utils.to36 function in Python. If you have been wondering what to36 does, how to call it, or what it looks like in real code, the curated examples below should help.



The following shows 20 code examples of the to36 function, ordered by popularity by default.
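
As the examples below show, to36 converts an integer ID into its base-36 string form (the "id36" that appears in reddit fullnames such as t3_xxxxx). For orientation, here is a minimal, hypothetical sketch of such a helper. It is not the actual r2.lib.utils implementation, only an illustration of the base-36 conversion the examples rely on:

import string

_DIGITS36 = string.digits + string.ascii_lowercase  # '0'-'9' then 'a'-'z'

def to36_sketch(n):
    """Illustrative base-36 encoder (assumed behaviour of r2.lib.utils.to36)."""
    if n < 0:
        raise ValueError("expected a non-negative integer")
    if n == 0:
        return "0"
    out = []
    while n:
        n, rem = divmod(n, 36)
        out.append(_DIGITS36[rem])
    return "".join(reversed(out))

print(to36_sketch(125))     # '3h'
print(to36_sketch(123456))  # '2n9c'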

Example 1: _to_fn

def _to_fn(cls, id_):
    '''Convert id_ to a fullname (equivalent to "link._fullname", but doesn't
    require an instance of the class)
    
    '''
    return (cls._type_prefix + r2utils.to36(cls._type_id) + '_' +
            r2utils.to36(id_))
Developer: shannonyu | Project: reddit | Lines: 7 | Source: cloudsearch.py
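
To make the output of _to_fn concrete, here is a small hypothetical usage sketch. The prefix 't' and type id 3 are assumed values chosen for illustration (the real values come from cls._type_prefix and cls._type_id of the Thing subclass), and to36_sketch is the illustrative encoder shown earlier:

type_prefix = "t"   # assumed; real code reads cls._type_prefix
type_id = 3         # assumed; real code reads cls._type_id

def to_fn_sketch(id_):
    # mirrors _to_fn: "<prefix><type id in base 36>_<thing id in base 36>"
    return type_prefix + to36_sketch(type_id) + "_" + to36_sketch(id_)

print(to_fn_sketch(123456))  # 't3_2n9c'

The resulting string has the same shape as the fullnames (type prefix + base-36 type id + "_" + base-36 thing id) that the later examples build and parse.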


Example 2: port_cassavotes

def port_cassavotes():
    from r2.models import Vote, Account, Link, Comment
    from r2.models.vote import CassandraVote, CassandraLinkVote, CassandraCommentVote
    from r2.lib.db.tdb_cassandra import CL
    from r2.lib.utils import fetch_things2, to36, progress
    # desc() is used for the date-sorted query below
    from r2.lib.db.operators import desc

    ts = [(Vote.rel(Account, Link), CassandraLinkVote),
          (Vote.rel(Account, Comment), CassandraCommentVote)]

    dataattrs = set(['valid_user', 'valid_thing', 'ip', 'organic'])

    for prel, crel in ts:
        vq = prel._query(sort=desc('_date'),
                         data=True,
                         eager_load=False)
        vq = fetch_things2(vq)
        vq = progress(vq, persec=True)
        for v in vq:
            t1 = to36(v._thing1_id)
            t2 = to36(v._thing2_id)
            cv = crel(thing1_id = t1,
                      thing2_id = t2,
                      date=v._date,
                      name=v._name)
            for dkey, dval in v._t.iteritems():
                if dkey in dataattrs:
                    setattr(cv, dkey, dval)

            cv._commit(write_consistency_level=CL.ONE)
Developer: MatsT | Project: reddit | Lines: 29 | Source: migrate.py


Example 3: __init__

    def __init__(self, link, depth, parent_id=None):
        if parent_id is not None:
            id36 = utils.to36(parent_id)
            self.parent_id = parent_id
            self.parent_name = "t%s_%s" % (utils.to36(Comment._type_id), id36)
            self.parent_permalink = link.make_permalink_slow() + id36
        self.link_name = link._fullname
        self.link_id = link._id
        self.depth = depth
        self.children = []
        self.count = 0
Developer: ericmoritz | Project: reddit | Lines: 11 | Source: link.py


Example 4: get_recommended

def get_recommended(userid, age = 2, sort='relevance', num_users=10):
    u = get_users_for_user(userid)[:num_users]
    if not u: return []

    voter = Vote.rels[(Account, Link)]

    tables = tdb.get_rel_type_table(voter._type_id)
    votertable = tables[0]
    acct_col = votertable.c.thing1_id
    link_col = votertable.c.thing2_id
    date_col = votertable.c.date
    count = sa.func.count(acct_col)

    linktable = tables[2]
#    dlinktable, linktable = tdb.types_id[Link._type_id].data_table
    link_id_col = linktable.c.thing_id

    query = [sa.or_(*[acct_col == x for x in u]),
             date_col > datetime.now(g.tz)-timedelta(age)]
    cols = [link_col, count]

    if sort == 'new':
        sort = 'date'
    elif sort == 'top':
        sort = 'score'

    if sort and sort != 'relevance':
        query.append(link_id_col == link_col)
        s = tdb.translate_sort(linktable, sort)
        order = [sa.desc(s), sa.desc(link_id_col)]
        cols = [link_id_col, count]
        group_by = [link_id_col, s]
    else:
        order = [sa.desc(count), sa.desc(link_col)]
        group_by = link_col

#    #TODO: wish I could just use query_rules
#    if c.user and c.user.subreddits:
#        query.append(dlinktable.c.thing_id == linktable.c.thing_id)
#        q = sa.and_(dlinktable.c.key == 'sr_id',
#                    sa.or_(*[dlinktable.c.value == x
#                             for x in c.user.subreddits]))
#        query.append(q)

    res = sa.select(cols, sa.and_(*query),
                    group_by=group_by,
                    order_by=order).execute()


    prefix = "t%s" % to36(Link._type_id)
    return ["%s_%s" % (prefix, to36(x)) for x, y in res.fetchall()]
Developer: DFectuoso | Project: culter | Lines: 51 | Source: recommendation.py


Example 5: __init__

    def __init__(self, link, depth, parent_id=None):
        from r2.lib.wrapped import CachedVariable

        if parent_id is not None:
            id36 = utils.to36(parent_id)
            self.parent_id = parent_id
            self.parent_name = "t%s_%s" % (utils.to36(Comment._type_id), id36)
            self.parent_permalink = link.make_permalink_slow() + id36
        self.link_name = link._fullname
        self.link_id = link._id
        self.depth = depth
        self.children = []
        self.count = 0
        self.previous_visits_hex = CachedVariable("previous_visits_hex")
Developer: ketralnis | Project: reddit | Lines: 14 | Source: link.py


Example 6: _process

    def _process(t):
        thing_id = t.thing_id
        id36 = to36(thing_id)

        link_id = t.link_id
        link_id36 = to36(link_id)

        ups, downs, timestamp = t.ups, t.downs, t.timestamp

        yield link_id36+'_controversy', id36, sorts.controversy(ups, downs)
        yield link_id36+'_hot',         id36, sorts._hot(ups, downs, timestamp)
        yield link_id36+'_confidence',  id36, sorts.confidence(ups, downs)
        yield link_id36+'_score',       id36, sorts.score(ups, downs)
        yield link_id36+'_date',        id36, timestamp
Developer: APerson241 | Project: reddit | Lines: 14 | Source: comment_sorts.py


Example 7: port_cassasaves

def port_cassasaves(after_id=None, estimate=12489897):
    from r2.models import SaveHide, CassandraSave
    from r2.lib.db.operators import desc
    from r2.lib.db.tdb_cassandra import CL
    from r2.lib.utils import fetch_things2, to36, progress

    q = SaveHide._query(SaveHide.c._name == "save", sort=desc("_date"), data=False, eager_load=False)

    if after_id is not None:
        q._after(SaveHide._byID(after_id))

    for sh in progress(fetch_things2(q), estimate=estimate):

        csh = CassandraSave(thing1_id=to36(sh._thing1_id), thing2_id=to36(sh._thing2_id), date=sh._date)
        csh._commit(write_consistency_level=CL.ONE)
Developer: ketralnis | Project: reddit | Lines: 15 | Source: migrate.py


Example 8: sup_json_cached

def sup_json_cached(period, last_time):
    #we need to re-add MIN_PERIOD because we moved back that far with
    #the call to make_last_time
    target_time = last_time + MIN_PERIOD - period

    updates = ''
    #loop backwards adding MIN_PERIOD chunks until last_time is as old
    #as target time
    while last_time >= target_time:
        updates += g.cache.get(cache_key(last_time)) or ''
        last_time -= MIN_PERIOD

    supdates = []
    if updates:
        for u in ifilter(None, updates.split(',')):
            sup_id, time = u.split(':')
            time = int(time)
            if time >= target_time:
                supdates.append([sup_id, to36(time)])

    update_time = datetime.utcnow()
    since_time = datetime.utcfromtimestamp(target_time)
    json = simplejson.dumps({'updated_time' : rfc3339_date_str(update_time),
                             'since_time' : rfc3339_date_str(since_time),
                             'period' : period,
                             'available_periods' : period_urls(),
                             'updates' : supdates})

    #undo json escaping
    json = json.replace('\/', '/')
    return json
Developer: constantAmateur | Project: sciteit | Lines: 31 | Source: sup.py


Example 9: _get_sr_restriction

    def _get_sr_restriction(sr):
        '''Return a solr-appropriate query string that restricts
        results to only contain results from self.sr

        '''
        bq = []
        if (not sr) or sr == All or isinstance(sr, DefaultSR):
            return None
        elif isinstance(sr, MultiReddit):
            for sr_id in sr.sr_ids:
                bq.append("sr_id:%s" % sr_id)
        elif isinstance(sr, DomainSR):
            bq = ["site:'%s'" % sr.domain]
        elif sr == Friends:
            if not c.user_is_loggedin or not c.user.friends:
                return None
            friend_ids = c.user.friends
            friends = ["author_fullname:'%s'" %
                       Account._fullname_from_id36(r2utils.to36(id_))
                       for id_ in friend_ids]
            bq.extend(friends)
        elif isinstance(sr, ModContribSR):
            for sr_id in sr.sr_ids:
                bq.append("sr_id:%s" % sr_id)
        elif not isinstance(sr, FakeSubreddit):
            bq = ["sr_id:%s" % sr._id]
        return ' OR '.join(bq)
Developer: AjaxGb | Project: reddit | Lines: 27 | Source: solr.py


Example 10: _get_sr_restriction

    def _get_sr_restriction(sr):
        """Return a cloudsearch appropriate query string that restricts
        results to only contain results from self.sr
        
        """
        bq = []
        if (not sr) or sr == All or isinstance(sr, DefaultSR):
            return None
        elif isinstance(sr, MultiReddit):
            bq = ["(or"]
            for sr_id in sr.sr_ids:
                bq.append("sr_id:%s" % sr_id)
            bq.append(")")
        elif isinstance(sr, DomainSR):
            bq = ["site:'%s'" % sr.domain]
        elif sr == Friends:
            if not c.user_is_loggedin or not c.user.friends:
                return None
            bq = ["(or"]
            # The query limit is roughly 8k bytes. Limit to 200 friends to
            # avoid getting too close to that limit
            friend_ids = c.user.friends[:200]
            friends = ["author_fullname:'%s'" % Account._fullname_from_id36(r2utils.to36(id_)) for id_ in friend_ids]
            bq.extend(friends)
            bq.append(")")
        elif isinstance(sr, ModContribSR):
            bq = ["(or"]
            for sr_id in sr.sr_ids:
                bq.append("sr_id:%s" % sr_id)
            bq.append(")")
        elif not isinstance(sr, FakeSubreddit):
            bq = ["sr_id:%s" % sr._id]

        return " ".join(bq)
Developer: jzplusplus | Project: reddit | Lines: 34 | Source: cloudsearch.py


Example 11: GET_oldinfo

    def GET_oldinfo(self, article, type, dest, rest=None, comment=''):
        """Legacy: supporting permalink pages from '06,
           and non-search-engine-friendly links"""
        if dest not in ('comments','related','details'):
            dest = 'comments'
        if type == 'ancient':
            #this could go in config, but it should never change
            max_link_id = 10000000
            new_id = max_link_id - int(article._id)
            return self.redirect('/info/' + to36(new_id) + '/' + rest)
        if type == 'old':
            new_url = "/%s/%s/%s" % \
                      (dest, article._id36, 
                       quote_plus(title_to_url(article.title).encode('utf-8')))
            if not c.default_sr:
                new_url = "/r/%s%s" % (c.site.name, new_url)
            if comment:
                new_url = new_url + "/%s" % comment._id36
            if c.extension:
                new_url = new_url + "/.%s" % c.extension

            new_url = new_url + query_string(request.get)

            # redirect should be smarter and handle extensions, etc.
            return self.redirect(new_url, code=301)
Developer: JediWatchman | Project: reddit | Lines: 25 | Source: front.py


Example 12: by_sr

    def by_sr(cls, sr_id, create=False):
        try:
            return cls._byID(to36(sr_id))
        except tdb_cassandra.NotFound:
            if create:
                return cls._new(sr_id)
            raise
Developer: 1900 | Project: reddit | Lines: 7 | Source: flair.py


Example 13: cached_query_wrapper

        def cached_query_wrapper(*args):
            # build the row key from the function name and arguments
            assert fn.__name__.startswith("get_")
            row_key_components = [fn.__name__[len('get_'):]]

            if len(args) > 0:
                # we want to accept either a Thing or a thing's ID at this
                # layer, but the query itself should always get just an ID
                if isinstance(args[0], Thing):
                    args = list(args)
                    args[0] = args[0]._id

                if isinstance(args[0], (int, long)):
                    serialized = to36(args[0])
                else:
                    serialized = str(args[0])
                row_key_components.append(serialized)

            row_key_components.extend(str(x) for x in args[1:])
            row_key = '.'.join(row_key_components)

            query = fn(*args)

            query_sort = query._sort
            try:
                is_precomputed = query.precomputed
            except AttributeError:
                is_precomputed = _is_query_precomputed(query)

            return CachedQuery(model, row_key, query_sort, filter_fn,
                               is_precomputed)
Developer: Robert77168 | Project: reddit | Lines: 31 | Source: query_cache.py


Example 14: add_target_fields

    def add_target_fields(self, target):
        if not target:
            return
        from r2.models import Comment, Link, Message

        self.add("target_id", target._id)
        self.add("target_fullname", target._fullname)
        self.add("target_type", target.__class__.__name__.lower())

        # If the target is an Account or Subreddit (or has a "name" attr),
        # add the target_name
        if hasattr(target, "name"):
            self.add("target_name", target.name)
        # Pass in the author of the target for comments, links, & messages
        elif isinstance(target, (Comment, Link, Message)):
            author = target.author_slow
            if target._deleted or author._deleted:
                self.add("target_author_id", 0)
                self.add("target_author_name", "[deleted]")
            else:
                self.add("target_author_id", author._id)
                self.add("target_author_name", author.name)
            if isinstance(target, Link) and not target.is_self:
                self.add("target_url", target.url)
                self.add("target_url_domain", target.link_domain())
            elif isinstance(target, Comment):
                link_fullname = Link._fullname_from_id36(to36(target.link_id))
                self.add("link_id", target.link_id)
                self.add("link_fullname", link_fullname)
Developer: Shilohtd | Project: reddit | Lines: 29 | Source: eventcollector.py


Example 15: cached_query_wrapper

        def cached_query_wrapper(*args):
            # build the row key from the function name and arguments
            assert fn.__name__.startswith("get_")
            row_key_components = [fn.__name__[len('get_'):]]

            if len(args) > 0:
                # we want to accept either a Thing or a thing's ID at this
                # layer, but the query itself should always get just an ID
                if isinstance(args[0], Thing):
                    args = list(args)
                    args[0] = args[0]._id

                thing_id = to36(args[0])
                row_key_components.append(thing_id)

            row_key_components.extend(str(x) for x in args[1:])
            row_key = '.'.join(row_key_components)

            query = fn(*args)

            if query:
                # sql-backed query
                query_sort = query._sort
                is_precomputed = _is_query_precomputed(query)
            else:
                # pure-cassandra query
                assert sort
                query_sort = sort
                is_precomputed = False

            return CachedQuery(model, row_key, query_sort, filter_fn,
                               is_precomputed)
Developer: BenHalberstam | Project: reddit | Lines: 32 | Source: query_cache.py


Example 16: add_props

    def add_props(cls, user, wrapped):
        #fetch parent links
        links = Link._byID(set(l.link_id for l in wrapped), True)
        

        #get srs for comments that don't have them (old comments)
        for cm in wrapped:
            if not hasattr(cm, 'sr_id'):
                cm.sr_id = links[cm.link_id].sr_id
        
        subreddits = Subreddit._byID(set(cm.sr_id for cm in wrapped),
                                     data=True,return_dict=False)
        can_reply_srs = set(s._id for s in subreddits if s.can_comment(user))

        min_score = c.user.pref_min_comment_score

        cids = dict((w._id, w) for w in wrapped)

        for item in wrapped:
            item.link = links.get(item.link_id)
            if not hasattr(item, 'subreddit'):
                item.subreddit = item.subreddit_slow
            if hasattr(item, 'parent_id'):
                parent = Comment._byID(item.parent_id, data=True)
                parent_author = Account._byID(parent.author_id, data=True)
                item.parent_author = parent_author

                if not c.full_comment_listing and cids.has_key(item.parent_id):
                    item.parent_permalink = '#' + utils.to36(item.parent_id)
                else:
                    item.parent_permalink = parent.make_anchored_permalink(item.link, item.subreddit)
            else:
                item.parent_permalink = None
                item.parent_author = None

            item.can_reply = (item.sr_id in can_reply_srs)

            # Don't allow users to vote on their own comments
            item.votable = bool(c.user != item.author)

            # not deleted on profile pages,
            # deleted if spam and not author or admin
            item.deleted = (not c.profilepage and
                           (item._deleted or
                            (item._spam and
                             item.author != c.user and
                             not item.show_spam)))

            # don't collapse for admins, on profile pages, or if deleted
            item.collapsed = ((item.score < min_score) and
                             not (c.profilepage or
                                  item.deleted or
                                  c.user_is_admin))
                
            if not hasattr(item,'editted'):
                item.editted = False
            #will get updated in builder
            item.num_children = 0
            item.score_fmt = Score.points
            item.permalink = item.make_permalink(item.link, item.subreddit)
Developer: Kakun1 | Project: lesswrong | Lines: 60 | Source: link.py


Example 17: _restrict_sr

    def _restrict_sr(sr):
        '''Return a cloudsearch appropriate query string that restricts
        results to only contain results from self.sr
        
        '''
        if isinstance(sr, MultiReddit):
            if not sr.sr_ids:
                raise InvalidQuery
            srs = ["sr_id:%s" % sr_id for sr_id in sr.sr_ids]
            return "(or %s)" % ' '.join(srs)
        elif isinstance(sr, DomainSR):
            return "site:'%s'" % sr.domain
        elif isinstance(sr, FriendsSR):
            if not c.user_is_loggedin or not c.user.friends:
                raise InvalidQuery
            # The query limit is roughly 8k bytes. Limit to 200 friends to
            # avoid getting too close to that limit
            friend_ids = c.user.friends[:200]
            friends = ["author_fullname:'%s'" %
                       Account._fullname_from_id36(r2utils.to36(id_))
                       for id_ in friend_ids]
            return "(or %s)" % ' '.join(friends)
        elif not isinstance(sr, FakeSubreddit):
            return "sr_id:%s" % sr._id

        return None
Developer: judys-io | Project: reddit | Lines: 26 | Source: cloudsearch.py


Example 18: query

    def query(self):
        if c.user_is_sponsor:
            if self.sort == "future_promos":
                return queries.get_all_unapproved_links()
            elif self.sort == "pending_promos":
                return queries.get_all_accepted_links()
            elif self.sort == "unpaid_promos":
                return queries.get_all_unpaid_links()
            elif self.sort == "rejected_promos":
                return queries.get_all_rejected_links()
            elif self.sort == "live_promos" and self.sr:
                return self.live_by_subreddit(self.sr)
            elif self.sort == 'live_promos':
                return queries.get_all_live_links()
            elif self.sort == 'underdelivered':
                q = queries.get_underdelivered_campaigns()
                campaigns = PromoCampaign._by_fullname(list(q), data=True,
                                                       return_dict=False)
                link_ids = [camp.link_id for camp in campaigns]
                return [Link._fullname_from_id36(to36(id)) for id in link_ids]
            elif self.sort == 'reported':
                return queries.get_reported_links(get_promote_srid())
            return queries.get_all_promoted_links()
        else:
            if self.sort == "future_promos":
                return queries.get_unapproved_links(c.user._id)
            elif self.sort == "pending_promos":
                return queries.get_accepted_links(c.user._id)
            elif self.sort == "unpaid_promos":
                return queries.get_unpaid_links(c.user._id)
            elif self.sort == "rejected_promos":
                return queries.get_rejected_links(c.user._id)
            elif self.sort == "live_promos":
                return queries.get_live_links(c.user._id)
            return queries.get_promoted_links(c.user._id)
Developer: Damgaard | Project: reddit | Lines: 35 | Source: promotecontroller.py


Example 19: queue_vote

def queue_vote(user, thing, dir, ip, organic = False,
               cheater = False, store = True):
    # set the vote in memcached so the UI gets updated immediately
    key = prequeued_vote_key(user, thing)
    g.cache.set(key, '1' if dir is True else '0' if dir is None else '-1')
    # queue the vote to be stored unless told not to
    if store:
        if g.amqp_host:
            if isinstance(thing, Link):
                if thing._id36 in g.live_config["fastlane_links"]:
                    qname = vote_fastlane_q
                else:
                    qname = vote_link_q

            elif isinstance(thing, Comment):
                if utils.to36(thing.link_id) in g.live_config["fastlane_links"]:
                    qname = vote_fastlane_q
                else:
                    qname = vote_comment_q
            else:
                log.warning("%s tried to vote on %r. that's not a link or comment!",
                            user, thing)
                return

            amqp.add_item(qname,
                          pickle.dumps((user._id, thing._fullname,
                                        dir, ip, organic, cheater)))
        else:
            handle_vote(user, thing, dir, ip, organic)
Developer: Anenome | Project: reddit | Lines: 29 | Source: queries.py


Example 20: _comment_page_links

def _comment_page_links(comment_page_data):
    for comment_info in comment_page_data:
        path = u"/r/{0}/comments/{1}/{2}/".format(
            comment_info.subreddit,
            to36(int(comment_info.thing_id)),
            urllib.quote(title_to_url(comment_info.title).encode("utf-8")),
        )
        yield _absolute_url(path)
Developer: zeantsoi | Project: reddit | Lines: 8 | Source: generate.py



Note: The r2.lib.utils.to36 examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not republish without permission.

