
Python redis_cache.get_redis_connection Function Code Examples


This article collects typical usage examples of the redis_cache.get_redis_connection function in Python. If you are unsure how get_redis_connection is used in practice, or are looking for concrete examples of calling it, the curated code samples below should help.



The following presents 20 code examples of the get_redis_connection function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
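
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the call pattern most of them share. It assumes a Django project whose CACHES setting defines a Redis backend for the 'default' alias (for example via django-redis, whose older releases exposed the module as redis_cache); the names bump_counter, demo_counter and demo_lock are purely illustrative.

from redis_cache import get_redis_connection

def bump_counter():
    # assumption: CACHES['default'] is backed by Redis, so this returns a raw redis-py client
    r = get_redis_connection('default')

    # any redis-py command is available on the raw client
    r.incr('demo_counter')

    # the lock pattern most of the examples below rely on: skip the work if the
    # key is already held, otherwise take a short-lived lock around it
    if not r.get('demo_lock'):
        with r.lock('demo_lock', timeout=60):
            pass  # work that must not run concurrently goes here

    return r.get('demo_counter')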

Example 1: clear_all

def clear_all():
	if not settings.DEBUG:
		raise Exception("Can't clear everything when not in DEBUG mode, dude")

	get_redis_connection().flushdb()
	base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
	db_path = os.path.join(base_dir, "cg/sqlite3.db")
	os.remove(db_path)
Developer: geoapi, Project: conceptgrapher, Lines of code: 8, Source file: data.py


Example 2: distr_object

def distr_object(t_object, **kwargs):
    redis_c = get_redis_connection('feed_storage')
    rfeed_list = kwargs.get('feed_list') 
    rt_object = pickle.dumps(t_object)
    if t_object.get_verbtext() == 'updatefcard':
        redis_fa = get_redis_connection('favlist')
        for i in rfeed_list:
            action = redis_fa.hget(str(i), t_object.get_mobject_id())
            if action.startswith('A') or (action.startswith('B') and str(i) in action):
                redis_c.lpush(str(i), rt_object)
    else:
        for i in rfeed_list:
            redis_c.lpush(i, rt_object)
Developer: allenling, Project: dbss, Lines of code: 13, Source file: utils.py


Example 3: get_models

def get_models(prefix, tag):
    """
    Returns the ids of the items tagged with tag,
    for example all questions that carry a given tag.
    """
    r = get_redis_connection()
    return r.smembers(tag_key(prefix, tag))
Developer: LifeMoroz, Project: faq, Lines of code: 7, Source file: tags.py


Example 4: process_message_task

def process_message_task(msg_id, from_mage=False, new_contact=False):
    """
    Processes a single incoming message through our queue.
    """
    r = get_redis_connection()
    msg = Msg.current_messages.filter(pk=msg_id, status=PENDING).select_related('org', 'contact', 'contact_urn', 'channel').first()

    # somebody already handled this message, move on
    if not msg:
        return

    # get a lock on this contact, we process messages one by one to prevent odd behavior in flow processing
    key = 'pcm_%d' % msg.contact_id
    if not r.get(key):
        with r.lock(key, timeout=120):
            print "M[%09d] Processing - %s" % (msg.id, msg.text)
            start = time.time()

            # if message was created in Mage...
            if from_mage:
                mage_handle_new_message(msg.org, msg)
                if new_contact:
                    mage_handle_new_contact(msg.org, msg.contact)

            Msg.process_message(msg)
            print "M[%09d] %08.3f s - %s" % (msg.id, time.time() - start, msg.text)
Developer: ewheeler, Project: rapidpro, Lines of code: 26, Source file: tasks.py


Example 5: get_or_open

    def get_or_open(cls, org, user, message, summary, assignee):
        r = get_redis_connection()
        with r.lock(CASE_LOCK_KEY % (org.pk, message.contact.uuid)):
            # if message is already associated with a case, return that
            if message.case:
                message.case.is_new = False
                return message.case

            # if message contact has an open case, return that
            existing_open = cls.get_open_for_contact_on(org, message.contact, timezone.now())
            if existing_open:
                existing_open.is_new = False
                return existing_open

            # suspend from groups, expire flows and archive messages
            message.contact.prepare_for_case()

            case = cls.objects.create(org=org, assignee=assignee, initial_message=message, contact=message.contact,
                                      summary=summary)
            case.is_new = True
            case.labels.add(*list(message.labels.all()))  # copy labels from message to new case

            # attach message to this case
            message.case = case
            message.save(update_fields=('case',))

            CaseAction.create(case, user, CaseAction.OPEN, assignee=assignee)

        return case
Developer: digideskio, Project: casepro, Lines of code: 29, Source file: models.py


Example 6: get_or_open

    def get_or_open(cls, org, user, message, summary, assignee):
        from casepro.profiles.models import Notification

        r = get_redis_connection()
        with r.lock(CASE_LOCK_KEY % (org.pk, message.contact.uuid)):
            message.refresh_from_db()

            # if message is already associated with a case, return that
            if message.case:
                message.case.is_new = False
                return message.case

            # suspend from groups, expire flows and archive messages
            message.contact.prepare_for_case()

            case = cls.objects.create(org=org, assignee=assignee, initial_message=message, contact=message.contact,
                                      summary=summary)
            case.is_new = True
            case.labels.add(*list(message.labels.all()))  # copy labels from message to new case
            case.watchers.add(user)

            # attach message to this case
            message.case = case
            message.save(update_fields=('case',))

            action = CaseAction.create(case, user, CaseAction.OPEN, assignee=assignee)

            for assignee_user in assignee.get_users():
                if assignee_user != user:
                    Notification.new_case_assignment(org, assignee_user, action)

        return case
Developer: praekelt, Project: casepro, Lines of code: 32, Source file: models.py


Example 7: get_or_open

    def get_or_open(cls, org, user, labels, message, summary, assignee, archive_messages=True):
        r = get_redis_connection()
        with r.lock('org:%d:cases_lock' % org.pk):
            # check for open case with this contact
            existing_open = cls.get_open_for_contact_on(org, message.contact, timezone.now())
            if existing_open:
                existing_open.is_new = False
                return existing_open

            # check for another case (possibly closed) connected to this message
            existing_for_msg = cls.objects.filter(message_id=message.id).first()
            if existing_for_msg:
                existing_for_msg.is_new = False
                return existing_for_msg

            case = cls.objects.create(org=org, assignee=assignee, contact_uuid=message.contact,
                                      summary=summary, message_id=message.id, message_on=message.created_on)
            case.is_new = True
            case.labels.add(*labels)

            CaseAction.create(case, user, CaseAction.OPEN, assignee=assignee)

            # archive any labelled messages from this contact
            if archive_messages:
                Contact.archive_messages(org, message.contact)

        return case
Developer: juniorsilver, Project: casepro, Lines of code: 27, Source file: models.py


Example 8: squash_channelcounts

def squash_channelcounts():
    r = get_redis_connection()

    key = 'squash_channelcounts'
    if not r.get(key):
        with r.lock(key, timeout=900):
            ChannelCount.squash_counts()
Developer: Ebaneck, Project: rapidpro, Lines of code: 7, Source file: tasks.py


Example 9: squash_flowruncounts

def squash_flowruncounts():
    r = get_redis_connection()

    key = 'squash_flowruncounts'
    if not r.get(key):
        with r.lock(key, timeout=900):
            FlowRunCount.squash_counts()
Developer: Ebaneck, Project: rapidpro, Lines of code: 7, Source file: tasks.py


Example 10: process_run_timeout

def process_run_timeout(run_id, timeout_on):
    """
    Processes a single run timeout
    """
    from temba.flows.models import FlowRun

    r = get_redis_connection()
    run = FlowRun.objects.filter(id=run_id, is_active=True, flow__is_active=True).first()

    if run:
        key = 'pcm_%d' % run.contact_id
        if not r.get(key):
            with r.lock(key, timeout=120):
                print "T[%09d] Processing timeout" % run.id
                start = time.time()

                run.refresh_from_db()

                # this is still the timeout to process (json doesn't have microseconds so close enough)
                if run.timeout_on and abs(run.timeout_on - timeout_on) < timedelta(milliseconds=1):
                    run.resume_after_timeout()
                else:
                    print "T[%09d] .. skipping timeout, already handled" % run.id

                print "T[%09d] %08.3f s" % (run.id, time.time() - start)
Developer: ewheeler, Project: rapidpro, Lines of code: 25, Source file: tasks.py


Example 11: check_campaigns_task

def check_campaigns_task(sched_id=None):
    """
    See if any event fires need to be triggered
    """
    logger = check_campaigns_task.get_logger()

    # get a lock
    r = get_redis_connection()

    key = "check_campaigns"

    # only do this if we aren't already checking campaigns
    if not r.get(key):
        with r.lock(key, timeout=3600):
            # for each that needs to be fired
            for fire in EventFire.objects.filter(fired=None, scheduled__lte=timezone.now()):
                try:
                    key = "fire_campaign_%d" % fire.pk
                    if not r.get(key):
                        # try to acquire a lock
                        with r.lock("fire_campaign_%d" % fire.pk, timeout=120):
                            # reload it
                            fire = EventFire.objects.get(id=fire.pk)
                            if not fire.fired:
                                fire.fire()

                except:  # pragma: no cover
                    logger.error("Error running campaign event: %s" % fire.pk, exc_info=True)
Developer: mdheyab, Project: rapidpro, Lines of code: 28, Source file: tasks.py


Example 12: squash_topupcredits

def squash_topupcredits():
    r = get_redis_connection()

    key = 'squash_topupcredits'
    if not r.get(key):
        with r.lock(key, timeout=900):
            TopUpCredits.squash_credits()
Developer: Maximus325, Project: rapidpro, Lines of code: 7, Source file: tasks.py


Example 13: get_top

def get_top(max_tags=10, offset=0):
    r = get_redis_connection()
    # zrevrange returns the first max_tags entries of the sorted set,
    # starting at offset; entries are ordered by score (here, by the
    # number of questions in which each tag is used)
    return r.zrevrange(key_all_tags, offset, max_tags)
Developer: LifeMoroz, Project: faq, Lines of code: 7, Source file: tags.py


Example 14: push_task

def push_task(org, queue, task_name, args, priority=DEFAULT_PRIORITY):
    """
    Adds a task to queue_name with the supplied arguments.

    Ex: add_task(nyaruka, 'flows', 'start_flow', [1,2,3,4,5,6,7,8,9,10])
    """
    r = get_redis_connection('default')

    # calculate our score from the current time and priority, this could get us in trouble
    # if things are queued for more than ~100 days, but otherwise gives us the properties of prioritizing
    # first based on priority, then insertion order.
    score = time.time() + priority

    # push our task onto the right queue and make sure it is in the active list (atomically)
    with r.pipeline() as pipe:
        key = "%s:%d" % (task_name, org.id)
        pipe.zadd(key, dict_to_json(args), score)

        # and make sure this key is in our list of queues so this job will get worked on
        pipe.sadd("%s:active" % task_name, key)
        pipe.execute()

    # if we were given a queue to schedule on, then add this task to celery.
    #
    # note that the task that is fired needs no arguments as it should just use pop_task with the
    # task name to determine what to work on.
    if queue:
        if getattr(settings, 'CELERY_ALWAYS_EAGER', False):
            task_function = lookup_task_function(task_name)
            task_function()
        else:
            current_app.send_task(task_name, args=[], kwargs={}, queue=queue)
Developer: MOconcepts, Project: rapidpro, Lines of code: 32, Source file: queues.py


Example 15: pop_task

def pop_task(task_name):
    """
    Pops the next 'random' task off our queue, returning the arguments that were saved

    Ex: pop_next_task('start_flow')
    <<< {flow=5, contacts=[1,2,3,4,5,6,7,8,9,10]}
    """
    r = get_redis_connection('default')

    task = None
    active_set = "%s:active" % task_name

    # get what queue we will work against
    queue = r.srandmember(active_set)

    while queue:
        # this lua script does both a "zpop" (popping the next highest thing off our sorted set) and
        # a clearing of our active set if there is no value in it as an atomic action
        lua = "local val = redis.call('zrange', ARGV[2], 0, 0) \n" \
              "if next(val) == nil then redis.call('srem', ARGV[1], ARGV[2]) return nil \n"\
              "else redis.call('zremrangebyrank', ARGV[2], 0, 0) return val[1] end\n"

        task = r.eval(lua, 2, 'active_set', 'queue', active_set, queue)

        # found a task? then break out
        if task is not None:
            task = json.loads(task)
            break

        # if we didn't get a task, then run again against a new queue until there is nothing left in our task queue
        queue = r.srandmember(active_set)

    return task
Developer: MOconcepts, Project: rapidpro, Lines of code: 33, Source file: queues.py


Example 16: invalidate_cache

    def invalidate_cache(cls, contact_field=None, ruleset=None, group=None):
        """
        Used to invalidate our summary cache for values. Callers should pass in one (and only one) of a contact field,
        ruleset or group that changed and all result summaries that have changed will be invalidated accordingly.
        :return: how many cached records were invalidated
        """
        if not contact_field and not ruleset and not group:
            raise ValueError("You must specify a contact field, ruleset or group to invalidate results for")

        if contact_field:
            key = CONTACT_KEY % contact_field.id
        elif group:
            key = GROUP_KEY % group.id
        elif ruleset:
            key = RULESET_KEY % ruleset.id

        # blow away any redis items that contain our key as a dependency
        r = get_redis_connection()
        dependent_results = r.smembers(key)

        # save ourselves a roundtrip if there are no matches
        if dependent_results:
            # clear all our dependencies
            pipe = r.pipeline()
            pipe.srem(key, *dependent_results)
            pipe.delete(*dependent_results)
            pipe.execute()

        return len(dependent_results)
Developer: Spring-Apps, Project: rapidpro, Lines of code: 29, Source file: models.py


Example 17: squash_systemlabels

def squash_systemlabels():
    r = get_redis_connection()

    key = 'squash_systemlabels'
    if not r.get(key):
        with r.lock(key, timeout=900):
            SystemLabel.squash_counts()
Developer: ewheeler, Project: rapidpro, Lines of code: 7, Source file: tasks.py


Example 18: handle_event_task

def handle_event_task():
    """
    Priority queue task that handles both event fires (when fired) and new incoming
    messages that need to be handled.

    Currently two types of events may be "popped" from our queue:
           msg - Which contains the id of the Msg to be processed
          fire - Which contains the id of the EventFire that needs to be fired
    """
    from temba.campaigns.models import EventFire
    r = get_redis_connection()

    # pop off the next task
    event_task = pop_task(HANDLE_EVENT_TASK)

    # it is possible we have no message to send, if so, just return
    if not event_task:
        return

    if event_task['type'] == MSG_EVENT:
        process_message_task(event_task['id'], event_task.get('from_mage', False), event_task.get('new_contact', False))

    elif event_task['type'] == FIRE_EVENT:
        # use a lock to make sure we don't do two at once somehow
        with r.lock('fire_campaign_%s' % event_task['id'], timeout=120):
            event = EventFire.objects.filter(pk=event_task['id'], fired=None).first()
            if event:
                event.fire()

    else:
        raise Exception("Unexpected event type: %s" % event_task)
Developer: harykeyrun, Project: rapidpro, Lines of code: 31, Source file: tasks.py


Example 19: send_msg_task

def send_msg_task():
    """
    Pops the next message off of our msg queue to send.
    """
    # pop off the next task
    msg_tasks = pop_task(SEND_MSG_TASK)

    # it is possible we have no message to send, if so, just return
    if not msg_tasks:
        return

    if not isinstance(msg_tasks, list):
        msg_tasks = [msg_tasks]

    r = get_redis_connection()

    # acquire a lock on our contact to make sure two sets of msgs aren't being sent at the same time
    try:
        with r.lock('send_contact_%d' % msg_tasks[0]['contact'], timeout=300):
            # send each of our msgs
            while msg_tasks:
                msg_task = msg_tasks.pop(0)
                msg = dict_to_struct('MockMsg', msg_task,
                                     datetime_fields=['modified_on', 'sent_on', 'created_on', 'queued_on', 'next_attempt'])
                Channel.send_message(msg)

                # if there are more messages to send for this contact, sleep a second before moving on
                if msg_tasks:
                    time.sleep(1)

    finally:  # pragma: no cover
        # if some msgs weren't sent for some reason, then requeue them for later sending
        if msg_tasks:
            # requeue any unsent msgs
            push_task(msg_tasks[0]['org'], MSG_QUEUE, SEND_MSG_TASK, msg_tasks)
Developer: Ebaneck, Project: rapidpro, Lines of code: 35, Source file: tasks.py


Example 20: invalidate_cache

    def invalidate_cache(cls, contact_field=None, ruleset=None, group=None):
        """
        Used to invalidate our summary cache for values. Callers should pass in one (and only one) of a contact field,
        ruleset or group that changed and all result summaries that have changed will be invalidated accordingly.
        :return: how many cached records were invalidated
        """
        if not contact_field and not ruleset and not group:
            raise Exception("You must specify a contact field, ruleset or group to invalidate results for")

        if contact_field:
            key = ':' + (CONTACT_KEY % contact_field.id) + ':'
        elif group:
            key = ':' + (GROUP_KEY % group.id) + ':'
        elif ruleset:
            key = ':' + (RULESET_KEY % ruleset.id) + ':'

        # blow away any redis items that contain our key as a dependency
        r = get_redis_connection()
        keys = r.keys(VALUE_SUMMARY_CACHE_KEY + "*" + key + "*")
        if keys:
            invalidated = r.delete(*keys)
        else:
            invalidated = 0

        return invalidated
Developer: TextoCMR, Project: TexTo.cm, Lines of code: 25, Source file: models.py



Note: The redis_cache.get_redis_connection examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs and similar platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or reusing the code, and do not repost without permission.

