
Python redis_connection.delete Function Code Examples


This article collects typical usage examples of the Python redash.redis_connection.delete function. If you are wondering what exactly the delete function does, how to call it, or where to find real uses of it, the curated code examples here may help.



The following presents 18 code examples of the delete function, sorted by popularity by default.
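Before the project examples, here is a minimal, self-contained sketch of the basic delete() semantics. redash builds redis_connection as a standard redis-py client, so delete() behaves as documented for redis-py; the local Redis URL below is a placeholder assumption for illustration only.

import redis

# Mimic redash's redis_connection with a plain redis-py client.
redis_connection = redis.from_url("redis://localhost:6379/0")  # placeholder URL

redis_connection.set("query_hash_job:example", "job-id-123")

# delete() accepts one or more key names and returns the number of keys it
# actually removed. Deleting a nonexistent key is not an error; it simply
# contributes 0 to the count.
removed = redis_connection.delete("query_hash_job:example", "no_such_key")
print(removed)  # -> 1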

Example 1: cleanup_tasks

def cleanup_tasks():
    # In case of a cold restart of the workers, there might be jobs that still hold
    # their "lock" key but aren't actually going to run; this job removes them.
    lock_keys = redis_connection.keys("query_hash_job:*") # TODO: use set instead of keys command
    if not lock_keys:
        return
    
    query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]

    logger.info("Found %d locks", len(query_tasks))

    inspect = celery.control.inspect()
    active_tasks = inspect.active()
    if active_tasks is None:
        active_tasks = []
    else:
        active_tasks = active_tasks.values()

    all_tasks = set()
    for task_list in active_tasks:
        for task in task_list:
            all_tasks.add(task['id'])

    logger.info("Active jobs count: %d", len(all_tasks))

    for i, t in enumerate(query_tasks):
        if t.ready():
            # if locked task is ready already (failed, finished, revoked), we don't need the lock anymore
            logger.warning("%s is ready (%s), removing lock.", lock_keys[i], t.celery_status)
            redis_connection.delete(lock_keys[i])
Developer: MiguelPeralvoPM, Project: redash, Lines: 30, Source: tasks.py


Example 2: setUp

    def setUp(self):
        self.list = "test_list"
        redis_connection.delete(self.list)
        self.keys = []
        for score in range(0, 100):
            key = 'k:{}'.format(score)
            self.keys.append(key)
            redis_connection.zadd(self.list, score, key)
            redis_connection.set(key, 1)
Developer: 5t111111, Project: redash, Lines: 9, Source: test_queries.py


Example 3: _compare_and_update

def _compare_and_update(latest_version):
    # TODO: support alpha channel (allow setting which channel to check & parse build number)
    is_newer = semver.compare(current_version, latest_version) == -1
    logging.info("Latest version: %s (newer: %s)", latest_version, is_newer)

    if is_newer:
        redis_connection.set(REDIS_KEY, latest_version)
    else:
        redis_connection.delete(REDIS_KEY)
Developer: 13768324554, Project: redash, Lines: 9, Source: version_check.py


Example 4: enqueue_query

def enqueue_query(query, data_source, user_id, scheduled_query=None, metadata={}):
    query_hash = gen_query_hash(query)
    logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
    try_count = 0
    job = None

    while try_count < 5:
        try_count += 1

        pipe = redis_connection.pipeline()
        try:
            pipe.watch(_job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
            if job_id:
                logging.info("[%s] Found existing job: %s", query_hash, job_id)

                job = QueryTask(job_id=job_id)

                if job.ready():
                    logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                    redis_connection.delete(_job_lock_id(query_hash, data_source.id))
                    job = None

            if not job:
                pipe.multi()

                time_limit = None

                if scheduled_query:
                    queue_name = data_source.scheduled_queue_name
                    scheduled_query_id = scheduled_query.id
                else:
                    queue_name = data_source.queue_name
                    scheduled_query_id = None
                    time_limit = settings.ADHOC_QUERY_TIME_LIMIT

                result = execute_query.apply_async(args=(query, data_source.id, metadata, user_id, scheduled_query_id),
                                                   queue=queue_name,
                                                   time_limit=time_limit)
                job = QueryTask(async_result=result)
                tracker = QueryTaskTracker.create(
                    result.id, 'created', query_hash, data_source.id,
                    scheduled_query is not None, metadata)
                tracker.save(connection=pipe)

                logging.info("[%s] Created new job: %s", query_hash, job.id)
                pipe.set(_job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                pipe.execute()
            break

        except redis.WatchError:
            continue

    if not job:
        logging.error("[Manager][%s] Failed adding job for query.", query_hash)

    return job
Developer: jonyboy2000, Project: redash, Lines: 57, Source: queries.py
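This example (like examples 6, 9, and 13 below) relies on redis-py's optimistic-locking pattern: WATCH the lock key, and if another client modifies it before EXEC, the pipeline raises WatchError and the loop retries. The following stripped-down sketch isolates just that pattern; the function name, connection URL, and default values are illustrative assumptions, not redash code.

import redis

conn = redis.from_url("redis://localhost:6379/0")  # placeholder URL

def acquire_job_lock(lock_key, job_id, expiry_seconds=30, max_tries=5):
    # Set lock_key to job_id unless another client already holds or wins it.
    for _ in range(max_tries):
        pipe = conn.pipeline()
        try:
            pipe.watch(lock_key)           # immediate mode; the key is now watched
            existing = pipe.get(lock_key)  # reads execute right away after watch()
            if existing:
                pipe.unwatch()
                return existing            # someone else holds the lock
            pipe.multi()                   # switch to buffered transactional mode
            pipe.set(lock_key, job_id, ex=expiry_seconds)
            pipe.execute()                 # raises WatchError if lock_key changed
            return job_id
        except redis.WatchError:
            continue                       # lost the race; retry
    return None                            # gave up after max_tries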


Example 5: prune

    def prune(cls, list_name, keep_count, max_keys=100):
        count = redis_connection.zcard(list_name)
        if count <= keep_count:
            return 0

        remove_count = min(max_keys, count - keep_count)
        keys = redis_connection.zrange(list_name, 0, remove_count - 1)
        redis_connection.delete(*keys)
        redis_connection.zremrangebyrank(list_name, 0, remove_count - 1)
        return remove_count
Developer: jonyboy2000, Project: redash, Lines: 10, Source: queries.py
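A quick hypothetical run of the pruning pattern above, assuming redis_connection is a redis-py 3+ client (note that redis-py 3+ zadd takes a mapping, whereas the project examples use the older 2.x positional form); the key names here are made up for illustration.

# Index per-item keys in a sorted set, scored by some ordering value (e.g. time).
for score in range(10):
    key = 'tracker:{}'.format(score)
    redis_connection.zadd('trackers', {key: score})  # redis-py 3+ mapping form
    redis_connection.set(key, 'payload')

# Keep the 4 highest-scored entries. zrange returns the lowest-scored members
# first, so this slice selects the oldest items for removal.
remove_count = 10 - 4
keys = redis_connection.zrange('trackers', 0, remove_count - 1)
redis_connection.delete(*keys)                                     # drop the item keys...
redis_connection.zremrangebyrank('trackers', 0, remove_count - 1)  # ...and their index entries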


Example 6: enqueue_query

def enqueue_query(query, data_source, scheduled=False, metadata={}):
    query_hash = gen_query_hash(query)
    logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
    try_count = 0
    job = None

    while try_count < 5:
        try_count += 1

        pipe = redis_connection.pipeline()
        try:
            pipe.watch(_job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
            if job_id:
                logging.info("[%s] Found existing job: %s", query_hash, job_id)

                job = QueryTask(job_id=job_id)
                tracker = QueryTaskTracker.get_by_task_id(job_id, connection=pipe)
                # tracker might not exist, if it's an old job
                if scheduled and tracker:
                    tracker.update(retries=tracker.retries+1)
                elif tracker:
                    tracker.update(scheduled_retries=tracker.scheduled_retries+1)

                if job.ready():
                    logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                    redis_connection.delete(_job_lock_id(query_hash, data_source.id))
                    job = None

            if not job:
                pipe.multi()

                if scheduled:
                    queue_name = data_source.scheduled_queue_name
                else:
                    queue_name = data_source.queue_name

                result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
                job = QueryTask(async_result=result)
                tracker = QueryTaskTracker.create(result.id, 'created', query_hash, data_source.id, scheduled, metadata)
                tracker.save(connection=pipe)

                logging.info("[%s] Created new job: %s", query_hash, job.id)
                pipe.set(_job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                pipe.execute()
            break

        except redis.WatchError:
            continue

    if not job:
        logging.error("[Manager][%s] Failed adding job for query.", query_hash)

    return job
Developer: ChiragKParmar, Project: redash, Lines: 54, Source: queries.py


Example 7: prune

    def prune(cls, list_name, keep_count):
        count = redis_connection.zcard(list_name)
        if count <= keep_count:
            return 0

        remove_count = count - keep_count
        keys = redis_connection.zrange(list_name, 0, remove_count - 1)
        redis_connection.delete(*keys)  # unpack the list: delete() expects key names as separate arguments
        redis_connection.zremrangebyrank(list_name, 0, remove_count - 1)

        return remove_count
Developer: ChiragKParmar, Project: redash, Lines: 11, Source: queries.py


Example 8: execute_query

def execute_query(self, query, data_source_id, metadata):
    signal.signal(signal.SIGINT, signal_handler)
    start_time = time.time()

    logger.info("Loading data source (%d)...", data_source_id)

    # TODO: we should probably cache data sources in Redis
    data_source = models.DataSource.get_by_id(data_source_id)

    self.update_state(state="STARTED", meta={"start_time": start_time, "custom_message": ""})

    logger.info("Executing query:\n%s", query)

    query_hash = gen_query_hash(query)
    query_runner = get_query_runner(data_source.type, data_source.options)

    if query_runner.annotate_query():
        metadata["Task ID"] = self.request.id
        metadata["Query Hash"] = query_hash
        metadata["Queue"] = self.request.delivery_info["routing_key"]

        annotation = u", ".join([u"{}: {}".format(k, v) for k, v in metadata.iteritems()])

        logging.debug(u"Annotation: %s", annotation)

        annotated_query = u"/* {} */ {}".format(annotation, query)
    else:
        annotated_query = query

    with statsd_client.timer("query_runner.{}.{}.run_time".format(data_source.type, data_source.name)):
        data, error = query_runner.run_query(annotated_query)

    run_time = time.time() - start_time
    logger.info("Query finished... data length=%s, error=%s", data and len(data), error)

    self.update_state(state="STARTED", meta={"start_time": start_time, "error": error, "custom_message": ""})

    # Delete query_hash
    redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))

    if not error:
        query_result, updated_query_ids = models.QueryResult.store_result(
            data_source.id, query_hash, query, data, run_time, utils.utcnow()
        )
        for query_id in updated_query_ids:
            check_alerts_for_query.delay(query_id)
    else:
        raise Exception(error)

    return query_result.id
Developer: scottkrager, Project: redash, Lines: 50, Source: tasks.py


Example 9: add_task

    def add_task(cls, query, data_source, scheduled=False, metadata={}):
        query_hash = gen_query_hash(query)
        logging.info("[Manager][%s] Inserting job", query_hash)
        logging.info("[Manager] Metadata: [%s]", metadata)
        try_count = 0
        job = None
        
        while try_count < cls.MAX_RETRIES:
            try_count += 1

            pipe = redis_connection.pipeline()
            try:
                pipe.watch(cls._job_lock_id(query_hash, data_source.id))
                job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))
                if job_id:
                    logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)

                    job = cls(job_id=job_id)
                    if job.ready():
                        logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                        redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
                        job = None

                if not job:
                    pipe.multi()

                    if scheduled:
                        queue_name = data_source.scheduled_queue_name
                    else:
                        queue_name = data_source.queue_name

                    result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
                    job = cls(async_result=result)
                    
                    logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
                    pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                    pipe.execute()
                break

            except redis.WatchError:
                continue

        if not job:
            logging.error("[Manager][%s] Failed adding job for query.", query_hash)

        return job
Developer: MiguelPeralvoPM, Project: redash, Lines: 46, Source: tasks.py


Example 10: execute_query

def execute_query(self, query, data_source_id, metadata):
    start_time = time.time()

    logger.info("Loading data source (%d)...", data_source_id)

    # TODO: we should probably cache data sources in Redis
    data_source = models.DataSource.get_by_id(data_source_id)

    self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})

    logger.info("Executing query:\n%s", query)

    query_hash = gen_query_hash(query)
    query_runner = get_query_runner(data_source.type, data_source.options)

    if query_runner.annotate_query():
        metadata['Task ID'] = self.request.id
        metadata['Query Hash'] = query_hash
        metadata['Queue'] = self.request.delivery_info['routing_key']

        annotation = u", ".join([u"{}: {}".format(k, v) for k, v in metadata.iteritems()])

        logging.debug(u"Annotation: %s", annotation)

        annotated_query = u"/* {} */ {}".format(annotation, query)
    else:
        annotated_query = query

    with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
        data, error = query_runner.run_query(annotated_query)

    run_time = time.time() - start_time
    logger.info("Query finished... data length=%s, error=%s", data and len(data), error)

    self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})

    # Delete query_hash
    redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))

    if not error:
        query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, utils.utcnow())
    else:
        raise Exception(error)

    return query_result.id
Developer: MiguelPeralvoPM, Project: redash, Lines: 45, Source: tasks.py


Example 11: execute_query

def execute_query(self, query, data_source_id):
    # TODO: maybe this should be a class?
    start_time = time.time()

    logger.info("Loading data source (%d)...", data_source_id)

    # TODO: we should probably cache data sources in Redis
    data_source = models.DataSource.get_by_id(data_source_id)

    self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})

    logger.info("Executing query:\n%s", query)

    query_hash = gen_query_hash(query)
    query_runner = get_query_runner(data_source.type, data_source.options)

    if getattr(query_runner, 'annotate_query', True):
        # TODO: annotate with queue name
        annotated_query = "/* Task Id: %s, Query hash: %s */ %s" % \
                          (self.request.id, query_hash, query)
    else:
        annotated_query = query

    with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
        data, error = query_runner(annotated_query)

    run_time = time.time() - start_time
    logger.info("Query finished... data length=%s, error=%s", data and len(data), error)

    self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})

    # Delete query_hash
    redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))

    # TODO: it is possible that storing the data will fail, and we will need to retry
    # while we already marked the job as done
    if not error:
        query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())
    else:
        raise Exception(error)

    return query_result.id
Developer: MaTriXy, Project: redash, Lines: 42, Source: tasks.py


Example 12: execute_query

def execute_query(self, query, data_source_id, metadata):
    signal.signal(signal.SIGINT, signal_handler)
    start_time = time.time()

    logger.info("task=execute_query state=load_ds ds_id=%d", data_source_id)

    data_source = models.DataSource.get_by_id(data_source_id)

    self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})

    logger.debug("Executing query:\n%s", query)

    query_hash = gen_query_hash(query)
    query_runner = data_source.query_runner

    logger.info("task=execute_query state=before query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
                query_hash, data_source.type, data_source.id, self.request.id, self.request.delivery_info['routing_key'],
                metadata.get('Query ID', 'unknown'), metadata.get('Username', 'unknown'))

    if query_runner.annotate_query():
        metadata['Task ID'] = self.request.id
        metadata['Query Hash'] = query_hash
        metadata['Queue'] = self.request.delivery_info['routing_key']

        annotation = u", ".join([u"{}: {}".format(k, v) for k, v in metadata.iteritems()])

        logging.debug(u"Annotation: %s", annotation)

        annotated_query = u"/* {} */ {}".format(annotation, query)
    else:
        annotated_query = query

    with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
        data, error = query_runner.run_query(annotated_query)

    logger.info("task=execute_query state=after query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
                query_hash, data_source.type, data_source.id, self.request.id, self.request.delivery_info['routing_key'],
                metadata.get('Query ID', 'unknown'), metadata.get('Username', 'unknown'))

    run_time = time.time() - start_time
    logger.info("Query finished... data length=%s, error=%s", data and len(data), error)

    self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})

    # Delete query_hash
    redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))

    if not error:
        query_result, updated_query_ids = models.QueryResult.store_result(data_source.org_id, data_source.id, query_hash, query, data, run_time, utils.utcnow())
        logger.info("task=execute_query state=after_store query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
                    query_hash, data_source.type, data_source.id, self.request.id, self.request.delivery_info['routing_key'],
                    metadata.get('Query ID', 'unknown'), metadata.get('Username', 'unknown'))
        for query_id in updated_query_ids:
            check_alerts_for_query.delay(query_id)
        logger.info("task=execute_query state=after_alerts query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
                    query_hash, data_source.type, data_source.id, self.request.id, self.request.delivery_info['routing_key'],
                    metadata.get('Query ID', 'unknown'), metadata.get('Username', 'unknown'))
    else:
        raise QueryExecutionError(error)

    return query_result.id
Developer: hudl, Project: redash, Lines: 61, Source: tasks.py


Example 13: enqueue_query

def enqueue_query(query, data_source, user_id, is_api_key=False, scheduled_query=None, metadata={}):
    query_hash = gen_query_hash(query)
    logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
    try_count = 0
    job = None

    while try_count < 5:
        try_count += 1

        pipe = redis_connection.pipeline()
        try:
            pipe.watch(_job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
            if job_id:
                logging.info("[%s] Found existing job: %s", query_hash, job_id)

                job = QueryTask(job_id=job_id)

                if job.ready():
                    logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                    redis_connection.delete(_job_lock_id(query_hash, data_source.id))
                    job = None

            if not job:
                pipe.multi()

                time_limit = None

                if scheduled_query:
                    queue_name = data_source.scheduled_queue_name
                    scheduled_query_id = scheduled_query.id
                else:
                    queue_name = data_source.queue_name
                    scheduled_query_id = None
                    time_limit = settings.ADHOC_QUERY_TIME_LIMIT

                args = (query, data_source.id, metadata, user_id, scheduled_query_id, is_api_key)
                argsrepr = json_dumps({
                    'org_id': data_source.org_id,
                    'data_source_id': data_source.id,
                    'enqueue_time': time.time(),
                    'scheduled': scheduled_query_id is not None,
                    'query_id': metadata.get('Query ID'),
                    'user_id': user_id
                })

                result = execute_query.apply_async(args=args,
                                                   argsrepr=argsrepr,
                                                   queue=queue_name,
                                                   time_limit=time_limit)

                job = QueryTask(async_result=result)
                logging.info("[%s] Created new job: %s", query_hash, job.id)
                pipe.set(_job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                pipe.execute()
            break

        except redis.WatchError:
            continue

    if not job:
        logging.error("[Manager][%s] Failed adding job for query.", query_hash)

    return job
Developer: ariarijp, Project: redash, Lines: 64, Source: queries.py


Example 14:

from redash import redis_connection

if __name__ == '__main__':
    redis_connection.delete('query_task_trackers')
Developer: 13768324554, Project: redash, Lines: 4, Source: 0026_remove_query_trackers_redis_key.py


Example 15: setUp

    def setUp(self):
        self.list = "test_list"
        redis_connection.delete(self.list)
        for score in range(0, 100):
            redis_connection.zadd(self.list, score, 'k:{}'.format(score))
Developer: ChiragKParmar, Project: redash, Lines: 5, Source: test_queries.py


Example 16: _unlock

def _unlock(query_hash, data_source_id):
    redis_connection.delete(_job_lock_id(query_hash, data_source_id))
Developer: ariarijp, Project: redash, Lines: 2, Source: queries.py


Example 17: resume

    def resume(self):
        redis_connection.delete(self._pause_key())
Developer: Drunkar, Project: redash, Lines: 2, Source: models.py


Example 18: xrange

atfork.monkeypatch_os_fork_functions()
import atfork.stdlib_fixer
atfork.stdlib_fixer.fix_logging_module()

import time
from redash.data import worker
from redash import models, data_manager, redis_connection

if __name__ == '__main__':
    models.create_db(True, False)

    print "Creating data source..."
    data_source = models.DataSource.create(name="Concurrency", type="pg", options="dbname=postgres")

    print "Clear jobs/hashes:"
    redis_connection.delete("jobs")
    query_hashes = redis_connection.keys("query_hash_*")
    if query_hashes:
        redis_connection.delete(*query_hashes)

    starting_query_results_count = models.QueryResult.select().count()
    jobs_count = 5000
    workers_count = 10

    print "Creating jobs..."
    for i in xrange(jobs_count):
        query = "SELECT {}".format(i)
        print "Inserting: {}".format(query)
        data_manager.add_job(query=query, priority=worker.Job.LOW_PRIORITY,
                             data_source=data_source)
Developer: HasanAboShally, Project: redash, Lines: 30, Source: test_multithreading.py



Note: The redash.redis_connection.delete examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors; consult each project's license before redistributing or using the code. Do not reproduce without permission.

