本文整理汇总了Python中redash.utils.gen_query_hash函数的典型用法代码示例。如果您正苦于以下问题:Python gen_query_hash函数的具体用法?Python gen_query_hash怎么用?Python gen_query_hash使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了gen_query_hash函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, redis_connection, query, priority,
             job_id=None,
             wait_time=None, query_time=None,
             updated_at=None, status=None, error=None, query_result_id=None,
             process_id=0):
    """Build a query-execution job, either brand new or rehydrated from Redis.

    A missing job_id means this is a fresh job: mint a UUID and start with
    zeroed timing fields; otherwise trust the persisted values passed in.
    """
    self.redis_connection = redis_connection
    self.query = query
    self.priority = priority
    self.query_hash = gen_query_hash(self.query)
    self.query_result_id = query_result_id
    # process_id round-trips through storage as a string; the literal
    # 'None' marks "no process attached".
    self.process_id = None if process_id == 'None' else int(process_id)

    self.new_job = job_id is None
    if self.new_job:
        # Fresh job: new identity, clean slate.
        self.id = str(uuid.uuid1())
        self.wait_time = 0
        self.query_time = 0
        self.error = None
        self.updated_at = time.time()  # job_dict.get('updated_at', time.time())
        self.status = self.WAITING  # int(job_dict.get('status', self.WAITING))
    else:
        # Rehydrated job: keep the stored state as-is.
        self.id = job_id
        self.error = error
        self.wait_time = wait_time
        self.query_time = query_time
        self.updated_at = updated_at
        self.status = status
开发者ID:lifey,项目名称:redash,代码行数:31,代码来源:worker.py
示例2: __init__
def __init__(self, task, query, data_source_id, user_id, metadata,
             scheduled_query):
    """Prepare a query execution: resolve the data source and user, then
    attach (or create) the task tracker for this celery task."""
    self.task = task
    self.query = query
    self.data_source_id = data_source_id
    self.metadata = metadata
    self.data_source = self._load_data_source()
    self.user = models.User.query.get(user_id) if user_id is not None else None
    # Close DB connection to prevent holding a connection for a long time while the query is executing.
    models.db.session.close()
    self.query_hash = gen_query_hash(self.query)
    self.scheduled_query = scheduled_query
    # Load existing tracker or create a new one if the job was created before code update:
    tracker = QueryTaskTracker.get_by_task_id(task.request.id)
    if tracker is None:
        tracker = QueryTaskTracker.create(
            task.request.id,
            'created',
            self.query_hash,
            self.data_source_id,
            False,
            metadata
        )
    self.tracker = tracker
    if self.tracker.scheduled:
        models.scheduled_queries_executions.update(self.tracker.query_id)
开发者ID:jonyboy2000,项目名称:redash,代码行数:29,代码来源:queries.py
示例3: pre_save
def pre_save(self, created):
    """Hook run just before persisting: refresh the query hash, ensure an
    API key exists, and stamp a last-modifier if none is set yet."""
    super(Query, self).pre_save(created)
    self.query_hash = utils.gen_query_hash(self.query)
    self._set_api_key()
    if self.last_modified_by is None:
        # Nothing modified this query yet — attribute it to its owner.
        self.last_modified_by = self.user
开发者ID:Drunkar,项目名称:redash,代码行数:7,代码来源:models.py
示例4: add_job
def add_job(self, query, priority, data_source):
    """Enqueue a job for `query`, deduplicating on the query hash.

    Uses a Redis WATCH/MULTI loop so concurrent callers either find the
    existing job or exactly one of them creates it. Returns the job, or
    None if all retries hit a WatchError.
    """
    query_hash = gen_query_hash(query)
    logging.info("[Manager][%s] Inserting job with priority=%s", query_hash, priority)
    lock_key = 'query_hash_job:%s' % query_hash
    job = None
    for _attempt in range(self.max_retries):
        pipe = self.redis_connection.pipeline()
        try:
            # Watch the lock so a concurrent writer aborts our transaction.
            pipe.watch(lock_key)
            job_id = pipe.get(lock_key)
            if job_id:
                logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)
                job = worker.Job.load(self.redis_connection, job_id)
            else:
                job = worker.Job(self.redis_connection, query=query, priority=priority,
                                 data_source_id=data_source.id,
                                 data_source_name=data_source.name,
                                 data_source_type=data_source.type,
                                 data_source_options=data_source.options)
                pipe.multi()
                job.save(pipe)
                logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
                self.queue.push(job.id, job.priority)
            break
        except redis.WatchError:
            # Someone else touched the lock; retry the whole transaction.
            continue
    if not job:
        logging.error("[Manager][%s] Failed adding job for query.", query_hash)
    return job
开发者ID:HasanAboShally,项目名称:redash,代码行数:35,代码来源:manager.py
示例5: enqueue_query
def enqueue_query(query, data_source, user_id, scheduled_query=None, metadata={}):
    """Enqueue `query` for async execution, deduplicating concurrent requests.

    Uses a per-(query_hash, data_source) Redis lock inside a WATCH/MULTI
    transaction: if a live job already holds the lock it is reused; a
    finished job's lock is cleared and a fresh celery task is submitted.
    Returns a QueryTask, or None if all 5 optimistic retries failed.

    NOTE(review): mutable default `metadata={}` is shared across calls —
    safe only as long as callees never mutate it; confirm.
    """
    query_hash = gen_query_hash(query)
    logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
    try_count = 0
    job = None
    while try_count < 5:
        try_count += 1
        pipe = redis_connection.pipeline()
        try:
            # Watch the lock key so a concurrent enqueue aborts this transaction.
            pipe.watch(_job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
            if job_id:
                logging.info("[%s] Found existing job: %s", query_hash, job_id)
                job = QueryTask(job_id=job_id)
                if job.ready():
                    # Stale lock left by a completed task — clear it and re-enqueue.
                    logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                    redis_connection.delete(_job_lock_id(query_hash, data_source.id))
                    job = None
            if not job:
                pipe.multi()
                time_limit = None
                if scheduled_query:
                    # Scheduled queries go to their own queue and get no time limit.
                    queue_name = data_source.scheduled_queue_name
                    scheduled_query_id = scheduled_query.id
                else:
                    queue_name = data_source.queue_name
                    scheduled_query_id = None
                    time_limit = settings.ADHOC_QUERY_TIME_LIMIT
                result = execute_query.apply_async(args=(query, data_source.id, metadata, user_id, scheduled_query_id),
                                                   queue=queue_name,
                                                   time_limit=time_limit)
                job = QueryTask(async_result=result)
                tracker = QueryTaskTracker.create(
                    result.id, 'created', query_hash, data_source.id,
                    scheduled_query is not None, metadata)
                tracker.save(connection=pipe)
                logging.info("[%s] Created new job: %s", query_hash, job.id)
                # The lock expires on its own so a crashed worker can't wedge the query forever.
                pipe.set(_job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                pipe.execute()
            break
        except redis.WatchError:
            # Lost the optimistic race; retry.
            continue
    if not job:
        logging.error("[Manager][%s] Failed adding job for query.", query_hash)
    return job
开发者ID:jonyboy2000,项目名称:redash,代码行数:57,代码来源:queries.py
示例6: enqueue_query
def enqueue_query(query, data_source, scheduled=False, metadata={}):
    """Enqueue `query` for async execution, deduplicating on a Redis lock.

    Same WATCH/MULTI pattern as the other variants: reuse a live job,
    clear a finished job's stale lock, otherwise submit a new celery task.
    Returns a QueryTask, or None if all 5 retries hit a WatchError.

    NOTE(review): mutable default `metadata={}` is shared across calls;
    safe only if never mutated.
    """
    query_hash = gen_query_hash(query)
    logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
    try_count = 0
    job = None
    while try_count < 5:
        try_count += 1
        pipe = redis_connection.pipeline()
        try:
            pipe.watch(_job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
            if job_id:
                logging.info("[%s] Found existing job: %s", query_hash, job_id)
                job = QueryTask(job_id=job_id)
                tracker = QueryTaskTracker.get_by_task_id(job_id, connection=pipe)
                # tracker might not exist, if it's an old job
                # NOTE(review): these counters look swapped — the `scheduled`
                # branch bumps `retries` while the ad-hoc branch bumps
                # `scheduled_retries`. Confirm intent before relying on either stat.
                if scheduled and tracker:
                    tracker.update(retries=tracker.retries+1)
                elif tracker:
                    tracker.update(scheduled_retries=tracker.scheduled_retries+1)
                if job.ready():
                    # Stale lock from a completed task — drop it and re-enqueue.
                    logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                    redis_connection.delete(_job_lock_id(query_hash, data_source.id))
                    job = None
            if not job:
                pipe.multi()
                if scheduled:
                    queue_name = data_source.scheduled_queue_name
                else:
                    queue_name = data_source.queue_name
                result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
                job = QueryTask(async_result=result)
                tracker = QueryTaskTracker.create(result.id, 'created', query_hash, data_source.id, scheduled, metadata)
                tracker.save(connection=pipe)
                logging.info("[%s] Created new job: %s", query_hash, job.id)
                # Expiring lock keeps a crashed worker from wedging this query forever.
                pipe.set(_job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                pipe.execute()
            break
        except redis.WatchError:
            # Lost the optimistic race; retry the transaction.
            continue
    if not job:
        logging.error("[Manager][%s] Failed adding job for query.", query_hash)
    return job
开发者ID:ChiragKParmar,项目名称:redash,代码行数:54,代码来源:queries.py
示例7: get_latest
def get_latest(cls, data_source, query, max_age=0):
    """Return the most recent cached result for `query` on `data_source`.

    max_age == -1 disables the freshness filter; otherwise only results
    retrieved within the last `max_age` seconds (UTC) qualify.
    """
    query_hash = utils.gen_query_hash(query)
    conditions = [cls.query_hash == query_hash, cls.data_source == data_source]
    if max_age != -1:
        # Freshness window evaluated in the database, in UTC.
        conditions.append(
            peewee.SQL("retrieved_at + interval '%s second' >= now() at time zone 'utc'",
                       max_age))
    latest = cls.select().where(*conditions).order_by(cls.retrieved_at.desc())
    return latest.first()
开发者ID:Drunkar,项目名称:redash,代码行数:12,代码来源:models.py
示例8: __init__
def __init__(self, task, query, data_source_id, metadata):
    """Prepare an execution: load the data source and hash the query, then
    attach the task tracker (creating one for pre-update jobs)."""
    self.task = task
    self.query = query
    self.data_source_id = data_source_id
    self.metadata = metadata
    self.data_source = self._load_data_source()
    self.query_hash = gen_query_hash(self.query)
    # Load existing tracker or create a new one if the job was created before code update:
    existing = QueryTaskTracker.get_by_task_id(task.request.id)
    self.tracker = existing or QueryTaskTracker.create(
        task.request.id,
        'created',
        self.query_hash,
        self.data_source_id,
        False,
        metadata,
    )
开发者ID:ChiragKParmar,项目名称:redash,代码行数:13,代码来源:queries.py
示例9: execute_query
def execute_query(self, query, data_source_id, metadata):
    """Celery task body: run `query` against a data source, store the
    result, and fan out alert checks for every query that uses it.

    Raises on query error so celery records the task as failed; otherwise
    returns the stored QueryResult id.
    """
    # Allow a clean interrupt of a long-running query.
    signal.signal(signal.SIGINT, signal_handler)
    start_time = time.time()
    logger.info("Loading data source (%d)...", data_source_id)
    # TODO: we should probably cache data sources in Redis
    data_source = models.DataSource.get_by_id(data_source_id)
    self.update_state(state="STARTED", meta={"start_time": start_time, "custom_message": ""})
    logger.info("Executing query:\n%s", query)
    query_hash = gen_query_hash(query)
    query_runner = get_query_runner(data_source.type, data_source.options)
    if query_runner.annotate_query():
        # Prefix the query with task metadata so it is identifiable in the
        # database's own query logs.
        metadata["Task ID"] = self.request.id
        metadata["Query Hash"] = query_hash
        metadata["Queue"] = self.request.delivery_info["routing_key"]
        annotation = u", ".join([u"{}: {}".format(k, v) for k, v in metadata.iteritems()])
        logging.debug(u"Annotation: %s", annotation)
        annotated_query = u"/* {} */ {}".format(annotation, query)
    else:
        annotated_query = query
    # Time the run per data source for statsd dashboards.
    with statsd_client.timer("query_runner.{}.{}.run_time".format(data_source.type, data_source.name)):
        data, error = query_runner.run_query(annotated_query)
    run_time = time.time() - start_time
    logger.info("Query finished... data length=%s, error=%s", data and len(data), error)
    self.update_state(state="STARTED", meta={"start_time": start_time, "error": error, "custom_message": ""})
    # Delete query_hash — releases the enqueue lock so the query can run again.
    redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
    if not error:
        query_result, updated_query_ids = models.QueryResult.store_result(
            data_source.id, query_hash, query, data, run_time, utils.utcnow()
        )
        # Any query now pointing at this result may have alerts to evaluate.
        for query_id in updated_query_ids:
            check_alerts_for_query.delay(query_id)
    else:
        raise Exception(error)
    return query_result.id
开发者ID:scottkrager,项目名称:redash,代码行数:50,代码来源:tasks.py
示例10: __init__
def __init__(self, task, query, data_source_id, user_id, is_api_key, metadata,
             scheduled_query):
    """Prepare a query execution: resolve data source and user, release the
    DB connection, and record the scheduled execution if applicable."""
    self.task = task
    self.query = query
    self.data_source_id = data_source_id
    self.metadata = metadata
    self.data_source = self._load_data_source()
    self.user = _resolve_user(user_id, is_api_key)
    # Close DB connection to prevent holding a connection for a long time while the query is executing.
    models.db.session.close()
    self.query_hash = gen_query_hash(self.query)
    self.scheduled_query = scheduled_query
    # Load existing tracker or create a new one if the job was created before code update:
    if scheduled_query:
        models.scheduled_queries_executions.update(scheduled_query.id)
开发者ID:ariarijp,项目名称:redash,代码行数:16,代码来源:queries.py
示例11: get_query_result
def get_query_result(self, query, ttl=0):
    """Return the freshest cached QueryResult for `query`, or None.

    Only results retrieved within the last `ttl` seconds qualify.
    `ttl` is injected with AsIs because it lives inside the interval
    literal, not in a parameter slot.
    """
    query_hash = gen_query_hash(query)
    with self.db_transaction() as cursor:
        # Fixed two defects in the SQL:
        #  - the comparison was `<`, which returned only results OLDER than
        #    the ttl — inverted for a cache-freshness lookup;
        #  - the string pieces concatenated as "...second'ORDER BY" with no
        #    separating space, which is invalid SQL.
        sql = (
            "SELECT id, query, data, runtime, retrieved_at, query_hash FROM query_results "
            "WHERE query_hash=%s "
            "AND retrieved_at > now() at time zone 'utc' - interval '%s second' "
            "ORDER BY retrieved_at DESC LIMIT 1"
        )
        cursor.execute(sql, (query_hash, psycopg2.extensions.AsIs(ttl)))
        query_result = cursor.fetchone()
        if query_result:
            query_result = QueryResult(*query_result)
    return query_result
开发者ID:revinate,项目名称:redash,代码行数:17,代码来源:manager.py
示例12: add_task
def add_task(cls, query, data_source, scheduled=False, metadata={}):
    """Submit `query` as a celery task, deduplicating on a Redis lock.

    WATCH/MULTI loop: reuse a live job holding the lock, clear a finished
    job's stale lock, otherwise submit a new task and set the (expiring)
    lock. Returns the task wrapper, or None if all retries lost the race.

    NOTE(review): mutable default `metadata={}` is shared across calls;
    safe only if never mutated.
    """
    query_hash = gen_query_hash(query)
    logging.info("[Manager][%s] Inserting job", query_hash)
    logging.info("[Manager] Metadata: [%s]", metadata)
    try_count = 0
    job = None
    while try_count < cls.MAX_RETRIES:
        try_count += 1
        pipe = redis_connection.pipeline()
        try:
            # Watch the lock so a concurrent enqueue aborts this transaction.
            pipe.watch(cls._job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))
            if job_id:
                logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)
                job = cls(job_id=job_id)
                if job.ready():
                    # Stale lock from a completed task — clear and re-enqueue.
                    logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                    redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
                    job = None
            if not job:
                pipe.multi()
                if scheduled:
                    queue_name = data_source.scheduled_queue_name
                else:
                    queue_name = data_source.queue_name
                result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
                job = cls(async_result=result)
                logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
                # Expiring lock keeps a crashed worker from wedging this query.
                pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                pipe.execute()
            break
        except redis.WatchError:
            # Lost the optimistic race; retry.
            continue
    if not job:
        logging.error("[Manager][%s] Failed adding job for query.", query_hash)
    return job
开发者ID:MiguelPeralvoPM,项目名称:redash,代码行数:46,代码来源:tasks.py
示例13: execute_query
def execute_query(self, query, data_source_id, metadata):
    """Celery task body: run `query` against its data source and store the
    result. Raises on query error so celery marks the task failed;
    otherwise returns the stored QueryResult id."""
    start_time = time.time()
    logger.info("Loading data source (%d)...", data_source_id)
    # TODO: we should probably cache data sources in Redis
    data_source = models.DataSource.get_by_id(data_source_id)
    self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})
    logger.info("Executing query:\n%s", query)
    query_hash = gen_query_hash(query)
    query_runner = get_query_runner(data_source.type, data_source.options)
    if query_runner.annotate_query():
        # Prefix the query with task metadata so it is identifiable in the
        # database's own query logs.
        metadata['Task ID'] = self.request.id
        metadata['Query Hash'] = query_hash
        metadata['Queue'] = self.request.delivery_info['routing_key']
        annotation = u", ".join([u"{}: {}".format(k, v) for k, v in metadata.iteritems()])
        logging.debug(u"Annotation: %s", annotation)
        annotated_query = u"/* {} */ {}".format(annotation, query)
    else:
        annotated_query = query
    # Time the run per data source for statsd dashboards.
    with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
        data, error = query_runner.run_query(annotated_query)
    run_time = time.time() - start_time
    logger.info("Query finished... data length=%s, error=%s", data and len(data), error)
    self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})
    # Delete query_hash — releases the enqueue lock so the query can run again.
    redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
    if not error:
        query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, utils.utcnow())
    else:
        raise Exception(error)
    return query_result.id
开发者ID:MiguelPeralvoPM,项目名称:redash,代码行数:45,代码来源:tasks.py
示例14: __init__
def __init__(self, task, query, data_source_id, user_id, metadata,
             scheduled_query):
    """Prepare a query execution: resolve data source and user, hash the
    query, and attach the task tracker (creating one for older jobs)."""
    self.task = task
    self.query = query
    self.data_source_id = data_source_id
    self.metadata = metadata
    self.data_source = self._load_data_source()
    self.user = models.User.query.get(user_id) if user_id is not None else None
    self.query_hash = gen_query_hash(self.query)
    self.scheduled_query = scheduled_query
    # Load existing tracker or create a new one if the job was created before code update:
    existing = QueryTaskTracker.get_by_task_id(task.request.id)
    self.tracker = existing or QueryTaskTracker.create(
        task.request.id,
        'created',
        self.query_hash,
        self.data_source_id,
        False,
        metadata,
    )
开发者ID:appfolio,项目名称:redash,代码行数:19,代码来源:queries.py
示例15: run_query_sync
def run_query_sync(data_source, parameter_values, query_text, max_age=0):
    """Run a query synchronously, serving a cached result when one exists.

    Renders mustache parameters (raising if any are missing), returns a
    cached QueryResult no older than `max_age` seconds when available,
    otherwise executes the query, stores the result, and returns it.
    Aborts the request (404/503) on execution failure.
    """
    query_parameters = set(collect_query_parameters(query_text))
    # collect_query_parameters already yields a set; compare directly.
    missing_params = query_parameters - set(parameter_values.keys())
    if missing_params:
        raise Exception('Missing parameter value for: {}'.format(", ".join(missing_params)))

    if query_parameters:
        query_text = mustache_render(query_text, parameter_values)

    # max_age <= 0 means "never use the cache".
    if max_age <= 0:
        query_result = None
    else:
        query_result = models.QueryResult.get_latest(data_source, query_text, max_age)

    query_hash = gen_query_hash(query_text)

    if query_result:
        # Lazy %-args instead of eager string formatting.
        logging.info("Returning cached result for query %s", query_hash)
        return query_result

    try:
        started_at = time.time()
        data, error = data_source.query_runner.run_query(query_text, current_user)

        if error:
            # Fixed typo in the log message ('got bak error') and include
            # the hash so failures are attributable.
            logging.info("got back error running query %s: %s", query_hash, error)
            return None

        run_time = time.time() - started_at
        query_result, updated_query_ids = models.QueryResult.store_result(data_source.org_id, data_source,
                                                                          query_hash, query_text, data,
                                                                          run_time, utcnow())
        models.db.session.commit()
        return query_result
    except Exception:
        # Previously the exception was silently discarded before aborting;
        # log it so the failure is diagnosable.
        logging.exception("Unable to get result for query %s", query_hash)
        if max_age > 0:
            abort(404, message="Unable to get result from the database, and no cached query result found.")
        else:
            abort(503, message="Unable to get result from the database.")
    return None
开发者ID:jonyboy2000,项目名称:redash,代码行数:42,代码来源:query_results.py
示例16: execute_query
def execute_query(self, query, data_source_id):
    """Celery task body (early variant): run `query` and store the result.

    Raises on query error so celery marks the task failed; otherwise
    returns the stored QueryResult id.
    """
    # TODO: maybe this should be a class?
    start_time = time.time()
    logger.info("Loading data source (%d)...", data_source_id)
    # TODO: we should probably cache data sources in Redis
    data_source = models.DataSource.get_by_id(data_source_id)
    self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})
    logger.info("Executing query:\n%s", query)
    query_hash = gen_query_hash(query)
    query_runner = get_query_runner(data_source.type, data_source.options)
    # Runners without an annotate_query attribute default to annotating.
    if getattr(query_runner, 'annotate_query', True):
        # TODO: annotate with queue name
        annotated_query = "/* Task Id: %s, Query hash: %s */ %s" % \
            (self.request.id, query_hash, query)
    else:
        annotated_query = query
    # Time the run per data source for statsd dashboards.
    with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
        # NOTE(review): in this old API the runner itself is the callable
        # returned by get_query_runner — confirm against that module.
        data, error = query_runner(annotated_query)
    run_time = time.time() - start_time
    logger.info("Query finished... data length=%s, error=%s", data and len(data), error)
    self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})
    # Delete query_hash — releases the enqueue lock so the query can run again.
    redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
    # TODO: it is possible that storing the data will fail, and we will need to retry
    # while we already marked the job as done
    if not error:
        query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())
    else:
        raise Exception(error)
    return query_result.id
开发者ID:MaTriXy,项目名称:redash,代码行数:42,代码来源:tasks.py
示例17: store_query_result
def store_query_result(self, query, data, run_time, retrieved_at):
    """Insert a query result row and point matching queries at it.

    Returns the new query_results id, or None if the insert failed.
    """
    result_id = None
    query_hash = gen_query_hash(query)
    insert_sql = (
        "INSERT INTO query_results (query_hash, query, data, runtime, retrieved_at) "
        "VALUES (%s, %s, %s, %s, %s) RETURNING id"
    )
    update_sql = "UPDATE queries SET latest_query_data_id=%s WHERE query_hash=%s"
    with self.db_transaction() as cursor:
        cursor.execute(insert_sql, (query_hash, query, data, run_time, retrieved_at))
        if cursor.rowcount != 1:
            logging.error("[Manager][%s] Failed inserting query data.", query_hash)
        else:
            result_id = cursor.fetchone()[0]
            logging.info("[Manager][%s] Inserted query data; id=%s", query_hash, result_id)
            # Re-point every query sharing this hash at the fresh result.
            cursor.execute(update_sql, (result_id, query_hash))
            logging.info("[Manager][%s] Updated %s queries.", query_hash, cursor.rowcount)
    return result_id
开发者ID:revinate,项目名称:redash,代码行数:20,代码来源:manager.py
示例18: get_latest
def get_latest(cls, data_source, query, max_age=0):
    """Return the most recent cached result for `query` on `data_source`.

    max_age == -1 disables the freshness filter; otherwise only results
    retrieved within the last `max_age` seconds (UTC) qualify.
    """
    query_hash = utils.gen_query_hash(query)
    candidates = cls.query.filter(
        cls.query_hash == query_hash,
        cls.data_source == data_source
    )
    if max_age != -1:
        # Freshness window computed in UTC on the database side.
        is_fresh = (
            db.func.timezone('utc', cls.retrieved_at) +
            datetime.timedelta(seconds=max_age) >=
            db.func.timezone('utc', db.func.now())
        )
        candidates = candidates.filter(is_fresh)
    return candidates.order_by(cls.retrieved_at.desc()).first()
开发者ID:ariarijp,项目名称:redash,代码行数:20,代码来源:__init__.py
示例19: store_query_result
def store_query_result(self, data_source_id, query, data, run_time, retrieved_at):
    """Persist a query result and re-point matching queries at it.

    Returns the id of the newly created QueryResult row.
    """
    query_hash = gen_query_hash(query)
    query_result = models.QueryResult.create(
        query_hash=query_hash,
        query=query,
        runtime=run_time,
        data_source=data_source_id,
        retrieved_at=retrieved_at,
        data=data,
    )
    logging.info("[Manager][%s] Inserted query data; id=%s", query_hash, query_result.id)
    # TODO: move this logic to the model?
    updated_count = (models.Query
                     .update(latest_query_data=query_result)
                     .where(models.Query.query_hash == query_hash,
                            models.Query.data_source == data_source_id)
                     .execute())
    logging.info("[Manager][%s] Updated %s queries.", query_hash, updated_count)
    return query_result.id
开发者ID:HasanAboShally,项目名称:redash,代码行数:20,代码来源:manager.py
示例20: apply_parameters
def apply_parameters(template, parameters, data_source):
    """Render `template` with `parameters` via SQLQuery, while only
    logging injection detections (never blocking on them).

    Always returns the raw rendered text (`query.query`): the try/except
    exists purely to exercise and observe the injection detector.
    """
    query = SQLQuery(template).apply(parameters)
    # for now we only log `SQLInjectionError` to detect false positives
    try:
        text = query.text
    except SQLInjectionError:
        # Record the detection for later analysis; do not block the query.
        record_event({
            'action': 'sql_injection',
            'object_type': 'query',
            'query': template,
            'parameters': parameters,
            'timestamp': time.time(),
            'org_id': data_source.org_id
        })
    except Exception as e:
        logging.info(u"Failed applying parameters for query %s: %s", gen_query_hash(query.query), e.message)
    finally:
        # Deliberate: unconditionally overwrite `text` with the raw rendered
        # query — the assignment in the try branch is only there to trigger
        # the detector, and its result is intentionally discarded.
        text = query.query
    return text
开发者ID:rohithreddy,项目名称:redash,代码行数:21,代码来源:query_results.py
注:本文中的redash.utils.gen_query_hash函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论