本文整理汇总了Python中sentry.cache.default_cache.get函数的典型用法代码示例。如果您正苦于以下问题:Python get函数的具体用法?Python get怎么用?Python get使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: process_event
def process_event(cache_key, start_time=None, **kwargs):
    """Run version-2 plugin preprocessors over a cached event payload and
    then queue ``save_event`` for persistence."""
    from sentry.plugins import plugins

    data = default_cache.get(cache_key)
    if data is None:
        # Payload expired or was never written to the cache; record and bail.
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'process'})
        error_logger.error('process.failed.empty', extra={'cache_key': cache_key})
        return

    project = data['project']
    Raven.tags_context({
        'project': project,
    })

    # TODO(dcramer): ideally we would know if data changed by default
    mutated = False
    for plugin in plugins.all(version=2):
        candidates = safe_execute(
            plugin.get_event_preprocessors, data=data, _with_transaction=False
        ) or ()
        for preprocess in candidates:
            outcome = safe_execute(preprocess, data)
            if outcome:
                data, mutated = outcome, True

    assert data['project'] == project, 'Project cannot be mutated by preprocessor'

    # Only write back when a preprocessor actually produced new data.
    if mutated:
        default_cache.set(cache_key, data, 3600)

    save_event.delay(cache_key=cache_key, data=None, start_time=start_time)
开发者ID:mvaled,项目名称:sentry,代码行数:31,代码来源:store.py
示例2: get
def get(self, request, wizard_hash=None):
    """
    This tries to retrieve and return the cache content if possible,
    otherwise it creates a new cache entry.

    With ``wizard_hash`` given: return the cached wizard data (404 when the
    hash is unknown, 400 while the entry is still the 'empty' placeholder).
    Without it: mint a fresh random hash (rate limited per client IP) and
    seed its cache slot with the 'empty' placeholder.
    """
    if wizard_hash is not None:
        key = '%s%s' % (SETUP_WIZARD_CACHE_KEY, wizard_hash)
        wizard_data = default_cache.get(key)

        if wizard_data is None:
            return Response(status=404)
        elif wizard_data == 'empty':
            # when we just created a clean cache
            return Response(status=400)

        return Response(serialize(wizard_data))
    else:
        # This creates a new available hash url for the project wizard
        rate_limited = ratelimits.is_limited(
            key='rl:setup-wizard:ip:%s' % request.META['REMOTE_ADDR'],
            limit=10,
        )
        if rate_limited:
            logger.info('setup-wizard.rate-limit')
            # BUG FIX: the 403 body used to be a *set* literal with a
            # garbled message ("Too wizard requests"); sets are not JSON
            # serializable so the response could not render. Use a dict.
            return Response(
                {
                    'detail': 'Too many wizard requests',
                }, status=403
            )

        # BUG FIX: the digit '8' was missing from the allowed alphabet
        # ('...012345679'), slightly reducing hash entropy; use the full
        # 0-9 range.
        wizard_hash = get_random_string(
            64, allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789')

        key = '%s%s' % (SETUP_WIZARD_CACHE_KEY, wizard_hash)
        default_cache.set(key, 'empty', SETUP_WIZARD_CACHE_TIMEOUT)
        return Response(serialize({'hash': wizard_hash}))
开发者ID:Kayle009,项目名称:sentry,代码行数:35,代码来源:setup_wizard.py
示例3: preprocess_event
def preprocess_event(cache_key=None, data=None, start_time=None, **kwargs):
    """Decide whether a cached event needs plugin preprocessing; if so,
    defer to the ``process_event`` queue, otherwise go straight to
    ``save_event``."""
    from sentry.plugins import plugins

    if cache_key:
        data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'pre'})
        error_logger.error('preprocess.failed.empty', extra={'cache_key': cache_key})
        return

    project = data['project']
    Raven.tags_context({
        'project': project,
    })

    # A plugin yields processors only when it can actually operate on this
    # payload; a single yielded processor is enough to defer the real work
    # to the process_event queue.
    for plugin in plugins.all(version=2):
        yielded = safe_execute(
            plugin.get_event_preprocessors, data=data, _with_transaction=False
        )
        if any(True for _ in (yielded or ())):
            process_event.delay(cache_key=cache_key, start_time=start_time)
            return

    # No preprocessing needed; jump directly to save_event.  Avoid sending
    # the payload twice when it already lives in the cache.
    save_event.delay(
        cache_key=cache_key,
        data=None if cache_key else data,
        start_time=start_time,
    )
开发者ID:mvaled,项目名称:sentry,代码行数:32,代码来源:store.py
示例4: save_event
def save_event(cache_key=None, data=None, start_time=None, event_id=None, **kwargs):
    """
    Saves an event to the database.
    """
    from sentry.event_manager import EventManager

    if cache_key:
        data = default_cache.get(cache_key)

    if event_id is None and data is not None:
        event_id = data['event_id']

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'post'})
        return

    project = data.pop('project')
    delete_raw_event(project, event_id)
    Raven.tags_context({
        'project': project,
    })

    try:
        EventManager(data).save(project)
    finally:
        # Always drop the cached payload; on success record the end-to-end
        # latency from initial receipt.
        if cache_key:
            default_cache.delete(cache_key)
        if start_time:
            metrics.timing('events.time-to-process', time() - start_time,
                           instance=data['platform'])
开发者ID:rlugojr,项目名称:sentry,代码行数:33,代码来源:store.py
示例5: reprocess_minidump
def reprocess_minidump(data):
    """Reprocess a minidump event, either locally with CFI or via the
    external symbolicator service, guarded by a "already reprocessed" cache
    flag so the work runs at most once per event (within the TTL).
    """
    project = Project.objects.get_from_cache(id=data['project'])

    # Skip events that were already reprocessed recently.
    minidump_is_reprocessed_cache_key = minidump_reprocessed_cache_key_for_event(data)
    if default_cache.get(minidump_is_reprocessed_cache_key):
        return

    if not _is_symbolicator_enabled(project, data):
        # Symbolicator disabled for this project: fall back to local CFI
        # reprocessing and mark the event as handled.
        rv = reprocess_minidump_with_cfi(data)
        default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)
        return rv

    minidump = get_attached_minidump(data)

    if not minidump:
        logger.error("Missing minidump for minidump event")
        return

    request_id_cache_key = request_id_cache_key_for_event(data)

    response = run_symbolicator(
        project=project,
        request_id_cache_key=request_id_cache_key,
        create_task=create_minidump_task,
        minidump=make_buffered_slice_reader(minidump.data, None)
    )

    # Merge the symbolicator result into the event only when the response
    # status indicates success (helper decides).
    if handle_symbolicator_response_status(data, response):
        merge_symbolicator_minidump_response(data, response)

    # Persist the (possibly updated) event payload and set the
    # reprocessed flag, both with a one-hour TTL.
    event_cache_key = cache_key_for_event(data)
    default_cache.set(event_cache_key, dict(data), 3600)
    default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)

    return data
开发者ID:yaoqi,项目名称:sentry,代码行数:35,代码来源:plugin.py
示例6: _do_preprocess_event
def _do_preprocess_event(cache_key, data, start_time, event_id,
                         process_event):
    """Load the event payload (from cache when a key is given) and route it
    either to the processing queue or straight to ``save_event``."""
    if cache_key:
        data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'pre'})
        error_logger.error('preprocess.failed.empty',
                           extra={'cache_key': cache_key})
        return

    Raven.tags_context({
        'project': data['project'],
    })

    if should_process(data):
        # Processing required -- hand off to the dedicated queue.
        process_event.delay(cache_key=cache_key, start_time=start_time,
                            event_id=event_id)
        return

    # No preprocessing needed, so jump directly to save_event; when the
    # payload lives in the cache we do not resend it over the wire.
    save_event.delay(
        cache_key=cache_key,
        data=None if cache_key else data,
        start_time=start_time,
        event_id=event_id,
    )
开发者ID:rlugojr,项目名称:sentry,代码行数:27,代码来源:store.py
示例7: _update_cachefiles
def _update_cachefiles(self, project, debug_files, cls):
    """Convert debug files into cache files, remembering conversion errors
    in the cache so known-bad files are skipped on later runs.

    Returns ``(converted, conversion_errors)`` where ``converted`` is a
    list of ``(debug_id, file, cache)`` tuples and ``conversion_errors``
    maps debug ids to their recorded error.
    """
    converted = []
    conversion_errors = {}

    for debug_file in debug_files:
        debug_id = debug_file.debug_id

        # Find all the known bad files we could not convert last time.  We
        # use the debug identifier and file checksum to identify the
        # source DIF for historic reasons (debug_file.id would do, too).
        error_key = 'scbe:%s:%s' % (debug_id, debug_file.file.checksum)
        known_error = default_cache.get(error_key)
        if known_error is not None:
            conversion_errors[debug_id] = known_error
            continue

        # Download the original debug symbol and convert the object file to
        # a cache.  This can yield a cache object, an error, or neither.
        # THE FILE DOWNLOAD CAN TAKE SIGNIFICANT TIME.
        with debug_file.file.getfile(as_tempfile=True) as tf:
            dif_file, dif_cache, conv_err = self._update_cachefile(debug_file, tf.name, cls)

        # Store this conversion error so that we can skip subsequent
        # conversions.  There might be concurrent conversions running for
        # the same debug file, however.
        if conv_err is not None:
            default_cache.set(error_key, conv_err, CONVERSION_ERROR_TTL)
            conversion_errors[debug_id] = conv_err
            continue

        if dif_file is not None or dif_cache is not None:
            converted.append((debug_id, dif_file, dif_cache))

    return converted, conversion_errors
开发者ID:getsentry,项目名称:sentry,代码行数:34,代码来源:debugfile.py
示例8: _do_preprocess_event
def _do_preprocess_event(cache_key, data, start_time, event_id, process_event):
    """Fetch the event payload, tag the current scope with its project, and
    dispatch either to the processing queue or directly to ``save_event``."""
    if cache_key:
        data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'pre'}, skip_internal=False)
        error_logger.error('preprocess.failed.empty', extra={'cache_key': cache_key})
        return

    data = CanonicalKeyDict(data)
    project_id = data['project']

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    if should_process(data):
        # Needs preprocessing -- defer to the process queue.
        process_event.delay(cache_key=cache_key, start_time=start_time, event_id=event_id)
        return

    # Nothing to preprocess; persist directly.  Skip resending the payload
    # when it already lives in the cache.
    save_event.delay(
        cache_key=cache_key,
        data=None if cache_key else data,
        start_time=start_time,
        event_id=event_id,
        project_id=project_id,
    )
开发者ID:Kayle009,项目名称:sentry,代码行数:27,代码来源:store.py
示例9: preprocess_event
def preprocess_event(cache_key=None, data=None, start_time=None, **kwargs):
    """Apply version-2 plugin preprocessors to the event payload, write the
    result back to the cache when it changed, then queue ``save_event``."""
    from sentry.plugins import plugins

    if cache_key:
        data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'pre'})
        logger.error('Data not available in preprocess_event (cache_key=%s)', cache_key)
        return

    project = data['project']

    # TODO(dcramer): ideally we would know if data changed by default
    dirty = False
    for plugin in plugins.all(version=2):
        for preprocess in (safe_execute(plugin.get_event_preprocessors) or ()):
            replacement = safe_execute(preprocess, data)
            if replacement:
                data, dirty = replacement, True

    assert data['project'] == project, 'Project cannot be mutated by preprocessor'

    # Only persist the mutated payload when it actually lives in the cache.
    if dirty and cache_key:
        default_cache.set(cache_key, data, 3600)

    save_event.delay(
        cache_key=cache_key,
        data=None if cache_key else data,
        start_time=start_time,
    )
开发者ID:noah-lee,项目名称:sentry,代码行数:30,代码来源:store.py
示例10: _update_cachefiles
def _update_cachefiles(self, project, debug_files):
    """Convert debug files into symcaches, skipping files whose previous
    conversion failure is still recorded in the cache.

    Returns ``(symcaches, conversion_errors)``.
    """
    symcaches = []

    # First pass: collect the known bad files we could not convert last
    # time around so the second pass can skip them outright.
    conversion_errors = {}
    for debug_file in debug_files:
        marker = 'scbe:%s:%s' % (debug_file.debug_id, debug_file.file.checksum)
        known_error = default_cache.get(marker)
        if known_error is not None:
            conversion_errors[debug_file.debug_id] = known_error

    # Second pass: convert everything that is not known to be bad.
    for debug_file in debug_files:
        debug_id = debug_file.debug_id
        if debug_id in conversion_errors:
            continue

        with debug_file.file.getfile(as_tempfile=True) as tf:
            symcache_file, conversion_error = self._update_cachefile(
                debug_file, tf)

        if symcache_file is not None:
            symcaches.append((debug_id, symcache_file))
        elif conversion_error is not None:
            conversion_errors[debug_id] = conversion_error

    return symcaches, conversion_errors
开发者ID:hosmelq,项目名称:sentry,代码行数:26,代码来源:dsymfile.py
示例11: create_failed_event
def create_failed_event(cache_key, project, issues, event_id):
    """If processing failed we put the original data from the cache into a
    raw event.
    """
    delete_raw_event(project, event_id)

    # Re-read the original payload from the cache instead of trusting data
    # passed in from the last processing step, because we do not want any
    # of its modifications to take place.
    data = default_cache.get(cache_key)
    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'raw'})
        error_logger.error('process.failed_raw.empty', extra={'cache_key': cache_key})
        return

    from sentry.models import RawEvent, ProcessingIssue

    occurred_at = datetime.utcfromtimestamp(data['timestamp']).replace(tzinfo=timezone.utc)
    raw_event = RawEvent.objects.create(
        project_id=project,
        event_id=event_id,
        datetime=occurred_at,
        data=data,
    )

    # One ProcessingIssue record per reported issue.
    for issue in issues:
        ProcessingIssue.objects.record_processing_issue(
            raw_event=raw_event,
            scope=issue['scope'],
            object=issue['object'],
            type=issue['type'],
            data=issue['data'],
        )

    # The payload now lives on the RawEvent; drop the cache entry.
    default_cache.delete(cache_key)
开发者ID:rlugojr,项目名称:sentry,代码行数:33,代码来源:store.py
示例12: create_failed_event
def create_failed_event(cache_key, project_id, issues, event_id, start_time=None):
    """If processing failed we put the original data from the cache into a
    raw event. Returns `True` if a failed event was inserted
    """
    # Whether reprocessing is enabled is a per-project option; the default
    # comes from REPROCESSING_DEFAULT.
    reprocessing_active = ProjectOption.objects.get_value(
        project_id, 'sentry:reprocessing_active', REPROCESSING_DEFAULT
    )

    # The first time we encounter a failed event and the hint was cleared
    # we send a notification.
    sent_notification = ProjectOption.objects.get_value(
        project_id, 'sentry:sent_failed_event_hint', False
    )
    if not sent_notification:
        project = Project.objects.get_from_cache(id=project_id)
        Activity.objects.create(
            type=Activity.NEW_PROCESSING_ISSUES,
            project=project,
            datetime=to_datetime(start_time),
            data={'reprocessing_active': reprocessing_active,
                  'issues': issues},
        ).send_notification()
        # Remember that the hint was sent so we only notify once.
        ProjectOption.objects.set_value(project, 'sentry:sent_failed_event_hint', True)

    # If reprocessing is not active we bail now without creating the
    # processing issues
    if not reprocessing_active:
        return False

    # We need to get the original data here instead of passing the data in
    # from the last processing step because we do not want any
    # modifications to take place.
    delete_raw_event(project_id, event_id)
    data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'raw'})
        error_logger.error('process.failed_raw.empty', extra={'cache_key': cache_key})
        # Still counts as "handled": the caller must not continue the
        # normal save path.
        return True

    from sentry.models import RawEvent, ProcessingIssue
    raw_event = RawEvent.objects.create(
        project_id=project_id,
        event_id=event_id,
        datetime=datetime.utcfromtimestamp(data['timestamp']).replace(tzinfo=timezone.utc),
        data=data
    )

    # Record one ProcessingIssue per reported issue so the UI can surface
    # what went wrong.
    for issue in issues:
        ProcessingIssue.objects.record_processing_issue(
            raw_event=raw_event,
            scope=issue['scope'],
            object=issue['object'],
            type=issue['type'],
            data=issue['data'],
        )

    # Payload is persisted on the RawEvent now; drop the cache entry.
    default_cache.delete(cache_key)

    return True
开发者ID:alshopov,项目名称:sentry,代码行数:59,代码来源:store.py
示例13: get_assemble_status
def get_assemble_status(project, checksum):
    """For a given file it checks what the current status of the assembling is.

    Returns a tuple in the form ``(status, details)`` where details is
    either `None` or a string identifying an error condition or notice;
    ``(None, None)`` means nothing is known about this checksum.
    """
    cache_key = 'assemble-status:%s' % _get_idempotency_id(
        project, checksum)
    cached = default_cache.get(cache_key)
    return (None, None) if cached is None else tuple(cached)
开发者ID:hosmelq,项目名称:sentry,代码行数:11,代码来源:dsymfile.py
示例14: public_dsn
def public_dsn():
    """Return the public DSN string for the frontend project, memoized in
    the default cache for 60 seconds (an empty string is cached too)."""
    project_id = settings.SENTRY_FRONTEND_PROJECT or settings.SENTRY_PROJECT
    cache_key = 'dsn:%s' % (project_id, )

    dsn = default_cache.get(cache_key)
    if dsn is None:
        project_key = _get_project_key(project_id)
        # Cache even the empty result so repeated lookups stay cheap.
        dsn = project_key.dsn_public if project_key else ''
        default_cache.set(cache_key, dsn, 60)

    return dsn
开发者ID:alexandrul,项目名称:sentry,代码行数:13,代码来源:sentry_dsn.py
示例15: _do_process_event
def _do_process_event(cache_key, start_time, event_id):
    """Fetch an event payload from the cache, run stacktrace and plugin
    processors over it, then either record a failed event (when processors
    reported issues) or queue ``save_event``.
    """
    from sentry.plugins import plugins

    data = default_cache.get(cache_key)

    if data is None:
        # Payload expired or was never cached.
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'process'})
        error_logger.error('process.failed.empty',
                           extra={'cache_key': cache_key})
        return

    project = data['project']
    Raven.tags_context({
        'project': project,
    })
    has_changed = False

    # Stacktrace based event processors. These run before anything else.
    new_data = process_stacktraces(data)
    if new_data is not None:
        has_changed = True
        data = new_data

    # TODO(dcramer): ideally we would know if data changed by default
    # Default event processors.
    for plugin in plugins.all(version=2):
        processors = safe_execute(plugin.get_event_preprocessors,
                                  data=data, _with_transaction=False)
        for processor in (processors or ()):
            result = safe_execute(processor, data)
            if result:
                data = result
                has_changed = True

    assert data['project'] == project, 'Project cannot be mutated by preprocessor'

    if has_changed:
        # If processors recorded issues, divert to the failed-event path
        # instead of continuing to save_event.
        issues = data.get('processing_issues')
        if issues:
            create_failed_event(cache_key, project, list(issues.values()),
                                event_id=event_id)
            return

        # Write the mutated payload back so save_event picks it up.
        default_cache.set(cache_key, data, 3600)

    save_event.delay(cache_key=cache_key, data=None, start_time=start_time,
                     event_id=event_id)
开发者ID:rlugojr,项目名称:sentry,代码行数:47,代码来源:store.py
示例16: save_event
def save_event(cache_key=None, data=None, start_time=None, event_id=None, **kwargs):
    """
    Saves an event to the database.
    """
    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import tsdb

    # Prefer the cached payload when a cache key is given.
    if cache_key:
        data = default_cache.get(cache_key)

    if event_id is None and data is not None:
        event_id = data['event_id']

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'post'})
        return

    project = data.pop('project')

    delete_raw_event(project, event_id, allow_hint_clear=True)

    Raven.tags_context({
        'project': project,
    })

    try:
        manager = EventManager(data)
        manager.save(project)
    except HashDiscarded as exc:
        # TODO(jess): remove this before it goes out to a wider audience
        # NOTE(review): `exc.message` is a Python 2 idiom and is gone on
        # Python 3 -- confirm the target runtime.
        info_logger.info(
            'discarded.hash', extra={
                'project_id': project,
                'description': exc.message,
            }
        )
        # Count discarded events in the project time-series stats.
        tsdb.incr(tsdb.models.project_total_received_discarded, project, timestamp=start_time)
    finally:
        # Drop the cached payload regardless of the save outcome.
        if cache_key:
            default_cache.delete(cache_key)
        if start_time:
            # NOTE(review): if 'platform' is missing this KeyError inside
            # `finally` would mask the original exception -- confirm the
            # payload always carries a platform.
            metrics.timing(
                'events.time-to-process',
                time() - start_time,
                instance=data['platform'])
开发者ID:alshopov,项目名称:sentry,代码行数:45,代码来源:store.py
示例17: save_event
def save_event(cache_key=None, data=None, **kwargs):
    """
    Saves an event to the database.
    """
    from sentry.event_manager import EventManager

    if cache_key:
        data = default_cache.get(cache_key)
    if data is None:
        return

    project = data.pop('project')
    try:
        EventManager(data).save(project)
    finally:
        # Drop the cached payload whether or not the save succeeded.
        if cache_key:
            default_cache.delete(cache_key)
开发者ID:Juraldinio,项目名称:sentry,代码行数:20,代码来源:store.py
示例18: save_event
def save_event(cache_key=None, data=None, start_time=None, **kwargs):
    """
    Saves an event to the database.
    """
    from sentry.event_manager import EventManager

    if cache_key:
        data = default_cache.get(cache_key)
    if data is None:
        return

    project = data.pop('project')
    try:
        EventManager(data).save(project)
    finally:
        # Drop the cached payload whether or not the save succeeded, then
        # record the end-to-end latency from initial receipt.
        if cache_key:
            default_cache.delete(cache_key)
        if start_time:
            metrics.timing('events.time-to-process', time() - start_time)
开发者ID:carriercomm,项目名称:sentry-1,代码行数:22,代码来源:store.py
示例19: _do_preprocess_event
def _do_preprocess_event(cache_key, data, start_time, event_id, process_task):
    """Load the event payload if not supplied, tag the scope, and submit it
    either for processing or straight for saving."""
    if cache_key and data is None:
        data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'pre'}, skip_internal=False)
        error_logger.error('preprocess.failed.empty', extra={'cache_key': cache_key})
        return

    # Keep an untouched reference for the downstream submission helpers.
    original_data = data
    data = CanonicalKeyDict(data)
    project_id = data['project']

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    project = Project.objects.get_from_cache(id=project_id)

    if not should_process(data):
        # No preprocessing required; persist directly.
        submit_save_event(project, cache_key, event_id, start_time, original_data)
        return

    from_reprocessing = process_task is process_event_from_reprocessing
    submit_process(project, from_reprocessing, cache_key, event_id, start_time, original_data)
开发者ID:yaoqi,项目名称:sentry,代码行数:24,代码来源:store.py
示例20: run_symbolicator
def run_symbolicator(project, request_id_cache_key, create_task=create_payload_task, **kwargs):
    """Create or resume a symbolication task against the symbolicator
    service and poll it until completion, raising ``RetrySymbolication``
    when the caller should retry later.  The in-flight task id is kept in
    the default cache under ``request_id_cache_key``.
    """
    symbolicator_options = options.get('symbolicator.options')
    base_url = symbolicator_options['url'].rstrip('/')
    assert base_url

    project_id = six.text_type(project.id)

    # Resume a previously created task if we still have its id cached.
    request_id = default_cache.get(request_id_cache_key)
    sess = Session()

    # Will be set lazily when a symbolicator request is fired
    sources = None

    attempts = 0
    wait = 0.5

    with sess:
        while True:
            try:
                if request_id:
                    # Poll the existing task rather than creating a new one.
                    rv = _poll_symbolication_task(
                        sess=sess, base_url=base_url,
                        request_id=request_id, project_id=project_id,
                    )
                else:
                    if sources is None:
                        sources = get_sources_for_project(project)

                    rv = create_task(
                        sess=sess, base_url=base_url,
                        project_id=project_id,
                        sources=sources,
                        **kwargs
                    )

                metrics.incr('events.symbolicator.status_code', tags={
                    'status_code': rv.status_code,
                    'project_id': project_id,
                })

                # A 404 on a poll means symbolicator no longer knows the
                # task; forget the cached id and start a fresh task.
                if rv.status_code == 404 and request_id:
                    default_cache.delete(request_id_cache_key)
                    request_id = None
                    continue
                elif rv.status_code == 503:
                    raise RetrySymbolication(retry_after=10)

                rv.raise_for_status()
                # NOTE: this local deliberately shadows any module-level
                # `json` import within this function.
                json = rv.json()
                metrics.incr('events.symbolicator.response', tags={
                    'response': json['status'],
                    'project_id': project_id,
                })

                if json['status'] == 'pending':
                    # Remember the task id so a later attempt can resume
                    # polling instead of re-submitting the work.
                    default_cache.set(
                        request_id_cache_key,
                        json['request_id'],
                        REQUEST_CACHE_TIMEOUT)
                    raise RetrySymbolication(retry_after=json['retry_after'])
                elif json['status'] == 'completed':
                    default_cache.delete(request_id_cache_key)
                    return rv.json()
                else:
                    logger.error("Unexpected status: %s", json['status'])
                    default_cache.delete(request_id_cache_key)
                    return

            except (IOError, RequestException):
                # Transient connection failure: retry with exponential
                # backoff until MAX_ATTEMPTS is exceeded.
                attempts += 1
                if attempts > MAX_ATTEMPTS:
                    logger.error('Failed to contact symbolicator', exc_info=True)
                    default_cache.delete(request_id_cache_key)
                    return

                time.sleep(wait)
                wait *= 2.0
开发者ID:getsentry,项目名称:sentry,代码行数:78,代码来源:symbolicator.py
注:本文中的sentry.cache.default_cache.get函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论