
Python metrics.timing Function Code Examples


This article collects typical usage examples of the sentry.utils.metrics.timing function in Python. If you are wondering what metrics.timing does, or how to call it in practice, the curated examples below should help.

Twenty code examples of the timing function are shown below, ordered by popularity by default.
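Judging from the examples collected below, the calls share one shape: timing(key, value, instance=None, tags=None), where value may be a duration in seconds or milliseconds, but just as often an arbitrary numeric distribution such as a byte count or tag count. A minimal sketch of that call shape (the key, instance, and tag values here are illustrative, taken from the examples that follow):

from time import time

from sentry.utils import metrics

start = time()
# ... some work ...
metrics.timing(
    'events.time-to-process',   # metric key
    time() - start,             # value: a duration, byte count, etc.
    instance='python',          # optional instance qualifier
    tags={'project_id': 42},    # optional key/value tags
)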

Example 1: __call__

    def __call__(self, function):
        start = self.clock.time()
        try:
            for i in itertools.count(1):
                try:
                    return function()
                except self.exceptions as error:
                    delay = self.delay(i)
                    now = self.clock.time()
                    if (now + delay) > (start + self.timeout):
                        raise RetryException(
                            'Could not successfully execute %r within %.3f seconds (%s attempts.)' %
                            (function, now - start, i),
                            error,
                        )
                    else:
                        logger.debug(
                            'Failed to execute %r due to %r on attempt #%s, retrying in %s seconds...',
                            function,
                            error,
                            i,
                            delay,
                        )
                        self.clock.sleep(delay)
        finally:
            if self.metric_instance:
                metrics.timing(
                    'timedretrypolicy.duration',
                    self.clock.time() - start,
                    instance=self.metric_instance,
                    tags=self.metric_tags,
                )
Author: Kayle009, Project: sentry, Lines: 32, Source: retries.py
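The pattern worth extracting from Example 1 is the finally block: the duration is reported exactly once, whether the call eventually succeeds, exhausts its timeout, or raises something unexpected. Reduced to a standalone helper (timed_call is an illustrative name, not part of Sentry's API):

import time

from sentry.utils import metrics


def timed_call(function, metric_instance=None, metric_tags=None):
    # Report how long `function` took, whether it returned or raised,
    # using the same finally-block pattern as Example 1.
    start = time.time()
    try:
        return function()
    finally:
        if metric_instance:
            metrics.timing(
                'timedretrypolicy.duration',
                time.time() - start,
                instance=metric_instance,
                tags=metric_tags,
            )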


Example 2: process_pending

    def process_pending(self):
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(self.pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not client.set(lock_key, '1', nx=True, ex=60):
            return

        try:
            keycount = 0
            with self.cluster.all() as conn:
                results = conn.zrange(self.pending_key, 0, -1)

            with self.cluster.all() as conn:
                for host_id, keys in six.iteritems(results.value):
                    if not keys:
                        continue
                    keycount += len(keys)
                    for key in keys:
                        process_incr.apply_async(kwargs={
                            'key': key,
                        })
                    conn.target([host_id]).zrem(self.pending_key, *keys)
            metrics.timing('buffer.pending-size', keycount)
        finally:
            client.delete(lock_key)
Author: faulkner, Project: sentry, Lines: 25, Source: redis.py
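The client.set(lock_key, '1', nx=True, ex=60) line in this example (and in Examples 5 and 16) is the standard Redis SET NX EX locking idiom: the key is written only if it does not already exist, and it expires after 60 seconds so a crashed worker cannot hold the lock forever. The idiom in isolation, with plain redis-py and an illustrative key name:

import redis

client = redis.Redis()
lock_key = 'lock:process-pending'  # illustrative key name

# SET key value NX EX 60: only succeeds if the key is absent,
# and the key expires automatically after 60 seconds.
if client.set(lock_key, '1', nx=True, ex=60):
    try:
        pass  # do the periodic work exactly once across all workers
    finally:
        client.delete(lock_key)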


Example 3: index_event_tags

def index_event_tags(organization_id, project_id, event_id, tags,
                     group_id, environment_id, date_added=None, **kwargs):
    from sentry import tagstore

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    create_event_tags_kwargs = {}
    if date_added is not None:
        create_event_tags_kwargs['date_added'] = date_added

    metrics.timing(
        'tagstore.tags_per_event',
        len(tags),
        tags={
            'organization_id': organization_id,
        }
    )

    tagstore.create_event_tags(
        project_id=project_id,
        group_id=group_id,
        environment_id=environment_id,
        event_id=event_id,
        tags=tags,
        **create_event_tags_kwargs
    )
Author: yaoqi, Project: sentry, Lines: 27, Source: post_process.py


Example 4: putfile

    def putfile(self, fileobj, blob_size=DEFAULT_BLOB_SIZE, commit=True):
        """
        Save a fileobj into a number of chunks.

        Returns a list of `FileBlobIndex` items.

        >>> indexes = file.putfile(fileobj)
        """
        results = []
        offset = 0
        checksum = sha1(b'')

        while True:
            contents = fileobj.read(blob_size)
            if not contents:
                break
            checksum.update(contents)

            blob_fileobj = ContentFile(contents)
            blob = FileBlob.from_file(blob_fileobj)

            results.append(FileBlobIndex.objects.create(
                file=self,
                blob=blob,
                offset=offset,
            ))
            offset += blob.size
        self.size = offset
        self.checksum = checksum.hexdigest()
        metrics.timing('filestore.file-size', offset)
        if commit:
            self.save()
        return results
Author: alexandrul, Project: sentry, Lines: 33, Source: file.py
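putfile never holds the whole file in memory: it reads blob_size bytes at a time, feeds each chunk into a running SHA-1, and advances a byte offset that becomes the final file size reported to metrics.timing. That bookkeeping, separated out as a stdlib-only sketch (the 1 MB default is illustrative; DEFAULT_BLOB_SIZE is not shown in this article):

from hashlib import sha1


def checksum_and_size(fileobj, blob_size=1024 * 1024):
    # Stream fixed-size chunks, maintaining a running SHA-1 and a
    # byte offset -- the same bookkeeping as putfile() above.
    checksum = sha1(b'')
    offset = 0
    while True:
        contents = fileobj.read(blob_size)
        if not contents:
            break
        checksum.update(contents)
        offset += len(contents)
    return checksum.hexdigest(), offset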


Example 5: process_pending

    def process_pending(self):
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(self.pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not client.set(lock_key, '1', nx=True, ex=60):
            return

        try:
            for host_id in self.cluster.hosts.iterkeys():
                conn = self.cluster.get_local_client(host_id)
                keys = conn.zrange(self.pending_key, 0, -1)
                if not keys:
                    continue
                keycount = 0
                for key in keys:
                    keycount += 1
                    process_incr.apply_async(kwargs={
                        'key': key,
                    })
                pipe = conn.pipeline()
                pipe.zrem(self.pending_key, *keys)
                pipe.execute()
                metrics.timing('buffer.pending-size', keycount)
        finally:
            client.delete(lock_key)
Author: daevaorn, Project: sentry, Lines: 25, Source: redis.py


Example 6: putfile

    def putfile(self, fileobj, blob_size=DEFAULT_BLOB_SIZE):
        """
        Save a fileobj into a number of chunks.

        Returns a list of `FileBlobIndex` items.

        >>> indexes = file.putfile(fileobj)
        """
        results = []
        offset = 0
        while True:
            contents = fileobj.read(blob_size)
            if not contents:
                break

            blob_fileobj = ContentFile(contents)
            blob = FileBlob.from_file(blob_fileobj)

            results.append(
                FileBlobIndex.objects.create(
                    file=self,
                    blob=blob,
                    offset=offset,
                )
            )
            offset += blob.size

        metrics.timing('filestore.file-size', offset)
        return results
Author: jasonbeverage, Project: sentry, Lines: 29, Source: file.py


Example 7: save_event

def save_event(cache_key=None, data=None, start_time=None, event_id=None, **kwargs):
    """
    Saves an event to the database.
    """
    from sentry.event_manager import EventManager

    if cache_key:
        data = default_cache.get(cache_key)

    if event_id is None and data is not None:
        event_id = data['event_id']

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'post'})
        return

    project = data.pop('project')

    delete_raw_event(project, event_id)

    Raven.tags_context({
        'project': project,
    })

    try:
        manager = EventManager(data)
        manager.save(project)
    finally:
        if cache_key:
            default_cache.delete(cache_key)
        if start_time:
            metrics.timing('events.time-to-process', time() - start_time,
                           instance=data['platform'])
Author: rlugojr, Project: sentry, Lines: 33, Source: store.py
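Note what 'events.time-to-process' measures here: start_time is captured when the event first enters the pipeline and is carried through the task queue as an argument, so the timing call in the finally block covers end-to-end latency including queue wait, not just this task's own runtime. The essential shape, with illustrative function names:

from time import time

from sentry.utils import metrics


def enqueue_event(data):
    # Stamp the event with wall-clock time as it enters the pipeline...
    process_event_sketch(data=data, start_time=time())


def process_event_sketch(data=None, start_time=None):
    try:
        pass  # ... normalize and save the event ...
    finally:
        if start_time:
            # ...so this covers queue wait plus processing time.
            metrics.timing('events.time-to-process', time() - start_time,
                           instance=data['platform'])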


Example 8: try_repeated

def try_repeated(func):
    """
    Runs a function a few times ignoring errors we see from GCS
    due to what appears to be network issues.  This is a temporary workaround
    until we can find the root cause.
    """
    if hasattr(func, '__name__'):
        func_name = func.__name__
    elif hasattr(func, 'func'):
        # Partials
        func_name = getattr(func.func, '__name__', '__unknown__')
    else:
        func_name = '__unknown__'

    metrics_key = 'filestore.gcs.retry'
    metrics_tags = {'function': func_name}
    idx = 0
    while True:
        try:
            result = func()
            metrics_tags.update({'success': '1'})
            metrics.timing(metrics_key, idx, tags=metrics_tags)
            return result
        except (DataCorruption, TransportError, RefreshError, RequestException, OpenSSLError) as e:
            if idx >= GCS_RETRIES:
                metrics_tags.update({'success': '0', 'exception_class': e.__class__.__name__})
                metrics.timing(metrics_key, idx, tags=metrics_tags)
                raise
        idx += 1
Author: Kayle009, Project: sentry, Lines: 29, Source: gcs.py


Example 9: index_event_tags

def index_event_tags(organization_id, project_id, event_id, tags,
                     group_id, environment_id, date_added=None, **kwargs):
    from sentry import tagstore

    Raven.tags_context({
        'project': project_id,
    })

    create_event_tags_kwargs = {}
    if date_added is not None:
        create_event_tags_kwargs['date_added'] = date_added

    metrics.timing(
        'tagstore.tags_per_event',
        len(tags),
        tags={
            'organization_id': organization_id,
        }
    )

    tagstore.create_event_tags(
        project_id=project_id,
        group_id=group_id,
        environment_id=environment_id,
        event_id=event_id,
        tags=tags,
        **create_event_tags_kwargs
    )
Author: binlee1990, Project: sentry, Lines: 28, Source: post_process.py


Example 10: normalize

    def normalize(self):
        with metrics.timer('events.store.normalize.duration'):
            self._normalize_impl()

        metrics.timing(
            'events.store.normalize.errors',
            len(self._data.get("errors") or ()),
        )
Author: yaoqi, Project: sentry, Lines: 8, Source: event_manager.py
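This example uses both timing APIs from sentry.utils.metrics side by side: metrics.timer is a context manager that times the wrapped block itself, while the explicit metrics.timing call records an arbitrary count, here the number of normalization errors. A sketch of the same pairing (the work inside the block is elided):

from sentry.utils import metrics

errors = []  # illustrative stand-in for self._data.get("errors")

# timer() measures the duration of the wrapped block for you...
with metrics.timer('events.store.normalize.duration'):
    pass  # ... normalization work ...

# ...while timing() records any numeric distribution, here a count.
metrics.timing('events.store.normalize.errors', len(errors or ()))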


Example 11: set

    def set(self, key, attachments, timeout=None):
        key = self.make_key(key)
        for index, attachment in enumerate(attachments):
            compressed = zlib.compress(attachment.data)
            self.inner.set(u'{}:{}'.format(key, index), compressed, timeout, raw=True)

            metrics_tags = {'type': attachment.type}
            metrics.incr('attachments.received', tags=metrics_tags, skip_internal=False)
            metrics.timing('attachments.blob-size.raw', len(attachment.data), tags=metrics_tags)
            metrics.timing('attachments.blob-size.compressed', len(compressed), tags=metrics_tags)

        meta = [attachment.meta() for attachment in attachments]
        self.inner.set(key, meta, timeout, raw=False)
Author: Kayle009, Project: sentry, Lines: 13, Source: base.py
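Recording both the raw and the zlib-compressed size per attachment means the effective compression ratio is directly observable in the metrics backend. The measurement itself is stdlib-only:

import zlib

data = b'attachment payload ' * 1024  # illustrative payload

compressed = zlib.compress(data)
print(len(data))                    # -> attachments.blob-size.raw
print(len(compressed))              # -> attachments.blob-size.compressed
print(len(compressed) / len(data))  # ratio the two metrics let you derive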


Example 12: _record_time

    def _record_time(self, request, status_code):
        if not hasattr(request, "_view_path"):
            return

        metrics.incr(
            "view.response", instance=request._view_path, tags={"method": request.method, "status_code": status_code}
        )

        if not hasattr(request, "_start_time"):
            return

        ms = int((time.time() - request._start_time) * 1000)
        metrics.timing("view.duration", ms, instance=request._view_path, tags={"method": request.method})
Author: AyrtonRicardo, Project: sentry, Lines: 13, Source: stats.py
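time.time() returns float seconds, so this example multiplies the elapsed time by 1000 and truncates to an integer millisecond count before reporting it. The conversion on its own:

import time

start = time.time()
time.sleep(0.05)                        # illustrative request handling
ms = int((time.time() - start) * 1000)  # float seconds -> whole milliseconds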


Example 13: _capture_stats

def _capture_stats(event, is_new):
    # TODO(dcramer): limit platforms to... something?
    group = event.group
    platform = group.platform
    if not platform:
        return
    platform = platform.split('-', 1)[0].split('_', 1)[0]

    if is_new:
        metrics.incr('events.unique')

    metrics.incr('events.processed')
    metrics.incr('events.processed.{platform}'.format(platform=platform))
    metrics.timing('events.size.data', event.size)
Author: binlee1990, Project: sentry, Lines: 14, Source: post_process.py


Example 14: _capture_stats

def _capture_stats(event, is_new):
    group = event.group
    platform = group.platform or group.project.platform
    if not platform:
        return
    platform = PLATFORM_ROOTS.get(platform, platform)
    if platform not in PLATFORM_LIST:
        return

    if is_new:
        metrics.incr("events.unique", 1)

    metrics.incr("events.processed", 1)
    metrics.incr("events.processed.{platform}".format(platform=platform), 1)
    metrics.timing("events.size.data", len(unicode(event.data)))
Author: alfonsolzrg, Project: sentry, Lines: 15, Source: post_process.py


Example 15: process

    def process(self, request, project, key, auth, helper, data, attachments=None, **kwargs):
        metrics.incr('events.total', skip_internal=False)

        if not data:
            track_outcome(project.organization_id, project.id, key.id, Outcome.INVALID, "no_data")
            raise APIError('No JSON data was found')

        remote_addr = request.META['REMOTE_ADDR']

        event_manager = EventManager(
            data,
            project=project,
            key=key,
            auth=auth,
            client_ip=remote_addr,
            user_agent=helper.context.agent,
            version=auth.version,
            content_encoding=request.META.get('HTTP_CONTENT_ENCODING', ''),
        )
        del data

        self.pre_normalize(event_manager, helper)
        event_manager.normalize()

        data = event_manager.get_data()
        dict_data = dict(data)
        data_size = len(json.dumps(dict_data))

        if data_size > 10000000:
            metrics.timing('events.size.rejected', data_size)
            track_outcome(
                project.organization_id,
                project.id,
                key.id,
                Outcome.INVALID,
                'too_large',
                event_id=dict_data.get('event_id')
            )
            raise APIForbidden("Event size exceeded 10MB after normalization.")

        metrics.timing(
            'events.size.data.post_storeendpoint',
            data_size,
            tags={'project_id': project.id}
        )

        return process_event(event_manager, project,
                             key, remote_addr, helper, attachments)
Author: yaoqi, Project: sentry, Lines: 48, Source: api.py
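The size check works on the re-serialized payload: the normalized event is dumped back to JSON, measured, and rejected above 10,000,000 bytes (which the error message rounds to "10MB"); accepted events then have their size reported per project. The check in isolation:

import json

MAX_EVENT_SIZE = 10000000  # threshold used above; the message rounds to "10MB"

dict_data = {'event_id': 'abc123', 'message': 'illustrative payload'}
data_size = len(json.dumps(dict_data))  # size of the serialized event
if data_size > MAX_EVENT_SIZE:
    raise ValueError('Event size exceeded 10MB after normalization.')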


Example 16: process_pending

    def process_pending(self, partition=None):
        if partition is None and self.pending_partitions > 1:
            # If we're using partitions, this one task fans out into
            # N subtasks instead.
            for i in range(self.pending_partitions):
                process_pending.apply_async(kwargs={'partition': i})
            # Explicitly also run over the unpartitioned buffer as well
            # to ease in transition. In practice, this should just be
            # super fast and is fine to do redundantly.

        pending_key = self._make_pending_key(partition)
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not client.set(lock_key, '1', nx=True, ex=60):
            return

        pending_buffer = PendingBuffer(self.incr_batch_size)

        try:
            keycount = 0
            with self.cluster.all() as conn:
                results = conn.zrange(pending_key, 0, -1)

            with self.cluster.all() as conn:
                for host_id, keys in six.iteritems(results.value):
                    if not keys:
                        continue
                    keycount += len(keys)
                    for key in keys:
                        pending_buffer.append(key)
                        if pending_buffer.full():
                            process_incr.apply_async(
                                kwargs={
                                    'batch_keys': pending_buffer.flush(),
                                }
                            )
                    conn.target([host_id]).zrem(pending_key, *keys)

            # queue up remainder of pending keys
            if not pending_buffer.empty():
                process_incr.apply_async(kwargs={
                    'batch_keys': pending_buffer.flush(),
                })

            metrics.timing('buffer.pending-size', keycount)
        finally:
            client.delete(lock_key)
Author: alexandrul, Project: sentry, Lines: 48, Source: redis.py
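PendingBuffer's implementation is not shown in this article, but the interface used above, a batch-size constructor plus append, full, flush, and empty, admits a minimal hypothetical implementation along these lines:

class PendingBuffer(object):
    # Hypothetical minimal implementation matching the interface
    # used in Example 16; the real class lives in Sentry's codebase.
    def __init__(self, size):
        self.size = size
        self.buffer = []

    def append(self, item):
        self.buffer.append(item)

    def full(self):
        return len(self.buffer) >= self.size

    def empty(self):
        return not self.buffer

    def flush(self):
        rv, self.buffer = self.buffer, []
        return rv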


Example 17: _capture_stats

def _capture_stats(event, is_new):
    # TODO(dcramer): limit platforms to... something?
    group = event.group
    platform = group.platform
    if not platform:
        return
    platform = platform.split('-', 1)[0].split('_', 1)[0]
    tags = {
        'platform': platform,
    }

    if is_new:
        metrics.incr('events.unique', tags=tags, skip_internal=False)

    metrics.incr('events.processed', tags=tags, skip_internal=False)
    metrics.incr(u'events.processed.{platform}'.format(platform=platform), skip_internal=False)
    metrics.timing('events.size.data', event.size, tags=tags)
Author: yaoqi, Project: sentry, Lines: 17, Source: post_process.py


Example 18: save_event

def save_event(cache_key=None, data=None, start_time=None, event_id=None, **kwargs):
    """
    Saves an event to the database.
    """
    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import tsdb

    if cache_key:
        data = default_cache.get(cache_key)

    if event_id is None and data is not None:
        event_id = data['event_id']

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'post'})
        return

    project = data.pop('project')

    delete_raw_event(project, event_id, allow_hint_clear=True)

    Raven.tags_context({
        'project': project,
    })

    try:
        manager = EventManager(data)
        manager.save(project)
    except HashDiscarded as exc:
        # TODO(jess): remove this before it goes out to a wider audience
        info_logger.info(
            'discarded.hash', extra={
                'project_id': project,
                'description': exc.message,
            }
        )
        tsdb.incr(tsdb.models.project_total_received_discarded, project, timestamp=start_time)
    finally:
        if cache_key:
            default_cache.delete(cache_key)
        if start_time:
            metrics.timing(
                'events.time-to-process',
                time() - start_time,
                instance=data['platform'])
Author: alshopov, Project: sentry, Lines: 45, Source: store.py


Example 19: from_file

    def from_file(cls, fileobj):
        """
        Retrieve a FileBlob instance for the given file.

        If not already present, this will cause it to be stored.

        >>> blob = FileBlob.from_file(fileobj)
        """
        size = 0
        checksum = sha1('')
        for chunk in fileobj:
            size += len(chunk)
            checksum.update(chunk)
        checksum = checksum.hexdigest()

        lock_key = 'fileblob:upload:{}'.format(checksum)
        # TODO(dcramer): the database here is safe, but if this lock expires
        # and duplicate files are uploaded then we need to prune one
        with Lock(lock_key, timeout=600):
            # test for presence
            try:
                existing = FileBlob.objects.get(checksum=checksum)
            except FileBlob.DoesNotExist:
                pass
            else:
                return existing

            blob = cls(
                size=size,
                checksum=checksum,
                storage=settings.SENTRY_FILESTORE,
                storage_options=settings.SENTRY_FILESTORE_OPTIONS,
            )

            blob.path = cls.generate_unique_path(blob.timestamp)

            storage = blob.get_storage()
            storage.save(blob.path, fileobj)
            blob.save()

        metrics.timing('filestore.blob-size', blob.size)
        return blob
Author: haojiang1, Project: sentry, Lines: 42, Source: file.py


Example 20: from_file

    def from_file(cls, fileobj):
        """
        Retrieve a list of FileBlobIndex instances for the given file.

        If not already present, this will cause it to be stored.

        >>> blobs = FileBlob.from_file(fileobj)
        """
        size = 0

        checksum = sha1(b'')
        for chunk in fileobj:
            size += len(chunk)
            checksum.update(chunk)
        checksum = checksum.hexdigest()

        # TODO(dcramer): the database here is safe, but if this lock expires
        # and duplicate files are uploaded then we need to prune one
        lock = locks.get(u'fileblob:upload:{}'.format(checksum), duration=60 * 10)
        with TimedRetryPolicy(60)(lock.acquire):
            # test for presence
            try:
                existing = FileBlob.objects.get(checksum=checksum)
            except FileBlob.DoesNotExist:
                pass
            else:
                return existing

            blob = cls(
                size=size,
                checksum=checksum,
            )

            blob.path = cls.generate_unique_path(blob.timestamp)

            storage = get_storage()
            storage.save(blob.path, fileobj)
            blob.save()

        metrics.timing('filestore.blob-size', size)
        return blob
Author: alexandrul, Project: sentry, Lines: 41, Source: file.py



Note: The sentry.utils.metrics.timing examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.

