
Python tsdb.incr_multi Function Code Examples


This article collects and summarizes typical usage examples of the Python function sentry.app.tsdb.incr_multi. If you have been wondering how incr_multi is actually used, what its call patterns look like, or where to find real-world examples of it, the hand-picked code samples below should help.



Thirteen code examples of the incr_multi function are shown below, sorted by popularity by default.
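Note that these excerpts date from Python 2-era Sentry (see the generator .next() calls and xrange), so they will not run unmodified on Python 3. Distilled from the examples, incr_multi takes a sequence of (model, key) pairs and increments every listed time-series counter in one batched call; a timestamp and an increment amount may also be passed (Example 9 supplies them positionally). The following minimal sketch of the call pattern is inferred from these examples rather than taken from an authoritative signature; the keyword names timestamp and count and the placeholder IDs are assumptions, and the exact API may differ between Sentry versions:

    # Minimal sketch of the incr_multi call pattern seen in the examples
    # below. The keyword names `timestamp` and `count` are inferred from
    # usage (Example 9 passes them positionally); verify them against the
    # tsdb backend of your Sentry version before relying on this.
    from datetime import datetime

    from sentry.app import tsdb

    project_id = 1  # placeholder IDs, for illustration only
    group_id = 42

    tsdb.incr_multi(
        [
            (tsdb.models.project, project_id),  # one (model, key) pair per counter
            (tsdb.models.group, group_id),
        ],
        timestamp=datetime.utcnow(),  # optional: when the increments occurred
        count=1,                      # optional: amount added to each counter
    )

Passing several counters in one call lets the tsdb backend batch the writes (for example into a single Redis pipeline) rather than issuing one round trip per counter, which is presumably why the examples below build up a key list and call incr_multi once instead of looping over single increments.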

Example 1: send

    def send(self, **kwargs):
        # TODO(dcramer): this should respect rate limits/etc and use the normal
        # pipeline
        from sentry.app import tsdb
        from sentry.coreapi import insert_data_to_database
        from sentry.event_manager import EventManager
        from sentry.models import Project

        try:
            project = Project.objects.get_from_cache(id=settings.SENTRY_PROJECT)
        except Project.DoesNotExist:
            self.error_logger.error('Internal project (id=%s) does not exist',
                                    settings.SENTRY_PROJECT)
            return

        metrics.incr('events.total', 1)

        kwargs['project'] = project.id
        try:
            manager = EventManager(kwargs)
            data = manager.normalize()
            tsdb.incr_multi([
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.organization_total_received, project.organization_id),
            ])
            insert_data_to_database(data)
        except Exception as e:
            if self.raise_send_errors:
                raise
            self.error_logger.error(
                'Unable to record event: %s\nEvent was: %r', e,
                kwargs['message'], exc_info=True)
Developer: Crystalnix, Project: sentry, Lines: 32, Source: raven.py


Example 2: _save_aggregate

    def _save_aggregate(self, event, hashes, release, **kwargs):
        project = event.project

        # attempt to find a matching hash
        all_hashes = self._find_hashes(project, hashes)

        try:
            existing_group_id = (h[0] for h in all_hashes if h[0]).next()
        except StopIteration:
            existing_group_id = None

        # XXX(dcramer): this has the opportunity to create duplicate groups
        # it should be resolved by the hash merging function later but this
        # should be better tested/reviewed
        if existing_group_id is None:
            kwargs["score"] = ScoreClause.calculate(1, kwargs["last_seen"])
            group, group_is_new = Group.objects.create(project=project, **kwargs), True
        else:
            group = Group.objects.get(id=existing_group_id)

            group_is_new = False

        # If all hashes are brand new we treat this event as new
        is_new = False
        new_hashes = [h[1] for h in all_hashes if h[0] is None]
        if new_hashes:
            affected = GroupHash.objects.filter(project=project, hash__in=new_hashes, group__isnull=True).update(
                group=group
            )

            if affected != len(new_hashes):
                self._ensure_hashes_merged(group, new_hashes)
            elif group_is_new and len(new_hashes) == len(all_hashes):
                is_new = True

        # XXX(dcramer): it's important this gets called **before** the aggregate
        # is processed as otherwise values like last_seen will get mutated
        can_sample = should_sample(event.datetime, group.last_seen, group.times_seen)

        if not is_new:
            is_regression = self._process_existing_aggregate(group=group, event=event, data=kwargs, release=release)
        else:
            is_regression = False

        # Determine if we've sampled enough data to store this event
        if is_new or is_regression:
            is_sample = False
        else:
            is_sample = can_sample

        tsdb.incr_multi([(tsdb.models.group, group.id), (tsdb.models.project, project.id)], timestamp=event.datetime)

        return group, is_new, is_regression, is_sample
Developer: rkreddy559, Project: sentry, Lines: 53, Source: event_manager.py


Example 3: add_tags

    def add_tags(self, group, tags):
        from sentry.models import TagValue, GroupTagValue

        project = group.project
        date = group.last_seen

        tsdb_keys = []

        for tag_item in tags:
            if len(tag_item) == 2:
                (key, value), data = tag_item, None
            else:
                key, value, data = tag_item

            if not value:
                continue

            value = six.text_type(value)
            if len(value) > MAX_TAG_VALUE_LENGTH:
                continue

            tsdb_id = u'%s=%s' % (key, value)

            tsdb_keys.extend([
                (tsdb.models.project_tag_value, tsdb_id),
            ])

            buffer.incr(TagValue, {
                'times_seen': 1,
            }, {
                'project': project,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
                'data': data,
            })

            buffer.incr(GroupTagValue, {
                'times_seen': 1,
            }, {
                'group': group,
                'project': project,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
            })

        if tsdb_keys:
            tsdb.incr_multi(tsdb_keys)
Developer: abensrhir, Project: sentry, Lines: 51, Source: manager.py


Example 4: send

    def send(self, **kwargs):
        # TODO(dcramer): this should respect rate limits/etc and use the normal
        # pipeline
        from sentry.app import tsdb
        from sentry.coreapi import ClientApiHelper
        from sentry.event_manager import EventManager
        from sentry.models import Project

        helper = ClientApiHelper(
            agent='raven-python/%s (sentry %s)' % (raven.VERSION, sentry.VERSION),
            project_id=settings.SENTRY_PROJECT,
            version=self.protocol_version,
        )

        try:
            project = Project.objects.get_from_cache(id=settings.SENTRY_PROJECT)
        except DatabaseError:
            self.error_logger.error('Unable to fetch internal project',
                                    exc_info=True)
            return
        except Project.DoesNotExist:
            self.error_logger.error('Internal project (id=%s) does not exist',
                                    settings.SENTRY_PROJECT)
            return
        except Exception:
            self.error_logger.error(
                'Unable to fetch internal project for some unknown reason',
                exc_info=True)
            return

        helper.context.bind_project(project)

        metrics.incr('events.total')

        kwargs['project'] = project.id
        try:
            manager = EventManager(kwargs)
            data = manager.normalize()
            tsdb.incr_multi([
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.organization_total_received, project.organization_id),
            ])
            helper.insert_data_to_database(data)
        except Exception as e:
            if self.raise_send_errors:
                raise
            self.error_logger.error(
                'Unable to record event: %s\nEvent was: %r', e,
                kwargs['message'], exc_info=True)
Developer: AyrtonRicardo, Project: sentry, Lines: 49, Source: raven.py


Example 5: add_tags

    def add_tags(self, group, tags):
        from sentry.models import TagValue, GroupTagValue

        project_id = group.project_id
        date = group.last_seen

        tsdb_keys = []

        for tag_item in tags:
            if len(tag_item) == 2:
                (key, value), data = tag_item, None
            else:
                key, value, data = tag_item

            tsdb_id = u'%s=%s' % (key, value)

            tsdb_keys.extend([
                (tsdb.models.project_tag_value, tsdb_id),
            ])

            buffer.incr(TagValue, {
                'times_seen': 1,
            }, {
                'project_id': project_id,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
                'data': data,
            })

            buffer.incr(GroupTagValue, {
                'times_seen': 1,
            }, {
                'group_id': group.id,
                'project_id': project_id,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
            })

        if tsdb_keys:
            tsdb.incr_multi(tsdb_keys)
Developer: Juraldinio, Project: sentry, Lines: 44, Source: group.py


Example 6: add_tags

    def add_tags(self, group, tags):
        from sentry.models import TagValue, GroupTagValue

        project = group.project
        date = group.last_seen

        tsdb_keys = []

        for tag_item in tags:
            if len(tag_item) == 2:
                (key, value), data = tag_item, None
            else:
                key, value, data = tag_item

            if not value:
                continue

            value = six.text_type(value)
            if len(value) > MAX_TAG_VALUE_LENGTH:
                continue

            tsdb_id = u"%s=%s" % (key, value)

            tsdb_keys.extend([(tsdb.models.project_tag_value, tsdb_id)])

            buffer.incr(
                TagValue,
                {"times_seen": 1},
                {"project": project, "key": key, "value": value},
                {"last_seen": date, "data": data},
            )

            buffer.incr(
                GroupTagValue,
                {"times_seen": 1},
                {"group": group, "project": project, "key": key, "value": value},
                {"last_seen": date},
            )

        if tsdb_keys:
            tsdb.incr_multi(tsdb_keys)
Developer: achun2080, Project: sentry, Lines: 41, Source: manager.py


Example 7: save


#......... (part of the code above is omitted) .........
                    project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info('duplicate.found', extra={'event_id': event.id}, exc_info=True)
            return event

        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )

        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )

        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters, timestamp=event.datetime)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]

        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {
                    group.id: {
                        grouprelease.id: 1,
                    },
                })
            )

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(
            project=project, event_id=event_id,
Developer: mvaled, Project: sentry, Lines: 67, Source: event_manager.py


Example 8: _save_aggregate

    def _save_aggregate(self, event, tags, hashes, **kwargs):
        time_spent = event.time_spent
        project = event.project

        # attempt to find a matching hash
        all_hashes = self._find_hashes(project, hashes)

        try:
            existing_group_id = (h[0] for h in all_hashes if h[0]).next()
        except StopIteration:
            existing_group_id = None

        # XXX(dcramer): this has the opportunity to create duplicate groups
        # it should be resolved by the hash merging function later but this
        # should be better tested/reviewed
        if existing_group_id is None:
            group, group_is_new = Group.objects.get_or_create(
                project=project,
                # TODO(dcramer): remove checksum from Group/Event
                checksum=hashes[0],
                defaults=kwargs,
            )
        else:
            group = Group.objects.get(id=existing_group_id)

            group_is_new = False

        # If all hashes are brand new we treat this event as new
        is_new = False
        new_hashes = [h[1] for h in all_hashes if h[0] is None]
        if new_hashes:
            affected = GroupHash.objects.filter(
                project=project,
                hash__in=new_hashes,
                group__isnull=True,
            ).update(
                group=group,
            )

            if affected != len(new_hashes):
                self._ensure_hashes_merged(group, new_hashes)
            elif group_is_new and len(new_hashes) == len(all_hashes):
                is_new = True

        update_kwargs = {
            'times_seen': 1,
        }
        if time_spent:
            update_kwargs.update({
                'time_spent_total': time_spent,
                'time_spent_count': 1,
            })

        if not is_new:
            is_regression = self._process_existing_aggregate(group, event, kwargs)
        else:
            is_regression = False

            # TODO: this update should actually happen as part of create
            group.update(score=ScoreClause(group))

            # We need to commit because the queue can run too fast and hit
            # an issue with the group not existing before the buffers run
            transaction.commit_unless_managed(using=group._state.db)

        # Determine if we've sampled enough data to store this event
        if is_new:
            is_sample = False
        elif not should_sample(group, event):
            is_sample = False
        else:
            is_sample = True

        # Rounded down to the nearest interval
        safe_execute(Group.objects.add_tags, group, tags)

        tsdb.incr_multi([
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ])

        return group, is_new, is_regression, is_sample
Developer: PostPCEra, Project: sentry, Lines: 82, Source: event_manager.py


Example 9: create_sample_time_series

def create_sample_time_series(event):
    from sentry.app import tsdb
    group = event.group

    now = datetime.utcnow().replace(tzinfo=utc)

    for _ in xrange(60):
        count = randint(1, 10)
        tsdb.incr_multi((
            (tsdb.models.project, group.project.id),
            (tsdb.models.group, group.id),
        ), now, count)
        tsdb.incr_multi((
            (tsdb.models.organization_total_received,
             group.project.organization_id),
            (tsdb.models.project_total_received, group.project.id),
        ), now, int(count * 1.1))
        tsdb.incr_multi((
            (tsdb.models.organization_total_rejected,
             group.project.organization_id),
            (tsdb.models.project_total_rejected, group.project.id),
        ), now, int(count * 0.1))
        now = now - timedelta(seconds=1)

    for _ in xrange(24 * 30):
        count = randint(100, 1000)
        tsdb.incr_multi((
            (tsdb.models.project, group.project.id),
            (tsdb.models.group, group.id),
        ), now, count)
        tsdb.incr_multi((
            (tsdb.models.organization_total_received,
             group.project.organization_id),
            (tsdb.models.project_total_received, group.project.id),
        ), now, int(count * 1.1))
        tsdb.incr_multi((
            (tsdb.models.organization_total_rejected,
             group.project.organization_id),
            (tsdb.models.project_total_rejected, group.project.id),
        ), now, int(count * 0.1))
        now = now - timedelta(hours=1)
Developer: jasonbeverage, Project: sentry, Lines: 41, Source: apidocs.py


Example 10: save


#......... (part of the code above is omitted) .........
                "level": level,
                "last_seen": date,
                "first_seen": date,
                "data": {
                    "last_received": event.data.get("received") or float(event.datetime.strftime("%s")),
                    "type": event_type.key,
                    # we cache the events metadata on the group to ensure its
                    # accessible in the stream
                    "metadata": event_metadata,
                },
            }
        )

        if release:
            release = Release.get_or_create(project=project, version=release, date_added=date)

            group_kwargs["first_release"] = release

        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event, hashes=hashes, release=release, **group_kwargs
        )

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        try:
            with transaction.atomic(using=router.db_for_write(EventMapping)):
                EventMapping.objects.create(project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info("Duplicate EventMapping found for event_id=%s", event_id, exc_info=True)
            return event

        if release:
            grouprelease = GroupRelease.get_or_create(
                group=group, release=release, environment=environment, datetime=date
            )

        tsdb.incr_multi([(tsdb.models.group, group.id), (tsdb.models.project, project.id)], timestamp=event.datetime)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
        ]
        if release:
            frequencies.append((tsdb.models.frequent_releases_by_groups, {group.id: {grouprelease.id: 1}}))

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(project=project, event_id=event_id).update(group=group)

        # save the event unless its been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                self.logger.info("Duplicate Event found for event_id=%s", event_id, exc_info=True)
                return event

            index_event_tags.delay(project_id=project.id, group_id=group.id, event_id=event.id, tags=tags)

        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
                    (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
                ),
                timestamp=event.datetime,
            )

        if is_new and release:
            buffer.incr(Release, {"new_groups": 1}, {"id": release.id})

        safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send(project=project, group=group, sender=Project)

            post_process_group.delay(
                group=group, event=event, is_new=is_new, is_sample=is_sample, is_regression=is_regression
            )
        else:
            self.logger.info("Raw event passed; skipping post process for event_id=%s", event_id)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
Developer: yoachum, Project: sentry, Lines: 101, Source: event_manager.py


Example 11: send

    def send(self, **kwargs):
        # TODO(dcramer): this should respect rate limits/etc and use the normal
        # pipeline

        # Report the issue to an upstream Sentry if active
        # NOTE: we don't want to check self.is_enabled() like normal, since
        # is_enabled behavior is overridden in this class. We explicitly
        # want to check if the remote is active.
        if self.remote.is_active():
            from sentry import options
            # Append some extra tags that are useful for remote reporting
            super_kwargs = copy.deepcopy(kwargs)
            super_kwargs['tags']['install-id'] = options.get('sentry:install-id')
            super(SentryInternalClient, self).send(**super_kwargs)

        if not is_current_event_safe():
            return

        from sentry.app import tsdb
        from sentry.coreapi import ClientApiHelper
        from sentry.event_manager import EventManager
        from sentry.models import Project

        helper = ClientApiHelper(
            agent='raven-python/%s (sentry %s)' % (raven.VERSION, sentry.VERSION),
            project_id=settings.SENTRY_PROJECT,
            version=self.protocol_version,
        )

        try:
            project = Project.objects.get_from_cache(id=settings.SENTRY_PROJECT)
        except DatabaseError:
            self.error_logger.error('Unable to fetch internal project',
                                    exc_info=True)
            return
        except Project.DoesNotExist:
            self.error_logger.error('Internal project (id=%s) does not exist',
                                    settings.SENTRY_PROJECT)
            return
        except Exception:
            self.error_logger.error(
                'Unable to fetch internal project for some unknown reason',
                exc_info=True)
            return

        helper.context.bind_project(project)

        metrics.incr('events.total')

        kwargs['project'] = project.id
        try:
            # This in theory is the right way to do it because validate
            # also normalizes currently, but we just send in data already
            # normalised in the raven client now.
            # data = helper.validate_data(project, kwargs)
            data = kwargs
            manager = EventManager(data)
            data = manager.normalize()
            tsdb.incr_multi([
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.organization_total_received, project.organization_id),
            ])
            helper.insert_data_to_database(data)
        except Exception as e:
            if self.raise_send_errors:
                raise
            message = kwargs.get('message')
            if not message:
                msg_interface = kwargs.get('sentry.interface.Message', {})
                message = msg_interface.get('formatted', msg_interface.get('message', 'unknown error'))
            self.error_logger.error(
                'Unable to record event: %s\nEvent was: %r', e,
                message, exc_info=True)
Developer: Akashguharoy, Project: sentry, Lines: 73, Source: raven.py


Example 12: _save_aggregate

    def _save_aggregate(self, event, tags, hashes, **kwargs):
        time_spent = event.time_spent
        project = event.project

        # attempt to find a matching hash
        all_hashes = self._find_hashes(project, hashes)

        try:
            existing_group_id = (h[0] for h in all_hashes if h[0]).next()
        except StopIteration:
            existing_group_id = None

        # XXX(dcramer): this has the opportunity to create duplicate groups
        # it should be resolved by the hash merging function later but this
        # should be better tested/reviewed
        if existing_group_id is None:
            kwargs['score'] = ScoreClause.calculate(1, kwargs['last_seen'])
            group, group_is_new = Group.objects.get_or_create(
                project=project,
                # TODO(dcramer): remove checksum from Group/Event
                checksum=hashes[0],
                defaults=kwargs,
            )
        else:
            group = Group.objects.get(id=existing_group_id)

            group_is_new = False

        # If all hashes are brand new we treat this event as new
        is_new = False
        new_hashes = [h[1] for h in all_hashes if h[0] is None]
        if new_hashes:
            affected = GroupHash.objects.filter(
                project=project,
                hash__in=new_hashes,
                group__isnull=True,
            ).update(
                group=group,
            )

            if affected != len(new_hashes):
                self._ensure_hashes_merged(group, new_hashes)
            elif group_is_new and len(new_hashes) == len(all_hashes):
                is_new = True

        update_kwargs = {
            'times_seen': 1,
        }
        if time_spent:
            update_kwargs.update({
                'time_spent_total': time_spent,
                'time_spent_count': 1,
            })

        # XXX(dcramer): it's important this gets called **before** the aggregate
        # is processed as otherwise values like last_seen will get mutated
        can_sample = should_sample(event.datetime, group.last_seen, group.times_seen)

        if not is_new:
            is_regression = self._process_existing_aggregate(group, event, kwargs)
        else:
            is_regression = False

        # Determine if we've sampled enough data to store this event
        if is_new or is_regression:
            is_sample = False
        else:
            is_sample = can_sample

        tsdb.incr_multi([
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ])

        return group, is_new, is_regression, is_sample
Developer: Juraldinio, Project: sentry, Lines: 75, Source: event_manager.py


Example 13: _save_aggregate

    def _save_aggregate(self, event, hashes, release, **kwargs):
        project = event.project

        # attempt to find a matching hash
        all_hashes = self._find_hashes(project, hashes)

        try:
            existing_group_id = (h[0] for h in all_hashes if h[0]).next()
        except StopIteration:
            existing_group_id = None

        # XXX(dcramer): this has the opportunity to create duplicate groups
        # it should be resolved by the hash merging function later but this
        # should be better tested/reviewed
        if existing_group_id is None:
            kwargs['score'] = ScoreClause.calculate(1, kwargs['last_seen'])
            with transaction.atomic():
                short_id = project.next_short_id()
                group, group_is_new = Group.objects.create(
                    project=project,
                    short_id=short_id,
                    **kwargs
                ), True
        else:
            group = Group.objects.get(id=existing_group_id)

            group_is_new = False

        # If all hashes are brand new we treat this event as new
        is_new = False
        new_hashes = [h[1] for h in all_hashes if h[0] is None]
        if new_hashes:
            affected = GroupHash.objects.filter(
                project=project,
                hash__in=new_hashes,
                group__isnull=True,
            ).update(
                group=group,
            )

            if affected != len(new_hashes):
                self._ensure_hashes_merged(group, new_hashes)
            elif group_is_new and len(new_hashes) == len(all_hashes):
                is_new = True

        # XXX(dcramer): it's important this gets called **before** the aggregate
        # is processed as otherwise values like last_seen will get mutated
        can_sample = should_sample(
            event.data.get('received') or float(event.datetime.strftime('%s')),
            group.data.get('last_received') or float(group.last_seen.strftime('%s')),
            group.times_seen,
        )

        if not is_new:
            is_regression = self._process_existing_aggregate(
                group=group,
                event=event,
                data=kwargs,
                release=release,
            )
        else:
            is_regression = False

        # Determine if we've sampled enough data to store this event
        if is_new or is_regression:
            is_sample = False
        else:
            is_sample = can_sample

        tsdb.incr_multi([
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ], timestamp=event.datetime)

        frequencies = [
            (tsdb.models.frequent_projects_by_organization, {
                project.organization_id: {
                    project.id: 1,
                },
            }),
            (tsdb.models.frequent_issues_by_project, {
                project.id: {
                    group.id: 1,
                },
            })
        ]
        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_groups, {
                    group.id: {
                        release.id: 1,
                    },
                })
            )

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        return group, is_new, is_regression, is_sample
Developer: Cobbyzhang, Project: sentry, Lines: 98, Source: event_manager.py



Note: the sentry.app.tsdb.incr_multi examples in this article were collected by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by various developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not repost without permission.

