This article collects typical usage examples of the Python statsd.statsd.gauge function. If you are wondering how the gauge function is used in practice, or what it is for, the hand-picked code examples below may help.
The following shows 20 code examples of the gauge function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
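Before the examples, here is a minimal sketch of the basic call, assuming a client imported as `from statsd import statsd` (as the examples below do); the metric name and value are made up for illustration:

from statsd import statsd

# Set a gauge to an absolute value (hypothetical metric name).
statsd.gauge('app.queue.length', 42)

# Some of the examples below also pass extra keyword arguments supported by
# their particular client: delta=True to adjust the gauge relative to its
# current value, or tags=[...] to attach Datadog-style tags.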
Example 1: rssatomfeed_post_save
def rssatomfeed_post_save(instance, **kwargs):
    if not kwargs.get('created', False):
        return

    statsd.gauge('feeds.counts.total', 1, delta=True)
    statsd.gauge('feeds.counts.rssatom', 1, delta=True)
Developer: 1flow, Project: 1flow, Lines: 7, Source: rssatom.py
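The handler above is presumably connected to Django's post_save signal; a minimal wiring sketch follows, where the RssAtomFeed model name and its import location are assumptions, not shown in the snippet:

from django.db.models.signals import post_save

# Hypothetical wiring -- RssAtomFeed and its import path are assumed for illustration.
post_save.connect(rssatomfeed_post_save, sender=RssAtomFeed)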
Example 2: process_exit_surveys
def process_exit_surveys():
    """Exit survey handling.

    * Collect new exit survey results.
    * Save results to our metrics table.
    * Add new emails collected to the exit survey.
    """
    _process_exit_survey_results()

    # Get the email addresses from two days ago and add them to the survey
    # campaign (skip this on stage).
    if settings.STAGE:
        # Only run this on prod, it doesn't need to be running multiple times
        # from different places.
        return

    startdate = date.today() - timedelta(days=2)
    enddate = date.today() - timedelta(days=1)

    for survey in SURVEYS.keys():
        if 'email_collection_survey_id' not in SURVEYS[survey]:
            # Some surveys don't have email collection on the site
            # (the askers survey, for example).
            continue

        emails = get_email_addresses(survey, startdate, enddate)
        for email in emails:
            add_email_to_campaign(survey, email)

        statsd.gauge('survey.{0}'.format(survey), len(emails))
Developer: ChromiumEx, Project: kitsune, Lines: 31, Source: cron.py
Example 3: report_stats
def report_stats(host, topology, toporoot, topic):
    state = urllib2.urlopen(
        "http://{}/api/status?toporoot={}&topic={}".format(
            host, toporoot, topic
        )
    ).read()
    data = json.loads(state)
    amount = 0
    for looplord in data:
        if looplord['amount'] is not None:
            statsd.gauge(
                'razor.kafkamon.topology.partition',
                looplord['amount'],
                tags=[
                    "topic:{}".format(topic),
                    "topology:{}".format(topology),
                    "partition:{}".format(looplord['partition'])
                ]
            )
            amount += looplord['amount']
    print "Got {} for {}".format(amount, topology)
    statsd.gauge(
        'razor.kafkamon.total_delta',
        amount, tags=[
            "topic:{}".format(topic),
            "topology:{}".format(topology)
        ]
    )
Developer: evertrue, Project: capillary, Lines: 32, Source: stats-to-datadog.py
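A hypothetical invocation of report_stats; every argument value below is made up, only the parameter names come from the function above:

# All values here are placeholders for illustration.
report_stats(
    host="kafka-monitor.example.com:8080",
    topology="events-topology",
    toporoot="/consumers/events-topology",
    topic="events",
)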
Example 4: _deliver_submission
def _deliver_submission(self, submission):
    payload = {'xqueue_body': submission.xqueue_body,
               'xqueue_files': submission.s3_urls}

    submission.grader_id = self.worker_url
    submission.push_time = timezone.now()
    start = time.time()
    (grading_success, grader_reply) = _http_post(self.worker_url, json.dumps(payload), settings.GRADING_TIMEOUT)
    statsd.histogram('xqueue.consumer.consumer_callback.grading_time', time.time() - start,
                     tags=['queue:{0}'.format(self.queue_name)])
    job_count = get_queue_length(self.queue_name)
    statsd.gauge('xqueue.consumer.consumer_callback.queue_length', job_count,
                 tags=['queue:{0}'.format(self.queue_name)])

    submission.return_time = timezone.now()

    # TODO: For the time being, a submission in a push interface gets one chance at grading,
    #       with no requeuing logic
    if grading_success:
        submission.grader_reply = grader_reply
        submission.lms_ack = post_grade_to_lms(submission.xqueue_header, grader_reply)
    else:
        log.error("Submission {} to grader {} failure: Reply: {}, ".format(submission.id, self.worker_url, grader_reply))
        submission.num_failures += 1
        submission.lms_ack = post_failure_to_lms(submission.xqueue_header)

    # NOTE: retiring pushed submissions after one shot regardless of grading_success
    submission.retired = True
    submission.save()
Developer: EduPepperPDTesting, Project: xqueue, Lines: 31, Source: consumer.py
Example 5: emit
def emit(self, stat_name, tags, value):
    # Convert the dictionary of tags into an array of "key:value" strings
    string_tags = map(lambda (k, v): (self.dd_tag_string.format(key=k, value=v)), tags.iteritems())
    statsd.gauge(self.dd_metric_string.format(
        scope=self.scope,
        stat=stat_name
    ), value, tags=string_tags)
Developer: digideskio, Project: razor, Lines: 7, Source: stats.py
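The tag-building line uses tuple unpacking in a lambda and dict.iteritems(), both of which are Python 2 only. A rough Python 3 sketch of the same logic, assuming dd_tag_string and dd_metric_string are format strings along the lines of "{key}:{value}" and "{scope}.{stat}":

def emit(self, stat_name, tags, value):
    # Build "key:value" tag strings from the tags dict (Python 3 style).
    string_tags = [
        self.dd_tag_string.format(key=k, value=v) for k, v in tags.items()
    ]
    statsd.gauge(
        self.dd_metric_string.format(scope=self.scope, stat=stat_name),
        value,
        tags=string_tags,
    )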
Example 6: measure_queue_lag
def measure_queue_lag(queued_time):
    """A task that measures the time it was sitting in the queue.

    It saves the data to graphite via statsd.
    """
    lag = datetime.now() - queued_time
    lag = (lag.days * 3600 * 24) + lag.seconds
    statsd.gauge('rabbitmq.lag', max(lag, 0))
Developer: 1234-, Project: kitsune, Lines: 8, Source: tasks.py
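The producer side would record the enqueue time and pass it along; a hypothetical call site follows, assuming the function is registered as a Celery task (the docstring calls it a task, but the decorator is not shown in the snippet):

from datetime import datetime

# Hypothetical producer: pass the time the message was queued.
measure_queue_lag.delay(datetime.now())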
Example 7: post_create_task
def post_create_task(self):
    """ Method meant to be run from a celery task. """

    if not self.slug:
        self.slug = slugify(self.name)
        self.save()

    statsd.gauge('tags.counts.total', 1, delta=True)
Developer: EliotBerriot, Project: 1flow, Lines: 8, Source: tag.py
Example 8: synchronize_statsd_websites_gauges
def synchronize_statsd_websites_gauges(full=False):

    with benchmark('synchronize statsd gauges for WebSite.*'):

        statsd.gauge('websites.counts.total', WebSite._get_collection().count())

        if full:
            duplicates = WebSite.objects(duplicate_of__ne=None).no_cache()
            statsd.gauge('websites.counts.duplicates', duplicates.count())
Developer: EliotBerriot, Project: 1flow, Lines: 9, Source: stats.py
Example 9: synchronize_statsd_authors_gauges
def synchronize_statsd_authors_gauges(full=False):

    with benchmark('synchronize statsd gauges for Author.*'):

        statsd.gauge('authors.counts.total', Author._get_collection().count())

        if full:
            duplicates = Author.objects(duplicate_of__ne=None).no_cache()
            statsd.gauge('authors.counts.duplicates', duplicates.count())
Developer: EliotBerriot, Project: 1flow, Lines: 9, Source: stats.py
Example 10: synchronize_mongodb_statsd_tags_gauges
def synchronize_mongodb_statsd_tags_gauges(full=False):
    """ Synchronize all tag-related gauges on our statsd server. """

    with benchmark('synchronize statsd gauges for Tag.*'):

        statsd.gauge('mongo.tags.counts.total', Tag._get_collection().count())

        if full:
            duplicates = Tag.objects(duplicate_of__ne=None).no_cache()
            statsd.gauge('mongo.tags.counts.duplicates', duplicates.count())
Developer: 1flow, Project: 1flow, Lines: 10, Source: stats.py
Example 11: register_duplicate
def register_duplicate(self, duplicate, force=False):
    """ TODO. """

    # Be sure this helper method is called
    # on a document that has the attribute.
    assert hasattr(duplicate, 'duplicate_of')

    _cls_name_ = self.__class__.__name__
    _cls_name_lower_ = _cls_name_.lower()
    # TODO: get this from a class attribute?
    # I'm not sure for MongoEngine models.
    lower_plural = _cls_name_lower_ + u's'

    if duplicate.duplicate_of:
        if duplicate.duplicate_of != self:
            # NOTE: for Article, this situation can't happen IRL
            # (demonstrated with Willian 20130718).
            #
            # Any "second" duplicate *will* resolve to the master via the
            # redirect chain. It will *never* resolve to an intermediate
            # URL in the chain.
            #
            # For other objects it should happen too, because the
            # `get_or_create()` methods should return the `.duplicate_of`
            # attribute if it is not None.
            LOGGER.warning(u'%s %s is already a duplicate of '
                           u'another instance, not %s. Aborting.',
                           _cls_name_, duplicate, duplicate.duplicate_of)
            return

    LOGGER.info(u'Registering %s %s as duplicate of %s…',
                _cls_name_, duplicate, self)

    # Register the duplication immediately, for other
    # background operations to use ourselves as value.
    duplicate.duplicate_of = self
    duplicate.save()

    statsd.gauge('mongo.%s.counts.duplicates' % lower_plural, 1, delta=True)

    try:
        # Having tasks not as methods because of Celery bugs forces
        # us to do strange things. We have to "guess" and lookup the
        # task name in the current module. OK, not *that* big deal.
        self.nonrel_globals[
            _cls_name_lower_ + '_replace_duplicate_everywhere_task'].delay(
            self.id, duplicate.id)

    except KeyError:
        LOGGER.warning(u'Object %s has no `replace_duplicate_everywhere()` '
                       u'method, or the method has not been registered as '
                       u'a task with `register_task_method()`.', self)
Developer: 1flow, Project: 1flow, Lines: 53, Source: common.py
Example 12: g729_metrics
def g729_metrics(self):
    if (self.g729):
        g729_count = yield self.api('g729_count')
        g729_count = int(g729_count)
        statsd.gauge('freeswitch.g729.total', g729_count)
        g729_counts = yield self.api('g729_used')
        g729_enc, g729_dec = [int(e) for e in g729_counts.split(":")]
        statsd.gauge('freeswitch.g729.used.encoder', g729_enc)
        statsd.gauge('freeswitch.g729.used.decoder', g729_dec)
        if (g729_enc > g729_dec):
            statsd.gauge('freeswitch.g729.utilization', g729_enc / g729_count)
        else:
            statsd.gauge('freeswitch.g729.utilization', g729_dec / g729_count)
Developer: areski, Project: FreeSwitch-DataDog-Metrics, Lines: 13, Source: fsmetrics.py
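Note that g729_enc, g729_dec, and g729_count are all ints, so under Python 2 the utilization division truncates to 0 or 1. A hedged variant that reports a float ratio and guards against a zero channel count (which the original does not) could look like this:

# Sketch only: report utilization as a float in [0.0, 1.0].
if g729_count:
    used = max(g729_enc, g729_dec)
    statsd.gauge('freeswitch.g729.utilization', float(used) / g729_count)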
Example 13: post_create_task
def post_create_task(self):
    """ Method meant to be run from a celery task. """

    if not self.slug:
        if self.name is None:
            proto, host_and_port, remaining = WebSite.split_url(self.url)
            self.name = host_and_port.replace(u'_', u' ').title()

        self.slug = slugify(self.name)
        self.save()

    statsd.gauge('mongo.websites.counts.total', 1, delta=True)
Developer: 1flow, Project: 1flow, Lines: 13, Source: website.py
Example 14: mark_tweet_deleted
def mark_tweet_deleted(tweet_id):
    try:
        tweet = Tweet.objects.get(tweet_id=tweet_id)
    except:
        LOGGER.warning(u'Unknown tweet to delete: %s', tweet_id)
    else:
        tweet.is_deleted = True
        tweet.save()

        statsd.gauge('tweets.counts.deleted', 1, delta=True)

        LOGGER.info(u'Tweet %s marked as deleted.', tweet)
Developer: 1flow, Project: 1flow, Lines: 14, Source: tweet.py
Example 15: _execute
def _execute():
    statsd.connect('localhost', 8125)
    result = _netfilter()
    for chain, entries in result.iteritems():
        for number, item in entries.iteritems():
            key, bytes = _generate_key(chain, item)
            if key is None or key == '':
                continue
            _println('[info]: send gauge=[', key, '], value=[', str(bytes), ']')
            # statsd.histogram(key, bytes)
            statsd.gauge(key, bytes)
Developer: mass10, Project: datadog.note, Lines: 17, Source: test.py
Example 16: report
def report(self, metric_type, value, **kwargs):
    if not statsd_installed:
        return

    if not self.stats_connected:
        statsd.connect(self.host, self.port)
        self.stats_connected = True

    key = "spike.test"
    tags = ["%s:%s" % (k, v) for k, v in kwargs.iteritems()]

    if "postfix" in kwargs:
        key = ".".join([key, kwargs["postfix"]])
        del kwargs["postfix"]

    if metric_type == "counter":
        statsd.increment(key, value, tags=tags)
    elif metric_type == "gauge":
        statsd.gauge(key, value, tags=tags)
Developer: Unix4ever, Project: spike, Lines: 17, Source: executor.py
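A hypothetical use of report(): the metric key is fixed to "spike.test" inside the method, keyword arguments become tags, and a "postfix" keyword extends the key. The reporter instance and argument values below are made up for illustration:

# Hypothetical calls -- `reporter` is an instance of the class defining report().
reporter.report("gauge", 12.5, scenario="login", postfix="latency")
reporter.report("counter", 1, scenario="login")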
Example 17: survey_recent_askers
def survey_recent_askers():
    """Add question askers to a surveygizmo campaign to get surveyed."""
    if settings.STAGE:
        # Only run this on prod, it doesn't need to be running multiple times
        # from different places.
        return

    # We get the email addresses of all users that asked a question 2 days
    # ago. Then, all we have to do is send the email address to surveygizmo
    # and it does the rest.
    two_days_ago = date.today() - timedelta(days=2)
    yesterday = date.today() - timedelta(days=1)

    emails = (
        Question.objects
        .filter(created__gte=two_days_ago, created__lt=yesterday)
        .values_list('creator__email', flat=True))

    for email in emails:
        add_email_to_campaign('askers', email)

    statsd.gauge('survey.askers', len(emails))
Developer: ChromiumEx, Project: kitsune, Lines: 21, Source: cron.py
Example 18: on_shutter
def on_shutter(self, state):
    if not state.event_count:
        # No new events since last snapshot.
        return

    statsd.incr("celery.tasks.total", state.task_count)
    statsd.gauge("celery.workers.total", len(state.workers))
    statsd.gauge("celery.workers.alive.count",
                 sum(1 for _, worker in state.workers.items() if worker.alive))
    statsd.gauge("celery.workers.dead.count",
                 sum(1 for _, worker in state.workers.items() if not worker.alive))
    map(self.handle_task, state.tasks.items())
Developer: arnaudlimbourg, Project: instax, Lines: 11, Source: instax.py
Example 19: update_stats
def update_stats(provider):
    state_names = {
        vmdatabase.BUILDING: 'building',
        vmdatabase.READY: 'ready',
        vmdatabase.USED: 'used',
        vmdatabase.ERROR: 'error',
        vmdatabase.HOLD: 'hold',
        vmdatabase.DELETE: 'delete',
    }

    for base_image in provider.base_images:
        states = {
            vmdatabase.BUILDING: 0,
            vmdatabase.READY: 0,
            vmdatabase.USED: 0,
            vmdatabase.ERROR: 0,
            vmdatabase.HOLD: 0,
            vmdatabase.DELETE: 0,
        }

        for machine in base_image.machines:
            if machine.state not in states:
                continue
            states[machine.state] += 1

        if statsd:
            for state_id, count in states.items():
                key = 'devstack.pool.%s.%s.%s' % (
                    provider.name,
                    base_image.name,
                    state_names[state_id])
                statsd.gauge(key, count)

            key = 'devstack.pool.%s.%s.min_ready' % (
                provider.name,
                base_image.name)
            statsd.gauge(key, base_image.min_ready)

    if statsd:
        key = 'devstack.pool.%s.max_servers' % provider.name
        statsd.gauge(key, provider.max_servers)
Developer: rainsome-org1, Project: devstack-gate, Lines: 39, Source: utils.py
Example 20: updateStats
def updateStats(self, session, provider_name):
    if not statsd:
        return
    # This may be called outside of the main thread.
    provider = self.config.providers[provider_name]

    states = {}

    for target in self.config.targets.values():
        for image in target.images.values():
            image_key = 'nodepool.target.%s.%s' % (
                target.name, image.name)
            key = '%s.min_ready' % image_key
            statsd.gauge(key, image.min_ready)
            for provider in image.providers.values():
                provider_key = '%s.%s' % (
                    image_key, provider.name)
                for state in nodedb.STATE_NAMES.values():
                    key = '%s.%s' % (provider_key, state)
                    states[key] = 0

    for node in session.getNodes():
        if node.state not in nodedb.STATE_NAMES:
            continue
        key = 'nodepool.target.%s.%s.%s.%s' % (
            node.target_name, node.image_name,
            node.provider_name, nodedb.STATE_NAMES[node.state])
        if key not in states:
            states[key] = 0
        states[key] += 1

    for key, count in states.items():
        statsd.gauge(key, count)

    for provider in self.config.providers.values():
        key = 'nodepool.provider.%s.max_servers' % provider.name
        statsd.gauge(key, provider.max_servers)
Developer: skolekonov, Project: nodepool, Lines: 37, Source: nodepool.py
Note: the statsd.statsd.gauge examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please refer to the corresponding project's License before distributing or using the code. Do not reproduce without permission.