Python whisper.update_many Function Code Examples


This article collects typical usage examples of the Python whisper.update_many function. If you are trying to work out what update_many does, how to call it, or how it is used in real code, the hand-picked examples below should help.



The following presents 20 code examples of the update_many function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
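Before the collected examples, here is a minimal, self-contained usage sketch of whisper.update_many. The file name, retention, and sample values are illustrative only and do not come from any of the projects below:

import random
import time

import whisper

# Create a database with a single archive: 1-second resolution, 60 points (illustrative retention).
path = "example.wsp"
whisper.create(path, [(1, 60)])

# update_many takes a list of (timestamp, value) tuples and writes them in one call,
# which is far cheaper than calling whisper.update() once per point.
now = int(time.time())
points = [(now - i, random.random() * 10) for i in range(10)]
whisper.update_many(path, points)

# Read the data back: fetch returns ((fromTime, untilTime, step), values).
time_info, values = whisper.fetch(path, now - 10)
print(time_info, values)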

Example 1: test_update_many_excess

    def test_update_many_excess(self):
        # given an empty db
        wsp = "test_update_many_excess.wsp"
        self.addCleanup(self._remove, wsp)
        archive_len = 3
        archive_step = 1
        whisper.create(wsp, [(archive_step, archive_len)])

        # given more points than the db can hold
        excess_len = 1
        num_input_points = archive_len + excess_len
        test_now = int(time.time())
        input_start = test_now - num_input_points + archive_step
        input_points = [(input_start + i, random.random() * 10)
                        for i in range(num_input_points)]

        # when the db is updated with too many points
        whisper.update_many(wsp, input_points, now=test_now)

        # then only the most recent input points (those at the end) were written
        actual_time_info = whisper.fetch(wsp, 0, now=test_now)[0]
        self.assertEqual(actual_time_info,
                         (input_points[-archive_len][0],
                          input_points[-1][0] + archive_step,  # untilInterval = newest + step
                          archive_step))
Developer: deniszh | Project: whisper | Lines: 25 | Source: test_whisper.py


Example 2: test_single_metric

    def test_single_metric(self):
        xfilesfactor = 0.5
        aggregation_method = "last"
        # These retentions are such that every other point is present in both
        # archives. The test validates that duplicate points get inserted only once.
        retentions = [(1, 10), (2, 10)]
        high_precision_duration = retentions[0][0] * retentions[0][1]
        low_precision_duration = retentions[1][0] * retentions[1][1]
        now = int(time.time())
        time_from, time_to = now - low_precision_duration, now
        points = [(float(t), float(now-t)) for t in xrange(time_from, time_to)]
        metric = "test_metric"
        metric_path = os_path.join(self.tempdir, metric + ".wsp")
        whisper.create(metric_path, retentions, xfilesfactor, aggregation_method)
        whisper.update_many(metric_path, points)

        self._call_main()

        metric = self.accessor.get_metric(metric)
        self.assertTrue(metric)
        # the variable was rebound to the Metric object above, so compare against the original name
        self.assertEqual(metric.name, "test_metric")
        self.assertEqual(metric.aggregator.carbon_name, aggregation_method)
        self.assertEqual(metric.carbon_xfilesfactor, xfilesfactor)
        self.assertEqual(metric.retention.as_string, "10*1s:10*2s")

        points_again = list(self.accessor.fetch_points(
            metric, time_from, time_to, metric.retention[0]))
        self.assertEqual(points[-high_precision_duration:], points_again)
Developer: natbraun | Project: biggraphite | Lines: 28 | Source: test_import_whisper.py


Example 3: _createdb

 def _createdb(self, wsp, schema=[(1, 20)], data=None):
     whisper.create(wsp, schema)
     if data is None:
         tn = time.time() - 20
         data = []
         for i in range(20):
             data.append((tn + 1 + i, random.random() * 10))
     whisper.update_many(wsp, data)
     return data
Developer: Asana | Project: carbonate | Lines: 9 | Source: test_fill.py


Example 4: writeWhisperFile

def writeWhisperFile(dbFilePath, datapoints):
  try:
    whisper.update_many(dbFilePath, datapoints)
  except:
    log.msg("Error writing to %s" % (dbFilePath))
    log.err()
    instrumentation.increment('errors')
    return False
  return True
Developer: penpen | Project: carbon | Lines: 9 | Source: writer.py


Example 5: write_series

 def write_series(self, series):
     file_name = os.path.join(
         WHISPER_DIR,
         '{0}.wsp'.format(series.pathExpression.replace('.', os.sep)))
     os.makedirs(os.path.dirname(file_name))
     whisper.create(file_name, [(1, 180)])
     data = []
     for index, value in enumerate(series):
         if value is None:
             continue
         data.append((series.start + index * series.step, value))
     whisper.update_many(file_name, data)
Developer: torkelo | Project: graphite-api | Lines: 12 | Source: __init__.py


Example 6: _update

    def _update(self, datapoints):
        """
        This method stores the datapoints in the current database.

            :datapoints: is a list of tuples of epoch timestamp and value, e.g.
                 [(1368977629, 10)]
        """
        if len(datapoints) == 1:
            timestamp, value = datapoints[0]
            whisper.update(self.path, value, timestamp)
        else:
            whisper.update_many(self.path, datapoints)
Developer: 2mind | Project: salmon | Lines: 12 | Source: graph.py


Example 7: update

def update(path, datapoints):
    nrOfPoints = len(datapoints)
    if nrOfPoints == 1:
        (timestamp, value) = datapoints[0]
        timestamp = timegm(timestamp.timetuple())
        whisper.update(path, value, timestamp)
    elif nrOfPoints > 1:
        whisper.update_many(path + '.wsp', [
            (timegm(t.timetuple()), v) for (t,v) in datapoints])
    else:
        raise Exception("No Datapoint given")

    return True
Developer: dergraf | Project: whisbert | Lines: 13 | Source: whisbert.py


Example 8: load_data

def load_data(f_name, dest_file):
    with open(f_name, 'r') as fp:
        start = False
        for line in fp:
            datapoints = []
            if start == False:
                if line.find("Archive ") == 0 and line.find(" data:") > 0:
                    start = True
            else:
                datas = line.split(" ")
                if len(datas) == 3 and datas[0] != 'Archive':
                    # convert timestamp and value to numbers before handing them to whisper
                    datapoints.append((int(datas[1][:-1]), float(datas[2])))
                    #print datapoints
                    whisper.update_many(dest_file, datapoints)
Developer: lirudy | Project: graphite_dump_load | Lines: 14 | Source: whisper-load.py


Example 9: handle

 def handle(self):
     points = 0
     for metric in self.redis.smembers(METRICS):
         values = self.redis.zrange(metric, 0, -1)
         points += len(values)
         f = target_to_path(self.path, metric)
         d = os.path.dirname(f)
         if d not in self.dirs:
             if not os.path.isdir(d):
                 os.makedirs(d)
             self.dirs.add(d)
         if not os.path.exists(f):
             whisper.create(f, [(10, 1000)])  # [FIXME] hardcoded values
         whisper.update_many(f, [struct.unpack('!ff', a) for a in values])
         if len(values):
             self.redis.zrem(metric, *values)
     self.metric(METRIC_POINTS, points)
Developer: bearstech | Project: whirlwind-tornado | Lines: 17 | Source: persist.py


Example 10: fill

def fill(src, dst, tstart, tstop):
    # fetch range start-stop from src, taking values from the highest
    # precision archive, thus optionally requiring multiple fetch + merges
    srcHeader = info(src)

    srcArchives = srcHeader["archives"]
    srcArchives.sort(key=itemgetter("retention"))

    # find oldest point in time, stored by both files
    srcTime = int(time.time()) - srcHeader["maxRetention"]

    if tstart < srcTime and tstop < srcTime:
        return

    # we want to retain as much precision as we can, hence we do backwards
    # walk in time

    # skip forward at max 'step' points at a time
    for archive in srcArchives:
        # skip over archives that don't have any data points
        rtime = time.time() - archive["retention"]
        if tstop <= rtime:
            continue

        untilTime = tstop
        fromTime = rtime if rtime > tstart else tstart

        (timeInfo, values) = fetch(src, fromTime, untilTime)
        (start, end, archive_step) = timeInfo
        pointsToWrite = list(
            itertools.ifilter(
                lambda points: points[1] is not None, itertools.izip(xrange(start, end, archive_step), values)
            )
        )
        # order points by timestamp, newest first
        pointsToWrite.sort(key=lambda p: p[0], reverse=True)
        update_many(dst, pointsToWrite)

        tstop = fromTime

        # can stop when there's nothing to fetch any more
        if tstart == tstop:
            return
Developer: szibis | Project: whisper | Lines: 43 | Source: whisper-fill.py


Example 11: _update

    def _update(self, wsp=None, schema=None, sparse=False, useFallocate=False):
        wsp = wsp or self.filename
        schema = schema or [(1, 20)]

        num_data_points = 20

        # create sample data
        whisper.create(wsp, schema, sparse=sparse, useFallocate=useFallocate)
        tn = time.time() - num_data_points
        data = []
        for i in range(num_data_points):
            data.append((tn + 1 + i, random.random() * 10))

        # test single update
        whisper.update(wsp, data[0][1], data[0][0])

        # test multi update
        whisper.update_many(wsp, data[1:])
        return data
Developer: yadsirhc | Project: whisper | Lines: 19 | Source: test_whisper.py


Example 12: _update

    def _update(self, wsp=None, schema=None):
        wsp = wsp or self.db
        schema = schema or [(1, 20)]
        num_data_points = 20

        whisper.create(wsp, schema)

        # create sample data
        tn = time.time() - num_data_points
        data = []
        for i in range(num_data_points):
            data.append((tn + 1 + i, random.random() * 10))

        # test single update
        whisper.update(wsp, data[0][1], data[0][0])

        # test multi update
        whisper.update_many(wsp, data[1:])
        return data
Developer: TheNoButton | Project: whisper | Lines: 19 | Source: test_whisper.py


Example 13: test_single_metric

    def test_single_metric(self):
        xfilesfactor = 0.5
        aggregation_method = "last"
        retentions = [(1, 60)]
        now = int(time.time())
        time_from, time_to = now - 10, now
        points = [(t, now-t) for t in xrange(time_from, time_to)]
        metric = "test_metric"
        metric_path = os_path.join(self.tempdir, metric + ".wsp")
        whisper.create(metric_path, retentions, xfilesfactor, aggregation_method)
        whisper.update_many(metric_path, points)

        self._call_main()

        meta = self.accessor.get_metric(metric)
        self.assertTrue(meta)
        self.assertEqual(meta.name, metric)
        self.assertEqual(meta.carbon_aggregation, aggregation_method)
        self.assertEqual(meta.carbon_xfilesfactor, xfilesfactor)
        self.assertEqual(meta.carbon_retentions, retentions)

        points_again = self.accessor.fetch_points(metric, time_from, time_to, step=1)
        self.assertEqual(points, points_again)
Developer: gitter-badger | Project: biggraphite | Lines: 23 | Source: test_import_whisper.py


Example 14: writeCachedDataPoints

def writeCachedDataPoints():
  "Write datapoints until the MetricCache is completely empty"

  while MetricCache:
    dataWritten = False

    for (metric, datapoints, dbFilePath, dbFileExists) in optimalWriteOrder():
      dataWritten = True

      if not dbFileExists:
        archiveConfig = None
        xFilesFactor, aggregationMethod = None, None

        for schema in SCHEMAS:
          if schema.matches(metric):
            log.creates('new metric %s matched schema %s' % (metric, schema.name))
            archiveConfig = [archive.getTuple() for archive in schema.archives]
            break

        for schema in AGGREGATION_SCHEMAS:
          if schema.matches(metric):
            log.creates('new metric %s matched aggregation schema %s' % (metric, schema.name))
            xFilesFactor, aggregationMethod = schema.archives
            break

        if not archiveConfig:
          raise Exception("No storage schema matched the metric '%s', check your storage-schemas.conf file." % metric)

        dbDir = dirname(dbFilePath)
        try:
            if not exists(dbDir):
                os.makedirs(dbDir)
        except OSError, e:
            log.err("%s" % e)
        log.creates("creating database file %s (archive=%s xff=%s agg=%s)" %
                    (dbFilePath, archiveConfig, xFilesFactor, aggregationMethod))
        try:
            whisper.create(
                dbFilePath,
                archiveConfig,
                xFilesFactor,
                aggregationMethod,
                settings.WHISPER_SPARSE_CREATE,
                settings.WHISPER_FALLOCATE_CREATE)
            instrumentation.increment('creates')
        except:
            log.err("Error creating %s" % (dbFilePath))
            continue
      # If we've got a rate limit configured lets makes sure we enforce it
      if UPDATE_BUCKET:
        UPDATE_BUCKET.drain(1, blocking=True)
      try:
        t1 = time.time()
        whisper.update_many(dbFilePath, datapoints)
        updateTime = time.time() - t1
      except Exception:
        log.msg("Error writing to %s" % (dbFilePath))
        log.err()
        instrumentation.increment('errors')
      else:
        pointCount = len(datapoints)
        instrumentation.increment('committedPoints', pointCount)
        instrumentation.append('updateTimes', updateTime)
        if settings.LOG_UPDATES:
          log.updates("wrote %d datapoints for %s in %.5f seconds" % (pointCount, metric, updateTime))

    # Avoid churning CPU when only new metrics are in the cache
    if not dataWritten:
      time.sleep(0.1)
Developer: jacklesplat | Project: ql_emc_graphite | Lines: 69 | Source: writer.py


Example 15: update_many

    def update_many(self, metric, datapoints, retention_config):
        ''' Update datapoints but quietly ignore the retention_config '''
        return whisper.update_many(self.getFilesystemPath(metric), datapoints)
Developer: posix4e | Project: graphite-data | Lines: 3 | Source: whispertsdb.py


Example 16: writeCachedDataPoints

def writeCachedDataPoints():
  "Write datapoints until the MetricCache is completely empty"
  updates = 0
  lastSecond = 0

  while MetricCache:
    dataWritten = False

    for (metric, datapoints, dbFilePath, dbFileExists) in optimalWriteOrder():
      dataWritten = True

      if not dbFileExists:
        archiveConfig = None
        xFilesFactor, aggregationMethod = None, None

        for schema in schemas:
          if schema.matches(metric):
            log.creates('new metric %s matched schema %s' % (metric, schema.name))
            archiveConfig = [archive.getTuple() for archive in schema.archives]
            break

        for schema in agg_schemas:
          if schema.matches(metric):
            log.creates('new metric %s matched aggregation schema %s' % (metric, schema.name))
            xFilesFactor, aggregationMethod = schema.archives
            break

        if not archiveConfig:
          raise Exception("No storage schema matched the metric '%s', check your storage-schemas.conf file." % metric)

        dbDir = dirname(dbFilePath)
        try:
            os.makedirs(dbDir, 0755)
        except OSError as e:
            log.err("%s" % e)
        log.creates("creating database file %s (archive=%s xff=%s agg=%s)" %
                    (dbFilePath, archiveConfig, xFilesFactor, aggregationMethod))
        whisper.create(dbFilePath, archiveConfig, xFilesFactor, aggregationMethod, settings.WHISPER_SPARSE_CREATE, settings.WHISPER_FALLOCATE_CREATE)
        instrumentation.increment('creates')

      try:
        t1 = time.time()
        whisper.update_many(dbFilePath, datapoints)
        t2 = time.time()
        updateTime = t2 - t1
      except:
        log.msg("Error writing to %s" % (dbFilePath))
        log.err()
        instrumentation.increment('errors')
      else:
        pointCount = len(datapoints)
        instrumentation.increment('committedPoints', pointCount)
        instrumentation.append('updateTimes', updateTime)

        if settings.LOG_UPDATES:
          log.updates("wrote %d datapoints for %s in %.5f seconds" % (pointCount, metric, updateTime))

        # Rate limit update operations
        thisSecond = int(t2)

        if thisSecond != lastSecond:
          lastSecond = thisSecond
          updates = 0
        else:
          updates += 1
          if updates >= settings.MAX_UPDATES_PER_SECOND:
            time.sleep(int(t2 + 1) - t2)

    # Avoid churning CPU when only new metrics are in the cache
    if not dataWritten:
      time.sleep(0.1)
Developer: AstromechZA | Project: carbon | Lines: 71 | Source: writer.py


Example 17: write

 def write(self, metric, datapoints):
   path = self.getFilesystemPath(metric)
   whisper.update_many(path, datapoints)
Developer: cclauss | Project: carbon | Lines: 3 | Source: database.py


Example 18: update_many

 def update_many(self, metric, datapoints):
     return whisper.update_many(self.getFilesystemPath(metric), datapoints)
Developer: jbooth | Project: graphite-data | Lines: 2 | Source: whispertsdb.py


Example 19: update_many

 def update_many(self, metric, datapoints, dbIdentifier):
   dbFilePath = dbIdentifier
   whisper.update_many(dbFilePath, datapoints)
Developer: arowser | Project: carbon-postgres-patches | Lines: 3 | Source: persister.py


Example 20: zip

    os.unlink(tmpfile)
  newfile = tmpfile
else:
  newfile = options.newfile

print 'Creating new whisper database: %s' % newfile
whisper.create(newfile, new_archives, xFilesFactor=xff)
size = os.stat(newfile).st_size
print 'Created: %s (%d bytes)' % (newfile,size)

print 'Migrating data...'
for archive in old_archives:
  timeinfo, values = archive['data']
  datapoints = zip( range(*timeinfo), values )
  datapoints = filter(lambda p: p[1] is not None, datapoints)
  whisper.update_many(newfile, datapoints)

if options.newfile is not None:
  sys.exit(0)

backup = path + '.bak'
print 'Renaming old database to: %s' % backup
os.rename(path, backup)

try:
  print 'Renaming new database to: %s' % path
  os.rename(tmpfile, path)
except:
  traceback.print_exc()
  print '\nOperation failed, restoring backup'
  os.rename(backup, path)
Developer: ZachGoldberg | Project: Graphite | Lines: 31 | Source: whisper-resize.py



Note: the whisper.update_many examples in this article were compiled from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets come from open-source projects contributed by their respective developers, and copyright remains with the original authors; follow each project's license when distributing or using the code. Do not reproduce without permission.

