Python tracemalloc.get_traced_memory Function Code Examples


This article collects typical usage examples of Python's tracemalloc.get_traced_memory function. If you have been wondering what get_traced_memory does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.



The 20 code examples of get_traced_memory shown below are ordered roughly by popularity.
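
Before looking at the examples, a quick orientation: tracemalloc.get_traced_memory() returns a (current, peak) tuple, in bytes, of the memory blocks traced since tracemalloc.start() was called (or since the last clear_traces()). The minimal sketch below is not taken from any of the projects listed; it only illustrates the basic call pattern.

import tracemalloc

tracemalloc.start()                              # begin tracing Python allocations

data = [bytes(1024) for _ in range(1024)]        # allocate roughly 1 MiB
current, peak = tracemalloc.get_traced_memory()  # both values are in bytes
print("current: %.1f KiB, peak: %.1f KiB" % (current / 1024, peak / 1024))

del data
tracemalloc.clear_traces()                       # resets the counters to (0, 0)
tracemalloc.stop()                               # stop() also resets the counters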

Example 1: test_get_traced_memory

    def test_get_traced_memory(self):
        # Python allocates some internals objects, so the test must tolerate
        # a small difference between the expected size and the real usage
        max_error = 2048

        # allocate one object
        obj_size = 1024 * 1024
        tracemalloc.clear_traces()
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)
        self.assertGreaterEqual(peak_size, size)

        self.assertLessEqual(size - obj_size, max_error)
        self.assertLessEqual(peak_size - size, max_error)

        # destroy the object
        obj = None
        size2, peak_size2 = tracemalloc.get_traced_memory()
        self.assertLess(size2, size)
        self.assertGreaterEqual(size - size2, obj_size - max_error)
        self.assertGreaterEqual(peak_size2, peak_size)

        # clear_traces() must reset traced memory counters
        tracemalloc.clear_traces()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))

        # allocate another object
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)

        # stop() also resets traced memory counters
        tracemalloc.stop()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
Developer: asvetlov, Project: cpython, Lines: 35, Source: test_tracemalloc.py


Example 2: test_task_memory_threshold

    def test_task_memory_threshold(self):
        diff = None
        def log_func():
            nonlocal diff
            size, max_size = tracemalloc.get_traced_memory()
            diff = (size - old_size)

        obj_size  = 1024 * 1024
        threshold = int(obj_size * 0.75)

        old_size, max_size = tracemalloc.get_traced_memory()
        task = tracemalloctext.Task(log_func)
        task.set_memory_threshold(threshold)
        task.schedule()

        # allocate
        obj = allocate_bytes(obj_size)
        time.sleep(MEMORY_CHECK_DELAY)
        self.assertIsNotNone(diff)
        self.assertGreaterEqual(diff, threshold)

        # release
        diff = None
        old_size, max_size = tracemalloc.get_traced_memory()
        obj = None
        time.sleep(MEMORY_CHECK_DELAY)
        size, max_size = tracemalloc.get_traced_memory()
        self.assertIsNotNone(diff)
        self.assertLessEqual(diff, threshold)
Developer: waytai, Project: pytracemalloctext, Lines: 29, Source: test_tracemalloctext.py


Example 3: check_track

    def check_track(self, release_gil):
        nframe = 5
        tracemalloc.start(nframe)

        size = tracemalloc.get_traced_memory()[0]

        frames = self.track(release_gil, nframe)
        self.assertEqual(self.get_traceback(),
                         tracemalloc.Traceback(frames))

        self.assertEqual(self.get_traced_memory(), self.size)
Developer: asvetlov, Project: cpython, Lines: 11, Source: test_tracemalloc.py


Example 4: on_epoch_end

    def on_epoch_end(self, last_metrics, **kwargs):
        cpu_used, cpu_peak = list(map(lambda x: int(x/2**20), tracemalloc.get_traced_memory()))
        self.peak_monitor_stop()
        gpu_used = gpu_mem_get_used_no_cache() - self.gpu_before
        gpu_peak = self.gpu_mem_used_peak      - self.gpu_before
        # can be negative, due to unreliable peak monitor thread
        if gpu_peak < 0:   gpu_peak = 0
        # since we want the overhead only, subtract delta used if it's positive
        elif gpu_used > 0: gpu_peak -= gpu_used
        # The numbers are deltas in MBs (beginning of the epoch and the end)
        return add_metrics(last_metrics, [cpu_used, cpu_peak, gpu_used, gpu_peak])
Developer: SiddharthTiwari, Project: fastai, Lines: 11, Source: mem.py


Example 5: tracemalloc_dump

def tracemalloc_dump() -> None:
    if not tracemalloc.is_tracing():
        logger.warning("pid {}: tracemalloc off, nothing to dump"
                       .format(os.getpid()))
        return
    # Despite our name for it, `timezone_now` always deals in UTC.
    basename = "snap.{}.{}".format(os.getpid(),
                                   timezone_now().strftime("%F-%T"))
    path = os.path.join(settings.TRACEMALLOC_DUMP_DIR, basename)
    os.makedirs(settings.TRACEMALLOC_DUMP_DIR, exist_ok=True)

    gc.collect()
    tracemalloc.take_snapshot().dump(path)

    procstat = open('/proc/{}/stat'.format(os.getpid()), 'rb').read().split()
    rss_pages = int(procstat[23])
    logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
                .format(tracemalloc.get_traced_memory()[0] // 1048576,
                        tracemalloc.get_traced_memory()[1] // 1048576,
                        tracemalloc.get_tracemalloc_memory() // 1048576,
                        rss_pages // 256,
                        basename))
Developer: gnprice, Project: zulip, Lines: 22, Source: debug.py


Example 6: add_tracemalloc_metrics

def add_tracemalloc_metrics(snapshot):
    size, max_size = tracemalloc.get_traced_memory()
    snapshot.add_metric('tracemalloc.traced.size', size, 'size')
    snapshot.add_metric('tracemalloc.traced.max_size', max_size, 'size')

    if snapshot.traces:
        snapshot.add_metric('tracemalloc.traces', len(snapshot.traces), 'int')

    size, free = tracemalloc.get_tracemalloc_memory()
    snapshot.add_metric('tracemalloc.module.size', size, 'size')
    snapshot.add_metric('tracemalloc.module.free', free, 'size')
    if size:
        frag = free / size
        snapshot.add_metric('tracemalloc.module.fragmentation', frag, 'percent')
Developer: waytai, Project: pytracemalloctext, Lines: 14, Source: tracemalloctext.py


Example 7: schedule

    def schedule(self):
        task = self._task_ref()
        memory_threshold = task.get_memory_threshold()
        delay = task.get_delay()

        if memory_threshold is not None:
            traced = tracemalloc.get_traced_memory()[0]
            self.min_memory = traced - memory_threshold
            self.max_memory = traced + memory_threshold
        else:
            self.min_memory = None
            self.max_memory = None

        if delay is not None:
            self.timeout = _time_monotonic() + delay
        else:
            self.timeout = None
Developer: waytai, Project: pytracemalloctext, Lines: 17, Source: tracemalloctext.py


Example 8: once

    def once(self):
        delay = None

        if self.min_memory is not None:
            traced = tracemalloc.get_traced_memory()[0]
            if traced <= self.min_memory:
                return None
            if traced >= self.max_memory:
                return None
            delay = self.memory_delay

        if self.timeout is not None:
            dt = (self.timeout - _time_monotonic())
            if dt <= 0:
                return None
            if delay is not None:
                delay = min(delay, dt)
            else:
                delay = dt

        return delay
Developer: waytai, Project: pytracemalloctext, Lines: 21, Source: tracemalloctext.py


Example 9: compute

    def compute(self):
        args = self.args

        if args.track_memory:
            if MS_WINDOWS:
                from perf._win_memory import get_peak_pagefile_usage
            else:
                from perf._memory import PeakMemoryUsageThread
                mem_thread = PeakMemoryUsageThread()
                mem_thread.start()

        if args.tracemalloc:
            import tracemalloc
            tracemalloc.start()

        WorkerTask.compute(self)

        if args.tracemalloc:
            traced_peak = tracemalloc.get_traced_memory()[1]
            tracemalloc.stop()

            if not traced_peak:
                raise RuntimeError("tracemalloc didn't trace any Python "
                                   "memory allocation")

            # drop timings, replace them with the memory peak
            self._set_memory_value(traced_peak)

        if args.track_memory:
            if MS_WINDOWS:
                mem_peak = get_peak_pagefile_usage()
            else:
                mem_thread.stop()
                mem_peak = mem_thread.peak_usage

            if not mem_peak:
                raise RuntimeError("failed to get the memory peak usage")

            # drop timings, replace them with the memory peak
            self._set_memory_value(mem_peak)
Developer: haypo, Project: perf, Lines: 40, Source: _worker.py


Example 10: fastahack_fetch

    def fastahack_fetch(n):
        print('timings for fastahack.FastaHack')
        ti = []
        tf = []
        for _ in range(n):
            t = time.time()
            f = fastahack.FastaHack(fa_file.name)
            ti.append(time.time() - t)

            t = time.time()
            read_fastahack(f, headers)
            tf.append(time.time() - t)
            os.remove(index)
        # profile memory usage and report timings
        tracemalloc.start()
        f = fastahack.FastaHack(fa_file.name)
        read_fastahack(f, headers)
        os.remove(index)
        print(tracemalloc.get_traced_memory())
        print(mean(ti))
        print(mean(tf)/nreads/10*1000*1000)
        tracemalloc.stop()
Developer: JorisBenschop, Project: pyfaidx, Lines: 22, Source: benchmark.py


Example 11: pyfaidx_bgzf_faidx

    def pyfaidx_bgzf_faidx(n):
        print('timings for pyfaidx.Faidx with bgzf compression')
        ti = []
        tf = []
        for _ in range(n):
            t = time.time()
            f = pyfaidx.Faidx(fa_file.name + '.gz')
            ti.append(time.time() - t)

            t = time.time()
            read_faidx(f, headers)
            tf.append(time.time() - t)
            os.remove(index)
        # profile memory usage and report timings
        tracemalloc.start()
        f = pyfaidx.Faidx(fa_file.name + '.gz')
        read_faidx(f, headers)
        os.remove(index)
        print(tracemalloc.get_traced_memory())
        print(mean(ti))
        print(mean(tf)/nreads/10*1000*1000)
        tracemalloc.stop()
Developer: mdshw5, Project: pyfaidx, Lines: 22, Source: benchmark.py


Example 12: pyfaidx_fasta

    def pyfaidx_fasta(n):
        print('timings for pyfaidx.Fasta')
        ti = []
        tf = []
        for _ in range(n):
            t = time.time()
            f = pyfaidx.Fasta(fa_file.name)
            ti.append(time.time() - t)

            t = time.time()
            read_dict(f, headers)
            tf.append(time.time() - t)
            os.remove(index)
        # profile memory usage and report timings
        tracemalloc.start()
        f = pyfaidx.Fasta(fa_file.name)
        read_dict(f, headers)
        os.remove(index)
        print(tracemalloc.get_traced_memory())
        print(mean(ti))
        print(mean(tf)/nreads/10*1000*1000)
        tracemalloc.stop()
Developer: JorisBenschop, Project: pyfaidx, Lines: 22, Source: benchmark.py


Example 13: seqio_read

    def seqio_read(n):
        print('timings for Bio.SeqIO')
        ti = []
        tf = []
        for _ in range(n):
            t = time.time()
            fh = open(fa_file.name)
            f = SeqIO.to_dict(SeqIO.parse(fh, "fasta"))
            ti.append(time.time() - t)

            t = time.time()
            read_dict(f, headers)
            tf.append(time.time() - t)
            fh.close()
        # profile memory usage and report timings
        tracemalloc.start()
        fh = open(fa_file.name)
        f = SeqIO.to_dict(SeqIO.parse(fh, "fasta"))
        read_dict(f, headers)
        fh.close()
        print(tracemalloc.get_traced_memory())
        print(mean(ti))
        print(mean(tf)/nreads/100*1000*1000)
        tracemalloc.stop()
Developer: JorisBenschop, Project: pyfaidx, Lines: 24, Source: benchmark.py


Example 14: pyfasta_fseek

    def pyfasta_fseek(n):
        print('timings for pyfasta.Fasta (fseek)')
        ti = []
        tf = []
        for _ in range(n):
            t = time.time()
            f = pyfasta.Fasta(fa_file.name, record_class=pyfasta.FastaRecord)
            ti.append(time.time() - t)

            t = time.time()
            read_dict(f, headers)
            tf.append(time.time() - t)
            os.remove(fa_file.name + '.flat')
            os.remove(fa_file.name + '.gdx')
        # profile memory usage and report timings
        tracemalloc.start()
        f = pyfasta.Fasta(fa_file.name, record_class=pyfasta.FastaRecord)
        read_dict(f, headers)
        os.remove(fa_file.name + '.flat')
        os.remove(fa_file.name + '.gdx')
        print(tracemalloc.get_traced_memory())
        print(mean(ti))
        print(mean(tf)/nreads/10*1000*1000)
        tracemalloc.stop()
Developer: JorisBenschop, Project: pyfaidx, Lines: 24, Source: benchmark.py


Example 15: loop

def loop(*, size, times):
    for i in range(times):
        print(len(L))
        g(size)
        print([t._format_size(x, False) for x in t.get_traced_memory()])
        snapshot = t.take_snapshot().filter_traces(
            (
                t.Filter(False, "<frozen importlib._bootstrap>"),
                t.Filter(False, "*tracemalloc*"),
                t.Filter(False, "*linecache*"),
                t.Filter(False, "*sre_*"),
                t.Filter(False, "*re.py"),
                t.Filter(False, "*fnmatch*"),
                t.Filter(False, "*tokenize*"),
                t.Filter(False, "<unknown>"),
            )
        )

        for stat in snapshot.statistics("lineno", cumulative=False)[:3]:
            print("----------------------------------------")
            print(t._format_size(stat.size, False))
            for line in stat.traceback.format():
                print(line)
        print("========================================")
Developer: podhmo, Project: individual-sandbox, Lines: 24, Source: 04snapshot.py


Example 16: read_rides_into_tuple

import csv

def read_rides_into_tuple(filename):
    rows = []
    f = open(filename)
    f_csv = csv.reader(f)
    headings = next(f_csv)     # Skip headers
    for row in f_csv:
        route = row[0]
        date = row[1]
        daytype = row[2]
        rides = int(row[3])
        record = (route, date, daytype, rides)
        rows.append(record)
    f.close()
    return rows

# The remaining readers compare other record layouts; their bodies are omitted in this excerpt.
def read_rides_into_dict(filename): ...           # slower
def read_rides_into_namedtuple(filename): ...     # medium
def read_rides_into_class(filename): ...          # slowest
def read_rides_into_class_w_slots(filename): ...  # faster
def read_rides_via_pandas(filename): ...          # fastest

if __name__ == '__main__':
    import tracemalloc
    tracemalloc.start()
    rows = read_rides_into_tuple('Data/ctabus.csv')
    print('Memory Use: Current %d, Peak %d' % tracemalloc.get_traced_memory())
    rows = read_rides_into_dict('Data/ctabus.csv')
    print('Memory Use: Current %d, Peak %d' % tracemalloc.get_traced_memory())
Developer: seanbradley, Project: structly, Lines: 30, Source: readrides_all_structs.py


Example 17: test_reproject_3D_memory

def test_reproject_3D_memory():

    pytest.importorskip('reproject')

    tracemalloc.start()

    snap1 = tracemalloc.take_snapshot()

    # create a 64 MB cube
    cube,_ = utilities.generate_gaussian_cube(shape=[200,200,200])
    sz = _.dtype.itemsize

    # check that cube is loaded into memory
    snap2 = tracemalloc.take_snapshot()
    diff = snap2.compare_to(snap1, 'lineno')
    diffvals = np.array([dd.size_diff for dd in diff])
    # at this point, the generated cube should still exist in memory
    assert diffvals.max()*u.B >= 200**3*sz*u.B

    wcs_in = cube.wcs
    wcs_out = wcs_in.deepcopy()
    wcs_out.wcs.ctype = ['GLON-SIN', 'GLAT-SIN', cube.wcs.wcs.ctype[2]]
    wcs_out.wcs.crval = [0.001, 0.001, cube.wcs.wcs.crval[2]]
    wcs_out.wcs.crpix = [2., 2., cube.wcs.wcs.crpix[2]]

    header_out = (wcs_out.to_header())
    header_out['NAXIS'] = 3
    header_out['NAXIS1'] = int(cube.shape[2]/2)
    header_out['NAXIS2'] = int(cube.shape[1]/2)
    header_out['NAXIS3'] = cube.shape[0]

    # First the unfilled reprojection test: new memory is allocated for
    # `result`, but nowhere else
    result = cube.reproject(header_out, filled=False)

    snap3 = tracemalloc.take_snapshot()
    diff = snap3.compare_to(snap2, 'lineno')
    diffvals = np.array([dd.size_diff for dd in diff])
    # result should have the same size as the input data, except smaller in two dims
    # make sure that's all that's allocated
    assert diffvals.max()*u.B >= 200*100**2*sz*u.B
    assert diffvals.max()*u.B < 200*110**2*sz*u.B

    # without masking the cube, nothing should change
    result = cube.reproject(header_out, filled=True)

    snap4 = tracemalloc.take_snapshot()
    diff = snap4.compare_to(snap3, 'lineno')
    diffvals = np.array([dd.size_diff for dd in diff])
    assert diffvals.max()*u.B <= 1*u.MB

    assert result.wcs.wcs.crval[0] == 0.001
    assert result.wcs.wcs.crpix[0] == 2.


    # masking the cube will force the fill to create a new in-memory copy
    mcube = cube.with_mask(cube > 0.1*cube.unit)
    # `_is_huge` would trigger a use_memmap
    assert not mcube._is_huge
    assert mcube.mask.any()

    # take a new snapshot because we're not testing the mask creation
    snap5 = tracemalloc.take_snapshot()
    tracemalloc.stop()
    tracemalloc.start() # stop/start so we can check peak mem use from here
    current_b4, peak_b4 = tracemalloc.get_traced_memory()
    result = mcube.reproject(header_out, filled=True)
    current_aftr, peak_aftr = tracemalloc.get_traced_memory()


    snap6 = tracemalloc.take_snapshot()
    diff = snap6.compare_to(snap5, 'lineno')
    diffvals = np.array([dd.size_diff for dd in diff])
    # a duplicate of the cube should have been created by filling masked vals
    # (this should be near-exact since 'result' should occupy exactly the
    # same amount of memory)
    assert diffvals.max()*u.B <= 1*u.MB #>= 200**3*sz*u.B
    # the peak memory usage *during* reprojection will have that duplicate,
    # but the memory gets cleaned up afterward
    assert (peak_aftr-peak_b4)*u.B >= (200**3*sz*u.B + 200*100**2*sz*u.B)

    assert result.wcs.wcs.crval[0] == 0.001
    assert result.wcs.wcs.crpix[0] == 2.
Developer: keflavich, Project: spectral-cube, Lines: 83, Source: test_regrid.py


Example 18: run_memleak_test

def run_memleak_test(bench, iterations, report):
    tracemalloc.start()

    starti = min(50, iterations // 2)
    endi = iterations

    malloc_arr = np.empty((endi,), dtype=np.int64)
    rss_arr = np.empty((endi,), dtype=np.int64)
    rss_peaks = np.empty((endi,), dtype=np.int64)
    nobjs_arr = np.empty((endi,), dtype=np.int64)
    garbage_arr = np.empty((endi,), dtype=np.int64)
    open_files_arr = np.empty((endi,), dtype=np.int64)
    rss_peak = 0

    p = psutil.Process()

    for i in range(endi):
        bench()

        gc.collect()

        rss = p.memory_info().rss
        malloc, peak = tracemalloc.get_traced_memory()
        nobjs = len(gc.get_objects())
        garbage = len(gc.garbage)
        open_files = len(p.open_files())
        print("{0: 4d}: pymalloc {1: 10d}, rss {2: 10d}, nobjs {3: 10d}, garbage {4: 4d}, files: {5: 4d}".format(
            i, malloc, rss, nobjs, garbage, open_files))

        malloc_arr[i] = malloc
        rss_arr[i] = rss
        if rss > rss_peak:
            rss_peak = rss
        rss_peaks[i] = rss_peak
        nobjs_arr[i] = nobjs
        garbage_arr[i] = garbage
        open_files_arr[i] = open_files

    print('Average memory consumed per loop: %1.4f bytes\n' %
          (np.sum(rss_peaks[starti+1:] - rss_peaks[starti:-1]) / float(endi - starti)))

    from matplotlib import pyplot as plt
    fig, (ax1, ax2, ax3) = plt.subplots(3)
    ax1b = ax1.twinx()
    ax1.plot(malloc_arr, 'r')
    ax1b.plot(rss_arr, 'b')
    ax1.set_ylabel('pymalloc', color='r')
    ax1b.set_ylabel('rss', color='b')

    ax2b = ax2.twinx()
    ax2.plot(nobjs_arr, 'r')
    ax2b.plot(garbage_arr, 'b')
    ax2.set_ylabel('total objects', color='r')
    ax2b.set_ylabel('garbage objects', color='b')

    ax3.plot(open_files_arr)
    ax3.set_ylabel('open file handles')

    if not report.endswith('.pdf'):
        report = report + '.pdf'
    fig.tight_layout()
    fig.savefig(report, format='pdf')
Developer: Eric89GXL, Project: matplotlib, Lines: 62, Source: memleak.py


Example 19: log_func

    def log_func():
        nonlocal diff
        size, max_size = tracemalloc.get_traced_memory()
        diff = (size - old_size)
Developer: waytai, Project: pytracemalloctext, Lines: 4, Source: test_tracemalloctext.py


Example 20: print

import tracemalloc as t

print("*start")
print([t._format_size(x, False) for x in t.get_traced_memory()])
t.start()

L = [[_ for _ in range(10000)] for i in range(100)]
print("*gen")
print([t._format_size(x, False) for x in t.get_traced_memory()])

snapshot = t.take_snapshot()
for stats in snapshot.statistics("traceback")[:3]:
    print(stats)

print("----------------------------------------")
snapshot = t.take_snapshot()
for stats in snapshot.statistics("lineno", cumulative=True)[:3]:
    print(stats)

t.stop()
print([t._format_size(x, False) for x in t.get_traced_memory()])
Developer: podhmo, Project: individual-sandbox, Lines: 21, Source: 03snapshot.py



Note: the tracemalloc.get_traced_memory examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by their respective authors; copyright remains with those authors, and any redistribution or use should follow the corresponding project's license. Please do not republish without permission.

