Python ops.reset_default_graph Function Code Examples


This article collects and summarizes typical usage examples of the Python function tensorflow.python.framework.ops.reset_default_graph. If you have been wondering exactly how to use reset_default_graph, how to call it, or what real-world usages look like, the hand-picked code examples below should help.



A total of 20 code examples of reset_default_graph are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
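Before the examples, here is a minimal sketch of the pattern they all share: call reset_default_graph() before building a new graph so that ops from a previous run (in a script, a test case, or a notebook cell) do not accumulate in the global default graph. This sketch is illustrative and not taken from any of the projects below; the tensor names, shapes, and the build_and_run helper are arbitrary, it assumes TensorFlow 1.x graph mode, and under TensorFlow 2.x the equivalent public entry point is tf.compat.v1.reset_default_graph().

# Minimal, illustrative sketch (not from the examples below).
# Assumes TensorFlow 1.x graph mode; names and shapes are arbitrary.
import tensorflow as tf
from tensorflow.python.framework import ops


def build_and_run():
    # Clear the global default graph so this call always starts from an
    # empty graph (no duplicated op names, no state left over from earlier runs).
    ops.reset_default_graph()

    x = tf.placeholder(dtype=tf.float32, shape=[None, 4], name='x')
    w = tf.Variable(tf.random_normal([4, 1]), name='w')
    y = tf.matmul(x, w)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        return sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0, 4.0]]})


# Calling the function twice is safe: each call rebuilds the graph from
# scratch. Without reset_default_graph() the second call would add a second
# copy of every op (x_1, w_1, ...) to the same default graph.
build_and_run()
build_and_run()

Note that reset_default_graph() applies only to the current thread and must not be called while a Session created against the old graph is still active; that is why several of the examples below call it at the very top of a test method, before any Session is opened.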

Example 1: clear_session

def clear_session():
    global _SESSION
    global _LEARNING_PHASE
    reset_default_graph()
    reset_uids()
    _SESSION = None
    _LEARNING_PHASE = tf.placeholder(dtype='uint8', name='keras_learning_phase')
Author: gvessere, Project: keras, Lines: 7, Source: tensorflow_backend.py


Example 2: testSimpleCodeView

  def testSimpleCodeView(self):
    ops.reset_default_graph()
    opts = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
    outfile = os.path.join(test.get_temp_dir(), 'dump')
    opts['output'] = 'file:outfile=' + outfile
    opts['account_type_regexes'] = ['.*']
    opts['show_name_regexes'] = ['.*model_analyzer_testlib.*']
    opts['account_displayed_op_only'] = False
    # TODO(xpan): Test 'micros'. Since the execution time changes each run,
    # it's a bit difficult to test it now.
    opts['select'] = [
        'bytes', 'params', 'float_ops', 'num_hidden_ops', 'device',
        'input_shapes'
    ]

    with session.Session() as sess:
      x = lib.BuildSmallModel()

      sess.run(variables.global_variables_initializer())
      run_meta = config_pb2.RunMetadata()
      _ = sess.run(x,
                   options=config_pb2.RunOptions(
                       trace_level=config_pb2.RunOptions.FULL_TRACE),
                   run_metadata=run_meta)

      model_analyzer.print_model_analysis(
          sess.graph, run_meta, tfprof_cmd='code', tfprof_options=opts)

      with gfile.Open(outfile, 'r') as f:
        # pylint: disable=line-too-long
        self.assertEqual(
            'node name | output bytes | # parameters | # float_ops | assigned devices | input',
            f.read()[0:80])
Author: Joetz, Project: tensorflow, Lines: 33, Source: model_analyzer_test.py


Example 3: _train

  def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
    ops.reset_default_graph()
    graph = ops.get_default_graph()
    with session.Session(
        config=get_config(layout_optimizer), graph=graph) as sess:
      batch = 2
      height = 6
      width = 7
      input_channels = 3
      shape = [batch, height, width, input_channels]
      image = array_ops.placeholder(dtype='float32', shape=shape)
      conv1 = conv_layers.conv2d(image, 32, [3, 3])
      conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
      optimizer = gradient_descent.GradientDescentOptimizer(0.01)
      loss = math_ops.reduce_mean(conv2)
      train_op = optimizer.minimize(loss)
      saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)

      if restore:
        saver.restore(sess, checkpoint_path)
      else:
        sess.run(variables.global_variables_initializer())

      np.random.seed(0)
      for _ in range(2):
        image_val = np.random.rand(*shape).astype(np.float32)
        sess.run([loss, train_op], feed_dict={image: image_val})

      if restore:
        all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        all_vars_values = [var.eval(session=sess) for var in all_vars]
        return all_vars_values
      else:
        saver.save(sess, checkpoint_path)
Author: SylChan, Project: tensorflow, Lines: 34, Source: layout_optimizer_test.py


Example 4: testCodeViewLeafGraphNode

  def testCodeViewLeafGraphNode(self):
    ops.reset_default_graph()
    opts = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
    opts['account_type_regexes'] = ['.*']
    opts['account_displayed_op_only'] = False
    opts['select'] = [
        'bytes', 'params', 'float_ops', 'device'
    ]
    opts['output'] = 'none'

    with session.Session() as sess:
      x = lib.BuildSmallModel()

      sess.run(variables.global_variables_initializer())
      run_meta = config_pb2.RunMetadata()
      _ = sess.run(x,
                   options=config_pb2.RunOptions(
                       trace_level=config_pb2.RunOptions.FULL_TRACE),
                   run_metadata=run_meta)

      tfprof_node = model_analyzer.print_model_analysis(
          sess.graph, run_meta, tfprof_cmd='code', tfprof_options=opts)

      leaf = tfprof_node
      while leaf.children:
        self.assertEqual(0, len(leaf.graph_nodes))
        leaf = leaf.children[0]
      self.assertEqual(1, len(leaf.graph_nodes))
Author: Joetz, Project: tensorflow, Lines: 28, Source: model_analyzer_test.py


Example 5: testAdvisor

  def testAdvisor(self):
    ops.reset_default_graph()

    with session.Session() as sess:
      x = lib.BuildFullModel()

      sess.run(variables.global_variables_initializer())
      run_meta = config_pb2.RunMetadata()
      _ = sess.run(
          x,
          options=config_pb2.RunOptions(
              trace_level=config_pb2.RunOptions.FULL_TRACE),
          run_metadata=run_meta)

      advice_pb = model_analyzer.advise(sess.graph, run_meta)
      self.assertTrue('AcceleratorUtilizationChecker' in advice_pb.checkers)
      self.assertTrue('ExpensiveOperationChecker' in advice_pb.checkers)
      self.assertTrue('OperationChecker' in advice_pb.checkers)

      checker = advice_pb.checkers['AcceleratorUtilizationChecker']
      if test.is_gpu_available():
        self.assertGreater(len(checker.reports), 0)
      else:
        self.assertEqual(len(checker.reports), 0)
      checker = advice_pb.checkers['ExpensiveOperationChecker']
      self.assertGreater(len(checker.reports), 0)
Author: Joetz, Project: tensorflow, Lines: 26, Source: model_analyzer_test.py


Example 6: load_policy

    def load_policy(cls, policy_dict_path, tf_generator, network_config=None):
        """
        For when we only need to load a policy for the forward pass. For instance, to run on the robot from
        a checkpointed policy.
        """
        from tensorflow.python.framework import ops
        ops.reset_default_graph()  # we need to destroy the default graph before re_init or checkpoint won't restore.
        pol_dict = pickle.load(open(policy_dict_path, "rb"))
        tf_map = tf_generator(dim_input=pol_dict['deg_obs'], dim_output=pol_dict['deg_action'],
                              batch_size=1, network_config=network_config)

        sess = tf.Session()
        init_op = tf.initialize_all_variables()
        sess.run(init_op)
        saver = tf.train.Saver()
        check_file = pol_dict['checkpoint_path_tf']
        saver.restore(sess, check_file)

        device_string = pol_dict['device_string']

        cls_init = cls(pol_dict['deg_action'], tf_map.get_input_tensor(), tf_map.get_output_op(), np.zeros((1,)),
                       sess, device_string)
        cls_init.chol_pol_covar = pol_dict['chol_pol_covar']
        cls_init.scale = pol_dict['scale']
        cls_init.bias = pol_dict['bias']
        cls_init.x_idx = pol_dict['x_idx']
        return cls_init
Author: Etragas, Project: gps, Lines: 27, Source: tf_policy.py


Example 7: testTimeline

  def testTimeline(self):
    ops.reset_default_graph()
    outfile = os.path.join(test.get_temp_dir(), 'timeline')
    opts = (builder(builder.trainable_variables_parameter())
            .with_max_depth(100000)
            .with_step(0)
            .with_timeline_output(outfile)
            .with_accounted_types(['.*']).build())

    with session.Session() as sess:
      x = lib.BuildFullModel()

      sess.run(variables.global_variables_initializer())
      run_meta = config_pb2.RunMetadata()
      _ = sess.run(
          x,
          options=config_pb2.RunOptions(
              trace_level=config_pb2.RunOptions.FULL_TRACE),
          run_metadata=run_meta)

      _ = model_analyzer.profile(
          sess.graph, run_meta, cmd='graph', options=opts)

      with gfile.Open(outfile, 'r') as f:
        # Test that a json file is created.
        # TODO(xpan): tfprof Timeline isn't quite correct on Windows.
        # Investigate why.
        if os.name != 'nt':
          self.assertLess(1000, len(f.read()))
        else:
          self.assertLess(1, len(f.read()))
Author: rmcguinness, Project: tensorflow, Lines: 31, Source: model_analyzer_test.py


Example 8: testSelectEverything

  def testSelectEverything(self):
    ops.reset_default_graph()
    opts = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
    outfile = os.path.join(test.get_temp_dir(), 'dump')
    opts['output'] = 'file:outfile=' + outfile
    opts['account_type_regexes'] = ['.*']
    opts['select'] = [
        'bytes', 'params', 'float_ops', 'occurrence',
        'device', 'op_types'
    ]

    with session.Session() as sess, ops.device('/cpu:0'):
      x = lib.BuildSmallModel()

      sess.run(variables.global_variables_initializer())
      run_meta = config_pb2.RunMetadata()
      _ = sess.run(x,
                   options=config_pb2.RunOptions(
                       trace_level=config_pb2.RunOptions.FULL_TRACE),
                   run_metadata=run_meta)

      model_analyzer.print_model_analysis(
          sess.graph, run_meta, tfprof_options=opts)

      with gfile.Open(outfile, 'r') as f:
        # pylint: disable=line-too-long
        self.assertEqual(
            'node name | # parameters | # float_ops | output bytes | assigned devices | op types\n_TFProfRoot (--/451 params, --/10.44k flops, --/5.28KB, _kTFScopeParent)\n  Conv2D (0/0 params, 5.83k/5.83k flops, 432B/432B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Conv2D)\n  Conv2D_1 (0/0 params, 4.61k/4.61k flops, 384B/384B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Conv2D)\n  DW (3x3x3x6, 162/162 params, 0/0 flops, 648B/1.30KB, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|VariableV2|_trainable_variables)\n    DW/Assign (0/0 params, 0/0 flops, 0B/0B, Assign)\n    DW/Initializer (0/0 params, 0/0 flops, 0B/0B, _kTFScopeParent)\n      DW/Initializer/random_normal (0/0 params, 0/0 flops, 0B/0B, Add)\n        DW/Initializer/random_normal/RandomStandardNormal (0/0 params, 0/0 flops, 0B/0B, RandomStandardNormal)\n        DW/Initializer/random_normal/mean (0/0 params, 0/0 flops, 0B/0B, Const)\n        DW/Initializer/random_normal/mul (0/0 params, 0/0 flops, 0B/0B, Mul)\n        DW/Initializer/random_normal/shape (0/0 params, 0/0 flops, 0B/0B, Const)\n        DW/Initializer/random_normal/stddev (0/0 params, 0/0 flops, 0B/0B, Const)\n    DW/read (0/0 params, 0/0 flops, 648B/648B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Identity)\n  DW2 (2x2x6x12, 288/288 params, 0/0 flops, 1.15KB/2.30KB, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|VariableV2|_trainable_variables)\n    DW2/Assign (0/0 params, 0/0 flops, 0B/0B, Assign)\n    DW2/Initializer (0/0 params, 0/0 flops, 0B/0B, _kTFScopeParent)\n      DW2/Initializer/random_normal (0/0 params, 0/0 flops, 0B/0B, Add)\n        DW2/Initializer/random_normal/RandomStandardNormal (0/0 params, 0/0 flops, 0B/0B, RandomStandardNormal)\n        DW2/Initializer/random_normal/mean (0/0 params, 0/0 flops, 0B/0B, Const)\n        DW2/Initializer/random_normal/mul (0/0 params, 0/0 flops, 0B/0B, Mul)\n        DW2/Initializer/random_normal/shape (0/0 params, 0/0 flops, 0B/0B, Const)\n        DW2/Initializer/random_normal/stddev (0/0 params, 0/0 flops, 0B/0B, Const)\n    DW2/read (0/0 params, 0/0 flops, 1.15KB/1.15KB, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Identity)\n  ScalarW (1, 1/1 params, 0/0 flops, 0B/0B, VariableV2|_trainable_variables)\n    ScalarW/Assign (0/0 params, 0/0 flops, 0B/0B, Assign)\n    ScalarW/Initializer (0/0 params, 0/0 flops, 0B/0B, _kTFScopeParent)\n      ScalarW/Initializer/random_normal (0/0 params, 0/0 flops, 0B/0B, Add)\n        ScalarW/Initializer/random_normal/RandomStandardNormal (0/0 params, 0/0 flops, 0B/0B, RandomStandardNormal)\n        ScalarW/Initializer/random_normal/mean (0/0 params, 0/0 flops, 0B/0B, Const)\n        ScalarW/Initializer/random_normal/mul (0/0 params, 0/0 flops, 0B/0B, Mul)\n        ScalarW/Initializer/random_normal/shape (0/0 params, 0/0 flops, 0B/0B, Const)\n        ScalarW/Initializer/random_normal/stddev (0/0 params, 0/0 flops, 0B/0B, Const)\n    ScalarW/read (0/0 params, 0/0 flops, 0B/0B, Identity)\n  init (0/0 params, 0/0 flops, 0B/0B, NoOp)\n  zeros (0/0 params, 0/0 flops, 864B/864B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Const)\n',
            f.read())
Author: ajaybhat, Project: tensorflow, Lines: 29, Source: model_analyzer_test.py


Example 9: testSelectEverything

  def testSelectEverything(self):
    ops.reset_default_graph()
    outfile = os.path.join(test.get_temp_dir(), 'dump')
    opts = (builder(builder.trainable_variables_parameter())
            .with_file_output(outfile)
            .with_accounted_types(['.*'])
            .select(['params', 'float_ops', 'occurrence', 'device', 'op_types',
                     'input_shapes']).build())

    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
    config = config_pb2.ConfigProto(graph_options=graph_options)
    with session.Session(config=config) as sess, ops.device('/device:CPU:0'):
      x = lib.BuildSmallModel()

      sess.run(variables.global_variables_initializer())
      run_meta = config_pb2.RunMetadata()
      _ = sess.run(x,
                   options=config_pb2.RunOptions(
                       trace_level=config_pb2.RunOptions.FULL_TRACE),
                   run_metadata=run_meta)

      model_analyzer.profile(
          sess.graph, run_meta, options=opts)
Author: andrewharp, Project: tensorflow, Lines: 25, Source: model_analyzer_test.py


Example 10: testBasic

  def testBasic(self):
    base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
    ops.reset_default_graph()
    sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
        base_path,
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2}))

    self.assertTrue(sess)
    asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
    with sess.as_default():
      path1, path2 = sess.run(["filename1:0", "filename2:0"])
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)

      collection_def = meta_graph_def.collection_def

      signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
      self.assertEqual(len(signatures_any), 1)

      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      self._checkRegressionSignature(signatures, sess)
      self._checkNamedSignatures(signatures, sess)
Author: Ajaycs99, Project: tensorflow, Lines: 26, Source: session_bundle_test.py


Example 11: setUp

  def setUp(self):
    ops.reset_default_graph()
    dim = 1
    num = 3
    with ops.name_scope('some_scope'):
      # Basically from 0 to dim*num-1.
      flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num)
      bias = variables.Variable(
          array_ops.reshape(flat_data, (num, dim)), name='bias')
    save = saver.Saver([bias])
    with self.test_session() as sess:
      variables.global_variables_initializer().run()
      self.bundle_file = os.path.join(test.get_temp_dir(), 'bias_checkpoint')
      save.save(sess, self.bundle_file)

    self.new_class_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
    self.old_class_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
    self.init_val = 42

    def _init_val_initializer(shape, dtype=None, partition_info=None):
      del dtype, partition_info  # Unused by this unit-testing initializer.
      return array_ops.tile(
          constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)

    self.initializer = _init_val_initializer
Author: 1000sprites, Project: tensorflow, Lines: 27, Source: checkpoint_ops_test.py


Example 12: testBasics

  def testBasics(self):
    ops.reset_default_graph()
    outfile = os.path.join(test.get_temp_dir(), "dump")
    opts = builder(builder.time_and_memory()
                  ).with_file_output(outfile).build()

    x = lib.BuildFullModel()

    profile_str = None
    profile_step100 = os.path.join(test.get_temp_dir(), "profile_100")
    with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
      pctx.add_auto_profiling("op", options=opts, profile_steps=[15, 50, 100])
      with session.Session() as sess:
        self.evaluate(variables.global_variables_initializer())
        total_steps = 101
        for i in range(total_steps):
          self.evaluate(x)
          if i == 14 or i == 49:
            self.assertTrue(gfile.Exists(outfile))
            gfile.Remove(outfile)
          if i == 99:
            self.assertTrue(gfile.Exists(profile_step100))
            with gfile.Open(outfile, "r") as f:
              profile_str = f.read()
            gfile.Remove(outfile)

      self.assertEqual(set([15, 50, 100]), set(pctx.get_profiles("op").keys()))

    with lib.ProfilerFromFile(
        os.path.join(test.get_temp_dir(), "profile_100")) as profiler:
      profiler.profile_operations(options=opts)
      with gfile.Open(outfile, "r") as f:
        self.assertEqual(profile_str, f.read())
Author: Wajih-O, Project: tensorflow, Lines: 33, Source: profile_context_test.py


Example 13: test_graph_replace_gradients

  def test_graph_replace_gradients(self):
    ops.reset_default_graph()
    w = variables.VariableV1(0.0, name="w")
    y = math_ops.multiply(math_ops.multiply(w, w, name="mul1"), w, name="mul2")
    g = gradients_impl.gradients(y, w, name="grad")[0]

    # Extract the operations.
    replacement_ts = {w.value(): g}
    original_mul1_grad = (ops.get_default_graph().
                          get_operation_by_name("grad/mul1_grad/Mul_1"))

    # Should not raise exception.
    res = ge.graph_replace(g, replacement_ts, dst_scope="res")

    # Extract the operations after graph_replace.
    result_mul1_grad = (ops.get_default_graph().
                        get_operation_by_name("res/grad/mul1_grad/Mul_1"))

    # Make sure _original_ops are as expected.
    self.assertEqual(original_mul1_grad._original_op.name, u"mul1")
    self.assertEqual(result_mul1_grad._original_op.name, u"res/mul1")
    self.assertNotEqual(res.name, g.name)
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      g_val, res_val = sess.run([g, res])
    self.assertNear(g_val, 0.0, ERROR_TOLERANCE)
    self.assertNear(res_val, 0.0, ERROR_TOLERANCE)
Author: Ajaycs99, Project: tensorflow, Lines: 27, Source: transform_test.py


Example 14: generate_testdata

  def generate_testdata(self):
    ops.reset_default_graph()
    sess = session.Session()
    placeholder = array_ops.placeholder(dtypes.string)
    summary_tensor = text_summary.text_summary('message', placeholder)

    vector_summary = text_summary.text_summary('vector', placeholder)

    run_names = ['fry', 'leela']
    for run_name in run_names:
      subdir = os.path.join(self.logdir, run_name)
      writer = summary.FileWriter(subdir)
      writer.add_graph(sess.graph)

      step = 0
      for gem in GEMS:
        message = run_name + ' *loves* ' + gem
        feed_dict = {placeholder: message}
        summ = sess.run(summary_tensor, feed_dict=feed_dict)
        writer.add_summary(summ, global_step=step)
        step += 1

      vector_message = ['one', 'two', 'three', 'four']
      summ = sess.run(vector_summary, feed_dict={placeholder: vector_message})
      writer.add_summary(summ)
      writer.close()
Author: AlbertXiebnu, Project: tensorflow, Lines: 26, Source: text_plugin_test.py


Example 15: testDropoutWrapperWithSeed

 def testDropoutWrapperWithSeed(self):
   keep_some = 0.5
   random_seed.set_random_seed(2)
   ## Use parallel_iterations = 1 in both calls to
   ## _testDropoutWrapper to ensure the (per-time step) dropout is
   ## consistent across both calls.  Otherwise the seed may not end
   ## up being munged consistently across both graphs.
   res_standard_1 = self._testDropoutWrapper(
       input_keep_prob=keep_some,
       output_keep_prob=keep_some,
       state_keep_prob=keep_some,
       seed=10,
       parallel_iterations=1)
   # Clear away the graph and the test session (which keeps variables around)
   ops.reset_default_graph()
   self._ClearCachedSession()
   random_seed.set_random_seed(2)
   res_standard_2 = self._testDropoutWrapper(
       input_keep_prob=keep_some,
       output_keep_prob=keep_some,
       state_keep_prob=keep_some,
       seed=10,
       parallel_iterations=1)
   self.assertAllClose(res_standard_1[0], res_standard_2[0])
   self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
   self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
Author: ChengYuXiang, Project: tensorflow, Lines: 26, Source: core_rnn_cell_test.py


Example 16: testComplexCodeView

  def testComplexCodeView(self):
    ops.reset_default_graph()
    outfile = os.path.join(test.get_temp_dir(), 'dump')
    opts = (builder(builder.trainable_variables_parameter())
            .with_file_output(outfile)
            .with_accounted_types(['.*'])
            .with_node_names(show_name_regexes=
                             ['.*model_analyzer_testlib.py.*'])
            .account_displayed_op_only(False)
            .select(['params', 'float_ops']).build())

    with profile_context.ProfileContext(test.get_temp_dir(),
                                        trace_steps=[],
                                        dump_steps=[]) as pctx:
      with session.Session() as sess:
        x = lib.BuildFullModel()

        sess.run(variables.global_variables_initializer())
        pctx.trace_next_step()
        _ = sess.run(x)
        tfprof_node = pctx.profiler.profile_python(options=opts)

        # pylint: disable=line-too-long
        with gfile.Open(outfile, 'r') as f:
          lines = f.read().split('\n')
          result = '\n'.join([l[:min(len(l), 80)] for l in lines])
          self.assertEqual(
              compat.as_bytes(
                  'node name | # parameters | # float_ops\n_TFProfRoot (--/2.84k params, --/168.86k flops)\n  model_analyzer_testlib.py:63:BuildFullModel (0/1.80k params, 0/45.37k flops)\n    model_analyzer_testlib.py:40:BuildSmallModel (0/0 params, 0/0 flops)\n    model_analyzer_testlib.py:44:BuildSmallModel (0/4 params, 0/8 flops)\n    model_analyzer_testlib.py:48:BuildSmallModel (0/648 params, 0/1.30k flops)\n    model_analyzer_testlib.py:49:BuildSmallModel (0/0 params, 0/23.33k flops)\n    model_analyzer_testlib.py:53:BuildSmallModel (0/1.15k params, 0/2.30k flops)\n    model_analyzer_testlib.py:54:BuildSmallModel (0/0 params, 0/18.43k flops)\n  model_analyzer_testlib.py:63:BuildFullModel (gradient) (0/0 params, 0/67.39k f\n    model_analyzer_testlib.py:49:BuildSmallModel (gradient) (0/0 params, 0/46.66\n    model_analyzer_testlib.py:54:BuildSmallModel (gradient) (0/0 params, 0/20.74\n  model_analyzer_testlib.py:67:BuildFullModel (0/1.04k params, 0/18.58k flops)\n  model_analyzer_testlib.py:67:BuildFullModel (gradient) (0/0 params, 0/37.00k f\n  model_analyzer_testlib.py:69:BuildFullModel (0/0 params, 0/0 flops)\n  model_analyzer_testlib.py:70:BuildFullModel (0/0 params, 0/258 flops)\n  model_analyzer_testlib.py:70:BuildFullModel (gradient) (0/0 params, 0/129 flop\n  model_analyzer_testlib.py:72:BuildFullModel (0/0 params, 0/141 flops)\n'
              ), compat.as_bytes(lib.CheckAndRemoveDoc(result)))

        self.assertLess(0, tfprof_node.total_exec_micros)
        self.assertEqual(2844, tfprof_node.total_parameters)
        self.assertEqual(168863, tfprof_node.total_float_ops)
        self.assertEqual(8, len(tfprof_node.children))
        self.assertEqual('_TFProfRoot', tfprof_node.name)
        self.assertEqual(
            'model_analyzer_testlib.py:63:BuildFullModel',
            tfprof_node.children[0].name)
        self.assertEqual(
            'model_analyzer_testlib.py:63:BuildFullModel (gradient)',
            tfprof_node.children[1].name)
        self.assertEqual(
            'model_analyzer_testlib.py:67:BuildFullModel',
            tfprof_node.children[2].name)
        self.assertEqual(
            'model_analyzer_testlib.py:67:BuildFullModel (gradient)',
            tfprof_node.children[3].name)
        self.assertEqual(
            'model_analyzer_testlib.py:69:BuildFullModel',
            tfprof_node.children[4].name)
        self.assertEqual(
            'model_analyzer_testlib.py:70:BuildFullModel',
            tfprof_node.children[5].name)
        self.assertEqual(
            'model_analyzer_testlib.py:70:BuildFullModel (gradient)',
            tfprof_node.children[6].name)
        self.assertEqual(
            'model_analyzer_testlib.py:72:BuildFullModel',
            tfprof_node.children[7].name)
Author: andrewharp, Project: tensorflow, Lines: 60, Source: model_analyzer_test.py


Example 17: testTrainRegressorNonInMemory

  def testTrainRegressorNonInMemory(self):
    ops.reset_default_graph()
    expected_first, expected_second, expected_third = (
        self._get_expected_ensembles_for_regression())
    with self.test_session() as sess:
      # Train without train_in_memory mode.
      with sess.graph.as_default():
        train_op, ensemble_serialized = self._get_train_op_and_ensemble(
            boosted_trees._create_regression_head(label_dimension=1),
            run_config.RunConfig(),
            is_classification=False,
            train_in_memory=False)
      _, serialized = sess.run([train_op, ensemble_serialized])
      # Validate the trained ensemble.
      ensemble_proto = boosted_trees_pb2.TreeEnsemble()
      ensemble_proto.ParseFromString(serialized)
      self.assertProtoEquals(expected_first, ensemble_proto)

      # Run one more time and validate the trained ensemble.
      _, serialized = sess.run([train_op, ensemble_serialized])
      ensemble_proto = boosted_trees_pb2.TreeEnsemble()
      ensemble_proto.ParseFromString(serialized)
      self.assertProtoEquals(expected_second, ensemble_proto)

      # Third round training and validation.
      _, serialized = sess.run([train_op, ensemble_serialized])
      ensemble_proto = boosted_trees_pb2.TreeEnsemble()
      ensemble_proto.ParseFromString(serialized)
      self.assertProtoEquals(expected_third, ensemble_proto)
Author: LiuCKind, Project: tensorflow, Lines: 29, Source: boosted_trees_test.py


Example 18: testAutoProfiling

  def testAutoProfiling(self):
    ops.reset_default_graph()
    time_dir = os.path.join(test.get_temp_dir(), 'time')
    memory_dir = os.path.join(test.get_temp_dir(), 'memory')
    profile_dir = os.path.join(test.get_temp_dir(), 'dir/dir2/profile')
    # TODO(xpan): Should we create parent directory for them?
    gfile.MkDir(time_dir)
    gfile.MkDir(memory_dir)

    time_opts = (builder(builder.time_and_memory())
                 .with_file_output(os.path.join(time_dir, 'profile'))
                 .select(['micros']).build())
    memory_opts = (builder(builder.time_and_memory())
                   .with_file_output(os.path.join(memory_dir, 'profile'))
                   .select(['bytes']).build())

    time_steps = [2, 3]
    memory_steps = [1, 3]
    dump_steps = [3, 4]

    x = lib.BuildSmallModel()
    with profile_context.ProfileContext(profile_dir,
                                        trace_steps=[1, 2, 3],
                                        dump_steps=[3, 4]) as pctx:
      pctx.add_auto_profiling('scope', time_opts, time_steps)
      pctx.add_auto_profiling('scope', memory_opts, memory_steps)

      self._trainLoop(x, 10, time_dir, time_steps,
                      memory_dir, memory_steps, profile_dir, dump_steps)
Author: andrewharp, Project: tensorflow, Lines: 29, Source: model_analyzer_test.py


Example 19: testCodeViewLeafGraphNode

  def testCodeViewLeafGraphNode(self):
    ops.reset_default_graph()
    opts = (builder(builder.trainable_variables_parameter())
            .with_empty_output()
            .with_accounted_types(['.*'])
            .account_displayed_op_only(False)
            .select(['bytes', 'params', 'float_ops', 'device']).build())

    with session.Session() as sess:
      x = lib.BuildSmallModel()

      sess.run(variables.global_variables_initializer())
      run_meta = config_pb2.RunMetadata()
      _ = sess.run(x,
                   options=config_pb2.RunOptions(
                       trace_level=config_pb2.RunOptions.FULL_TRACE),
                   run_metadata=run_meta)

      tfprof_node = model_analyzer.profile(
          sess.graph, run_meta, cmd='code', options=opts)

      leaf = tfprof_node
      while leaf.children:
        self.assertEqual(0, len(leaf.graph_nodes))
        leaf = leaf.children[0]
      self.assertEqual(1, len(leaf.graph_nodes))
Author: rmcguinness, Project: tensorflow, Lines: 26, Source: model_analyzer_test.py


Example 20: testOOM

  def testOOM(self):
    if not test.is_gpu_available():
      return
    ops.reset_default_graph()
    with ops.device('/device:GPU:0'):
      a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
      b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
      c = a * b

    try:
      with session.Session() as sess:
        sess.run(c, options=config_pb2.RunOptions(
            report_tensor_allocations_upon_oom=True))
    except Exception as e:  # pylint: disable=broad-except
      exception_str = '%s' % e
      # This trace reports allocations for the two random tensors.
      self.assertTrue(
          'OOM when allocating tensor with shape[30000,10000,20000]' in
          exception_str)
      mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
                      exception_str)
      self.assertGreater(float(mat.group(1)), 0.0)
      mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
                      exception_str)
      self.assertGreater(float(mat.group(1)), 0.0)
Author: andrewharp, Project: tensorflow, Lines: 25, Source: model_analyzer_test.py



Note: The tensorflow.python.framework.ops.reset_default_graph examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and redistribution or use should follow the license of the corresponding project. Please do not reproduce this article without permission.

