Python context.executing_eagerly Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.eager.context.executing_eagerly. If you have been wondering how executing_eagerly is used in practice, the curated code examples below should help.



The following presents 20 code examples of the executing_eagerly function, ordered by popularity.
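Before the examples, a minimal sketch (not taken from the examples below, and assuming TensorFlow 2.x, where eager execution is on by default): the public wrapper tf.executing_eagerly() exposes the same check as tensorflow.python.eager.context.executing_eagerly, returning True for ordinary eager code and False while code is being traced into a graph, for instance inside a tf.function.

import tensorflow as tf

# Top-level code runs eagerly in TF 2.x, so this prints True.
print(tf.executing_eagerly())

@tf.function
def traced(x):
  # While this body is traced into a graph, executing_eagerly() reports False.
  print("eager inside tf.function:", tf.executing_eagerly())
  return x * 2

traced(tf.constant(3.0))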

Example 1: _test_helper

  def _test_helper(self,
                   inputs,
                   expected_outputs,
                   init_loss_scale=1,
                   incr_every_n_step=2,
                   decr_every_n_nan_or_inf=2):
    ratio = 2
    lsm = lsm_lib.ExponentialUpdateLossScaleManager(
        init_loss_scale=init_loss_scale,
        incr_every_n_steps=incr_every_n_step,
        decr_every_n_nan_or_inf=decr_every_n_nan_or_inf,
        incr_ratio=ratio,
        decr_ratio=1. / ratio)
    itr = _GetExampleIter(inputs)
    update_fn = lambda: lsm.update_loss_scale(itr.get_next())

    self.evaluate(variables.global_variables_initializer())
    actual_outputs = []

    if not context.executing_eagerly():
      update_op = update_fn()
    for _ in range(len(inputs)):
      if context.executing_eagerly():
        update_fn()
      else:
        self.evaluate(update_op)
      actual_outputs.append(self.evaluate(lsm.get_loss_scale()))
    self.assertEqual(actual_outputs, expected_outputs)
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 28, Source: loss_scale_manager_test.py
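Most of the test examples that follow reuse the same structure as Example 1: in graph mode the update op is built once and then evaluated repeatedly, while in eager mode the Python callable is simply invoked on every step. Below is a distilled sketch of that pattern; run_updates is an illustrative helper written for this article, not a TensorFlow API.

from tensorflow.python.eager import context

def run_updates(test_case, update_fn, num_steps):
  if not context.executing_eagerly():
    update_op = update_fn()            # graph mode: build the op once
  for _ in range(num_steps):
    if context.executing_eagerly():
      update_fn()                      # eager mode: re-run the Python callable
    else:
      test_case.evaluate(update_op)    # graph mode: evaluate the prebuilt op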


Example 2: _test_helper

  def _test_helper(self,
                   inputs,
                   expected_outputs,
                   initial_loss_scale=1.,
                   increment_period=2,
                   multiplier=2):
    loss_scale = loss_scale_module.DynamicLossScale(
        initial_loss_scale=initial_loss_scale,
        increment_period=increment_period,
        multiplier=multiplier)
    itr = _get_example_iter(inputs)

    def update():
      is_finite = itr.get_next()
      grad = self._get_tensor(is_finite)
      update_op, should_apply_gradients = loss_scale.update([grad])
      assert_op = check_ops.assert_equal(should_apply_gradients, is_finite)
      if context.executing_eagerly():
        return
      with ops.control_dependencies([assert_op]):
        return array_ops.identity(update_op)

    actual_outputs = []

    if not context.executing_eagerly():
      update_op = update()
      self.evaluate(variables.global_variables_initializer())
    for _ in range(len(inputs)):
      if context.executing_eagerly():
        update()
      else:
        self.evaluate(update_op)
      actual_outputs.append(self.evaluate(loss_scale()))
    self.assertEqual(actual_outputs, expected_outputs)
Developer: aritratony, Project: tensorflow, Lines: 34, Source: loss_scale_test.py


Example 3: _test_summary_for_replica_zero_only

  def _test_summary_for_replica_zero_only(self, d):
    logdir = tempfile.mkdtemp()

    def run_fn():
      """Function executed for each replica."""
      with summary_writer.as_default():
        replica_id = ds_context.get_replica_context().replica_id_in_sync_group
        return summary_ops.write("a", replica_id)

    with self.cached_session() as sess, d.scope(), \
        summary_ops.always_record_summaries():
      # We need global_step because summary writing op *always* has global_step
      # as input, even when we always record summary or never record summary.
      global_step = training_util.get_or_create_global_step()
      if not context.executing_eagerly():
        # When executing eagerly, variables are initialized immediately after
        # creation, and their initializers will be None.
        global_step.initializer.run()
      summary_ops.set_step(0)
      summary_writer = summary_ops.create_file_writer(logdir)
      output = d.extended.call_for_each_replica(run_fn)
      unwrapped = d.unwrap(output)
      if not context.executing_eagerly():
        sess.run(summary_writer.init())
        sess.run(unwrapped)
        sess.run(summary_writer.close())

      events = _events_from_logdir(self, logdir)
      # There will be 2 entries: 1 summary file header entry, and 1 entry
      # written by replica 0.
      self.assertLen(events, 2)
      self.assertEqual(events[1].summary.value[0].tag, "a")
      self.assertEqual(events[1].summary.value[0].simple_value, 0.0)
Developer: adit-chandra, Project: tensorflow, Lines: 33, Source: strategy_test_lib.py


Example 4: testRequestNotToCompile

  def testRequestNotToCompile(self):
    with self.test_scope():
      def f(x):
        with ops.device('device:CPU:0'):
          y = 2.0 * x
        return x, y

      wholly_compiled_f = def_function.function(f)
      op_by_op_f = function.defun_with_attributes(
          f, attributes={'_XlaCompile': False})

      x = constant_op.constant([0.0, 2.0], name='data')

      # When function is wholly compiled, all outputs will be on the
      # device on which it is run.
      r_x, r_y = wholly_compiled_f(x)
      self.assertAllEqual([0.0, 2.0], r_x)
      self.assertAllEqual([0.0, 4.0], r_y)
      if context.executing_eagerly():
        # backing_device is only available for eager tensors.
        self.assertRegexpMatches(r_x.backing_device, self.device)
        self.assertRegexpMatches(r_y.backing_device, self.device)

      # When function is executed op-by-op, requested devices will be
      # respected.
      r_x, r_y = op_by_op_f(x)
      self.assertAllEqual([0.0, 2.0], r_x)
      self.assertAllEqual([0.0, 4.0], r_y)
      if context.executing_eagerly():
        # backing_device is only available for eager tensors.
        self.assertRegexpMatches(r_x.backing_device, self.device)
        self.assertRegexpMatches(r_y.backing_device, 'device:CPU:0')
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 32, Source: eager_test.py


Example 5: testLoadFromNameBasedSaver

 def testLoadFromNameBasedSaver(self):
   """Save a name-based checkpoint, load it using the object-based API."""
   with test_util.device(use_gpu=True):
     save_path = self._write_name_based_checkpoint()
     root = self._initialized_model()
     self._set_sentinels(root)
     with self.assertRaises(AssertionError):
       self._check_sentinels(root)
     object_saver = util.TrackableSaver(graph_view.ObjectGraphView(root))
     self._set_sentinels(root)
     status = object_saver.restore(save_path)
     if context.executing_eagerly():
       self._check_sentinels(root)
     if context.executing_eagerly():
       with self.assertRaisesRegexp(AssertionError, "OBJECT_CONFIG_JSON"):
         status.assert_consumed()
     else:
       # When graph building, we haven't read any keys, so we don't know
       # whether the restore will be complete.
       with self.assertRaisesRegexp(AssertionError, "not restored"):
         status.assert_consumed()
     status.run_restore_ops()
     self._check_sentinels(root)
     self._set_sentinels(root)
     status = object_saver.restore(save_path)
     status.initialize_or_restore()
     self._check_sentinels(root)
Developer: jackd, Project: tensorflow, Lines: 27, Source: checkpointable_utils_test.py


Example 6: testTrainNetwork

  def testTrainNetwork(self, distribution, optimizer_fn,
                       use_callable_loss=True):
    with distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss)

      ds = distribution.distribute_dataset(dataset_fn)
      if context.executing_eagerly():
        iterator = ds.make_one_shot_iterator()
      else:
        iterator = ds.make_initializable_iterator()

      def run_step():
        return control_flow_ops.group(distribution.unwrap(
            distribution.call_for_each_tower(
                model_fn, iterator.get_next(), run_concurrently=layer.built)))

      if not context.executing_eagerly():
        with self.cached_session() as sess:
          sess.run(iterator.initializer)
          run_step = sess.make_callable(run_step())
        self.evaluate(variables.global_variables_initializer())

      weights, biases = [], []
      for _ in range(10):
        run_step()

        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)
Developer: baojianzhou, Project: tensorflow, Lines: 33, Source: optimizer_v2_test.py


Example 7: add_variable

 def add_variable(self, name, shape=None, dtype=None, initializer=None):
   """***Only for use by descendants of Metric***."""
   if self._built:
     raise RuntimeError("Can't call add_variable() except in build().")
   if context.executing_eagerly():
     collections = None
   else:
     if self._use_global_variables:
       collections = [ops.GraphKeys.GLOBAL_VARIABLES]
     else:
       collections = [ops.GraphKeys.LOCAL_VARIABLES]
     collections += [ops.GraphKeys.METRIC_VARIABLES]
   # Variables are Checkpointable dependencies of Metrics regardless of the
   # global/local distinction. Users can avoid saving variables by not adding a
   # dependency on the Metric.
   v = self._add_variable_with_custom_getter(
       name=name,
       shape=shape,
       dtype=dtype,
       initializer=initializer,
       trainable=False,
       collections=collections,
       use_resource=True,
       getter=variable_scope.get_variable,
       # Raise duplicate variable exceptions from get_variable rather than
       # Checkpointable.
       overwrite=True)
   self._vars.append(v)
   if context.executing_eagerly():
     self._initial_values[v] = v.value()
   return v
Developer: Jackiefan, Project: tensorflow, Lines: 31, Source: metrics_impl.py


Example 8: testSaveRestoreMultipleIterator

 def testSaveRestoreMultipleIterator(self):
   checkpoint_directory = self.get_temp_dir()
   checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
   dataset = dataset_ops.Dataset.from_tensor_slices(
       [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
   dataset = dataset.map(math_ops.square).batch(2)
   iterator_1 = dataset.make_one_shot_iterator()
   get_next_1 = iterator_1.get_next if context.executing_eagerly(
   ) else functools.partial(self.evaluate, iterator_1.get_next())
   iterator_2 = dataset.make_one_shot_iterator()
   get_next_2 = iterator_2.get_next if context.executing_eagerly(
   ) else functools.partial(self.evaluate, iterator_2.get_next())
   dataset_2 = dataset_ops.Dataset.range(10)
   iterator_3 = dataset_2.make_one_shot_iterator()
   get_next_3 = iterator_3.get_next if context.executing_eagerly(
   ) else functools.partial(self.evaluate, iterator_3.get_next())
   checkpoint = checkpointable_utils.Checkpoint(
       iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3)
   self.assertAllEqual([1, 4], get_next_1())
   self.assertAllEqual(0, get_next_3())
   self.assertAllEqual(1, get_next_3())
   self.assertAllEqual(2, get_next_3())
   save_path = checkpoint.save(checkpoint_prefix)
   self.assertAllEqual([1, 4], get_next_2())
   self.assertAllEqual([9, 16], get_next_2())
   self.assertAllEqual(3, get_next_3())
   checkpoint.restore(save_path).run_restore_ops()
   self.assertAllEqual([9, 16], get_next_1())
   self.assertAllEqual([1, 4], get_next_2())
   self.assertAllEqual(3, get_next_3())
Developer: JonathanRaiman, Project: tensorflow, Lines: 30, Source: iterator_ops_test.py


Example 9: testDeferredSlotRestoration

  def testDeferredSlotRestoration(self):
    checkpoint_directory = self.get_temp_dir()

    root = trackable_utils.Checkpoint()
    root.var = trackable_utils.add_variable(
        root, name="var", initializer=0.)
    optimizer = adam.AdamOptimizer(0.1)
    if context.executing_eagerly():
      optimizer.minimize(root.var.read_value)
    else:
      train_op = optimizer.minimize(root.var)
      # Note that `optimizer` has not been added as a dependency of
      # `root`. Create a one-off grouping so that slot variables for `root.var`
      # get initialized too.
      self.evaluate(trackable_utils.gather_initializers(
          trackable_utils.Checkpoint(root=root, optimizer=optimizer)))
      self.evaluate(train_op)
    self.evaluate(state_ops.assign(root.var, 12.))
    no_slots_path = root.save(os.path.join(checkpoint_directory, "no_slots"))
    root.optimizer = optimizer
    self.evaluate(state_ops.assign(root.var, 13.))
    self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
                                   14.))
    slots_path = root.save(os.path.join(checkpoint_directory, "with_slots"))
    new_root = trackable_utils.Checkpoint()
    # Load the slot-containing checkpoint (deferred), then immediately overwrite
    # the non-slot variable (also deferred).
    slot_status = new_root.restore(slots_path)
    no_slot_status = new_root.restore(no_slots_path)
    with self.assertRaises(AssertionError):
      no_slot_status.assert_consumed()
    new_root.var = trackable_utils.add_variable(
        new_root, name="var", shape=[])
    no_slot_status.assert_consumed()
    no_slot_status.run_restore_ops()
    self.assertEqual(12., self.evaluate(new_root.var))
    new_root.optimizer = adam.AdamOptimizer(0.1)
    slot_status.assert_existing_objects_matched()
    with self.assertRaisesRegexp(AssertionError, "beta1_power"):
      slot_status.assert_consumed()
    self.assertEqual(12., self.evaluate(new_root.var))
    if context.executing_eagerly():
      # Slot variables are only created with restoring initializers when
      # executing eagerly.
      self.assertEqual(14., self.evaluate(
          new_root.optimizer.get_slot(name="m", var=new_root.var)))
    else:
      self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
                    None)
    if context.executing_eagerly():
      new_root.optimizer.minimize(new_root.var.read_value)
    else:
      train_op = new_root.optimizer.minimize(new_root.var)
      # The slot variable now exists; restore() didn't create it, but we should
      # now have a restore op for it.
      slot_status.run_restore_ops()
      self.assertEqual(14., self.evaluate(
          new_root.optimizer.get_slot(name="m", var=new_root.var)))
      self.evaluate(train_op)
    slot_status.assert_consumed()
Developer: adit-chandra, Project: tensorflow, Lines: 60, Source: util_with_v1_optimizers_test.py


Example 10: test_dropout_mask_reuse

  def test_dropout_mask_reuse(self):
    # The layer is created with recurrent_initializer = zero, so that the
    # recurrent state won't affect the output. By doing this, we can verify
    # the output and see whether the same mask is applied for each timestep.
    rnn = keras.layers.SimpleRNN(3,
                                 dropout=0.5,
                                 kernel_initializer='ones',
                                 recurrent_initializer='zeros',
                                 return_sequences=True,
                                 unroll=True)

    inputs = constant_op.constant(1.0, shape=(6, 2, 5))
    out = rnn(inputs, training=True)
    if not context.executing_eagerly():
      self.evaluate(variables_lib.global_variables_initializer())
    batch_1 = self.evaluate(out)
    batch_1_t0, batch_1_t1 = batch_1[:, 0, :], batch_1[:, 1, :]
    self.assertAllClose(batch_1_t0, batch_1_t1)

    # This simulates the layer being called with multiple batches in eager mode.
    if context.executing_eagerly():
      out2 = rnn(inputs, training=True)
    else:
      out2 = out
    batch_2 = self.evaluate(out2)
    batch_2_t0, batch_2_t1 = batch_2[:, 0, :], batch_2[:, 1, :]
    self.assertAllClose(batch_2_t0, batch_2_t1)

    # Also validate that a different dropout mask is used between batches.
    self.assertNotAllClose(batch_1_t0, batch_2_t0)
    self.assertNotAllClose(batch_1_t1, batch_2_t1)
Developer: kylin9872, Project: tensorflow, Lines: 31, Source: recurrent_test.py


Example 11: testDynamicShapeVariableWithCallableInit

  def testDynamicShapeVariableWithCallableInit(self):
    var0 = variable_scope.get_variable("var0",
                                       initializer=constant_op.constant(1.),
                                       validate_shape=False)
    self.assertFalse(var0.shape.is_fully_defined())

    grads0 = constant_op.constant(0.1, dtype=dtypes.float32)
    learning_rate = lambda: 3.0

    ada_opt = adagrad.AdagradOptimizer(
        learning_rate, initial_accumulator_value=0.1, use_locking=True)

    if not context.executing_eagerly():
      ada_update = ada_opt.apply_gradients(
          zip([grads0], [var0]))
      self.evaluate(variables.global_variables_initializer())

    # Fetch params to validate initial values
    v0_val = self.evaluate([var0])
    self.assertAllClose([1.0], v0_val)

    # Run 3 steps of adagrad
    for _ in range(3):
      if not context.executing_eagerly():
        self.evaluate(ada_update)
      else:
        ada_opt.apply_gradients(zip([grads0], [var0]))

    # Validate updated params
    v0_val = self.evaluate([var0])
    self.assertAllCloseAccordingToType(
        np.array([-1.6026098728179932]), v0_val)
Developer: Wajih-O, Project: tensorflow, Lines: 32, Source: adagrad_test.py


Example 12: testCriticalSectionInParallelDoesntDeadlockOnError

  def testCriticalSectionInParallelDoesntDeadlockOnError(self):
    # No eager mode execution of this test because eager does not
    # run fn() in parallel, which is where the deadlock could
    # potentially occur (in graph mode).
    cs = critical_section_ops.CriticalSection(shared_name="cs")
    v = resource_variable_ops.ResourceVariable(0.0, name="v")

    def fn(i):
      error = control_flow_ops.Assert((i % 2) == 1, ["Error"])
      with ops.control_dependencies([error]):
        return v.read_value()

    num_concurrent = 2

    @def_function.function(autograph=False)
    def run_concurrently():
      return [cs.execute(lambda: fn(i)) for i in range(num_concurrent)]

    if not context.executing_eagerly():
      run_concurrently = run_concurrently()

    self.evaluate(v.initializer)
    for _ in range(100):
      with self.assertRaisesOpError("Error"):
        if context.executing_eagerly():
          run_concurrently()
        else:
          self.evaluate(run_concurrently)
Developer: aritratony, Project: tensorflow, Lines: 28, Source: critical_section_test.py


Example 13: testBasicWithLearningRateDecay

  def testBasicWithLearningRateDecay(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        learning_rate = 3.0
        decay = 0.5
        sgd = gradient_descent.SGD(learning_rate=learning_rate, decay=decay)
        if not context.executing_eagerly():
          sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 2 steps of sgd
        if not context.executing_eagerly():
          self.evaluate(sgd_op)
        else:
          sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        # Validate updated params
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           self.evaluate(var0))
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           self.evaluate(var1))

        if not context.executing_eagerly():
          self.evaluate(sgd_op)
        else:
          sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        # Validate updated params
        self.assertAllCloseAccordingToType(
            [1.0 - 3.0 * 0.1 - 2.0 * 0.1, 2.0 - 3.0 * 0.1 - 2.0 * 0.1],
            self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            [3.0 - 3.0 * 0.01 - 2.0 * 0.01, 4.0 - 3.0 * 0.01 - 2.0 * 0.01],
            self.evaluate(var1))
Developer: Wajih-O, Project: tensorflow, Lines: 35, Source: gradient_descent_test.py


Example 14: _test_basic_sgd_with_learning_rate_decay

  def _test_basic_sgd_with_learning_rate_decay(self, sgd, dtype):
    var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
    grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
    grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
    if not context.executing_eagerly():
      sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
    self.evaluate(variables.global_variables_initializer())
    # Run 2 steps of sgd
    if not context.executing_eagerly():
      self.evaluate(sgd_op)
    else:
      sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
    # Validate updated params
    self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                       self.evaluate(var0))
    self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                       self.evaluate(var1))

    if not context.executing_eagerly():
      self.evaluate(sgd_op)
    else:
      sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
    # Validate updated params
    self.assertAllCloseAccordingToType(
        [1.0 - 3.0 * 0.1 - 2.0 * 0.1, 2.0 - 3.0 * 0.1 - 2.0 * 0.1],
        self.evaluate(var0))
    self.assertAllCloseAccordingToType(
        [3.0 - 3.0 * 0.01 - 2.0 * 0.01, 4.0 - 3.0 * 0.01 - 2.0 * 0.01],
        self.evaluate(var1))
Developer: kylin9872, Project: tensorflow, Lines: 30, Source: gradient_descent_test.py


Example 15: test_apply_gradients

  def test_apply_gradients(self):

    x = variable_scope.get_variable("x", initializer=1., dtype=dtypes.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices([np.nan, np.inf, 0.1])
    itr = dataset.make_one_shot_iterator()

    lr = 1
    opt = gd.GradientDescentOptimizer(lr)
    lsm = lsm_lib.FixedLossScaleManager(1.e4)
    opt = lso.LossScaleOptimizer(opt, lsm)
    train_fn = lambda: opt.apply_gradients([(itr.get_next(), x)])
    if not context.executing_eagerly():
      train_op = train_fn()

    expected_output = [1, 1, 1 - 0.1]
    actual_output = []

    self.evaluate(variables.global_variables_initializer())
    for _ in range(3):
      # Updates with NaN or Inf gradients are not applied.
      if context.executing_eagerly():
        train_fn()
      else:
        self.evaluate(train_op)
      actual_output.append(self.evaluate(x))
    self.assertAllClose(expected_output, actual_output)
Developer: BhaskarNallani, Project: tensorflow, Lines: 26, Source: loss_scale_optimizer_test.py


Example 16: testVariablesAcrossGraphs

  def testVariablesAcrossGraphs(self):
    optimizer = momentum_lib.MomentumOptimizer(0.01, 0.5)
    with ops.Graph().as_default():
      var0 = resource_variable_ops.ResourceVariable(
          [1.0, 2.0], dtype=dtypes.float32, name="var0")
      var1 = resource_variable_ops.ResourceVariable(
          [3.0, 4.0], dtype=dtypes.float32, name="var1")
      if context.executing_eagerly():
        loss = lambda: math_ops.reduce_sum(var0 + var1)
      else:
        loss = math_ops.reduce_sum(var0 + var1)
      optimizer.minimize(loss)
      optimizer_variables = optimizer.variables()
      self.assertStartsWith(optimizer_variables[0].name, "var0")
      self.assertStartsWith(optimizer_variables[1].name, "var1")
      self.assertEquals(2, len(optimizer_variables))

    with ops.Graph().as_default():
      var2 = resource_variable_ops.ResourceVariable(
          [1.0, 2.0], dtype=dtypes.float32, name="var2")
      var3 = resource_variable_ops.ResourceVariable(
          [3.0, 4.0], dtype=dtypes.float32, name="var3")
      if context.executing_eagerly():
        loss = lambda: math_ops.reduce_sum(var2 + var3)
      else:
        loss = math_ops.reduce_sum(var2 + var3)
      optimizer.minimize(loss)
      optimizer_variables = optimizer.variables()
      self.assertStartsWith(optimizer_variables[0].name, "var2")
      self.assertStartsWith(optimizer_variables[1].name, "var3")
      self.assertEquals(2, len(optimizer_variables))
Developer: syed-ahmed, Project: tensorflow, Lines: 31, Source: momentum_test.py
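Example 16 also illustrates a related convention: v1-style optimizers such as MomentumOptimizer expect a no-argument callable loss when executing eagerly and a loss tensor when building a graph. Below is a hedged sketch of that branch in isolation; make_loss is an illustrative helper written for this article, not a TensorFlow API.

from tensorflow.python.eager import context
from tensorflow.python.ops import math_ops

def make_loss(var0, var1):
  if context.executing_eagerly():
    # Eager mode: minimize() re-evaluates a callable loss on each call.
    return lambda: math_ops.reduce_sum(var0 + var1)
  # Graph mode: minimize() takes a loss tensor that is built once.
  return math_ops.reduce_sum(var0 + var1)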


Example 17: testAddWeight

  def testAddWeight(self):
    layer = base_layers.Layer(name='my_layer')

    # Test basic variable creation.
    variable = layer.add_variable(
        'my_var', [2, 2], initializer=init_ops.zeros_initializer())
    self.assertEqual(variable.name, 'my_layer/my_var:0')
    self.assertEqual(layer.variables, [variable])
    self.assertEqual(layer.trainable_variables, [variable])
    self.assertEqual(layer.non_trainable_variables, [])
    if not context.executing_eagerly():
      self.assertEqual(
          layer.variables,
          ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))

    # Test non-trainable variable creation.
    # layer.add_variable should work even outside `build` and `call`.
    variable_2 = layer.add_variable(
        'non_trainable_var', [2, 2],
        initializer=init_ops.zeros_initializer(),
        trainable=False)
    self.assertEqual(layer.variables, [variable, variable_2])
    self.assertEqual(layer.trainable_variables, [variable])
    self.assertEqual(layer.non_trainable_variables, [variable_2])
    if not context.executing_eagerly():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)

      # regularizers only supported in GRAPH mode.
      regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
      variable = layer.add_variable(
          'reg_var', [2, 2],
          initializer=init_ops.zeros_initializer(),
          regularizer=regularizer)
      self.assertEqual(len(layer.losses), 1)
Developer: Huoxubeiyin, Project: tensorflow, Lines: 35, Source: base_test.py


Example 18: compress

  def compress(self, inputs):
    """Compress inputs and store their binary representations into strings.

    Args:
      inputs: `Tensor` with values to be compressed.

    Returns:
      String `Tensor` vector containing the compressed representation of each
      batch element of `inputs`.
    """
    with ops.name_scope(self._name_scope()):
      inputs = ops.convert_to_tensor(inputs)
      if not self.built:
        # Check input assumptions set before layer building, e.g. input rank.
        self._assert_input_compatibility(inputs)
        if self.dtype is None:
          self._dtype = inputs.dtype.base_dtype.name
        self.build(inputs.shape)

      # Check input assumptions set after layer building, e.g. input shape.
      if not context.executing_eagerly():
        self._assert_input_compatibility(inputs)

      ndim = self.input_spec.ndim
      channel_axis = self._channel_axis(ndim)
      # Tuple of slices for expanding dimensions of tensors below.
      slices = ndim * [None] + [slice(None)]
      slices[channel_axis] = slice(None)
      slices = tuple(slices)

      # Expand dimensions of CDF to input dimensions, keeping the channels along
      # the right dimension.
      cdf = self._quantized_cdf[slices[1:]]
      num_levels = array_ops.shape(cdf)[-1] - 1

      # Bring inputs to the right range by centering the range on the medians.
      half = constant_op.constant(.5, dtype=self.dtype)
      medians = array_ops.squeeze(self._medians, [1, 2])
      offsets = (math_ops.cast(num_levels // 2, self.dtype) + half) - medians
      # Expand offsets to input dimensions and add to inputs.
      values = inputs + offsets[slices[:-1]]

      # Clip to range and cast to integers. Because we have added .5 above, and
      # all values are positive, the cast effectively implements rounding.
      values = math_ops.maximum(values, half)
      values = math_ops.minimum(
          values, math_ops.cast(num_levels, self.dtype) - half)
      values = math_ops.cast(values, dtypes.int16)

      def loop_body(tensor):
        return coder_ops.range_encode(
            tensor, cdf, precision=self.range_coder_precision)
      strings = functional_ops.map_fn(
          loop_body, values, dtype=dtypes.string, back_prop=False)

      if not context.executing_eagerly():
        strings.set_shape(inputs.shape[:1])

      return strings
Developer: ChristinaEricka, Project: tensorflow, Lines: 59, Source: entropybottleneck.py


Example 19: doTestBasic

  def doTestBasic(self, use_resource=False):
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.test_session(graph=ops.Graph()):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(
              var0_np, name="var0_%d" % i)
          var1 = resource_variable_ops.ResourceVariable(
              var1_np, name="var1_%d" % i)
        else:
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)

        opt = adamax.AdaMaxOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        opt_variables = opt.variables()
        beta1_power = opt._get_beta_accumulators()
        self.assertTrue(beta1_power is not None)
        self.assertIn(beta1_power, opt_variables)

        if not context.executing_eagerly():
          with ops.Graph().as_default():
            # Shouldn't return non-slot variables from other graphs.
            self.assertEqual(0, len(opt.variables()))

          self.evaluate(variables.global_variables_initializer())
          # Fetch params to validate initial values
          self.assertAllClose([1.0, 2.0], self.evaluate(var0))
          self.assertAllClose([3.0, 4.0], self.evaluate(var1))

        beta1_power = opt._get_beta_accumulators()

        # Run 3 steps of AdaMax
        for t in range(1, 4):
          if not context.executing_eagerly():
            self.evaluate(update)
          elif t > 1:
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta1_power))

          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
          if use_resource:
            self.assertEqual("var0_%d/AdaMax:0" % (i,),
                             opt.get_slot(var=var0, name="m").name)
Developer: jinxin0924, Project: tensorflow, Lines: 59, Source: adamax_test.py


Example 20: tearDown

  def tearDown(self):
    # Test that eager execution can be disabled.
    ops.disable_eager_execution()
    self.assertFalse(context.executing_eagerly())

    # Calling disable eager execution a second time should not cause an error.
    ops.disable_eager_execution()
    self.assertFalse(context.executing_eagerly())
Developer: adit-chandra, Project: tensorflow, Lines: 8, Source: ops_enable_eager_test.py
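Example 20 toggles the process-wide eager setting through the internal ops module. The same behaviour is visible through the public API, as in this minimal sketch (assuming TensorFlow 2.x; disable_eager_execution must be called at program startup, before any tensors or ops are created).

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # call before creating any ops or tensors
print(tf.executing_eagerly())           # False: code now builds graphs by default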



Note: The tensorflow.python.eager.context.executing_eagerly examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.

