
Python nn.fused_batch_norm Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.nn.fused_batch_norm. If you have been wondering what exactly fused_batch_norm does, how to call it, or what it looks like in real code, the curated examples below should help.



Below are 10 code examples of the fused_batch_norm function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
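Before diving into the examples, here is a minimal sketch of the two calling modes. fused_batch_norm(x, scale, offset, ...) returns a (y, batch_mean, batch_variance) triple: in training mode the statistics are computed from the batch, while in inference mode precomputed mean and variance must be supplied. The shapes and epsilon below are illustrative assumptions, and the snippet uses the TF1-style graph API that the examples themselves rely on:

import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn

# Illustrative shapes: a batch of 4 NHWC feature maps with 8 channels.
x = array_ops.placeholder(np.float32, shape=[4, 6, 6, 8], name="x")
scale = array_ops.placeholder(np.float32, shape=[8], name="scale")
offset = array_ops.placeholder(np.float32, shape=[8], name="offset")

# Training mode: batch statistics are computed and returned.
y_train, batch_mean, batch_var = nn.fused_batch_norm(
    x, scale, offset, epsilon=0.001, data_format="NHWC", is_training=True)

# Inference mode: precomputed statistics must be passed in explicitly.
y_infer, _, _ = nn.fused_batch_norm(
    x, scale, offset, mean=batch_mean, variance=batch_var,
    epsilon=0.001, data_format="NHWC", is_training=False)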

Example 1: testSplitWithNonConstAxis

  def testSplitWithNonConstAxis(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dim = array_ops.placeholder(dtype='int32')
      split = array_ops.split(conv, 2, axis=dim)
      scale = constant_op.constant(0.1, shape=[32])
      offset = constant_op.constant(0.3, shape=[32])
      bn0 = nn.fused_batch_norm(split[0], scale, offset)
      bn1 = nn.fused_batch_norm(split[1], scale, offset)
      add = bn0[0] + bn1[0]
      output = array_ops.identity(add)

      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={dim: 3})

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)

      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes)
      self._assert_map_nhwc_to_nchw('split-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Developer ID: ChengYuXiang, Project: tensorflow, Lines: 34, Source: layout_optimizer_test.py
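This example leans on helpers defined elsewhere in layout_optimizer_test.py (_two_layer_model, _get_config, and the _assert_* methods), which are not reproduced here. For orientation, _get_config() presumably builds a session config with Grappler's layout optimizer switched on, roughly as in the following sketch; the exact options are an assumption rather than a quote from the test file:

from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2

def _get_config(layout_optimizer=True):
  # Enable (or disable) the layout optimizer, and build a cost model so
  # the test can inspect the nodes of the optimized graph.
  mode = (rewriter_config_pb2.RewriterConfig.ON
          if layout_optimizer else rewriter_config_pb2.RewriterConfig.OFF)
  rewrite_options = rewriter_config_pb2.RewriterConfig(layout_optimizer=mode)
  graph_options = config_pb2.GraphOptions(
      rewrite_options=rewrite_options, build_cost_model=1)
  return config_pb2.ConfigProto(graph_options=graph_options)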


Example 2: _fused_batch_norm_training

def _fused_batch_norm_training():
  return nn.fused_batch_norm(
      inputs,
      gamma,
      beta,
      epsilon=self.epsilon,
      data_format=self._data_format)
Developer ID: piyushjaiswal98, Project: tensorflow, Lines: 7, Source: normalization.py


Example 3: loop_fn

def loop_fn(i):
  with g:
    x1 = array_ops.gather(x, i)
    outputs = nn.fused_batch_norm(
        x1,
        scale,
        offset,
        mean=mean,
        variance=variance,
        epsilon=0.01,
        data_format=data_format,
        is_training=is_training)
    outputs = list(outputs)
    # We only test the first value of outputs when is_training is False.
    # It looks like CPU and GPU have different outputs for batch_mean
    # and batch_variance for this case.
    if not is_training:
      outputs[1] = constant_op.constant(0.)
      outputs[2] = constant_op.constant(0.)
    loss = nn.l2_loss(outputs[0])
  if is_training:
    gradients = g.gradient(loss, [x1, scale, offset])
  else:
    gradients = [constant_op.constant(0.)] * 3
  return outputs + gradients
Developer ID: aritratony, Project: tensorflow, Lines: 25, Source: control_flow_ops_test.py
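This loop_fn follows the calling convention of TensorFlow's pfor (parallel-for) test utilities: it receives a loop index i, gathers the i-th slice of x, and returns a flat list of outputs and gradients, with g being a persistent GradientTape created by the surrounding test. A hypothetical driver, assuming the pfor entry point from tensorflow.python.ops.parallel_for, might look like:

from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_ops

# Vectorize loop_fn across the leading axis of x: semantically this runs
# loop_fn(i) for each i and stacks the per-iteration results.
batch_size = 4  # illustrative; the tests derive this from x's shape
stacked_results = pfor_ops.pfor(loop_fn, batch_size)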


Example 4: testInference

  def testInference(self):
    channel = 3
    x_shape = [2, 2, 6, channel]
    scale_shape = [channel]
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)

    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    data_format = "NHWC"
    with self.test_session() as sess, self.test_scope():
      # To avoid constant folding
      t_val = array_ops.placeholder(np.float32, shape=x_shape, name="x")
      scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
      offset = array_ops.placeholder(np.float32, shape=scale_shape, name="offset")
      epsilon = 0.001
      y_ref, mean_ref, var_ref = self._reference_training(
          x_val, scale_val, offset_val, epsilon, data_format)
      y, mean, variance = nn.fused_batch_norm(
          t_val,
          scale,
          offset,
          mean=mean_ref,
          variance=var_ref,
          epsilon=epsilon,
          data_format=data_format,
          is_training=False)

      y_val, _, _ = sess.run(
          [y, mean,
           variance], {t_val: x_val,
                       scale: scale_val,
                       offset: offset_val})
      self.assertAllClose(y_val, y_ref, atol=1e-3)
Developer ID: SylChan, Project: tensorflow, Lines: 33, Source: fused_batchnorm_test.py
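The _reference_training helper belongs to the test harness and is not shown. Assuming it implements plain batch normalization in NumPy, a minimal NHWC equivalent would be the following (note that in some versions, as in Example 6 below, it also returns a Bessel-corrected variance as a fourth value):

import numpy as np

def _reference_training(x, scale, offset, epsilon, data_format="NHWC"):
  # Plain batch normalization over the N, H, W axes of an NHWC tensor.
  assert data_format == "NHWC"
  mean = x.mean(axis=(0, 1, 2))
  var = x.var(axis=(0, 1, 2))
  y = (x - mean) / np.sqrt(var + epsilon) * scale + offset
  return y, mean, var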


Example 5: testBasic

  def testBasic(self):
    x_shape = [2, 2, 6, 2]
    scale_shape = [2]
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)

    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    mean_val = np.random.random_sample(scale_shape).astype(np.float32)
    var_val = np.random.random_sample(scale_shape).astype(np.float32)
    data_format = "NHWC"
    with self.test_session() as sess, self.test_scope():
      # To avoid constant folding
      t_val = array_ops.placeholder(np.float32, shape=x_shape, name="x")
      scale = array_ops.placeholder(np.float32, shape=[2], name="scale")
      offset = array_ops.placeholder(np.float32, shape=[2], name="offset")
      epsilon = 0.001
      y, mean, var = nn.fused_batch_norm(
          t_val,
          scale,
          offset,
          mean=None,
          variance=None,
          epsilon=epsilon,
          data_format=data_format,
          is_training=True)
      y_val, mean_val, var_val = sess.run(
          [y, mean, var], {t_val: x_val,
                           scale: scale_val,
                           offset: offset_val})
      y_ref, mean_ref, var_ref = self._reference_training(
          x_val, scale_val, offset_val, epsilon, data_format)
      self.assertAllClose(mean_val, mean_ref, atol=1e-3)
      self.assertAllClose(y_val, y_ref, atol=1e-3)
      self.assertAllClose(var_val, var_ref, atol=1e-3)
Developer ID: Dr4KK, Project: tensorflow, Lines: 34, Source: fused_batchnorm_test.py


Example 6: _testLearning

  def _testLearning(self, use_gradient_checker, data_format):
    channel = 3
    x_shape = [2, 2, 6, channel]
    scale_shape = [channel]
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    mean_val = np.random.random_sample(scale_shape).astype(np.float32)
    var_val = np.random.random_sample(scale_shape).astype(np.float32)
    epsilon = 0.001
    data_format_src = "NHWC"
    # When in training mode, fused_batchnorm applies an implicit Bessel's
    # correction. So we have to use the corrected variance here, as well.
    y_ref, mean_ref, _, var_ref_corr = self._reference_training(
        x_val, scale_val, offset_val, epsilon, data_format_src)

    with self.cached_session() as sess, self.test_scope():
      # To avoid constant folding
      x_val_converted = test_utils.ConvertBetweenDataFormats(
          x_val, data_format_src, data_format)
      y_ref_converted = test_utils.ConvertBetweenDataFormats(
          y_ref, data_format_src, data_format)

      t_val = array_ops.placeholder(
          np.float32, shape=x_val_converted.shape, name="x")
      scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
      offset = array_ops.placeholder(
          np.float32, shape=scale_shape, name="offset")
      y, mean, var = nn.fused_batch_norm(
          t_val,
          scale,
          offset,
          mean=None,
          variance=None,
          epsilon=epsilon,
          data_format=data_format,
          is_training=True)
      # Check gradient.
      if use_gradient_checker:
        err = gradient_checker.compute_gradient_error(
            t_val,
            x_val_converted.shape,
            y,
            x_val_converted.shape,
            extra_feed_dict={
                t_val: x_val_converted,
                scale: scale_val,
                offset: offset_val
            })
        self.assertLess(err, 1e-3)

      y_val, mean_val, var_val = sess.run([y, mean, var], {
          t_val: x_val_converted,
          scale: scale_val,
          offset: offset_val
      })
      self.assertAllClose(mean_val, mean_ref, atol=1e-3)
      self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
      self.assertAllClose(var_val, var_ref_corr, atol=1e-3)
Developer ID: Albert-Z-Guo, Project: tensorflow, Lines: 59, Source: fused_batchnorm_test.py
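The comment about Bessel's correction refers to the batch variance that fused_batch_norm returns in training mode: it is rescaled by n / (n - 1), where n is the number of elements reduced per channel (batch * height * width for NHWC data). A NumPy sketch of the relationship the reference helper must reproduce:

import numpy as np

x_val = np.random.random_sample([2, 2, 6, 3]).astype(np.float32)

# Per-channel statistics over the N, H, W axes of an NHWC tensor.
n = x_val.shape[0] * x_val.shape[1] * x_val.shape[2]  # 2 * 2 * 6 = 24
var_biased = x_val.var(axis=(0, 1, 2))                # divides by n
var_corrected = var_biased * n / (n - 1)              # Bessel's correction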


Example 7: _model_with_second_port

def _model_with_second_port():
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([2, 5, 5, 4], seed=0)
  scale = constant_op.constant(0.1, shape=[4])
  offset = constant_op.constant(0.3, shape=[4])
  y, mean, _ = nn.fused_batch_norm(x, scale, offset)
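  # Consume the op's second output port (the batch mean) as well as y.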
  mul = math_ops.add(y, mean)
  output = array_ops.identity(mul)
  return output
Developer ID: ChengYuXiang, Project: tensorflow, Lines: 9, Source: layout_optimizer_test.py


Example 8: _fused_batch_norm_inference

def _fused_batch_norm_inference():
  return nn.fused_batch_norm(
      inputs,
      gamma,
      beta,
      mean=self.moving_mean,
      variance=self.moving_variance,
      epsilon=self.epsilon,
      is_training=False,
      data_format=self._data_format)
Developer ID: piyushjaiswal98, Project: tensorflow, Lines: 10, Source: normalization.py
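Examples 2 and 8 are the training and inference halves of the fused path in the layer's batch-normalization implementation: at call time one of them is selected according to the training flag, roughly as below. The smart_cond call is reconstructed from memory of normalization.py rather than quoted, so treat it as a sketch:

from tensorflow.python.keras.utils import tf_utils

# Pick the branch based on the (possibly symbolic) training value; both
# branches return an (output, mean, variance) triple.
output, mean, variance = tf_utils.smart_cond(
    training, _fused_batch_norm_training, _fused_batch_norm_inference)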


Example 9: _testLearning

  def _testLearning(self, use_gradient_checker):
    channel = 3
    x_shape = [2, 2, 6, channel]
    scale_shape = [channel]
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)

    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    mean_val = np.random.random_sample(scale_shape).astype(np.float32)
    var_val = np.random.random_sample(scale_shape).astype(np.float32)
    data_format = "NHWC"
    with self.test_session() as sess, self.test_scope():
      # To avoid constant folding
      t_val = array_ops.placeholder(np.float32, shape=x_shape, name="x")
      scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
      offset = array_ops.placeholder(
          np.float32, shape=scale_shape, name="offset")
      epsilon = 0.001
      y, mean, var = nn.fused_batch_norm(
          t_val,
          scale,
          offset,
          mean=None,
          variance=None,
          epsilon=epsilon,
          data_format=data_format,
          is_training=True)
      # Check gradient.
      if use_gradient_checker:
        err = gradient_checker.compute_gradient_error(
            t_val,
            x_shape,
            y,
            x_shape,
            extra_feed_dict={
                t_val: x_val,
                scale: scale_val,
                offset: offset_val
            })
        self.assertLess(err, 1e-3)

      y_val, mean_val, var_val = sess.run(
          [y, mean, var], {t_val: x_val,
                           scale: scale_val,
                           offset: offset_val})
      y_ref, mean_ref, var_ref = self._reference_training(
          x_val, scale_val, offset_val, epsilon, data_format)
      self.assertAllClose(mean_val, mean_ref, atol=1e-3)
      self.assertAllClose(y_val, y_ref, atol=1e-3)
      self.assertAllClose(var_val, var_ref, atol=1e-3)
Developer ID: abidrahmank, Project: tensorflow, Lines: 50, Source: fused_batchnorm_test.py


Example 10: _testLearning

  def _testLearning(self, use_gradient_checker, data_format):
    channel = 3
    x_shape = [2, 2, 6, channel]
    scale_shape = [channel]
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    mean_val = np.random.random_sample(scale_shape).astype(np.float32)
    var_val = np.random.random_sample(scale_shape).astype(np.float32)
    epsilon = 0.001
    data_format_src = "NHWC"
    y_ref, mean_ref, var_ref = self._reference_training(
        x_val, scale_val, offset_val, epsilon, data_format_src)

    # TODO(b/110530713): Support data format HWCN on GPU
    if self.device == "XLA_GPU" and data_format == "HWCN":
      self.skipTest("GPU does not support data format HWCN.")

    with self.test_session() as sess, self.test_scope():
      # To avoid constant folding
      x_val_converted = test_utils.ConvertBetweenDataFormats(
          x_val, data_format_src, data_format)
      y_ref_converted = test_utils.ConvertBetweenDataFormats(
          y_ref, data_format_src, data_format)

      t_val = array_ops.placeholder(
          np.float32, shape=x_val_converted.shape, name="x")
      scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
      offset = array_ops.placeholder(
          np.float32, shape=scale_shape, name="offset")
      y, mean, var = nn.fused_batch_norm(
          t_val,
          scale,
          offset,
          mean=None,
          variance=None,
          epsilon=epsilon,
          data_format=data_format,
          is_training=True)
      # Check gradient.
      if use_gradient_checker:
        err = gradient_checker.compute_gradient_error(
            t_val,
            x_val_converted.shape,
            y,
            x_val_converted.shape,
            extra_feed_dict={
                t_val: x_val_converted,
                scale: scale_val,
                offset: offset_val
            })
        self.assertLess(err, 1e-3)

      y_val, mean_val, var_val = sess.run([y, mean, var], {
          t_val: x_val_converted,
          scale: scale_val,
          offset: offset_val
      })
      self.assertAllClose(mean_val, mean_ref, atol=1e-3)
      self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
      self.assertAllClose(var_val, var_ref, atol=1e-3)
Developer ID: Huoxubeiyin, Project: tensorflow, Lines: 61, Source: fused_batchnorm_test.py



Note: The tensorflow.python.ops.nn.fused_batch_norm examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce without permission.

