Python nn_ops.relu6 Function Code Examples


This article compiles typical usage examples of the Python function tensorflow.python.ops.nn_ops.relu6. If you have been wondering what relu6 does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following presents 18 code examples of the relu6 function, ordered by default from most to least popular.
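
Before diving in, it helps to know what the function computes: relu6(x) = min(max(x, 0), 6), i.e. an element-wise ReLU whose output is additionally capped at 6. Below is a minimal standalone sketch in the same TF 1.x graph style the examples use; the input values are an illustrative choice of ours, not taken from any of the tests:

import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn_ops

graph = ops.Graph()
with graph.as_default():
  # relu6 clamps each element to the range [0, 6].
  x = np.array([-2.0, 0.5, 3.0, 7.0], dtype=np.float32)
  y = nn_ops.relu6(x)

with session.Session(graph=graph) as sess:
  print(sess.run(y))  # -> [0.  0.5 3.  6.]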

Example 1: _TestInsertQuantOpForAddAfterConv2d

  def _TestInsertQuantOpForAddAfterConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height // 2, width // 2, 32))
      conv = conv2d(input1, 32, [5, 5], stride=2, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=None, scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    # Scan through all FakeQuant operations, ensuring that the activation
    # isn't in the consumers of the operation. Since activations are folded
    # into the preceding operation during inference, the FakeQuant operation
    # after the activation is all that is needed.
    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())

        self.assertNotIn('test/relu6', [c.name for c in consumers])
Developer: AnishShah | Project: tensorflow | Lines of code: 34 | Source file: quantize_test.py


Example 2: _testRelu6

  def _testRelu6(self, np_features, use_gpu=False):
    np_relu6 = self._npRelu6(np_features)
    with self.test_session(use_gpu=use_gpu):
      relu6 = nn_ops.relu6(np_features)
      tf_relu6 = relu6.eval()
    self.assertAllClose(np_relu6, tf_relu6)
    self.assertShapeEqual(np_relu6, relu6)
Developer: awisbith | Project: tensorflow | Lines of code: 7 | Source file: relu_op_test.py
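
Note that the `_npRelu6` reference helper used above is not part of the excerpt. A plausible NumPy implementation (an assumption for illustration, not the actual code from relu_op_test.py) would be:

import numpy as np

def _np_relu6(np_features):
  # Hypothetical reference implementation: clamp to [0, 6] element-wise,
  # mirroring what nn_ops.relu6 is expected to compute. In the test file
  # this would be a method taking self.
  return np.minimum(np.maximum(np_features, 0.0), 6.0)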


Example 3: _TestDeviceName

  def _TestDeviceName(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      inputs = array_ops.zeros((batch_size, height, width, depth))
      conv = layers.conv2d(
          inputs,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test')
      _ = nn_ops.relu6(conv)

    device_name = '/job:oink/task:0/device:CPU:0'
    if is_training:
      q_graph = quantize_graph.create_training_graph(
          graph, device_name_or_function=device_name)
    else:
      q_graph = quantize_graph.create_eval_graph(
          graph, device_name_or_function=device_name)

    orig_variable_names = set(
        [v.name for v in graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
    q_variables = q_graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    # Ensure that variables were added.
    self.assertTrue(len(orig_variable_names) < len(q_variables))
    # All added variables should have the specified device name.
    for var in q_variables:
      if var.name not in orig_variable_names:
        self.assertEqual(var.device, device_name)
Developer: AbhinavJain13 | Project: tensorflow | Lines of code: 32 | Source file: quantize_graph_test.py


Example 4: _testMultiplePartitionedVariables

  def _testMultiplePartitionedVariables(self, is_training):
    # When weights are partitioned into multiple partitions, the weights
    # variable is followed by an identity -> concat -> identity to group
    # the partitions.
    partitioner = partitioned_variables.fixed_size_partitioner(2)
    graph = ops.Graph()
    with graph.as_default():
      with variable_scope.variable_scope('part', partitioner=partitioner):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros((batch_size, height, width, depth))
        input2 = array_ops.zeros((batch_size, height // 2, width // 2, 32))
        conv = conv2d(
            input1,
            32, [5, 5],
            stride=2,
            padding='SAME',
            weights_initializer=self._WeightInit(0.09),
            activation_fn=None,
            scope='test/test')
        node = math_ops.add(conv, input2, name='test/add')
        node = nn_ops.relu6(node, name='test/relu6')

      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
      # Check that the weight's quant node was added.
      op_names = [op.name for op in graph.get_operations()]
      self.assertTrue(
          'part/test/test/weights_quant/FakeQuantWithMinMaxVars' in op_names)
Developer: AnishShah | Project: tensorflow | Lines of code: 26 | Source file: quantize_test.py


Example 5: _LayerWithActivationProcessing

  def _LayerWithActivationProcessing(self,
                                     input_tensor=None,
                                     scope='test',
                                     post_activation_bypass=False):

    batch_size, height, width, depth = 5, 128, 128, 3
    if input_tensor is None:
      input_tensor = array_ops.zeros((batch_size, height, width, depth))
    weight_init = init_ops.truncated_normal_initializer
    with ops.name_scope(scope):
      output = layers.conv2d(
          input_tensor,
          depth, [5, 5],
          padding='SAME',
          weights_initializer=weight_init(0.09),
          activation_fn=None,
          normalizer_fn=None,
          biases_initializer=None)

      output = layers.batch_norm(
          output, center=True, scale=True, decay=1.0 - 0.003, fused=True)

      output = nn_ops.relu6(output)
      scaled_output1 = math_ops.mul(2.0, output)
      scaled_output2 = math_ops.mul(3.0, output)
      output = scaled_output1 + scaled_output2
    return output
Developer: Ajaycs99 | Project: tensorflow | Lines of code: 27 | Source file: quantize_graph_test.py


Example 6: testOneConsumerOperation

  def testOneConsumerOperation(self):
    graph = ops.Graph()
    with graph.as_default():
      input_tensor = array_ops.zeros((1, 2, 3, 4))
      output_tensor = nn_ops.relu6(input_tensor)

    input_to_ops_map = input_to_ops.InputToOps(graph)
    consumer_operations = input_to_ops_map.ConsumerOperations(input_tensor.op)

    self.assertEqual(consumer_operations, {output_tensor.op})
Developer: AbhinavJain13 | Project: tensorflow | Lines of code: 10 | Source file: input_to_ops_test.py


Example 7: _TestOverlappingPostActivationBypassQuantized

  def _TestOverlappingPostActivationBypassQuantized(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      conv_input = array_ops.zeros((batch_size, height, width, depth))
      conv1 = conv2d(
          conv_input,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=nn_ops.relu6,
          scope='test/test1')

      # The bypass of this conv is the post activation bypass of the previous
      # conv.
      conv2 = conv2d(
          conv_input,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test/test2')

      bypass_tensor = math_ops.add(conv1, conv2, name='test/add')
      _ = nn_ops.relu6(bypass_tensor, name='test/output')

      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

      # Ensure that the bypass node is preceded by a FakeQuantWithMinMaxVars
      # operation, and NOT followed by one.
      self.assertTrue('FakeQuantWithMinMaxVars' not in
                      [c.type for c in bypass_tensor.consumers()])
      self.assertTrue('FakeQuantWithMinMaxVars' in
                      [i.op.type for i in bypass_tensor.op.inputs])

      # Ensure that all the convs and activations are quantized.
      op_names = [op.name for op in graph.get_operations()]
      self.assertTrue(
          'test/test1/weights_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertTrue(
          'test/test2/weights_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertTrue(
          'test/test1/act_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertTrue('test/act_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertEqual(
          'Relu6',
          graph.get_operation_by_name(
              'test/test1/act_quant/FakeQuantWithMinMaxVars').inputs[0].op.type)
      self.assertEqual(
          'Relu6',
          graph.get_operation_by_name(
              'test/act_quant/FakeQuantWithMinMaxVars').inputs[0].op.type)
Developer: AnishShah | Project: tensorflow | Lines of code: 54 | Source file: quantize_test.py


Example 8: testRelu6GradGrad

  def testRelu6GradGrad(self):
    inputs = constant_op.constant([[-2, -1, 1, 3], [5, 7, 8, 9]],
                                  dtype=dtypes.float32)
    x_init_value = np.array([[-3.5, -1.5, 2, 4], [4.5, 7.5, 8.5, 11]])
    r = nn_ops.relu6(inputs)
    r_g = gradients_impl.gradients(r, inputs)[0]
    with self.test_session():
      error = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          r_g, r_g.get_shape().as_list(),
          x_init_value=x_init_value)
      self.assertLess(error, 1e-4)
Developer: AbhinavJain13 | Project: tensorflow | Lines of code: 12 | Source file: nn_grad_test.py
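
For context: the first derivative of relu6 is 1 on the open interval (0, 6) and 0 elsewhere, so its second derivative vanishes almost everywhere; the gradient checker above verifies this numerically. Here is a NumPy sketch of the analytic first gradient (our own illustration, not part of the test):

import numpy as np

def relu6_grad(x):
  # d/dx relu6(x) is 1 where 0 < x < 6 and 0 elsewhere; the kink
  # points x = 0 and x = 6 conventionally receive gradient 0.
  return ((x > 0.0) & (x < 6.0)).astype(x.dtype)

x = np.array([[-2.0, -1.0, 1.0, 3.0], [5.0, 7.0, 8.0, 9.0]], dtype=np.float32)
print(relu6_grad(x))  # -> [[0. 0. 1. 1.] [1. 0. 0. 0.]]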


Example 9: test_summarize_activation_relu6

  def test_summarize_activation_relu6(self):
    with self.cached_session():
      var = variables.Variable(1)
      op = nn_ops.relu6(var, name='SummaryTest')
      summary_op = summaries_lib.summarize_activation(op)

      self.assertEquals(summary_op.op.type, 'HistogramSummary')
      names = [op.op.name for op in ops.get_collection(ops.GraphKeys.SUMMARIES)]
      self.assertEquals(len(names), 3)
      self.assertIn(u'SummaryTest/zeros', names)
      self.assertIn(u'SummaryTest/sixes', names)
      self.assertIn(u'SummaryTest/activation', names)
Developer: Ajaycs99 | Project: tensorflow | Lines of code: 12 | Source file: summaries_test.py
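
The 'zeros' and 'sixes' summaries are specific to relu6: they track how often units saturate at either end of the [0, 6] range. A rough NumPy illustration of the quantities presumably being summarized (an assumption about intent, not the library's internals):

import numpy as np

activations = np.array([0.0, 0.0, 2.5, 6.0], dtype=np.float32)
frac_zeros = np.mean(activations == 0.0)  # units stuck at the lower clip
frac_sixes = np.mean(activations == 6.0)  # units saturated at the upper clip
print(frac_zeros, frac_sixes)  # -> 0.5 0.25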


Example 10: _TestInsertQuantOpInSeparableConv2d

  def _TestInsertQuantOpInSeparableConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height // 2, width // 2, depth))
      conv = separable_conv2d(
          input1,
          3, [5, 5],
          stride=2,
          depth_multiplier=1.0,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
    # Check if output of bias add is quantized
    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    # Check if weights for both convs inside separable conv are quantized
    pointwise_weight_quant = graph.get_operation_by_name(
        'test/test/weights_quant/' + quantization_node_name)
    self.assertEqual(pointwise_weight_quant.type, quantization_node_name)
    depthwise_weight_quant = graph.get_operation_by_name(
        'test/test/separable_conv2d/weights_quant/' + quantization_node_name)
    self.assertEqual(depthwise_weight_quant.type, quantization_node_name)

    # Check if activations after first depthwise conv are quantized.
    depthwise_act_quant = graph.get_operation_by_name(
        'test/test/separable_conv2d/act_quant/' + quantization_node_name)
    self.assertEqual(depthwise_act_quant.type, quantization_node_name)

    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        # Scan through all FakeQuant operations, ensuring that the activation
        # identity op isn't in the consumers of the operation.
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())

        self.assertNotIn('test/relu6', [c.name for c in consumers])
Developer: AnishShah | Project: tensorflow | Lines of code: 51 | Source file: quantize_test.py


Example 11: _ConvLayer

  def _ConvLayer(self):
    """Add a basic convolution layer to the default graph."""
    batch_size, height, width, depth = 5, 128, 128, 3
    inputs = array_ops.zeros((batch_size, height, width, depth))
    weight_init = init_ops.truncated_normal_initializer
    conv = layers.conv2d(
        inputs,
        32, [5, 5],
        stride=2,
        padding='SAME',
        weights_initializer=weight_init(0.09),
        activation_fn=None,
        scope='test')
    _ = nn_ops.relu6(conv)
Developer: AndrewTwinz | Project: tensorflow | Lines of code: 14 | Source file: quantize_graph_test.py


Example 12: testSeveralConsumerOperations

  def testSeveralConsumerOperations(self):
    graph = ops.Graph()
    with graph.as_default():
      input_tensor = array_ops.zeros((1, 2, 3, 4))
      output_tensor_1 = nn_ops.relu6(input_tensor)
      output_tensor_2 = input_tensor + output_tensor_1
      output_tensor_3 = input_tensor * output_tensor_2

    input_to_ops_map = input_to_ops.InputToOps(graph)
    consumer_operations = input_to_ops_map.ConsumerOperations(input_tensor.op)

    self.assertEqual(consumer_operations,
                     {output_tensor_1.op, output_tensor_2.op,
                      output_tensor_3.op})
Developer: AbhinavJain13 | Project: tensorflow | Lines of code: 14 | Source file: input_to_ops_test.py


Example 13: testGradientFloat32

  def testGradientFloat32(self):
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 6.1, 6.3, 6.5, 6.7, 6.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.relu6(x, name="relu6")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
    print("relu6 (float32) gradient err = ", err)
    self.assertLess(err, 1e-4)
Developer: awisbith | Project: tensorflow | Lines of code: 15 | Source file: relu_op_test.py


Example 14: testInsertQuantOpFailsWhenOpsNotConnected

  def testInsertQuantOpFailsWhenOpsNotConnected(self):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      inputs = array_ops.zeros((batch_size, height, width, depth))
      conv = conv2d(inputs, 32, [5, 5], stride=2, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=None, scope='test')
      relu = nn_ops.relu6(inputs)

    # Inserting a quantization op between two unconnected ops should fail with
    # ValueError.
    with self.assertRaises(ValueError) as err:
      quantize._InsertQuantOp('test', conv.op, [relu.op], 'FailingQuantOp')
    self.assertEqual(
        str(err.exception), 'Some inputs not quantized for ops: [Relu6]')
Developer: japrogramer | Project: tensorflow | Lines of code: 16 | Source file: quantize_test.py


Example 15: _TestActivationRewriteWithScope

  def _TestActivationRewriteWithScope(self, rewrite_fn):
    graph = ops.Graph()
    with graph.as_default():
      output = self._LayerWithIdentity(scope='scope1')
      with ops.name_scope('scope2'):
        output = nn_ops.relu6(output)
        scaled_output1 = math_ops.mul(2.0, output)
        scaled_output2 = math_ops.mul(3.0, output)
        output = scaled_output1 + scaled_output2
      rewrite_fn(graph)

      op_names = [op.name for op in graph.get_operations()]
      # The weights and activations of scope1 are quantized, but not scope2's.
      self.assertTrue(any('scope1/Conv/act_quant' in name for name in op_names))
      self.assertTrue(
          any('scope1/Conv/weights_quant' in name for name in op_names))

      for op_name in op_names:
        if op_name.startswith('scope2'):
          self.assertTrue('FakeQuant' not in op_name)
Developer: Ajaycs99 | Project: tensorflow | Lines of code: 20 | Source file: quantize_graph_test.py


Example 16: _ConvLayer

  def _ConvLayer(
      self, input_tensor=None, scope='test', pre_activation_bypass=False,
      post_activation_bypass=False):
    """Add a basic convolution layer to the default graph."""
    batch_size, height, width, depth = 5, 128, 128, 3
    if input_tensor is None:
      input_tensor = array_ops.zeros((batch_size, height, width, depth))
    weight_init = init_ops.truncated_normal_initializer
    with ops.name_scope(scope):
      output = layers.conv2d(
          input_tensor,
          depth, [5, 5],
          padding='SAME',
          weights_initializer=weight_init(0.09),
          activation_fn=None)
      if pre_activation_bypass:
        output += input_tensor
      output = nn_ops.relu6(output)
      if post_activation_bypass:
        output += input_tensor
    return output
Developer: ThunderQi | Project: tensorflow | Lines of code: 21 | Source file: quantize_graph_test.py


Example 17: _TestDefaultGraph

  def _TestDefaultGraph(self, fn):
    with ops.Graph().as_default() as g:
      batch_size, height, width, depth = 5, 128, 128, 3
      inputs = array_ops.zeros((batch_size, height, width, depth))
      conv = layers.conv2d(
          inputs,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test')
      _ = nn_ops.relu6(conv)

      orig_variable_names = set(
          [v.name for v in g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])

      fn()

      q_variables = g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      # Ensure that variables were added.
      self.assertTrue(len(orig_variable_names) < len(q_variables))
Developer: japrogramer | Project: tensorflow | Lines of code: 22 | Source file: quantize_graph_test.py


Example 18: _testRelu6

  def _testRelu6(self, np_features):
    np_relu6 = self._npRelu6(np_features)
    tf_relu6 = nn_ops.relu6(np_features)
    self.assertAllClose(np_relu6, tf_relu6)
    self.assertShapeEqual(np_relu6, tf_relu6)
Developer: Wajih-O | Project: tensorflow | Lines of code: 5 | Source file: relu_op_test.py



Note: The tensorflow.python.ops.nn_ops.relu6 examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution or use should follow each project's License. Do not reproduce without permission.

