Python nn_ops.relu Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.nn_ops.relu. If you have been wondering how to use relu in Python, what it does, or where to find working examples, then the curated code samples below should help.



The following presents 20 code examples of the relu function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
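
Before the examples, here is a minimal, self-contained sketch of calling nn_ops.relu directly. It uses TF 1.x graph mode to match the style of the examples below; the session scaffolding here is our own addition, not taken from any of them:

import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import nn_ops

# relu(x) = max(x, 0), applied element-wise.
x = constant_op.constant(np.array([-2.0, -0.5, 0.0, 1.5], dtype=np.float32))
y = nn_ops.relu(x)
with session.Session() as sess:
  print(sess.run(y))  # expected: [0.  0.  0.  1.5]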

Example 1: testGradientFloat16

  def testGradientFloat16(self):
    with self.test_session(use_gpu=True) as sess:
      # Randomly construct a 1D shape from [1, 40)
      shape = random_ops.random_uniform(
          [1], minval=1, maxval=40, dtype=dtypes.int32)

      # Construct the fp32 graph and its gradient.
      x = random_ops.random_uniform(shape, minval=-1, maxval=1, name="x")
      y1 = nn_ops.relu(x, name="relu_fp32")
      l1 = nn_ops.l2_loss(y1)
      dx_f32 = gradients_impl.gradients(l1, x)

      # Construct the fp16 graph and its gradient.
      # It starts from the same fp32 x, but casts it to fp16 before the Relu,
      # so during backprop the gradient computation happens in fp16.
      x2 = math_ops.cast(x, dtype=dtypes.float16, name="cast")
      y2 = nn_ops.relu(x2, name="relu_fp16")
      l2 = nn_ops.l2_loss(y2)
      dx_f16 = gradients_impl.gradients(l2, x)

      # Repeat the experiment 100 times. All tensor shapes and values are
      # randomly generated for each run.
      for _ in xrange(100):
        dx_f32_v, dx_f16_v = sess.run([dx_f32, dx_f16])
        self.assertAllClose(dx_f32_v, dx_f16_v, atol=3e-4)
Developer: HughKu, Project: tensorflow, Lines: 25, Source: relu_op_test.py
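
Analytically, with l = l2_loss(relu(x)) = 0.5 * sum(relu(x)^2), the gradient dl/dx equals relu(x), since relu passes gradients only where x > 0. A quick NumPy check of that identity (our own illustration, not part of the test above):

import numpy as np

x = np.array([-0.5, 0.2, 0.9], dtype=np.float32)
relu_x = np.maximum(x, 0.0)
# d/dx [0.5 * sum(relu(x)^2)] = relu(x) * 1[x > 0], which equals relu(x).
grad = relu_x * (x > 0)
print(grad)  # [0.  0.2 0.9]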


Example 2: doTestExportNestedNames

  def doTestExportNestedNames(self, use_resource=False):
    graph1 = ops.Graph()
    with graph1.as_default():
      with ops.name_scope("hidden1/hidden2/hidden3"):
        images = constant_op.constant(
            1.0, dtypes.float32, shape=[3, 2], name="images")
        if use_resource:
          weights1 = variables.Variable(
              [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
          biases1 = resource_variable_ops.ResourceVariable(
              [0.1] * 3, name="biases")
        else:
          biases1 = variables.Variable([0.1] * 3, name="biases")
          weights1 = variables.Variable(
              [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
        nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")

    orig_meta_graph, var_list = meta_graph.export_scoped_meta_graph(
        export_scope="hidden1/hidden2", graph=graph1)
    var_names = [v.name for _, v in var_list.items()]
    self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
                     sorted(var_list.keys()))
    self.assertEqual([
        "hidden1/hidden2/hidden3/biases:0", "hidden1/hidden2/hidden3/weights:0"
    ], sorted(var_names))
    for node in orig_meta_graph.graph_def.node:
      self.assertTrue(node.name.startswith("hidden3"))

    graph2 = ops.Graph()
    new_var_list = meta_graph.import_scoped_meta_graph(
        orig_meta_graph, import_scope="new_hidden1/new_hidden2", graph=graph2)
    self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
                     sorted(new_var_list.keys()))
    new_var_names = [v.name for _, v in new_var_list.items()]
    self.assertEqual([
        "new_hidden1/new_hidden2/hidden3/biases:0",
        "new_hidden1/new_hidden2/hidden3/weights:0"
    ], sorted(new_var_names))

    nodes = [
        "new_hidden1/new_hidden2/hidden3/biases/Assign",
        "new_hidden1/new_hidden2/hidden3/weights/Assign"
    ]
    expected = [
        b"loc:@new_hidden1/new_hidden2/hidden3/biases",
        b"loc:@new_hidden1/new_hidden2/hidden3/weights"
    ]
    for n, e in zip(nodes, expected):
      self.assertEqual([e], graph2.get_operation_by_name(n).get_attr("_class"))
Developer: AlbertXiebnu, Project: tensorflow, Lines: 49, Source: meta_graph_test.py


Example 3: testSmallNetwork

  def testSmallNetwork(self):
    image = array_ops.placeholder(dtypes.float32, shape=[1, 28, 28, 1])
    label = array_ops.placeholder(dtypes.float32, shape=[1, 10])
    w = variables.Variable(
        random_ops.truncated_normal([5, 5, 1, 32], stddev=0.1))
    b = variables.Variable(random_ops.truncated_normal([32], stddev=0.1))
    conv = nn_ops.conv2d(image, w, strides=[1, 1, 1, 1], padding="SAME")
    h_conv = nn_ops.relu(conv + b)
    h_conv_flat = array_ops.reshape(h_conv, [1, -1])

    w_fc = variables.Variable(
        random_ops.truncated_normal([25088, 10], stddev=0.1))
    b_fc = variables.Variable(random_ops.truncated_normal([10], stddev=0.1))
    y_conv = nn_ops.softmax(math_ops.matmul(h_conv_flat, w_fc) + b_fc)

    cross_entropy = math_ops.reduce_mean(-math_ops.reduce_sum(
        label * math_ops.log(y_conv), reduction_indices=[1]))
    _ = adam.AdamOptimizer(1e-4).minimize(cross_entropy)

    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    report = cost_analyzer.GenerateCostReport(mg)

    self.assertTrue(b"MatMul" in report)
    self.assertTrue(b"ApplyAdam" in report)
    self.assertTrue(b"Conv2D" in report)
    self.assertTrue(b"Conv2DBackpropInput" in report)
    self.assertTrue(b"Conv2DBackpropFilter" in report)
    self.assertTrue(b"Softmax" in report)

    # Also print the report to make it easier to debug
    print("{}".format(report))
Developer: ajaybhat, Project: tensorflow, Lines: 31, Source: cost_analyzer_test.py


Example 4: testBatchNormScope

  def testBatchNormScope(self):
    batch_size, height, width, depth = 5, 128, 128, 3
    g = ops.Graph()
    with g.as_default():
      inputs = array_ops.zeros((batch_size, height, width, depth))
      stride = 1
      out_depth = 32
      scope = ''
      node = conv2d(
          inputs,
          out_depth, [2, 2],
          stride=stride,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          normalizer_fn=batch_norm,
          normalizer_params=self._BatchNormParams(False),
          scope=scope)

      node = nn_ops.relu(node, name='Relu6')
    bn_list = common.BatchNormGroups(g)
    with open('/tmp/common_test.pbtxt', 'w') as f:
      f.write(str(g.as_graph_def()))

    # Exactly one batch norm layer with empty scope should be found
    self.assertEqual(len(bn_list), 1)
    self.assertEqual(bn_list[0], '')
Developer: Ajaycs99, Project: tensorflow, Lines: 27, Source: common_test.py


Example 5: test

  def test(self):
    np.random.seed(1)  # Make it reproducible.
    x = np.random.randn(3, 4).astype(np.float32)
    y = np.maximum(x, 0.0)

    z = self.evaluate(nn_ops.relu(constant_op.constant(x)))
    self.assertAllEqual(y, z)
Developer: Wajih-O, Project: tensorflow, Lines: 7, Source: nn_test.py


Example 6: hinge_loss

def hinge_loss(logits, labels=None, scope=None, target=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.
    target: Deprecated alias for `labels`.

  Returns:
    A `Tensor` of same shape as logits and target representing the loss values
      across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  labels = _labels(labels, target)
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.sub(2 * labels, all_ones)
    return nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits)))
Developer: RapidApplicationDevelopment, Project: tensorflow, Lines: 25, Source: loss_ops.py
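
The conversion from {0, 1} labels to {-1, +1} via 2 * labels - 1 is the heart of this function; the loss is then max(0, 1 - y * logits). A quick NumPy check of that arithmetic (our own illustration, not from the original project):

import numpy as np

logits = np.array([2.0, -0.5, 0.3], dtype=np.float32)
labels01 = np.array([1.0, 0.0, 1.0], dtype=np.float32)
labels_pm1 = 2 * labels01 - 1                       # {0, 1} -> {-1, +1}
hinge = np.maximum(0.0, 1.0 - labels_pm1 * logits)
print(hinge)  # [0.  0.5 0.7]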


Example 7: hinge_loss

def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES):
  """Adds a hinge loss to the training procedure.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    logits: The logits, a float tensor.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      `[batch_size]`, or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` of the loss value.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.sub(2 * labels, all_ones)
    losses = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits)))
    return compute_weighted_loss(losses, weights, scope, loss_collection)
Developer: BloodD, Project: tensorflow, Lines: 27, Source: losses.py


Example 8: testNaNs

  def testNaNs(self):
    # Test that relu(nan) = nan for various sizes.
    for i in range(18):
      x = np.zeros(i) + np.nan
      with self.test_session():
        z = nn_ops.relu(constant_op.constant(x)).eval()
        self.assertTrue(np.isnan(z).all())
Developer: AlbertXiebnu, Project: tensorflow, Lines: 7, Source: nn_test.py


Example 9: _testRelu

  def _testRelu(self, np_features, use_gpu=False):
    np_relu = self._npRelu(np_features)
    with self.test_session(use_gpu=use_gpu):
      relu = nn_ops.relu(np_features)
      tf_relu = relu.eval()
    self.assertAllClose(np_relu, tf_relu)
    self.assertShapeEqual(np_relu, relu)
Developer: awisbith, Project: tensorflow, Lines: 7, Source: relu_op_test.py


Example 10: SimulateFusedConv2dBiasActivationInt8

def SimulateFusedConv2dBiasActivationInt8(conv_input_scale, conv_input, kernel,
                                          padding, strides, side_input_scale,
                                          side_input, biases):
  """Simulates the int8 fused 2-D convolution op using separate float ops.

  The arguments and return values have the same format, meanings and
  restrictions as the actual op.

  Args:
    conv_input_scale: A scalar 'float'.
    conv_input: A `Tensor` of type `qint8` in NCHW_VECT_C layout.
    kernel: A `Tensor` of type `qint8` in OIHW_VECT_I layout.
    padding: A `string` from: `"SAME", "VALID"`.
    strides: A list of `ints`.
    side_input_scale: A scalar 'float'.
    side_input: A `Tensor` of type `qint8` in NCHW_VECT_C layout.
    biases: A `Tensor` of type `float32` in NCHW layout.
  Returns:
    A `Tensor` of type `qint8` in NCHW_VECT_C layout.
  """
  conv_result = nn_ops.conv2d(
      NchwVectCToNchw(gen_array_ops.dequantize(conv_input, -128, 127)),
      OihwVectIToHwio(gen_array_ops.dequantize(kernel, -128, 127)),
      strides=strides,
      padding=padding,
      data_format="NCHW") * conv_input_scale

  conv_and_side_inputs = conv_result + side_input_scale * NchwVectCToNchw(
      gen_array_ops.dequantize(side_input, -128, 127))

  logit = nn_ops.bias_add(conv_and_side_inputs, biases, data_format="NCHW")

  result, _, _ = gen_array_ops.quantize_v2(
      NchwToNchwVectC(nn_ops.relu(logit)), -128, 127, dtypes.qint8)
  return result
Developer: Jackiefan, Project: tensorflow, Lines: 34, Source: fused_conv2d_bias_activation_op_test.py
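
With min/max arguments of -128/127, dequantize maps each qint8 value to (approximately) the same float value, so the float pipeline above reduces to conv_input_scale * conv + side_input_scale * side + bias, followed by relu and requantization. A hedged NumPy sketch of just that scalar arithmetic (our own illustration; the real op operates on NCHW_VECT_C tensors):

import numpy as np

conv_input_scale, side_input_scale = 0.5, 0.25
conv_out = 40.0    # stand-in for one float convolution output value
side_in = -16.0    # stand-in for one dequantized side-input value
bias = 3.0
logit = conv_input_scale * conv_out + side_input_scale * side_in + bias
activated = max(logit, 0.0)  # relu
requantized = int(np.clip(np.round(activated), -128, 127))  # back into qint8 range
print(requantized)  # 0.5*40 + 0.25*(-16) + 3 = 19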


Example 11: hinge_loss

def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES):
  """Adds a hinge loss to the training procedure.

  WARNING: `weights` also supports dimensions of 1, but the broadcasting does
  not work as advertised, you'll wind up with weighted sum instead of weighted
  mean for any but the last dimension. This will be cleaned up soon, so please
  do not rely on the current behavior for anything but the shapes documented for
  `weights` below.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    logits: The logits, a float tensor.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      `[batch_size]`, or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` of the loss value.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", (logits, labels)) as scope:
    logits = math_ops.to_float(logits)
    labels = math_ops.to_float(labels)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(losses, weights, scope, loss_collection)
Developer: AliMiraftab, Project: tensorflow, Lines: 35, Source: losses_impl.py


Example 12: hinge_loss

def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a hinge loss to the training procedure.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    logits: The logits, a float tensor.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
    logits = math_ops.to_float(logits)
    labels = math_ops.to_float(labels)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Developer: piyushjaiswal98, Project: tensorflow, Lines: 34, Source: losses_impl.py


Example 13: testGradient

  def testGradient(self):
    with ops.Graph().as_default() as g:
      inputs = array_ops.placeholder(
          dtypes.float32, shape=[None, 100], name="input")
      weights = array_ops.placeholder(
          dtypes.float32, shape=[100, 10], name="weights")
      biases = array_ops.placeholder(dtypes.float32, shape=[10], name="biases")
      activations = nn_ops.relu(
          math_ops.matmul(inputs, weights) + biases, name="activations")
      loss = math_ops.reduce_mean(activations, name="loss")
    gdef = g.as_graph_def()

    with ops.Graph().as_default() as g:
      input_placeholder = array_ops.placeholder(dtypes.float32, shape=[32, 100])
      weights_var = variables.Variable(
          random_ops.truncated_normal([100, 10]), name="weights")
      biases_var = variables.Variable(array_ops.zeros([10]), name="biases")
      activations, loss = importer.import_graph_def(
          gdef,
          input_map={
              "input:0": input_placeholder,
              "weights:0": weights_var,
              "biases:0": biases_var
          },
          return_elements=["activations:0", "loss:0"])
      self.assertEqual([32, 10], activations.get_shape())
      self.assertEqual([], loss.get_shape())
      weights_grad, biases_grad = gradients_impl.gradients(
          loss, [weights_var, biases_var])
      self.assertEqual([100, 10], weights_grad.get_shape())
      self.assertEqual([10], biases_grad.get_shape())
Developer: pcm17, Project: tensorflow, Lines: 31, Source: importer_test.py


Example 14: bottleneck_hole

def bottleneck_hole(inputs,
                    depth,
                    depth_bottleneck,
                    stride,
                    rate=2,
                    outputs_collections=None,
                    scope=None):
  with variable_scope.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
    depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
    if depth == depth_in:
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      shortcut = layers.conv2d(
          inputs,
          depth, [1, 1],
          stride=stride,
          activation_fn=None,
          scope='shortcut')

    residual = layers.conv2d(
        inputs, depth_bottleneck, [1, 1], stride=1, scope='conv1')
    residual = layers_lib.conv2d(
        residual, depth_bottleneck, [3, 3],
        stride=1, rate=rate, padding='SAME', scope='conv2')
    residual = layers.conv2d(
        residual, depth, [1, 1], stride=1, activation_fn=None, scope='conv3')

    output = nn_ops.relu(shortcut + residual)

    return utils.collect_named_outputs(outputs_collections, sc.name, output)
Developer: jacke121, Project: tf_rfcn, Lines: 28, Source: resnet_v1_rfcn_hole_iter4_test.py


Example 15: testPotentialCycle

  def testPotentialCycle(self):
    graph1 = ops.Graph()
    with graph1.as_default():
      a = constant_op.constant(1.0, shape=[2, 2])
      b = constant_op.constant(2.0, shape=[2, 2])
      matmul = math_ops.matmul(a, b)
      with ops.name_scope("hidden1"):
        c = nn_ops.relu(matmul)
        d = constant_op.constant(3.0, shape=[2, 2])
        matmul = math_ops.matmul(c, d)

    orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
        export_scope="hidden1", graph=graph1)

    graph2 = ops.Graph()
    with graph2.as_default():
      with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
        meta_graph.import_scoped_meta_graph(
            orig_meta_graph, import_scope="new_hidden1")

      meta_graph.import_scoped_meta_graph(
          orig_meta_graph,
          import_scope="new_hidden1",
          input_map={
              "$unbound_inputs_MatMul": constant_op.constant(
                  4.0, shape=[2, 2])
          })
Developer: AlbertXiebnu, Project: tensorflow, Lines: 27, Source: meta_graph_test.py


Example 16: test

  def test(self):
    np.random.seed(1)  # Make it reproducible.
    x = np.random.randn(3, 4).astype(np.float32)
    y = np.maximum(x, 0.0)
    with self.test_session():
      z = nn_ops.relu(constant_op.constant(x)).eval()
      self.assertAllEqual(y, z)
Developer: AlbertXiebnu, Project: tensorflow, Lines: 7, Source: nn_test.py


Example 17: hinge_loss

def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor. Note that logits are assumed to be
      unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive
      (resp. negative) binary prediction.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0. Internally
      the {0,1} labels are converted to {-1,1} when calculating the hinge loss.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    An unweighted `Tensor` of same shape as `logits` and `labels` representing
    the loss values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
Developer: ThunderQi, Project: tensorflow, Lines: 28, Source: loss_ops.py


Example 18: unregularized_loss

  def unregularized_loss(self, examples):
    """Add operations to compute the loss (without the regularization loss).

    Args:
      examples: Examples to compute unregularized loss on.

    Returns:
      An Operation that computes mean (unregularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified([
        'example_labels', 'example_weights', 'sparse_features', 'dense_features'
    ], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/unregularized_loss'):
      predictions = math_ops.cast(
          self._linear_predictions(examples), dtypes.float64)
      labels = math_ops.cast(
          internal_convert_to_tensor(examples['example_labels']),
          dtypes.float64)
      weights = math_ops.cast(
          internal_convert_to_tensor(examples['example_weights']),
          dtypes.float64)

      if self._options['loss_type'] == 'logistic_loss':
        return math_ops.reduce_sum(math_ops.multiply(
            sigmoid_cross_entropy_with_logits(labels=labels,
                                              logits=predictions),
            weights)) / math_ops.reduce_sum(weights)

      if self._options['loss_type'] == 'poisson_loss':
        return math_ops.reduce_sum(math_ops.multiply(
            log_poisson_loss(targets=labels, log_input=predictions),
            weights)) / math_ops.reduce_sum(weights)

      if self._options['loss_type'] in ['hinge_loss', 'smooth_hinge_loss']:
        # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
        # first convert 0/1 labels into -1/1 labels.
        all_ones = array_ops.ones_like(predictions)
        adjusted_labels = math_ops.subtract(2 * labels, all_ones)
        # Tensor that contains (unweighted) error (hinge loss) per
        # example.
        error = nn_ops.relu(
            math_ops.subtract(all_ones,
                              math_ops.multiply(adjusted_labels, predictions)))
        weighted_error = math_ops.multiply(error, weights)
        return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
            weights)

      # squared loss
      err = math_ops.subtract(labels, predictions)

      weighted_squared_err = math_ops.multiply(math_ops.square(err), weights)
      # SDCA squared loss function is sum(err^2) / (2*sum(weights))
      return (math_ops.reduce_sum(weighted_squared_err) /
              (2.0 * math_ops.reduce_sum(weights)))
Developer: Ajaycs99, Project: tensorflow, Lines: 59, Source: sdca_ops.py
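
The final (squared-loss) branch reduces to sum(w * err^2) / (2 * sum(w)); a quick NumPy check of that reduction (our own illustration, not from the original project):

import numpy as np

labels = np.array([1.0, 0.0, 1.0])
predictions = np.array([0.8, 0.3, 0.9])
weights = np.array([1.0, 2.0, 1.0])
err = labels - predictions
loss = np.sum(weights * err ** 2) / (2.0 * np.sum(weights))
print(loss)  # 0.23 / 8 = 0.02875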


Example 19: testReluInt8x4BadShape

  def testReluInt8x4BadShape(self):
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest("No GPU available")
    inputs = constant_op.constant(
        np.array([[-50, 7, 23], [0, 1, -5], [6, -2, 11]]), dtypes.qint8)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Tensor size must be a multiple of 4 for Relu<qint8>. Got 9"):
      self.evaluate(nn_ops.relu(inputs))

    inputs = constant_op.constant(
        np.array([1, -2, 3, -4, 5, -6, 7, -8, 9, -8, 7, -6, 5, -4, 3, -2, 1]),
        dtypes.qint8)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Tensor size must be a multiple of 4 for Relu<qint8>. Got 17"):
      self.evaluate(nn_ops.relu(inputs))
Developer: Wajih-O, Project: tensorflow, Lines: 17, Source: relu_op_test.py
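
For contrast, an input whose size is a multiple of 4 should pass the Relu<qint8> shape check. A hedged sketch in the same test style, under the same GPU-only assumption (our own illustration, not part of the original test):

    inputs = constant_op.constant(
        np.array([-50, 7, 23, 0, 1, -5, 6, -2, 11, 4, -9, 12]), dtypes.qint8)
    # 12 elements is a multiple of 4, so Relu<qint8> accepts this input.
    self.evaluate(nn_ops.relu(inputs))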


Example 20: func

  def func(inp):
    conv = nn_ops.conv2d(
        inp,
        filter=array_ops.ones([3, 3, 3, 16]),
        strides=[1, 1, 1, 1],
        padding='SAME')
    output = nn_ops.relu(conv, name='output')
    return output
Developer: aritratony, Project: tensorflow, Lines: 8, Source: lite_v2_test.py



Note: The tensorflow.python.ops.nn_ops.relu examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before distributing or using the code, and do not reproduce without permission.

