Python quantize_graph.set_attr_dtype Function Code Examples


This article collects typical usage examples of the Python function tensorflow.tools.quantization.quantize_graph.set_attr_dtype. If you are unsure how to call set_attr_dtype or what it is for, the curated code examples below should help.



The following presents 20 code examples of the set_attr_dtype function, sorted by popularity by default.
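
Before the examples, here is a minimal sketch of the pattern they all share: build NodeDef objects with quantize_graph's helper functions, then call set_attr_dtype to record a type attribute (typically "T") on a node before extending the GraphDef. The node names are illustrative and the sketch assumes the legacy TF 1.x module tensorflow.tools.quantization.quantize_graph is available; it is not taken from any single example below.

# Minimal usage sketch (assumes TF 1.x, where tensorflow.tools.quantization ships).
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.tools.quantization import quantize_graph

float_graph_def = graph_pb2.GraphDef()

# A float constant feeding a Relu op.
input_node = quantize_graph.create_constant_node(
    "input", value=[1, 2, 3, 4], dtype=dtypes.float32, shape=[2, 2])
relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name])

# set_attr_dtype stores the dtype under the given attribute name ("T" here)
# in the node's attr map, which is how ops declare type attributes in a GraphDef.
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)

float_graph_def.node.extend([input_node, relu_node])

The test_graph helper and the assertions used in the examples come from quantize_graph_test.py and are omitted from this sketch.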

Example 1: test_multiple_outputs

  def test_multiple_outputs(self):
    input_constant_name = "input_constant"
    split_constant_name = "split_constant"
    split_name = "split"
    concat_constant_name = "concat_constant"
    concat_name = "concat"

    float_graph_def = graph_pb2.GraphDef()
    input_constant = quantize_graph.create_constant_node(
        input_constant_name,
        value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
        dtype=dtypes.float32,
        shape=[2, 6])
    float_graph_def.node.extend([input_constant])
    split_constant = quantize_graph.create_constant_node(
        split_constant_name, value=1, dtype=dtypes.int32, shape=[])
    float_graph_def.node.extend([split_constant])
    split_node = quantize_graph.create_node(
        "Split", split_name, [split_constant_name, input_constant_name])
    quantize_graph.set_attr_int(split_node, "num_split", 2)
    quantize_graph.set_attr_dtype(split_node, "T", dtypes.float32)
    float_graph_def.node.extend([split_node])
    concat_constant = quantize_graph.create_constant_node(
        concat_constant_name, value=1, dtype=dtypes.int32, shape=[])
    float_graph_def.node.extend([concat_constant])
    concat_node = quantize_graph.create_node(
        "Concat", concat_name,
        [concat_constant_name, split_name + ":0", split_name + ":1"])
    quantize_graph.set_attr_int(concat_node, "N", 2)
    quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
    float_graph_def.node.extend([concat_node])

    test_graph(float_graph_def, {}, [concat_name])
Author: AlbertXiebnu, Project: tensorflow, Lines: 33, Source: quantize_graph_test.py


Example 2: test_quantized_input_range_mat_mul

  def test_quantized_input_range_mat_mul(self):
    shapes = [[3, 2], [2, 4]]
    inputs = []
    for i, shape in enumerate(shapes):
      node = quantize_graph.create_node("PlaceholderV2", "input_%s" % i, [])
      quantize_graph.set_attr_dtype(node, "dtype", dtypes.float32)
      quantize_graph.set_attr_shape(node, "shape", shape)
      inputs.append(node)
    mat_mul_node = quantize_graph.create_node("MatMul", "mat_mul",
                                              [n.name for n in inputs])
    quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)

    float_graph_def = graph_pb2.GraphDef()
    float_graph_def.node.extend(inputs + [mat_mul_node])

    input_map = {
        inputs[0].name + ":0":
            np.reshape([1, 2, 3, 4, 5, 6], shapes[0]),
        inputs[1].name + ":0":
            np.reshape([.8, .7, .6, .5, .4, .3, .2, .1], shapes[1])
    }
    self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
                                         [mat_mul_node.name], [-1, 20.])
    self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
                                         [mat_mul_node.name], [0, 6.])
Author: AlbertXiebnu, Project: tensorflow, Lines: 25, Source: quantize_graph_test.py


Example 3: test_relu_w_fake_quant_w_min_max_vars

  def test_relu_w_fake_quant_w_min_max_vars(self):
    input_node = quantize_graph.create_constant_node(
        "input",
        value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
        dtype=dtypes.float32,
        shape=[1, 2, 6, 1])
    relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name])
    quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)

    min_node = quantize_graph.create_constant_node(
        "min_bias_add", value=0, dtype=dtypes.float32, shape=[])
    max_node = quantize_graph.create_constant_node(
        "max_bias_add", value=12, dtype=dtypes.float32, shape=[])
    fake_quant_node = quantize_graph.create_node(
        "FakeQuantWithMinMaxVars", "fake_quant",
        [relu_node.name, min_node.name, max_node.name])

    float_graph_def = graph_pb2.GraphDef()
    float_graph_def.node.extend(
        [input_node, relu_node, min_node, max_node, fake_quant_node])
    test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)

    # Verify there is only one Quantize and one Requantize op.
    eightbit_rewriter = quantize_graph.GraphRewriter(
        float_graph_def, "eightbit", quantized_input_range=None)
    eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])

    ops = [node.op for node in eightbit_graph_def.node]
    # No quantize since all inputs are const and can be quantized up-front.
    self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))

    # One dequantize at the end.
    self.assertEqual(1, ops.count("Dequantize"))
Author: AlbertXiebnu, Project: tensorflow, Lines: 33, Source: quantize_graph_test.py


Example 4: test_conv

def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
              filter_count, stride, padding, input_values, filter_values):
  """Tests a Conv replacement."""
  input_constant_name = "input_constant"
  filter_constant_name = "filter_constant"
  conv_name = "conv"

  float_graph_def = graph_pb2.GraphDef()
  input_constant = quantize_graph.create_constant_node(
      input_constant_name,
      value=input_values,
      dtype=dtypes.float32,
      shape=[image_batch_count, image_height, image_width, depth])
  float_graph_def.node.extend([input_constant])
  filter_constant = quantize_graph.create_constant_node(
      filter_constant_name,
      value=filter_values,
      dtype=dtypes.float32,
      shape=[filter_size, filter_size, depth, filter_count])
  float_graph_def.node.extend([filter_constant])
  conv_node = quantize_graph.create_node(
      "Conv2D", conv_name, [input_constant_name, filter_constant_name])
  quantize_graph.set_attr_dtype(conv_node, "T", dtypes.float32)
  quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
  quantize_graph.set_attr_string(conv_node, "padding", padding)
  float_graph_def.node.extend([conv_node])

  test_graph(float_graph_def, {}, [conv_name])
Author: AlbertXiebnu, Project: tensorflow, Lines: 28, Source: quantize_graph_test.py


Example 5: test_mat_mul

def test_mat_mul(m, n, k, a, b):
  """Tests a MatMul replacement."""
  a_constant_name = "a_constant"
  b_constant_name = "b_constant"
  mat_mul_name = "mat_mul"

  float_graph_def = tf.GraphDef()
  a_constant = quantize_graph.create_constant_node(a_constant_name,
                                                   value=a,
                                                   dtype=tf.float32,
                                                   shape=[m, k])
  float_graph_def.node.extend([a_constant])
  b_constant = quantize_graph.create_constant_node(b_constant_name,
                                                   value=b,
                                                   dtype=tf.float32,
                                                   shape=[k, n])
  float_graph_def.node.extend([b_constant])
  mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name,
                                            [a_constant_name, b_constant_name])
  quantize_graph.set_attr_dtype(mat_mul_node, "T", tf.float32)
  quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False)
  quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False)
  float_graph_def.node.extend([mat_mul_node])

  test_graph(float_graph_def, {}, [mat_mul_name])
Author: DavidNemeskey, Project: tensorflow, Lines: 25, Source: quantize_graph_test.py


Example 6: test_concat

  def test_concat(self):
    shape_constant_name = "shape_constant"
    a_constant_name = "a_constant"
    b_constant_name = "b_constant"
    concat_name = "concat"

    float_graph_def = tf.GraphDef()
    shape_constant = quantize_graph.create_constant_node(shape_constant_name,
                                                         value=0,
                                                         dtype=tf.int32,
                                                         shape=[])
    float_graph_def.node.extend([shape_constant])
    a_constant = quantize_graph.create_constant_node(a_constant_name,
                                                     value=[1, 2, 3, 4, 5, 6, 7,
                                                            8, 9, 10, 11, 12],
                                                     dtype=tf.float32,
                                                     shape=[2, 2, 3])
    float_graph_def.node.extend([a_constant])
    b_constant = quantize_graph.create_constant_node(b_constant_name,
                                                     value=[13, 14, 15, 16, 17,
                                                            18, 19, 20, 21, 22,
                                                            23, 24],
                                                     dtype=tf.float32,
                                                     shape=[2, 2, 3])
    float_graph_def.node.extend([b_constant])
    concat_node = quantize_graph.create_node("Concat", concat_name,
                                             [shape_constant_name,
                                              a_constant_name, b_constant_name])
    quantize_graph.set_attr_int(concat_node, "N", 2)
    quantize_graph.set_attr_dtype(concat_node, "T", tf.float32)
    float_graph_def.node.extend([concat_node])

    test_graph(float_graph_def, {}, [concat_name])
Author: DavidNemeskey, Project: tensorflow, Lines: 33, Source: quantize_graph_test.py


Example 7: test_bias_add_w_fallback_min_max_vars

  def test_bias_add_w_fallback_min_max_vars(self):
    input_node = quantize_graph.create_constant_node(
        "input", value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        dtype=tf.float32, shape=[1, 1, 2, 5])
    offset_node = quantize_graph.create_constant_node(
        "offset", value=[1, 2, 3, 4, 5], dtype=tf.float32, shape=[5])
    bias_add_node = quantize_graph.create_node(
        "BiasAdd", "bias_add", [input_node.name, offset_node.name])
    quantize_graph.set_attr_dtype(bias_add_node, "T", tf.float32)

    float_graph_def = tf.GraphDef()
    float_graph_def.node.extend([input_node, offset_node, bias_add_node])
    test_graph(float_graph_def, {}, [bias_add_node.name], log_graph=True)

    # Verify there is only one Quantize, one Requantize op, and no
    # RequantizationRange op.
    eightbit_rewriter = quantize_graph.GraphRewriter(
        float_graph_def, "eightbit", quantized_input_range=None,
        fallback_quantization_range=[-.5, 15.5])
    eightbit_graph_def = eightbit_rewriter.rewrite([bias_add_node.name])

    ops = [node.op for node in eightbit_graph_def.node]
    node_names = [node.name for node in eightbit_graph_def.node]
    # No quantize since all inputs are const and can be quantized up-front.
    self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))

    # One dequantize at the end.
    self.assertEqual(1, ops.count("Dequantize"))

    # No RequantizationRange
    self.assertEqual(0, ops.count("RequantizationRange"))

    # The fallback constants are in the graph.
    self.assertEqual(1, node_names.count("fallback_quantization_min_value"))
    self.assertEqual(1, node_names.count("fallback_quantization_max_value"))
Author: Hwhitetooth, Project: tensorflow, Lines: 35, Source: quantize_graph_test.py


Example 8: test_keep_control_edges

  def test_keep_control_edges(self):
    no_op_name = "no_op"
    a_constant_name = "a_constant"
    b_constant_name = "b_constant"
    a_check_name = "a_check"
    b_check_name = "b_check"
    a_identity_name = "a_identity"
    b_identity_name = "b_identity"
    add_name = "add"
    graph_def = graph_pb2.GraphDef()
    no_op = quantize_graph.create_node("NoOp", no_op_name, [])
    graph_def.node.extend([no_op])
    a_constant = quantize_graph.create_constant_node(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([a_constant])
    a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
                                              [a_constant_name])
    graph_def.node.extend([a_check_node])
    a_identity_node = quantize_graph.create_node(
        "Identity", a_identity_name,
        [a_constant_name, "^" + a_check_name, "^" + no_op_name])
    graph_def.node.extend([a_identity_node])
    b_constant = quantize_graph.create_constant_node(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([b_constant])
    b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
                                              [b_constant_name])
    graph_def.node.extend([b_check_node])
    b_identity_node = quantize_graph.create_node(
        "Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
    graph_def.node.extend([b_identity_node])
    add_node = quantize_graph.create_node("Add", add_name,
                                          [a_identity_name, b_identity_name])
    quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
    graph_def.node.extend([add_node])

    expected_output = graph_pb2.GraphDef()
    no_op = quantize_graph.create_node("NoOp", no_op_name, [])
    expected_output.node.extend([no_op])
    a_constant = quantize_graph.create_constant_node(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([a_constant])
    a_identity_node = quantize_graph.create_node(
        "Identity", a_identity_name, [a_constant_name, "^" + no_op_name])
    expected_output.node.extend([a_identity_node])
    b_constant = quantize_graph.create_constant_node(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([b_constant])
    add_node = quantize_graph.create_node("Add", add_name,
                                          [a_identity_name, b_constant_name])
    quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
    expected_output.node.extend([add_node])
    expected_output.versions.CopyFrom(graph_def.versions)
    expected_output.library.CopyFrom(graph_def.library)

    output = graph_util.remove_training_nodes(graph_def)
    stripped_output = graph_util.extract_sub_graph(output, [add_name])
    self.assertProtoEquals(expected_output, stripped_output)
Author: AlbertXiebnu, Project: tensorflow, Lines: 58, Source: quantize_graph_test.py


Example 9: test_reshape

  def test_reshape(self):
    """Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize."""

    def make_matmul(name, a, b):
      n = quantize_graph.create_node("MatMul", name, [a.name, b.name])
      quantize_graph.set_attr_dtype(n, "T", dtypes.float32)
      quantize_graph.set_attr_bool(n, "transpose_a", False)
      quantize_graph.set_attr_bool(n, "transpose_b", False)
      return n

    # matmul_1 = input*weight_1
    input_node = quantize_graph.create_constant_node(
        "input", value=[0, 1, 2, 3], dtype=dtypes.float32, shape=[4, 1])
    weight_1_node = quantize_graph.create_constant_node(
        "weight_1",
        value=[.5, .6, .7, .8, .9],
        dtype=dtypes.float32,
        shape=[1, 5])
    matmul_1_node = make_matmul("matmul_1", input_node, weight_1_node)

    # Reshape 4x5 to 10x2.
    new_shape_node = quantize_graph.create_constant_node(
        "new_shape_node", value=[10, 2], dtype=dtypes.int32, shape=[2])
    reshape_node = quantize_graph.create_node(
        "Reshape", "reshape", [matmul_1_node.name, new_shape_node.name])
    quantize_graph.set_attr_dtype(reshape_node, "T", dtypes.float32)

    # matmul_2_node = reshape*weight_2
    weight_2_node = quantize_graph.create_constant_node(
        "weight_2", value=[1.5, 2.5], dtype=dtypes.float32, shape=[2, 1])
    matmul_2_node = make_matmul("matmul_2", reshape_node, weight_2_node)

    g = graph_pb2.GraphDef()
    g.node.extend([
        input_node, weight_1_node, matmul_1_node, new_shape_node, reshape_node,
        weight_2_node, matmul_2_node
    ])

    # Test the graph
    test_graph(g, {}, ["matmul_2"])

    # Verify there is only one Quantize and one Requantize op.
    eightbit_rewriter = quantize_graph.GraphRewriter(
        g, "eightbit", quantized_input_range=None)
    eightbit_graph_def = eightbit_rewriter.rewrite(["matmul_2"])

    ops = [node.op for node in eightbit_graph_def.node]
    # No quantize since all inputs are const and can be quantized up-front.
    self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
    self.assertEqual(1, ops.count("QuantizedReshape"))

    # One dequantize at the end.
    self.assertEqual(1, ops.count("Dequantize"))
Author: AlbertXiebnu, Project: tensorflow, Lines: 53, Source: quantize_graph_test.py


Example 10: test_non_float_reshape

  def test_non_float_reshape(self):
    a = quantize_graph.create_constant_node(
        "a", value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
        dtype=tf.int32, shape=[2, 2, 3])
    shape = quantize_graph.create_constant_node(
        "shape", value=[12], dtype=tf.int32, shape=[1])
    reshape = quantize_graph.create_node(
        "Reshape", "reshape", [a.name, shape.name])
    quantize_graph.set_attr_dtype(reshape, "T", tf.int32)

    g = tf.GraphDef()
    g.node.extend([a, shape, reshape])
    test_graph(g, {}, [reshape.name])
Author: Hwhitetooth, Project: tensorflow, Lines: 13, Source: quantize_graph_test.py


Example 11: test_relu6

 def test_relu6(self):
   input_constant_name = "input_constant"
   relu6_name = "relu6"
   float_graph_def = graph_pb2.GraphDef()
   input_constant = quantize_graph.create_constant_node(
       input_constant_name,
       value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
       dtype=dtypes.float32,
       shape=[1, 2, 6, 1])
   float_graph_def.node.extend([input_constant])
   relu6_node = quantize_graph.create_node("Relu6", relu6_name,
                                           [input_constant_name])
   quantize_graph.set_attr_dtype(relu6_node, "T", dtypes.float32)
   float_graph_def.node.extend([relu6_node])
   test_graph(float_graph_def, {}, [relu6_name])
Author: AlbertXiebnu, Project: tensorflow, Lines: 15, Source: quantize_graph_test.py


Example 12: test_bias_add_w_fake_quant_w_min_max_vars

  def test_bias_add_w_fake_quant_w_min_max_vars(self):
    input_node = quantize_graph.create_constant_node(
        "input",
        value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        dtype=dtypes.float32,
        shape=[1, 1, 2, 5])
    offset_node = quantize_graph.create_constant_node(
        "offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
    bias_add_node = quantize_graph.create_node(
        "BiasAdd", "bias_add", [input_node.name, offset_node.name])
    quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)

    min_node = quantize_graph.create_constant_node(
        "min_bias_add", value=-.5, dtype=dtypes.float32, shape=[])
    max_node = quantize_graph.create_constant_node(
        "max_bias_add", value=15.5, dtype=dtypes.float32, shape=[])
    fake_quant_node = quantize_graph.create_node(
        "FakeQuantWithMinMaxVars", "fake_quant",
        [bias_add_node.name, min_node.name, max_node.name])

    float_graph_def = graph_pb2.GraphDef()
    float_graph_def.node.extend([
        input_node, offset_node, bias_add_node, min_node, max_node,
        fake_quant_node
    ])
    test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)

    # Verify there is only one Quantize and one Requantize op.
    # Pass in fallback_quantization_range, although it will have no effect
    # because the FakeQuantWithMinMaxVars are used instead.
    eightbit_rewriter = quantize_graph.GraphRewriter(
        float_graph_def,
        "eightbit",
        quantized_input_range=None,
        fallback_quantization_range=[-100, 100])
    eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])

    ops = [node.op for node in eightbit_graph_def.node]
    node_names = [node.name for node in eightbit_graph_def.node]
    # No quantize since all inputs are const and can be quantized up-front.
    self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))

    # One dequantize at the end.
    self.assertEqual(1, ops.count("Dequantize"))

    # The fallback constants are not in the graph.
    self.assertEqual(0, node_names.count("fallback_quantization_min_value"))
    self.assertEqual(0, node_names.count("fallback_quantization_max_value"))
Author: AlbertXiebnu, Project: tensorflow, Lines: 48, Source: quantize_graph_test.py


Example 13: test_non_float_concat

  def test_non_float_concat(self):
    concat_dim = quantize_graph.create_constant_node(
        "concat_dim", value=0, dtype=tf.int32, shape=[])
    a = quantize_graph.create_constant_node(
        "a", value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
        dtype=tf.int32, shape=[2, 2, 3])
    b = quantize_graph.create_constant_node(
        "b", value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
        dtype=tf.int32, shape=[2, 2, 3])
    concat = quantize_graph.create_node(
        "Concat", "concat", [concat_dim.name, a.name, b.name])
    quantize_graph.set_attr_int(concat, "N", 2)
    quantize_graph.set_attr_dtype(concat, "T", tf.int32)

    g = tf.GraphDef()
    g.node.extend([concat_dim, a, b, concat])
    test_graph(g, {}, [concat.name])
Author: Hwhitetooth, Project: tensorflow, Lines: 17, Source: quantize_graph_test.py


Example 14: test_avg_pool

 def test_avg_pool(self):
   input_constant_name = "input_constant"
   avg_pool_name = "avg_pool"
   float_graph_def = graph_pb2.GraphDef()
   input_constant = quantize_graph.create_constant_node(
       input_constant_name,
       value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
       dtype=dtypes.float32,
       shape=[1, 2, 6, 1])
   float_graph_def.node.extend([input_constant])
   avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
                                              [input_constant_name])
   quantize_graph.set_attr_dtype(avg_pool_node, "T", dtypes.float32)
   quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
   quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
   quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
   float_graph_def.node.extend([avg_pool_node])
   test_graph(float_graph_def, {}, [avg_pool_name])
Author: AlbertXiebnu, Project: tensorflow, Lines: 18, Source: quantize_graph_test.py


Example 15: test_batch_norm

 def test_batch_norm(self):
   input_constant_name = "input_constant"
   mean_constant_name = "mean_constant"
   variance_constant_name = "variance_constant"
   beta_constant_name = "beta_constant"
   gamma_constant_name = "gamma_constant"
   batch_norm_name = "batch_norm"
   float_graph_def = tf.GraphDef()
   input_constant = quantize_graph.create_constant_node(input_constant_name,
                                                        value=[1, 4, 2, 5, 3,
                                                               6, -1, -4, -2,
                                                               -5, -3, -6],
                                                        dtype=tf.float32,
                                                        shape=[1, 1, 6, 2])
   float_graph_def.node.extend([input_constant])
   mean_constant = quantize_graph.create_constant_node(mean_constant_name,
                                                       value=[10, 20],
                                                       dtype=tf.float32,
                                                       shape=[2])
   float_graph_def.node.extend([mean_constant])
   variance_constant = quantize_graph.create_constant_node(
       variance_constant_name, value=[0.25, 0.5], dtype=tf.float32, shape=[2])
   float_graph_def.node.extend([variance_constant])
   beta_constant = quantize_graph.create_constant_node(beta_constant_name,
                                                       value=[0.1, 0.6],
                                                       dtype=tf.float32,
                                                       shape=[2])
   float_graph_def.node.extend([beta_constant])
   gamma_constant = quantize_graph.create_constant_node(gamma_constant_name,
                                                        value=[0, 0],
                                                        dtype=tf.float32,
                                                        shape=[2])
   float_graph_def.node.extend([gamma_constant])
   batch_norm_node = quantize_graph.create_node(
       "BatchNormWithGlobalNormalization", batch_norm_name,
       [input_constant_name, mean_constant_name, variance_constant_name,
        beta_constant_name, gamma_constant_name])
   quantize_graph.set_attr_dtype(batch_norm_node, "T", tf.float32)
   quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
                                False)
   quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
   float_graph_def.node.extend([batch_norm_node])
   test_graph(float_graph_def, {}, [batch_norm_name])
Author: DavidNemeskey, Project: tensorflow, Lines: 43, Source: quantize_graph_test.py


Example 16: test_concat

  def test_concat(self):
    shape_constant_name = "shape_constant"
    a_constant_name = "a_constant"
    b_constant_name = "b_constant"
    concat_name = "concat"

    float_graph_def = tf.GraphDef()
    shape_constant = quantize_graph.create_constant_node(shape_constant_name,
                                                         value=0,
                                                         dtype=tf.int32,
                                                         shape=[])
    float_graph_def.node.extend([shape_constant])
    a_constant = quantize_graph.create_constant_node(a_constant_name,
                                                     value=[1, 2, 3, 4, 5, 6, 7,
                                                            8, 9, 10, 11, 12],
                                                     dtype=tf.float32,
                                                     shape=[2, 2, 3])
    float_graph_def.node.extend([a_constant])
    b_constant = quantize_graph.create_constant_node(b_constant_name,
                                                     value=[13, 14, 15, 16, 17,
                                                            18, 19, 20, 21, 22,
                                                            23, 24],
                                                     dtype=tf.float32,
                                                     shape=[2, 2, 3])
    float_graph_def.node.extend([b_constant])
    concat_node = quantize_graph.create_node("Concat", concat_name,
                                             [shape_constant_name,
                                              a_constant_name, b_constant_name])
    quantize_graph.set_attr_int(concat_node, "N", 2)
    quantize_graph.set_attr_dtype(concat_node, "T", tf.float32)
    float_graph_def.node.extend([concat_node])

    test_graph(float_graph_def, {}, [concat_name])

    # Verify the concat is quantized.
    eightbit_rewriter = quantize_graph.GraphRewriter(
        float_graph_def, "eightbit", quantized_input_range=None)
    eightbit_graph_def = eightbit_rewriter.rewrite([concat_name])

    ops = [node.op for node in eightbit_graph_def.node]
    self.assertEqual(1, ops.count("QuantizedConcat"))
Author: Hwhitetooth, Project: tensorflow, Lines: 41, Source: quantize_graph_test.py


Example 17: test_quantized_input_range_bias_add

  def test_quantized_input_range_bias_add(self):
    input_shape = [1, 1, 2, 6]
    input_n = quantize_graph.create_node("PlaceholderV2", "input", [])
    quantize_graph.set_attr_dtype(input_n, "dtype", dtypes.float32)
    quantize_graph.set_attr_shape(input_n, "shape", input_shape)
    offset_n = quantize_graph.create_constant_node(
        "offset", value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6])
    bias_add_n = quantize_graph.create_node("BiasAdd", "bias_add",
                                            [input_n.name, offset_n.name])
    quantize_graph.set_attr_dtype(bias_add_n, "T", dtypes.float32)

    float_graph_def = graph_pb2.GraphDef()
    float_graph_def.node.extend([input_n, offset_n, bias_add_n])

    input_map = {
        input_n.name + ":0":
            np.reshape([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)
    }
    self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
                                         [bias_add_n.name], [-1, 20.])
    self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
                                         [bias_add_n.name], [0, 12.])
Author: AlbertXiebnu, Project: tensorflow, Lines: 22, Source: quantize_graph_test.py


Example 18: test_bias_add

 def test_bias_add(self):
   input_constant_name = "input_constant"
   offset_constant_name = "offset_constant"
   bias_add_name = "bias_add"
   float_graph_def = graph_pb2.GraphDef()
   input_constant = quantize_graph.create_constant_node(
       input_constant_name,
       value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
       dtype=dtypes.float32,
       shape=[1, 1, 2, 6])
   float_graph_def.node.extend([input_constant])
   offset_constant = quantize_graph.create_constant_node(
       offset_constant_name,
       value=[1, 2, 3, 4, 5, 6],
       dtype=dtypes.float32,
       shape=[6])
   float_graph_def.node.extend([offset_constant])
   bias_add_node = quantize_graph.create_node(
       "BiasAdd", bias_add_name, [input_constant_name, offset_constant_name])
   quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
   float_graph_def.node.extend([bias_add_node])
   test_graph(float_graph_def, {}, [bias_add_name])
Author: AlbertXiebnu, Project: tensorflow, Lines: 22, Source: quantize_graph_test.py


Example 19: test_identity

  def test_identity(self):
    input_constant_name = "input_constant"
    identity_name = "identity"
    float_graph_def = graph_pb2.GraphDef()
    input_constant = quantize_graph.create_constant_node(
        input_constant_name,
        value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
        dtype=dtypes.float32,
        shape=[2, 6])
    float_graph_def.node.extend([input_constant])
    identity_node = quantize_graph.create_node("Identity", identity_name,
                                               [input_constant_name])
    quantize_graph.set_attr_dtype(identity_node, "T", dtypes.float32)
    float_graph_def.node.extend([identity_node])

    mul_name = "mul"
    mul_node = quantize_graph.create_node("Mul", mul_name,
                                          [identity_name, identity_name])
    quantize_graph.set_attr_dtype(mul_node, "T", dtypes.float32)
    float_graph_def.node.extend([mul_node])

    test_graph(float_graph_def, {}, [mul_name])
Author: AlbertXiebnu, Project: tensorflow, Lines: 22, Source: quantize_graph_test.py


Example 20: test_remove_redundant_quantization

  def test_remove_redundant_quantization(self):
    a_constant_name = "a_constant"
    a_constant_min_name = "a_constant_min"
    a_constant_max_name = "a_constant_max"
    a_dequantize_name = "a_dequantize"
    a_quantize_name = "a_quantize"
    b_constant_name = "b_constant"
    b_constant_min_name = "b_constant_min"
    b_constant_max_name = "b_constant_max"
    b_dequantize_name = "b_dequantize"
    b_quantize_name = "b_quantize"
    mat_mul_name = "mat_mul"
    graph_def = graph_pb2.GraphDef()
    a_constant = quantize_graph.create_constant_node(
        a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
    graph_def.node.extend([a_constant])
    a_constant_min = quantize_graph.create_constant_node(
        a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([a_constant_min])
    a_constant_max = quantize_graph.create_constant_node(
        a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([a_constant_max])
    a_dequantize_node = quantize_graph.create_node(
        "Dequantize", a_dequantize_name,
        [a_constant_name, a_constant_min_name, a_constant_max_name])
    quantize_graph.set_attr_dtype(a_dequantize_node, "T", dtypes.uint8)
    graph_def.node.extend([a_dequantize_node])
    a_quantize_node = quantize_graph.create_node(
        "QuantizeV2", a_quantize_name,
        [a_dequantize_name, a_dequantize_name + ":1", a_dequantize_name + ":2"])
    quantize_graph.set_attr_dtype(a_quantize_node, "T", dtypes.uint8)
    graph_def.node.extend([a_quantize_node])
    b_constant = quantize_graph.create_constant_node(
        b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
    graph_def.node.extend([b_constant])
    b_constant_min = quantize_graph.create_constant_node(
        b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([b_constant_min])
    b_constant_max = quantize_graph.create_constant_node(
        b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([b_constant_max])
    b_dequantize_node = quantize_graph.create_node(
        "Dequantize", b_dequantize_name,
        [b_constant_name, b_constant_min_name, b_constant_max_name])
    quantize_graph.set_attr_dtype(b_dequantize_node, "T", dtypes.uint8)
    graph_def.node.extend([b_dequantize_node])
    b_quantize_node = quantize_graph.create_node(
        "QuantizeV2", b_quantize_name,
        [b_dequantize_name, b_dequantize_name + ":1", b_dequantize_name + ":2"])
    quantize_graph.set_attr_dtype(b_quantize_node, "T", dtypes.uint8)
    graph_def.node.extend([b_quantize_node])
    mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
        a_quantize_name, b_quantize_name, a_quantize_name + ":1",
        a_quantize_name + ":2", b_quantize_name + ":1", b_quantize_name + ":2"
    ])
    quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
    quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
    graph_def.node.extend([mat_mul_node])

    expected_output = graph_pb2.GraphDef()
    a_constant = quantize_graph.create_constant_node(
        a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
    expected_output.node.extend([a_constant])
    a_constant_min = quantize_graph.create_constant_node(
        a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([a_constant_min])
    a_constant_max = quantize_graph.create_constant_node(
        a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([a_constant_max])
    b_constant = quantize_graph.create_constant_node(
        b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
    expected_output.node.extend([b_constant])
    b_constant_min = quantize_graph.create_constant_node(
        b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([b_constant_min])
    b_constant_max = quantize_graph.create_constant_node(
        b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([b_constant_max])
    mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
        a_constant_name, b_constant_name, a_constant_min_name,
        a_constant_max_name, b_constant_min_name, b_constant_max_name
    ])
    quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
    quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
    expected_output.node.extend([mat_mul_node])
    expected_output.versions.CopyFrom(graph_def.versions)
    expected_output.library.CopyFrom(graph_def.library)

    rewriter = quantize_graph.GraphRewriter(
        graph_def, [mat_mul_name], quantized_input_range=None)
    output = rewriter.remove_redundant_quantization(graph_def)
    stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name])
    self.assertProtoEquals(expected_output, stripped_output)
Author: AlbertXiebnu, Project: tensorflow, Lines: 93, Source: quantize_graph_test.py



Note: The tensorflow.tools.quantization.quantize_graph.set_attr_dtype examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; refer to each project's license before distributing or using the code. Do not reproduce without permission.

