Python tensorflow.initialize_all_tables Function Code Examples


This article collects typical usage examples of the Python function tensorflow.initialize_all_tables. If you are wondering what initialize_all_tables does, how to call it, or what it looks like in real code, the curated examples below should help.



The following presents 20 code examples of the initialize_all_tables function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
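For orientation before the examples: tf.initialize_all_tables() returns a single op that initializes every lookup table in the graph, and it must be run before any table lookup. It was deprecated in TensorFlow 1.0 in favor of tf.tables_initializer(). A minimal, self-contained sketch of the basic pattern (TF 1.x graph mode, using the same tf.contrib.lookup API as Example 2 below):

import tensorflow as tf

# Build a string -> int64 lookup table; unseen keys map to default_value.
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
    tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
    default_value=-1)
output = table.lookup(tf.constant(["salad", "tank"]))

with tf.Session() as sess:
    # Tables must be initialized before the first lookup;
    # tf.tables_initializer() is the non-deprecated equivalent.
    sess.run(tf.initialize_all_tables())
    print(sess.run(output))  # => [ 1 -1]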

Example 1: testMultipleHashTables

  def testMultipleHashTables(self):
    with self.test_session() as sess:
      shared_name = ''
      default_val = -1
      table1 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
      table2 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
      table3 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)

      keys = tf.constant(['brain', 'salad', 'surgery'])
      values = tf.constant([0, 1, 2], tf.int64)
      table1.initialize_from(keys, values)
      table2.initialize_from(keys, values)
      table3.initialize_from(keys, values)

      tf.initialize_all_tables().run()
      self.assertAllEqual(3, table1.size().eval())
      self.assertAllEqual(3, table2.size().eval())
      self.assertAllEqual(3, table3.size().eval())

      input_string = tf.constant(['brain', 'salad', 'tank'])
      output1 = table1.lookup(input_string)
      output2 = table2.lookup(input_string)
      output3 = table3.lookup(input_string)

      out1, out2, out3 = sess.run([output1, output2, output3])
      self.assertAllEqual([0, 1, -1], out1)
      self.assertAllEqual([0, 1, -1], out2)
      self.assertAllEqual([0, 1, -1], out3)
Author: sumodm | Project: tensorflow | Lines: 28 | Source: lookup_table_op_test.py


Example 2: testMultipleHashTables

  def testMultipleHashTables(self):
    with self.test_session() as sess:
      default_val = -1
      keys = tf.constant(["brain", "salad", "surgery"])
      values = tf.constant([0, 1, 2], tf.int64)

      table1 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
          default_val)
      table2 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
          default_val)
      table3 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
          default_val)

      tf.initialize_all_tables().run()
      self.assertAllEqual(3, table1.size().eval())
      self.assertAllEqual(3, table2.size().eval())
      self.assertAllEqual(3, table3.size().eval())

      input_string = tf.constant(["brain", "salad", "tank"])
      output1 = table1.lookup(input_string)
      output2 = table2.lookup(input_string)
      output3 = table3.lookup(input_string)

      out1, out2, out3 = sess.run([output1, output2, output3])
      self.assertAllEqual([0, 1, -1], out1)
      self.assertAllEqual([0, 1, -1], out2)
      self.assertAllEqual([0, 1, -1], out3)
Author: 2020zyc | Project: tensorflow | Lines: 30 | Source: lookup_ops_test.py


Example 3: test_duplicate_entries

    def test_duplicate_entries(self):
        with self.test_session():
            mapping_strings = tf.constant(["hello", "hello"])
            indices = tf.constant([0, 1, 4], tf.int64)
            feats = tf.contrib.lookup.index_to_string(indices, mapping=mapping_strings)
            tf.initialize_all_tables().run()
            self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())

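            # Re-running the table initializer once the tables are already
            # initialized is expected to fail with an OpError: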
            self.assertRaises(tf.OpError, tf.initialize_all_tables().run)
Author: RuhiSharma | Project: tensorflow | Lines: 9 | Source: lookup_ops_test.py


Example 4: test_index_to_string

    def test_index_to_string(self):
        with self.test_session():
            mapping_strings = tf.constant(["brain", "salad", "surgery"])
            indices = tf.constant([0, 1, 2, 3], tf.int64)
            feats = tf.contrib.lookup.index_to_string(indices, mapping=mapping_strings)

            self.assertRaises(tf.OpError, feats.eval)
            tf.initialize_all_tables().run()

            self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"), feats.eval())
Author: RuhiSharma | Project: tensorflow | Lines: 10 | Source: lookup_ops_test.py


Example 5: test_string_to_index_with_default_value

    def test_string_to_index_with_default_value(self):
        default_value = -42
        with self.test_session():
            mapping_strings = tf.constant(["brain", "salad", "surgery"])
            feats = tf.constant(["salad", "surgery", "tarkus"])
            indices = tf.contrib.lookup.string_to_index(feats, mapping=mapping_strings, default_value=default_value)
            self.assertRaises(tf.OpError, indices.eval)

            tf.initialize_all_tables().run()
            self.assertAllEqual((1, 2, default_value), indices.eval())
Author: RuhiSharma | Project: tensorflow | Lines: 10 | Source: lookup_ops_test.py


Example 6: test_index_to_string_with_default_value

    def test_index_to_string_with_default_value(self):
        default_value = b"NONE"
        with self.test_session():
            mapping_strings = tf.constant(["brain", "salad", "surgery"])
            indices = tf.constant([1, 2, 4], tf.int64)
            feats = tf.contrib.lookup.index_to_string(indices, mapping=mapping_strings, default_value=default_value)
            self.assertRaises(tf.OpError, feats.eval)

            tf.initialize_all_tables().run()
            self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())
Author: RuhiSharma | Project: tensorflow | Lines: 10 | Source: lookup_ops_test.py


Example 7: apply_model

    def apply_model(self, x):
        x = x.data

        tmp = np.zeros((1,1))
        with tf.Session(graph=self._graph) as sess:
            tf.initialize_all_tables().run()

            feed_dict = {self._x: x,
                         self._W: self._result_W,
                         self._b: self._result_b}

            tmp = sess.run(self._y, feed_dict=feed_dict)

        ret = BrewPipeDataFrame('y')
        ret.data = tmp
        return ret
Author: meyerd | Project: brewPipe | Lines: 16 | Source: tf_leastsquares.py


Example 8: testInitializeSameTableWithMultipleNodes

  def testInitializeSameTableWithMultipleNodes(self):
    vocabulary_file = self._createVocabFile("one_column_5.txt")

    with self.test_session() as sess:
      shared_name = "shared-one-columm"
      default_value = -1
      table1 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.TextFileInitializer(
              vocabulary_file, tf.string,
              tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
              tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
      table2 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.TextFileInitializer(
              vocabulary_file, tf.string,
              tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
              tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
      table3 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.TextFileInitializer(
              vocabulary_file, tf.string,
              tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
              tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)

      tf.initialize_all_tables().run()

      input_string = tf.constant(["brain", "salad", "tank"])

      output1 = table1.lookup(input_string)
      output2 = table2.lookup(input_string)
      output3 = table3.lookup(input_string)

      out1, out2, out3 = sess.run([output1, output2, output3])
      self.assertAllEqual([0, 1, -1], out1)
      self.assertAllEqual([0, 1, -1], out2)
      self.assertAllEqual([0, 1, -1], out3)
Author: 2020zyc | Project: tensorflow | Lines: 40 | Source: lookup_ops_test.py


Example 9: testGetModelInput

 def testGetModelInput(self):
   initial_state, sequence_input = self._rnn_estimator._get_model_input(
       self._columns_to_tensors)
   self.assertIsNone(initial_state)
   with self.test_session() as sess:
     sess.run(tf.initialize_all_variables())
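     # Feature columns that look up vocabularies (e.g. sparse/embedding
     # columns keyed by strings) create hash tables under the hood, which is
     # presumably why tables are initialized separately from variables here.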
     sess.run(tf.initialize_all_tables())
     sequence_input_val = sess.run(sequence_input)
   expected_shape = np.array([
       3,         # expected batch size
       2,         # padded sequence length
       3 + 8 + 2  # location keys + embedding dim + measurement dimension
   ])
   self.assertAllEqual(expected_shape, sequence_input_val.shape)
Author: caikehe | Project: tensorflow | Lines: 14 | Source: dynamic_rnn_estimator_test.py


Example 10: testBuildSequenceInputInput

 def testBuildSequenceInputInput(self):
   sequence_input = dynamic_rnn_estimator.build_sequence_input(
       self.columns_to_tensors,
       self.sequence_feature_columns,
       self.context_feature_columns)
   with self.test_session() as sess:
     sess.run(tf.global_variables_initializer())
     sess.run(tf.initialize_all_tables())
     sequence_input_val = sess.run(sequence_input)
   expected_shape = np.array([
       3,         # expected batch size
       2,         # padded sequence length
       3 + 8 + 2  # location keys + embedding dim + measurement dimension
   ])
   self.assertAllEqual(expected_shape, sequence_input_val.shape)
Author: chinnadhurai | Project: block_rnn | Lines: 15 | Source: dynamic_rnn_estimator_test.py


Example 11: testConstructRNN

  def testConstructRNN(self):
    """Test `DynamicRNNEstimator._construct_rnn`."""
    initial_state, sequence_input = self._rnn_estimator._get_model_input(
        self._columns_to_tensors)
    activations_t, final_state_t = self._rnn_estimator._construct_rnn(
        initial_state, sequence_input)

    # Obtain values of activations and final state.
    with tf.Session() as sess:
      sess.run(tf.initialize_all_variables())
      sess.run(tf.initialize_all_tables())
      activations, final_state = sess.run([activations_t, final_state_t])

    expected_activations_shape = np.array([3, 2, self.NUM_LABEL_COLUMNS])
    self.assertAllEqual(expected_activations_shape, activations.shape)
    expected_state_shape = np.array([3, self.NUM_RNN_CELL_UNITS])
    self.assertAllEqual(expected_state_shape, final_state.shape)
Author: caikehe | Project: tensorflow | Lines: 17 | Source: dynamic_rnn_estimator_test.py


Example 12: testConstructRNN

  def testConstructRNN(self):
    initial_state = None
    sequence_input = dynamic_rnn_estimator.build_sequence_input(
        self.columns_to_tensors,
        self.sequence_feature_columns,
        self.context_feature_columns)
    activations_t, final_state_t = dynamic_rnn_estimator.construct_rnn(
        initial_state,
        sequence_input,
        self.rnn_cell,
        self.mock_target_column.num_label_columns)

    # Obtain values of activations and final state.
    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.initialize_all_tables())
      activations, final_state = sess.run([activations_t, final_state_t])

    expected_activations_shape = np.array([3, 2, self.NUM_LABEL_COLUMNS])
    self.assertAllEqual(expected_activations_shape, activations.shape)
    expected_state_shape = np.array([3, self.NUM_RNN_CELL_UNITS])
    self.assertAllEqual(expected_state_shape, final_state.shape)
Author: chinnadhurai | Project: block_rnn | Lines: 22 | Source: dynamic_rnn_estimator_test.py


Example 13: main


#......... some of the code is omitted here .........
        exit(1)

      logging.info("Export the saved model to {}".format(
          FLAGS.saved_model_path))
      export_path_base = FLAGS.saved_model_path
      export_path = os.path.join(
          compat.as_bytes(export_path_base),
          compat.as_bytes(str(FLAGS.model_version)))

      model_signature = signature_def_utils.build_signature_def(
          inputs={
              "keys": utils.build_tensor_info(keys_placeholder),
              "indexs": utils.build_tensor_info(sparse_index),
              "ids": utils.build_tensor_info(sparse_ids),
              "values": utils.build_tensor_info(sparse_values),
              "shape": utils.build_tensor_info(sparse_shape)
          },
          outputs={
              "keys": utils.build_tensor_info(keys),
              "softmax": utils.build_tensor_info(inference_softmax),
              "prediction": utils.build_tensor_info(inference_op)
          },
          method_name=signature_constants.PREDICT_METHOD_NAME)

      try:
        builder = saved_model_builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            sess,
            [tag_constants.SERVING],
            clear_devices=True,
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                model_signature,
            },
            #legacy_init_op=legacy_init_op)
            legacy_init_op=tf.group(tf.initialize_all_tables(),
                                    name="legacy_init_op"))

        builder.save()
      except Exception as e:
        logging.error("Fail to export saved model, exception: {}".format(e))

    elif MODE == "inference":
      if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint found, exit now")
        exit(1)

      # Load inference test data
      inference_result_file_name = "./inference_result.txt"
      inference_test_file_name = "./data/a8a_test.libsvm"
      labels = []
      feature_ids = []
      feature_values = []
      feature_index = []
      ins_num = 0
      for line in open(inference_test_file_name, "r"):
        tokens = line.split(" ")
        labels.append(int(tokens[0]))
        feature_num = 0
        for feature in tokens[1:]:
          feature_id, feature_value = feature.split(":")
          feature_ids.append(int(feature_id))
          feature_values.append(float(feature_value))
          feature_index.append([ins_num, feature_num])
          feature_num += 1
        ins_num += 1

      # Run inference
      start_time = datetime.datetime.now()
      prediction, prediction_softmax = sess.run(
          [inference_op, inference_softmax],
          feed_dict={sparse_index: feature_index,
                     sparse_ids: feature_ids,
                     sparse_values: feature_values,
                     sparse_shape: [ins_num, FEATURE_SIZE]})

      end_time = datetime.datetime.now()

      # Compute accuracy
      label_number = len(labels)
      correct_label_number = 0
      for i in range(label_number):
        if labels[i] == prediction[i]:
          correct_label_number += 1
      accuracy = float(correct_label_number) / label_number

      # Compute auc
      expected_labels = np.array(labels)
      predict_labels = prediction_softmax[:, 0]
      fpr, tpr, thresholds = metrics.roc_curve(expected_labels,
                                               predict_labels,
                                               pos_label=0)
      auc = metrics.auc(fpr, tpr)
      logging.info("[{}] Inference accuracy: {}, auc: {}".format(
          end_time - start_time, accuracy, auc))

      # Save result into the file
      np.savetxt(inference_result_file_name, prediction_softmax, delimiter=",")
      logging.info("Save result to file: {}".format(
          inference_result_file_name))
Author: zhongkeli | Project: deep_recommend_system | Lines: 101 | Source: sparse_classifier.py


Example 14: export


#......... some of the code is omitted here .........
  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
    feature_configs = {
        'image/encoded': tf.FixedLenFeature(
            shape=[], dtype=tf.string),
    }
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    jpegs = tf_example['image/encoded']
    images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    # Note that the 0th index is an unused background class
    # (see inception model definition code).
    class_descriptions = ['unused background']
    for s in synsets:
      class_descriptions.append(texts[s])
    class_tensor = tf.constant(class_descriptions)

    classes = tf.contrib.lookup.index_to_string(
        tf.to_int64(indices), mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print 'Successfully loaded model from %s at step=%s.' % (
            ckpt.model_checkpoint_path, global_step)
      else:
        print 'No checkpoint file found at %s' % FLAGS.checkpoint_dir
        return

      # Export inference model.
      output_path = os.path.join(
          compat.as_bytes(FLAGS.output_dir),
          compat.as_bytes(str(FLAGS.model_version)))
      print 'Exporting trained model to', output_path
      builder = saved_model_builder.SavedModelBuilder(output_path)

      # Build the signature_def_map.
      classify_inputs_tensor_info = utils.build_tensor_info(
          serialized_tf_example)
      classes_output_tensor_info = utils.build_tensor_info(classes)
      scores_output_tensor_info = utils.build_tensor_info(values)

      classification_signature = signature_def_utils.build_signature_def(
          inputs={
              signature_constants.CLASSIFY_INPUTS: classify_inputs_tensor_info
          },
          outputs={
              signature_constants.CLASSIFY_OUTPUT_CLASSES:
                  classes_output_tensor_info,
              signature_constants.CLASSIFY_OUTPUT_SCORES:
                  scores_output_tensor_info
          },
          method_name=signature_constants.CLASSIFY_METHOD_NAME)

      predict_inputs_tensor_info = utils.build_tensor_info(jpegs)
      prediction_signature = signature_def_utils.build_signature_def(
          inputs={'images': predict_inputs_tensor_info},
          outputs={
              'classes': classes_output_tensor_info,
              'scores': scores_output_tensor_info
          },
          method_name=signature_constants.PREDICT_METHOD_NAME)

      legacy_init_op = tf.group(
          tf.initialize_all_tables(), name='legacy_init_op')
      builder.add_meta_graph_and_variables(
          sess, [tag_constants.SERVING],
          signature_def_map={
              'predict_images':
                  prediction_signature,
              signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                  classification_signature,
          },
          legacy_init_op=legacy_init_op)

      builder.save()
      print 'Successfully exported model to %s' % FLAGS.output_dir
Author: kchodorow | Project: serving | Lines: 101 | Source: inception_saved_model.py
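A pattern worth calling out in this and the other serving exports (Examples 13, 18, and 20): when a SavedModel contains lookup tables, the table initializers are grouped into a single op and passed to the builder as legacy_init_op, so the tables get initialized when the model is loaded for serving. Below is a stripped-down sketch of just that pattern; the export path and the variables initializer are illustrative assumptions, not part of the original code:

import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants

# Group every table initializer into one op; the SavedModel loader runs
# legacy_init_op after restoring the variables.
legacy_init_op = tf.group(tf.initialize_all_tables(), name="legacy_init_op")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    builder = saved_model_builder.SavedModelBuilder("/tmp/example_model/1")
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        legacy_init_op=legacy_init_op)
    builder.save()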


Example 15: operation

    Args:
      logits: A `Tensor`. Must be one of the following types: `float32`, `float64`.
        2-D with shape `[batch_size, num_classes]`.
      name: A name for the operation (optional).

    Returns:
      A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
'''

# To implement cross-entropy
# placeholder to input the correct answers
y_ = tf.placeholder(tf.float32, [None, mnist.train.labels.shape[1]])

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

# training setup
# learning rate = 0.5
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# initialize the variables we created
init = tf.initialize_all_tables()
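# NOTE: this looks like a bug in the original project -- initialize_all_tables()
# initializes lookup tables only; the comment above calls for
# tf.initialize_all_variables() (see the corrected sketch after this example).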

# launch the model in a Session
sess = tf.Session()
sess.run(init)

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
Author: autodrive | Project: tensor_flow_practice | Lines: 29 | Source: download_mnist.py
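As flagged in the NOTE above, Example 15 almost certainly meant to initialize variables: tf.initialize_all_tables() does not touch variables, so as written the model weights would remain uninitialized and the first sess.run(train_step) would fail. A corrected, self-contained sketch of the same training loop, assuming the standard MNIST softmax model from the TensorFlow 1.x tutorials (which the excerpt's names x, y, y_ imply):

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# The fix: initialize variables, not tables.
init = tf.initialize_all_variables()

sess = tf.Session()
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})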


Example 16: export

def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()
  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    # TODO(b/27776734): Add batching support.
    jpegs = tf.placeholder(tf.string, shape=(1))
    image_buffer = tf.squeeze(jpegs, [0])
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    # After this point, all image pixels reside in [0,1)
    # until the very end, when they're rescaled to (-1, 1).  The various
    # adjust_* ops all require this range for dtype float.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=0.875)
    # Resize the image to the original height and width.
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image,
                                     [FLAGS.image_size, FLAGS.image_size],
                                     align_corners=False)
    image = tf.squeeze(image, [0])
    # Finally, rescale to [-1,1] instead of [0, 1)
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
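    # NOTE: tf.sub and tf.mul were renamed tf.subtract and tf.multiply in
    # TensorFlow 1.0; this excerpt predates the rename.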
    images = tf.expand_dims(image, 0)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    class_tensor = tf.constant([texts[s] for s in synsets])

    classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      init_op = tf.group(tf.initialize_all_tables(), name='init_op')
      model_exporter = exporter.Exporter(saver)
      signature = exporter.classification_signature(
          input_tensor=jpegs, classes_tensor=classes, scores_tensor=values)
      model_exporter.init(default_graph_signature=signature, init_op=init_op)
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)
Author: Jamesleons | Project: serving | Lines: 85 | Source: inception_export.py


Example 17: export

def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()
  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    jpegs = tf.placeholder(tf.string)
    images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    # Note that the 0th index is an unused background class
    # (see inception model definition code).
    class_descriptions = ['unused background']
    for s in synsets:
      class_descriptions.append(texts[s])
    class_tensor = tf.constant(class_descriptions)

    classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      init_op = tf.group(tf.initialize_all_tables(), name='init_op')
      model_exporter = exporter.Exporter(saver)
      model_exporter.init(init_op=init_op, named_graph_signatures={
          'inputs': exporter.generic_signature({'images': jpegs}),
          'outputs': exporter.generic_signature({'classes': classes,
                                                 'scores': values})})
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)
Author: damienmg | Project: serving | Lines: 68 | Source: inception_export.py


Example 18: main

def main(_):
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_export.py [--training_iteration=x] '
          '[--model_version=y] export_dir')
    sys.exit(-1)
  if FLAGS.training_iteration <= 0:
    print 'Please specify a positive value for training iteration.'
    sys.exit(-1)
  if FLAGS.model_version <= 0:
    print 'Please specify a positive value for version number.'
    sys.exit(-1)

  # Train model
  print 'Training model...'
  mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
  sess = tf.InteractiveSession()
  serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
  feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),}
  tf_example = tf.parse_example(serialized_tf_example, feature_configs)
  x = tf.identity(tf_example['x'], name='x')  # use tf.identity() to assign name
  y_ = tf.placeholder('float', shape=[None, 10])
  w = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  sess.run(tf.initialize_all_variables())
  y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')
  cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
  train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
  values, indices = tf.nn.top_k(y, 10)
  prediction_classes = tf.contrib.lookup.index_to_string(
      tf.to_int64(indices), mapping=tf.constant([str(i) for i in xrange(10)]))
  for _ in range(FLAGS.training_iteration):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
  print 'training accuracy %g' % sess.run(
      accuracy, feed_dict={x: mnist.test.images,
                           y_: mnist.test.labels})
  print 'Done training!'

  # Export model
  # WARNING(break-tutorial-inline-code): The following code snippet is
  # in-lined in tutorials, please update tutorial documents accordingly
  # whenever code changes.
  export_path_base = sys.argv[-1]
  export_path = os.path.join(
      compat.as_bytes(export_path_base),
      compat.as_bytes(str(FLAGS.model_version)))
  print 'Exporting trained model to', export_path
  builder = saved_model_builder.SavedModelBuilder(export_path)

  # Build the signature_def_map.
  classification_inputs = utils.build_tensor_info(serialized_tf_example)
  classification_outputs_classes = utils.build_tensor_info(prediction_classes)
  classification_outputs_scores = utils.build_tensor_info(values)

  classification_signature = signature_def_utils.build_signature_def(
      inputs={signature_constants.CLASSIFY_INPUTS: classification_inputs},
      outputs={
          signature_constants.CLASSIFY_OUTPUT_CLASSES:
              classification_outputs_classes,
          signature_constants.CLASSIFY_OUTPUT_SCORES:
              classification_outputs_scores
      },
      method_name=signature_constants.CLASSIFY_METHOD_NAME)

  tensor_info_x = utils.build_tensor_info(x)
  tensor_info_y = utils.build_tensor_info(y)

  prediction_signature = signature_def_utils.build_signature_def(
      inputs={'images': tensor_info_x},
      outputs={'scores': tensor_info_y},
      method_name=signature_constants.PREDICT_METHOD_NAME)

  legacy_init_op = tf.group(tf.initialize_all_tables(), name='legacy_init_op')
  builder.add_meta_graph_and_variables(
      sess, [tag_constants.SERVING],
      signature_def_map={
          'predict_images':
              prediction_signature,
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              classification_signature,
      },
      legacy_init_op=legacy_init_op)

  builder.save()

  print 'Done exporting!'
Author: kchodorow | Project: serving | Lines: 88 | Source: mnist_saved_model.py


Example 19:

#!/usr/bin/env python
import tensorflow as tf

# Create a Constant op
# The op is added as a node to the default graph.
#
# The value returned by the constructor represents the output
# of the Constant op.
hello = tf.constant('Hello, TensorFlow!')
x = tf.placeholder("float", 3)
a = tf.placeholder("float", shape=[None, 3])

y = x*2
b = a*2

# Start tf session
sess = tf.Session()
sess.run(tf.initialize_all_tables())
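# NOTE: this graph has no lookup tables, so initialize_all_tables() is a no-op
# here; with only constants and placeholders in the graph, nothing actually
# needs initialization and the call could be dropped.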

print sess.run(hello)
print sess.run(y, feed_dict={x:[1,2,3]})
print sess.run(b, feed_dict={a:[[1,2,3], [4,5,6]]})

sess.close()
Author: feiskyer | Project: feiskyer.github.io | Lines: 24 | Source: helloworld.py


Example 20: save_model

def save_model():
    with tf.Graph().as_default():
        # define the placeholders
        _images = tf.placeholder(tf.float32, shape=[None, FLAGS.image_height, FLAGS.image_width, 3])

        # Inference.
        logits = reconobook_modelo.inference(_images)

        # clase = tf.argmax(logits, 1)

        values, indices = tf.nn.top_k(logits, 10)
        prediction_classes = tf.contrib.lookup.index_to_string(
            tf.to_int64(indices), mapping=tf.constant([str(i) for i in range(10)]))

        with tf.Session() as sess:
            # Load the model
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay)
            variables_to_restore = variable_averages.variables_to_restore()
            saver = tf.train.Saver(variables_to_restore)
            saver.restore(sess, ckpt.model_checkpoint_path)

            # Define the path where the model will be saved
            export_path = os.path.join(
                compat.as_bytes(FLAGS.export_model_dir),
                compat.as_bytes(str(FLAGS.model_version)))

            # if the export directory already exists, delete it
            # (SavedModelBuilder requires a fresh path)
            if os.path.exists(export_path):
                shutil.rmtree(export_path)

            print('Exporting model to %s' % export_path)

            # Create the builder
            builder = saved_model_builder.SavedModelBuilder(export_path)

            # Build the signature_def_map.
            classification_inputs = utils.build_tensor_info(_images)
            classification_outputs_classes = utils.build_tensor_info(prediction_classes)
            classification_outputs_scores = utils.build_tensor_info(values)

            classification_signature = signature_def_utils.build_signature_def(
                inputs={signature_constants.CLASSIFY_INPUTS: classification_inputs},
                outputs={
                    signature_constants.CLASSIFY_OUTPUT_CLASSES:
                        classification_outputs_classes,
                    signature_constants.CLASSIFY_OUTPUT_SCORES:
                        classification_outputs_scores
                },
                method_name=signature_constants.CLASSIFY_METHOD_NAME)

            tensor_info_x = utils.build_tensor_info(_images)
            tensor_info_y = utils.build_tensor_info(logits)
            
            prediction_signature = signature_def_utils.build_signature_def(
                inputs={'images': tensor_info_x},
                outputs={'scores': tensor_info_y},
                method_name=signature_constants.PREDICT_METHOD_NAME)

            legacy_init_op = tf.group(tf.initialize_all_tables(), name='legacy_init_op')
            builder.add_meta_graph_and_variables(
                sess, [tag_constants.SERVING],
                signature_def_map={
                    'predict_images':
                        prediction_signature,
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                        classification_signature,
                },
                legacy_init_op=legacy_init_op)

            builder.save()

            print('Model exported')
Author: NicolasPresta | Project: ReconoBook | Lines: 73 | Source: reconobook_save_model.py



Note: The tensorflow.initialize_all_tables examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please refer to the license of the corresponding project before redistributing or using the code; do not reproduce without permission.

