
Python tensorflow.tables_initializer Function Code Examples


This article collects typical usage examples of the Python function tensorflow.tables_initializer. If you are wondering what tables_initializer does, how to call it, or what working code that uses it looks like, the curated examples below should help.



The following presents 20 code examples of the tables_initializer function, sorted by popularity by default.
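
Before diving into the examples, here is a minimal illustrative sketch (assumed for this article, not drawn from any of the projects below) of what tf.tables_initializer does in TensorFlow 1.x: it returns an op that initializes every lookup table registered in the current graph, much as tf.global_variables_initializer does for variables; running a lookup before this op raises a FailedPreconditionError.

import tensorflow as tf

# Hypothetical two-word vocabulary; any lookup table behaves the same way.
vocab = tf.constant(["hello", "world"])
table = tf.contrib.lookup.index_table_from_tensor(vocab, default_value=-1)
ids = table.lookup(tf.constant(["hello", "unknown"]))

with tf.Session() as sess:
    # Lookup tables are stateful and must be initialized before any lookup.
    sess.run(tf.tables_initializer())
    print(sess.run(ids))  # [0, -1]: "unknown" falls back to default_value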

Example 1: train

def train(mnist):
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    weights1 = tf.Variable(
        tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1)
    )
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))
    weights2 = tf.Variable(
        tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1)
    )
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))
    y = inference(x, None, weights1, biases1, weights2, biases2)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step
    )

    variable_averages_op = variable_averages.apply(
        tf.trainable_variables()
    )
    average_y = inference(
        x, variable_averages, weights1, biases1, weights2, biases2
    )

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    regularization = regularizer(weights1) + regularizer(weights2)

    loss = cross_entropy_mean + regularization
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY
    )    
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)


    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        tf.global_variables_initializer().run()  # variables must be initialized before training
        tf.tables_initializer().run()
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}
        for i in range(TRAINING_STEPS):
            if i % 100 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s), validation accuracy using average model is %g" % (i, validate_acc))
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_: ys})
        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print('After %d training step(s), test accuracy using average model is %g' % (TRAINING_STEPS, test_acc))
Developer ID: severalfly, Project: MyTest, Lines: 60, Source: mnistFull.py


Example 2: test_text_corresponds_to_ids

  def test_text_corresponds_to_ids(self):
    charset = create_fake_charset(36)
    ids = tf.constant(
        [[17, 14, 21, 21, 24], [32, 24, 27, 21, 13]], dtype=tf.int64)
    charset_mapper = model.CharsetMapper(charset)

    with self.test_session() as sess:
      tf.tables_initializer().run()
      text = sess.run(charset_mapper.get_text(ids))

    self.assertAllEqual(text, ['hello', 'world'])
Developer ID: banjocui, Project: models, Lines: 11, Source: model_test.py


Example 3: test_predicted_text_has_correct_shape_w_charset

  def test_predicted_text_has_correct_shape_w_charset(self):
    charset = create_fake_charset(self.num_char_classes)
    ocr_model = self.create_model(charset=charset)

    with self.test_session() as sess:
      endpoints_tf = ocr_model.create_base(
          images=self.fake_images, labels_one_hot=None)

      sess.run(tf.global_variables_initializer())
      tf.tables_initializer().run()
      endpoints = sess.run(endpoints_tf)

      self.assertEqual(endpoints.predicted_text.shape, (self.batch_size,))
      self.assertEqual(len(endpoints.predicted_text[0]), self.seq_length)
Developer ID: JiweiHe, Project: models, Lines: 14, Source: model_test.py


Example 4: main

def main(_):
  images_placeholder, endpoints, init_fn = load_model(FLAGS.checkpoint,
                                                      FLAGS.batch_size,
                                                      FLAGS.dataset_name)
  images_data = load_images(FLAGS.image_path_pattern, FLAGS.batch_size,
                            FLAGS.dataset_name)
  with tf.Session() as sess:
    tf.tables_initializer().run()  # required by the CharsetMapper
    init_fn(sess)
    predictions = sess.run(endpoints.predicted_text,
                           feed_dict={images_placeholder: images_data})
  print("Predicted strings:")
  for line in predictions:
    print(line)
Developer ID: Hukongtao, Project: models, Lines: 14, Source: demo_inference.py


Example 5: test_skipgram_randomize

    def test_skipgram_randomize(self):
        test_dataset = tf.contrib.data.Dataset.from_tensor_slices([
          'passj',
          'word',
          'db'
        ])

        config = pe.EmbeddingConfig(
          alphabet='abcdefghijklmnopqrstuvwxyz',
          password_batch=5,
          batch_size=10,
          embedding_window_size=3)
        emb_trainer = pe.EmbeddingTrainer(config)
        examples, labels = emb_trainer.skipgram(test_dataset, randomize=True)

        with self.test_session() as session:
            session.run([tf.tables_initializer()])
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            try:
                session.run([examples, labels])

            except tf.errors.OutOfRangeError:
                pass

            finally:
                coord.request_stop()

            coord.join(threads)
Developer ID: cupslab, Project: neural_network_cracking, Lines: 29, Source: test_pass_embedding.py


Example 6: export

  def export(self, last_checkpoint, output_dir):
    """Builds a prediction graph and xports the model.

    Args:
      last_checkpoint: Path to the latest checkpoint file from training.
      output_dir: Path to the folder to be used to output the model.
    """
    logging.info('Exporting prediction graph to %s', output_dir)
    with tf.Session(graph=tf.Graph()) as sess:
      # Build and save prediction meta graph and trained variable values.
      inputs, outputs = self.build_prediction_graph()
      signature_def_map = {
        'serving_default': signature_def_utils.predict_signature_def(inputs, outputs)
      }
      init_op = tf.global_variables_initializer()
      sess.run(init_op)
      self.restore_from_checkpoint(sess, self.inception_checkpoint_file,
                                   last_checkpoint)
      init_op_serving = control_flow_ops.group(
          variables.local_variables_initializer(),
          tf.tables_initializer())

      builder = saved_model_builder.SavedModelBuilder(output_dir)
      builder.add_meta_graph_and_variables(
          sess, [tag_constants.SERVING],
          signature_def_map=signature_def_map,
          legacy_init_op=init_op_serving)
      builder.save(False)
Developer ID: googledatalab, Project: pydatalab, Lines: 28, Source: _model.py


Example 7: testCharConvEmbedder

  def testCharConvEmbedder(self):
    with open(vocab_file, "w") as vocab:
      vocab.write("h\n"
                  "e\n"
                  "l\n"
                  "w\n"
                  "o\n")
    with open(data_file, "w") as data:
      data.write("hello world !\n")

    embedder = text_inputter.CharConvEmbedder("vocabulary_file", 10, 5)
    data, transformed = _first_element(
        embedder, data_file, {"vocabulary_file": vocab_file})

    input_receiver = embedder.get_serving_input_receiver()
    self.assertAllEqual(
        [None, None, None],
        input_receiver.features["char_ids"].get_shape().as_list())
    self.assertAllEqual(
        [None],
        input_receiver.features["length"].get_shape().as_list())

    with self.test_session() as sess:
      sess.run(tf.tables_initializer())
      sess.run(tf.global_variables_initializer())
      data, transformed = sess.run([data, transformed])
      self.assertNotIn("raw", data)
      self.assertNotIn("tokens", data)
      self.assertAllEqual([3], data["length"])
      self.assertAllEqual(
          [[[0, 1, 2, 2, 4], [3, 4, 5, 2, 5], [5, 5, 5, 5, 5]]],
          data["char_ids"])
      self.assertAllEqual([1, 3, 5], transformed.shape)
Developer ID: yhgon, Project: OpenNMT-tf, Lines: 33, Source: inputter_test.py


Example 8: testWordEmbedder

  def testWordEmbedder(self):
    with open(vocab_file, "w") as vocab:
      vocab.write("the\n"
                  "world\n"
                  "hello\n"
                  "toto\n")
    with open(data_file, "w") as data:
      data.write("hello world !\n")

    embedder = text_inputter.WordEmbedder(
        "vocabulary_file", embedding_size=10)
    data, transformed = _first_element(
        embedder, data_file, {"vocabulary_file": vocab_file})

    input_receiver = embedder.get_serving_input_receiver()
    self.assertAllEqual(
        [None, None],
        input_receiver.features["ids"].get_shape().as_list())
    self.assertAllEqual(
        [None],
        input_receiver.features["length"].get_shape().as_list())

    with self.test_session() as sess:
      sess.run(tf.tables_initializer())
      sess.run(tf.global_variables_initializer())
      data, transformed = sess.run([data, transformed])
      self.assertNotIn("raw", data)
      self.assertNotIn("tokens", data)
      self.assertAllEqual([3], data["length"])
      self.assertAllEqual([[2, 1, 4]], data["ids"])
      self.assertAllEqual([1, 3, 10], transformed.shape)
Developer ID: yhgon, Project: OpenNMT-tf, Lines: 31, Source: inputter_test.py


Example 9: export_model

def export_model(model_info, class_count, saved_model_dir):
  # The SavedModel should hold the eval graph.
  sess, _, _, _, _ = build_eval_session(model_info, class_count)
  graph = sess.graph
  with graph.as_default():
    input_tensor = model_info['resized_input_tensor_name']
    in_image = sess.graph.get_tensor_by_name(input_tensor)
    inputs = {'image': tf.saved_model.utils.build_tensor_info(in_image)}

    out_classes = sess.graph.get_tensor_by_name('final_result:0')
    outputs = {
        'prediction': tf.saved_model.utils.build_tensor_info(out_classes)
    }

    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=inputs,
        outputs=outputs,
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)

    legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')

    # Save out the SavedModel.
    builder = tf.saved_model.builder.SavedModelBuilder(saved_model_dir)
    print("signature=", signature)
    print("key:", tf.saved_model.signature_constants.
            DEFAULT_SERVING_SIGNATURE_DEF_KEY)
    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.saved_model.signature_constants.
            DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                signature
        },
        legacy_init_op=legacy_init_op)
    builder.save()
Developer ID: google, Project: makerfaire-2016, Lines: 35, Source: retrain.py


Example 10: _run_eval

  def _run_eval(self):
    """Run model evaluation and generate summaries."""
    coord = tf.train.Coordinator(clean_stop_exception_types=(
        tf.errors.CancelledError, tf.errors.OutOfRangeError))

    with tf.Session(graph=self._graph) as session:
      # Restores previously saved variables from latest checkpoint
      self._saver.restore(session, self._latest_checkpoint)

      session.run([
          tf.tables_initializer(),
          tf.local_variables_initializer()])
      tf.train.start_queue_runners(coord=coord, sess=session)
      train_step = session.run(self._gs)

      tf.logging.info('Starting Evaluation For Step: {}'.format(train_step))
      with coord.stop_on_exception():
        eval_step = 0
        while not coord.should_stop() and (self._eval_steps is None or
                                           eval_step < self._eval_steps):
          summaries, final_values, _ = session.run(
              [self._summary_op, self._final_ops_dict, self._eval_ops])
          if eval_step % 100 == 0:
            tf.logging.info('On Evaluation Step: {}'.format(eval_step))
          eval_step += 1

      # Write the summaries
      self._file_writer.add_summary(summaries, global_step=train_step)
      self._file_writer.flush()
      tf.logging.info(final_values)
Developer ID: zhang01GA, Project: cloudml-samples, Lines: 30, Source: task.py


Example 11: test_module_export_vocab_on_custom_fs

  def test_module_export_vocab_on_custom_fs(self):
    root_dir = "file://%s" % self.get_temp_dir()
    export_dir = "%s_%s" % (root_dir, "export")
    tf.gfile.MakeDirs(export_dir)
    # Create a module with a vocab file located on a custom filesystem.
    vocab_dir = os.path.join(root_dir, "vocab_location")
    tf.gfile.MakeDirs(vocab_dir)
    vocab_filename = os.path.join(vocab_dir, "tokens.txt")
    tf_utils.atomic_write_string_to_file(vocab_filename, "one", False)

    def create_assets_module_fn():

      def assets_module_fn():
        indices = tf.placeholder(dtype=tf.int64, name="indices")
        table = tf.contrib.lookup.index_to_string_table_from_file(
            vocabulary_file=vocab_filename, default_value="UNKNOWN")
        outputs = table.lookup(indices)
        hub.add_signature(inputs=indices, outputs=outputs)

      return assets_module_fn

    with tf.Graph().as_default():
      assets_module_fn = create_assets_module_fn()
      spec = hub.create_module_spec(assets_module_fn)
      embedding_module = hub.Module(spec)
      with tf.Session() as sess:
        sess.run(tf.tables_initializer())
        embedding_module.export(export_dir, sess)

    module_files = tf.gfile.ListDirectory(export_dir)
    self.assertListEqual(
        ["assets", "saved_model.pb", "tfhub_module.pb", "variables"],
        sorted(module_files))
    module_files = tf.gfile.ListDirectory(os.path.join(export_dir, "assets"))
    self.assertListEqual(["tokens.txt"], module_files)
Developer ID: jankim, Project: hub, Lines: 35, Source: e2e_test.py


Example 12: testDuplicateAssetCopy

  def testDuplicateAssetCopy(self):
    export_path = os.path.join(self.get_temp_dir(), "assets-module")

    def module_with_duplicate_asset():
      vocabulary_file = self.create_vocab_file("tokens2.txt", ["1", "2", "3"])
      indices1 = tf.placeholder(dtype=tf.int64, name="indices1")
      indices2 = tf.placeholder(dtype=tf.int64, name="indices2")
      hub.add_signature(
          inputs={
              "indices_1": indices1,
              "indices_2": indices2,
          },
          outputs={
              "x": do_table_lookup(indices1, vocabulary_file),
              "y": do_table_lookup(indices2, vocabulary_file),
          })

    with tf.Graph().as_default():
      spec = hub.create_module_spec(module_with_duplicate_asset)
      module_a = hub.Module(spec)
      module_a({"indices_1": tf.constant([1, 2], dtype=tf.int64),
                "indices_2": tf.constant([1, 2], dtype=tf.int64)}, as_dict=True)
      with tf.Session() as sess:
        sess.run(tf.tables_initializer())
        module_a.export(export_path, sess)
Developer ID: jankim, Project: hub, Lines: 25, Source: native_module_test.py


Example 13: execute_cpu

  def execute_cpu(self, graph_fn, inputs):
    """Constructs the graph, executes it on CPU and returns the result.

    Args:
      graph_fn: a callable that constructs the tensorflow graph to test. The
        arguments of this function should correspond to `inputs`.
      inputs: a list of numpy arrays to feed input to the computation graph.

    Returns:
      A list of numpy arrays or a scalar returned from executing the tensorflow
      graph.
    """
    with self.test_session(graph=tf.Graph()) as sess:
      placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
      results = graph_fn(*placeholders)
      sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
                tf.local_variables_initializer()])
      materialized_results = sess.run(results, feed_dict=dict(zip(placeholders,
                                                                  inputs)))

      if (hasattr(materialized_results, '__len__') and
          len(materialized_results) == 1 and
          (isinstance(materialized_results, list) or
           isinstance(materialized_results, tuple))):
        materialized_results = materialized_results[0]
    return materialized_results
Developer ID: pcm17, Project: models, Lines: 26, Source: test_case.py


Example 14: test_with_counts

  def test_with_counts(self):
    vocab_list = ["Hello", ".", "笑"]
    vocab_counts = [100, 200, 300]
    vocab_file = test_utils.create_temporary_vocab_file(vocab_list,
                                                        vocab_counts)

    vocab_to_id_table, id_to_vocab_table, word_to_count_table, vocab_size = \
      vocab.create_vocabulary_lookup_table(vocab_file.name)

    self.assertEqual(vocab_size, 6)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      sess.run(tf.tables_initializer())

      ids = vocab_to_id_table.lookup(
          tf.convert_to_tensor(["Hello", ".", "笑", "??", "xxx"]))
      ids = sess.run(ids)
      np.testing.assert_array_equal(ids, [0, 1, 2, 3, 3])

      words = id_to_vocab_table.lookup(
          tf.convert_to_tensor(
              [0, 1, 2, 3], dtype=tf.int64))
      words = sess.run(words)
      np.testing.assert_array_equal(
          np.char.decode(words.astype("S"), "utf-8"),
          ["Hello", ".", "笑", "UNK"])

      counts = word_to_count_table.lookup(
          tf.convert_to_tensor(["Hello", ".", "笑", "??", "xxx"]))
      counts = sess.run(counts)
      np.testing.assert_array_equal(counts, [100, 200, 300, -1, -1])
Developer ID: AbhinavJain13, Project: seq2seq, Lines: 33, Source: vocab_test.py


Example 15: testLabels2

  def testLabels2(self):
    self._input_config["label_feature"] = "label_str"
    self._input_config["label_map"] = {"PC": 1, "AFP": 0, "NTP": 0}

    dataset = dataset_ops.build_dataset(
        file_pattern=self._file_pattern,
        input_config=self._input_config,
        batch_size=4)

    # We need an initializable iterator when using labels because of the
    # stateful label id hash table.
    iterator = dataset.make_initializable_iterator()
    inputs = iterator.get_next()
    init_op = tf.tables_initializer()

    # Expect features and labels.
    self.assertItemsEqual(["time_series_features", "aux_features", "labels"],
                          inputs.keys())
    labels = inputs["labels"]

    with self.test_session() as sess:
      sess.run([init_op, iterator.initializer])

      # Fetch 3 batches.
      np.testing.assert_array_equal([1, 0, 0, 1], sess.run(labels))
      np.testing.assert_array_equal([0, 0, 1, 0], sess.run(labels))
      np.testing.assert_array_equal([0, 1], sess.run(labels))

      # No more batches.
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(labels)
Developer ID: 812864539, Project: models, Lines: 31, Source: dataset_ops_test.py


Example 16: main

def main(args):
  if not os.path.exists(FLAGS.checkpoint):
    tf.logging.fatal(
        'Checkpoint %s does not exist. Have you downloaded it? See tools/download_data.sh',
        FLAGS.checkpoint)
  g = tf.Graph()
  with g.as_default():
    input_image = PreprocessImage(FLAGS.image_path[0])

    with slim.arg_scope(inception.inception_v3_arg_scope()):
      logits, end_points = inception.inception_v3(
          input_image, num_classes=FLAGS.num_classes, is_training=False)

    bottleneck = end_points['PreLogits']
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer(),
                       tf.tables_initializer())
    saver = tf_saver.Saver()
    sess = tf.Session()
    saver.restore(sess, FLAGS.checkpoint)

    # Run the evaluation on the image
    bottleneck_eval = np.squeeze(sess.run(bottleneck))

  first = True
  for val in bottleneck_eval:
    if not first:
      sys.stdout.write(",")
    first = False
    sys.stdout.write('{:.3f}'.format(val))
  sys.stdout.write('\n')
Developer ID: hotak92, Project: elective-pirri, Lines: 31, Source: compute_bottleneck.py


Example 17: testUnknownLabel

  def testUnknownLabel(self):
    self._input_config["label_feature"] = "label_str"

    # label_map does not include "NTP".
    self._input_config["label_map"] = {"PC": 1, "AFP": 0}

    dataset = dataset_ops.build_dataset(
        file_pattern=self._file_pattern,
        input_config=self._input_config,
        batch_size=4)

    # We need an initializable iterator when using labels because of the
    # stateful label id hash table.
    iterator = dataset.make_initializable_iterator()
    inputs = iterator.get_next()
    init_op = tf.tables_initializer()

    # Expect features and labels.
    self.assertItemsEqual(["time_series_features", "aux_features", "labels"],
                          inputs.keys())
    labels = inputs["labels"]

    with self.test_session() as sess:
      sess.run([init_op, iterator.initializer])

      # Unknown label "NTP".
      with self.assertRaises(tf.errors.InvalidArgumentError):
        sess.run(labels)
Developer ID: 812864539, Project: models, Lines: 28, Source: dataset_ops_test.py


Example 18: load_model

def load_model(model, ckpt, session, name):
    start_time = time.time()
    model.saver.restore(session, ckpt)
    session.run(tf.tables_initializer())
    print "  loaded %s model parameters from %s, time %.2fs" % \
        (name, ckpt, time.time() - start_time)
    return model
Developer ID: rpryzant, Project: code-doodles, Lines: 7, Source: model_base.py


Example 19: testDeprecatedFunction

  def testDeprecatedFunction(self, mock_warning):
    self.assertEqual(0, mock_warning.call_count)
    tf.compat.v1.initializers.tables_initializer()
    self.assertEqual(0, mock_warning.call_count)

    tf.tables_initializer()
    self.assertEqual(1, mock_warning.call_count)
    self.assertRegexpMatches(
        mock_warning.call_args[0][1],
        "deprecation_test.py:")
    self.assertRegexpMatches(
        mock_warning.call_args[0][2], r"tables_initializer")
    self.assertRegexpMatches(
        mock_warning.call_args[0][3],
        r"compat.v1.tables_initializer")
    tf.tables_initializer()
    self.assertEqual(1, mock_warning.call_count)
Developer ID: aritratony, Project: tensorflow, Lines: 17, Source: deprecation_test.py


Example 20: test_create_summaries_is_runnable

  def test_create_summaries_is_runnable(self):
    ocr_model = self.create_model()
    data = data_provider.InputEndpoints(
        images=self.fake_images,
        images_orig=self.fake_images,
        labels=self.fake_labels,
        labels_one_hot=slim.one_hot_encoding(self.fake_labels,
                                             self.num_char_classes))
    endpoints = ocr_model.create_base(
        images=self.fake_images, labels_one_hot=None)
    charset = create_fake_charset(self.num_char_classes)
    summaries = ocr_model.create_summaries(
        data, endpoints, charset, is_training=False)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      tf.tables_initializer().run()
      sess.run(summaries)  # just check it is runnable
Developer ID: banjocui, Project: models, Lines: 18, Source: model_test.py



Note: The tensorflow.tables_initializer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their original authors, who retain copyright; consult the corresponding project's license before distributing or reusing the code. Do not republish without permission.

