
Python tensorflow.parse_example Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.parse_example function in Python. If you have been wondering what parse_example does, how to call it, or what real-world uses look like, the curated code examples below should help.



Twenty code examples of the parse_example function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
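Before the project excerpts, here is a minimal, self-contained sketch of the basic call pattern. This sketch is not taken from any of the projects below; it assumes the TensorFlow 1.x API used throughout this page (tf.parse_example, tf.FixedLenFeature, tf.VarLenFeature, tf.Session). In TensorFlow 2.x the same function is available as tf.io.parse_example.

import tensorflow as tf

def make_example(age, tags):
  # Build one serialized tf.train.Example proto with a scalar int64 "age"
  # feature and a variable-length bytes "tags" feature.
  return tf.train.Example(features=tf.train.Features(feature={
      'age': tf.train.Feature(int64_list=tf.train.Int64List(value=[age])),
      'tags': tf.train.Feature(bytes_list=tf.train.BytesList(value=tags)),
  })).SerializeToString()

serialized = tf.constant([make_example(30, [b'a', b'b']),
                          make_example(25, [b'c'])])

# The feature spec decides the output types: FixedLenFeature entries come
# back as dense Tensors, VarLenFeature entries as SparseTensors.
feature_spec = {
    'age': tf.FixedLenFeature(shape=[], dtype=tf.int64),
    'tags': tf.VarLenFeature(dtype=tf.string),
}
parsed = tf.parse_example(serialized, feature_spec)

with tf.Session() as sess:
  ages, tags = sess.run([parsed['age'], parsed['tags']])
  print(ages)         # [30 25]
  print(tags.values)  # [b'a' b'b' b'c']

The examples that follow all repeat this pattern: define a feature spec, then hand a 1-D string tensor of serialized tf.Example protos to parse_example and work with the resulting dict of dense and sparse tensors.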

Example 1: _test

  def _test(self, kwargs, expected_values=None, expected_err=None):
    with self.test_session() as sess:
      if expected_err:
        with self.assertRaisesWithPredicateMatch(
            expected_err[0], expected_err[1]):
          out = tf.parse_example(**kwargs)
          sess.run(flatten_values_tensors_or_sparse(out.values()))
      else:
        # Returns dict w/ Tensors and SparseTensors.
        out = tf.parse_example(**kwargs)
        result = flatten_values_tensors_or_sparse(out.values())
        # Check values.
        tf_result = sess.run(result)
        _compare_output_to_expected(self, out, expected_values, tf_result)

      # Check shapes; if serialized is a Tensor we need its size to
      # properly check.
      serialized = kwargs["serialized"]
      batch_size = (serialized.eval().size if isinstance(serialized, tf.Tensor)
                    else np.asarray(serialized).size)
      for k, f in kwargs["features"].items():
        if isinstance(f, tf.FixedLenFeature) and f.shape is not None:
          self.assertEqual(
              tuple(out[k].get_shape().as_list()), (batch_size,) + f.shape)
        elif isinstance(f, tf.VarLenFeature):
          self.assertEqual(
              tuple(out[k].indices.get_shape().as_list()), (None, 2))
          self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
          self.assertEqual(tuple(out[k].shape.get_shape().as_list()), (2,))
Developer: JamesFysh, Project: tensorflow, Lines of code: 29, Source file: parsing_ops_test.py


Example 2: build_prediction_graph

  def build_prediction_graph(self):
    """Builds prediction graph and registers appropriate endpoints."""
    examples = tf.placeholder(tf.string, shape=(None,))
    features = {
        'image': tf.FixedLenFeature(
            shape=[IMAGE_PIXELS], dtype=tf.float32),
        'key': tf.FixedLenFeature(
            shape=[], dtype=tf.string),
    }

    parsed = tf.parse_example(examples, features)
    images = parsed['image']
    keys = parsed['key']

    # Build a Graph that computes predictions from the inference model.
    logits = inference(images, self.hidden1, self.hidden2)
    softmax = tf.nn.softmax(logits)
    prediction = tf.argmax(softmax, 1)

    # Mark the inputs and the outputs
    # Marking the input tensor with an alias with suffix _bytes. This is to
    # indicate that this tensor value is raw bytes and will be base64 encoded
    # over HTTP.
    # Note that any output tensor marked with an alias with suffix _bytes, shall
    # be base64 encoded in the HTTP response. To get the binary value, it
    # should be base64 decoded.
    tf.add_to_collection('inputs',
                         json.dumps({'examples_bytes': examples.name}))
    tf.add_to_collection('outputs',
                         json.dumps({
                             'key': keys.name,
                             'prediction': prediction.name,
                             'scores': softmax.name
                         }))
Developer: cottrell, Project: notebooks, Lines of code: 34, Source file: model.py


Example 3: _generate_saved_model_for_half_plus_two

def _generate_saved_model_for_half_plus_two(export_dir, as_text=False):
  """Generates SavedModel for half plus two.

  Args:
    export_dir: The directory to which the SavedModel should be written.
    as_text: Writes the SavedModel protocol buffer in text format to disk.
  """
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  with tf.Session(graph=tf.Graph()) as sess:
    # Set up the model parameters as variables to exercise variable loading
    # functionality upon restore.
    a = tf.Variable(0.5, name="a")
    b = tf.Variable(2.0, name="b")

    # Create a placeholder for serialized tensorflow.Example messages to be fed.
    serialized_tf_example = tf.placeholder(tf.string, name="tf_example")

    # Parse the tensorflow.Example looking for a feature named "x" with a single
    # floating point value.
    feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32),}
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    # Use tf.identity() to assign name
    x = tf.identity(tf_example["x"], name="x")
    y = tf.add(tf.mul(a, x), b, name="y")

    # Create an assets file that can be saved and restored as part of the
    # SavedModel.
    original_assets_directory = "/tmp/original/export/assets"
    original_assets_filename = "foo.txt"
    original_assets_filepath = _write_assets(original_assets_directory,
                                             original_assets_filename)

    # Set up the assets collection.
    assets_filepath = tf.constant(original_assets_filepath)
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, assets_filepath)

    # Set up the signature for regression with input and output tensor
    # specification.
    input_tensor = meta_graph_pb2.TensorInfo()
    input_tensor.name = serialized_tf_example.name
    signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor}

    output_tensor = meta_graph_pb2.TensorInfo()
    output_tensor.name = tf.identity(y).name
    signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor}
    signature_def = utils.build_signature_def(
        signature_inputs, signature_outputs,
        signature_constants.REGRESS_METHOD_NAME)

    # Initialize all variables and then save the SavedModel.
    sess.run(tf.initialize_all_variables())
    builder.add_meta_graph_and_variables(
        sess, [constants.TAG_SERVING],
        signature_def_map={
            signature_constants.REGRESS_METHOD_NAME:
                signature_def
        },
        assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS))
    builder.save(as_text)
Developer: Qstar, Project: tensorflow, Lines of code: 60, Source file: saved_model_half_plus_two.py


Example 4: _test

  def _test(self, kwargs, expected_values=None, expected_err_re=None):
    with self.test_session() as sess:
      # Pull out some keys to check shape inference
      serialized = kwargs["serialized"]
      dense_keys = kwargs["dense_keys"] if "dense_keys" in kwargs else []
      sparse_keys = kwargs["sparse_keys"] if "sparse_keys" in kwargs else []
      dense_shapes = kwargs["dense_shapes"] if "dense_shapes" in kwargs else []

      # Returns dict w/ Tensors and SparseTensors
      out = tf.parse_example(**kwargs)

      # Check shapes; if serialized is a Tensor we need its size to
      # properly check.
      batch_size = (
          serialized.eval().size if isinstance(serialized, tf.Tensor)
          else np.asarray(serialized).size)
      if dense_shapes:
        self.assertEqual(len(dense_keys), len(dense_shapes))
        for (k, s) in zip(dense_keys, dense_shapes):
          self.assertEqual(
              tuple(out[k].get_shape().as_list()), (batch_size,) + s)
      for k in sparse_keys:
        self.assertEqual(
            tuple(out[k].indices.get_shape().as_list()), (None, 2))
        self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
        self.assertEqual(tuple(out[k].shape.get_shape().as_list()), (2,))

      # Check values
      result = flatten_values_tensors_or_sparse(out.values())  # flatten values
      if expected_err_re is None:
        tf_result = sess.run(result)
        _compare_output_to_expected(self, out, expected_values, tf_result)
      else:
        with self.assertRaisesOpError(expected_err_re):
          sess.run(result)
Developer: barongeng, Project: tensorflow, Lines of code: 35, Source file: parsing_ops_test.py


Example 5: _deserialize_train

def _deserialize_train(examples_serialized):
  features = tf.parse_example(examples_serialized, _FEATURE_MAP)
  train_features = {
      movielens.USER_COLUMN: features[movielens.USER_COLUMN],
      movielens.ITEM_COLUMN: features[movielens.ITEM_COLUMN],
  }
  return train_features, features[movielens.RATING_COLUMN]
Developer: AlwaysTheBeginer, Project: models, Lines of code: 7, Source file: movielens_dataset.py


Example 6: parse_example_batch

def parse_example_batch(serialized):
  """Parses a batch of tf.Example protos.

  Args:
    serialized: A 1-D string Tensor; a batch of serialized tf.Example protos.
  Returns:
    encode: A SentenceBatch of encode sentences.
    decode_pre: A SentenceBatch of "previous" sentences to decode.
    decode_post: A SentenceBatch of "post" sentences to decode.
  """
  features = tf.parse_example(
      serialized,
      features={
          "encode": tf.VarLenFeature(dtype=tf.int64),
          "decode_pre": tf.VarLenFeature(dtype=tf.int64),
          "decode_post": tf.VarLenFeature(dtype=tf.int64),
      })

  def _sparse_to_batch(sparse):
    ids = tf.sparse_tensor_to_dense(sparse)  # Padding with zeroes.
    mask = tf.sparse_to_dense(sparse.indices, sparse.dense_shape,
                              tf.ones_like(sparse.values, dtype=tf.int32))
    return SentenceBatch(ids=ids, mask=mask)

  output_names = ("encode", "decode_pre", "decode_post")
  return tuple(_sparse_to_batch(features[x]) for x in output_names)
Developer: ALISCIFP, Project: models, Lines of code: 26, Source file: input_ops.py


Example 7: batch_parse_tf_example

def batch_parse_tf_example(batch_size, example_batch):
    '''
    Args:
        example_batch: a batch of tf.Example
    Returns:
        A dict of batched tensors
    '''
    features = {
        'x': tf.FixedLenFeature([], tf.string),
        'pi': tf.FixedLenFeature([], tf.string),
        'outcome': tf.FixedLenFeature([], tf.float32),
    }
    parsed = tf.parse_example(example_batch, features)
    x = tf.decode_raw(parsed['x'], tf.uint8)
    x = tf.cast(x, tf.float32)
    x = tf.reshape(x, [batch_size, go.N, go.N,
                       features_lib.NEW_FEATURES_PLANES])
    pi = tf.decode_raw(parsed['pi'], tf.float32)
    pi = tf.reshape(pi, [batch_size, go.N * go.N + 1])
    outcome = parsed['outcome']
    outcome.set_shape([batch_size])
    return {
        'pos_tensor': x,
        'pi_tensor': pi,
        'value_tensor': outcome,
    }
Developer: tcxdgit, Project: minigo, Lines of code: 26, Source file: preprocessing.py


Example 8: parse_examples

def parse_examples(examples):
  feature_map = {
      'labels': tf.FixedLenFeature(
          shape=[], dtype=tf.int64, default_value=[-1]),
      'images': tf.FixedLenFeature(
          shape=[IMAGE_PIXELS], dtype=tf.float32),
  }
  return tf.parse_example(examples, features=feature_map)
Developer: cottrell, Project: notebooks, Lines of code: 8, Source file: model.py


Example 9: parse_examples

 def parse_examples(example_protos):
     features = {
         "target": tf.FixedLenFeature(shape=[1], dtype=tf.float32, default_value=0),
         "age_indices": tf.VarLenFeature(dtype=tf.int64),
         "age_values": tf.VarLenFeature(dtype=tf.float32),
         "gender_indices": tf.VarLenFeature(dtype=tf.int64),
         "gender_values": tf.VarLenFeature(dtype=tf.float32),
     }
     return tf.parse_example([e.SerializeToString() for e in example_protos], features)
Developer: apollos, Project: tensorflow, Lines of code: 9, Source file: sdca_ops_test.py


Example 10: testBasic

 def testBasic(self):
     golden_config = example_parser_configuration_pb2.ExampleParserConfiguration()
     text_format.Parse(BASIC_PROTO, golden_config)
     with tf.Session() as sess:
         examples = tf.placeholder(tf.string, shape=[1])
         feature_to_type = {"x": tf.FixedLenFeature([1], tf.float32, 33.0), "y": tf.VarLenFeature(tf.string)}
         _ = tf.parse_example(examples, feature_to_type)
         parse_example_op = sess.graph.get_operation_by_name("ParseExample/ParseExample")
         config = extract_example_parser_configuration(parse_example_op, sess)
         self.assertProtoEquals(golden_config, config)
Developer: CCChaos, Project: tensorflow, Lines of code: 10, Source file: example_parser_configuration_test.py


Example 11: example_serving_input_fn

def example_serving_input_fn():
    """Build the serving inputs."""
    example_bytestring = tf.placeholder(
        shape=[None],
        dtype=tf.string,
    )
    features = tf.parse_example(
        example_bytestring,
        tf.feature_column.make_parse_example_spec(featurizer.INPUT_COLUMNS))
    return tf.estimator.export.ServingInputReceiver(
        features, {'example_proto': example_bytestring})
Developer: zhang01GA, Project: cloudml-samples, Lines of code: 11, Source file: input.py


Example 12: build_prediction_graph

  def build_prediction_graph(self):
    """Builds prediction graph and registers appropriate endpoints."""
    examples = tf.placeholder(tf.string, shape=(None,))
    features = {
        'image': tf.FixedLenFeature(
            shape=[IMAGE_PIXELS], dtype=tf.float32),
        'key': tf.FixedLenFeature(
            shape=[], dtype=tf.string),
    }

    parsed = tf.parse_example(examples, features)
    images = parsed['image']
    keys = parsed['key']

    # Build a Graph that computes predictions from the inference model.
    logits = inference(images, self.hidden1, self.hidden2)
    softmax = tf.nn.softmax(logits)
    prediction = tf.argmax(softmax, 1)

    # Mark the inputs and the outputs
    # Marking the input tensor with an alias with suffix _bytes. This is to
    # indicate that this tensor value is raw bytes and will be base64 encoded
    # over HTTP.
    # Note that any output tensor marked with an alias with suffix _bytes, shall
    # be base64 encoded in the HTTP response. To get the binary value, it
    # should be base64 decoded.
    input_signatures = {}
    predict_input_tensor = meta_graph_pb2.TensorInfo()
    predict_input_tensor.name = examples.name
    predict_input_tensor.dtype = examples.dtype.as_datatype_enum
    input_signatures['example_bytes'] = predict_input_tensor

    tf.add_to_collection('inputs',
                         json.dumps({
                             'examples_bytes': examples.name
                         }))
    tf.add_to_collection('outputs',
                         json.dumps({
                             'key': keys.name,
                             'prediction': prediction.name,
                             'scores': softmax.name
                         }))
    output_signatures = {}
    outputs_dict = {'key': keys.name,
                    'prediction': prediction.name,
                    'scores': softmax.name}
    for key, val in outputs_dict.items():  # iteritems() in the original is Python 2 only
      predict_output_tensor = meta_graph_pb2.TensorInfo()
      predict_output_tensor.name = val
      for placeholder in [keys, prediction, softmax]:
        if placeholder.name == val:
          predict_output_tensor.dtype = placeholder.dtype.as_datatype_enum
      output_signatures[key] = predict_output_tensor
    return input_signatures, output_signatures
Developer: cottrell, Project: notebooks, Lines of code: 54, Source file: model.py


Example 13: decode

    def decode(self,batched_serialized_tensors,batch_size):
        """Decodes the input from batch of serialized tensors
           Formats and reshapes image
           Args:
            batched_serialized_tensors: tensor output from Batcher containing read in
                serialized tensors

          Returns:
            batched_decoded_tensors: dict of batches of decoded TFRecords of batch_size
        """

        #faster to decode tensors as a batch
        batched_decoded_tensors = tf.parse_example(batched_serialized_tensors[fields.InputDataFields.serialized],
                                                    self._keys_to_features)

        #Decode and cast tensors if needed
        for label in self._multi_task_labels:
            tensor = batched_decoded_tensors[label.name]
            # only strings need to be decoded
            if label.dtype == "string":
                if label.decodetype:
                    tensor = tf.decode_raw(tensor, TYPE_MAP[label.decodetype])
                else:
                    raise ValueError("string type must have a type to be decoded to.")
            if label.casttype:
                tensor = tf.cast(tensor, TYPE_MAP[label.casttype])

            if label.shape:
                tensor = tf.reshape(tensor, [batch_size,*label.shape])
                tensor.set_shape([batch_size, *label.shape])

            batched_decoded_tensors[label.name] = tensor

        # input is handled separately
        image_float = tf.cast(
                            tf.decode_raw(batched_decoded_tensors['input'],
                                          tf.uint8),
                            tf.float32)
        image_float = tf.reshape(image_float,[batch_size,
                                              self._image_height,
                                              self._image_width,
                                              self._channels])
        image_float.set_shape([batch_size,
                               self._image_height,
                               self._image_width,
                               self._channels])

        batched_decoded_tensors['input'] = image_float

        return batched_decoded_tensors
Developer: oneTimePad, Project: classification, Lines of code: 50, Source file: multi_task_tf_examples_decoder.py


Example 14: load_all_pairs

def load_all_pairs(records):
  """Reads TensorFlow examples from a RecordReader and returns the word pairs.

  Args:
    records: a record list with TensorFlow examples.

  Returns:
    The word pairs
  """
  curr_features = tf.parse_example(records, {
      'pair': tf.FixedLenFeature([1], dtype=tf.string)
  })

  word_pairs = curr_features['pair']
  return word_pairs
Developer: ALISCIFP, Project: models, Lines of code: 15, Source file: lexnet_common.py


Example 15: load_all_labels

def load_all_labels(records):
  """Reads TensorFlow examples from a RecordReader and returns only the labels.

  Args:
    records: a record list with TensorFlow examples.

  Returns:
    The labels
  """
  curr_features = tf.parse_example(records, {
      'rel_id': tf.FixedLenFeature([1], dtype=tf.int64),
  })

  labels = tf.squeeze(curr_features['rel_id'], [-1])
  return labels
Developer: ALISCIFP, Project: models, Lines of code: 15, Source file: lexnet_common.py


Example 16: serving_input_fn

def serving_input_fn():
  with tf.name_scope("inputs"):
    serialized = tf.placeholder(
        dtype=tf.string,
        shape=tf.tensor_shape.unknown_shape(ndims=1),
        name=EXAMPLES_KEY)

    parsing_spec = {TERMS_KEY: tf.VarLenFeature(dtype=tf.string)}
    features = tf.parse_example(serialized, parsing_spec)

    sequence_length = sparse_sequence_length(features[TERMS_KEY])
    features[SEQUENCE_LENGTH_KEY] = sequence_length
    return tf.contrib.learn.InputFnOps(
        features=features,
        labels=None,
        default_inputs={EXAMPLES_KEY: serialized})
Developer: ckml, Project: tf_learn, Lines of code: 16, Source file: lstm_classifier.py


Example 17: LoadBinaryCode

def LoadBinaryCode(input_config, batch_size):
  """Load a batch of binary codes from a tf.Example dataset.

  Args:
    input_config: An InputConfig proto containing the input configuration.
    batch_size: Output batch size of examples.

  Returns:
    A batched tensor of binary codes.
  """
  data = input_config.data

  # TODO: Possibly use multiple files (instead of just one).
  file_list = [data]
  filename_queue = tf.train.string_input_producer(file_list,
                                                  capacity=4)
  reader = tf.TFRecordReader()
  _, values = reader.read(filename_queue)

  serialized_example = tf.reshape(values, shape=[1])
  serialized_features = {
      'code_shape': tf.FixedLenFeature([3],
                                       dtype=tf.int64),
      'code': tf.VarLenFeature(tf.float32),
  }
  example = tf.parse_example(serialized_example, serialized_features)

  # 3D shape: height x width x binary_code_depth
  z = example['code_shape']
  code_shape = tf.reshape(tf.cast(z, tf.int32), [3])
  # Un-flatten the binary codes.
  code = tf.reshape(tf.sparse_tensor_to_dense(example['code']), code_shape)

  queue_size = 10
  queue = tf.PaddingFIFOQueue(
      queue_size + 3 * batch_size,
      dtypes=[code.dtype],
      shapes=[[None, None, None]])
  enqueue_op = queue.enqueue([code])
  dequeue_code = queue.dequeue_many(batch_size)
  queue_runner = tf.train.queue_runner.QueueRunner(queue, [enqueue_op])
  tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, queue_runner)

  return dequeue_code
Developer: 812864539, Project: models, Lines of code: 44, Source file: code_loader.py


Example 18: _generate_saved_model_for_half_plus_two

def _generate_saved_model_for_half_plus_two(export_dir, as_text=False):
  """Generates SavedModel for half plus two.

  Args:
    export_dir: The directory to which the SavedModel should be written.
    as_text: Writes the SavedModel protocol buffer in text format to disk.
  """
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  with tf.Session(graph=tf.Graph()) as sess:
    # Set up the model parameters as variables to exercise variable loading
    # functionality upon restore.
    a = tf.Variable(0.5, name="a")
    b = tf.Variable(2.0, name="b")

    # Create a placeholder for serialized tensorflow.Example messages to be fed.
    serialized_tf_example = tf.placeholder(tf.string, name="tf_example")

    # Parse the tensorflow.Example looking for a feature named "x" with a single
    # floating point value.
    feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32),}
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    # Use tf.identity() to assign name
    x = tf.identity(tf_example["x"], name="x")
    y = tf.add(tf.mul(a, x), b, name="y")

    # Set up the signature for regression with input and output tensor
    # specification.
    input_tensor = meta_graph_pb2.TensorInfo()
    input_tensor.name = serialized_tf_example.name
    signature_inputs = {"input": input_tensor}

    output_tensor = meta_graph_pb2.TensorInfo()
    output_tensor.name = tf.identity(y).name
    signature_outputs = {"output": output_tensor}
    signature_def = utils.build_signature_def(signature_inputs,
                                              signature_outputs, "regression")

    # Initialize all variables and then save the SavedModel.
    sess.run(tf.initialize_all_variables())
    builder.add_meta_graph_and_variables(
        sess, [constants.TAG_SERVING],
        signature_def_map={"regression": signature_def})
    builder.save(as_text)
Developer: KalraA, Project: tensorflow, Lines of code: 44, Source file: saved_model_half_plus_two.py


Example 19: example_serving_input_fn

def example_serving_input_fn(default_batch_size=None):
  """Build the serving inputs.

  Args:
    default_batch_size (int): Batch size for the tf.placeholder shape
  """
  feature_spec = {}
  for feat in CONTINUOUS_COLS:
    feature_spec[feat] = tf.FixedLenFeature(shape=[], dtype=tf.int64)

  for feat, _ in CATEGORICAL_COLS:
    feature_spec[feat] = tf.FixedLenFeature(shape=[], dtype=tf.string)

  example_bytestring = tf.placeholder(
      shape=[default_batch_size],
      dtype=tf.string,
  )
  features = tf.parse_example(example_bytestring, feature_spec)
  return features, {'example': example_bytestring}
Developer: cottrell, Project: notebooks, Lines of code: 19, Source file: model.py


Example 20: example_evaluating_input_receiver_fn

def example_evaluating_input_receiver_fn():
  """Creating an EvalInputReceiver object for TFRecords data.

  Returns:
      EvalInputReceiver
  """

  tf_example = tf.placeholder(shape=[None], dtype=tf.string)
  features = tf.parse_example(
    tf_example,
    features=get_feature_spec(is_serving=False))

  for key in features:
    features[key] = tf.expand_dims(features[key], -1)

  return tfma.export.EvalInputReceiver(
    features=process_features(features),
    receiver_tensors={'examples': tf_example},
    labels=features[metadata.TARGET_NAME])
Developer: zhang01GA, Project: cloudml-samples, Lines of code: 19, Source file: inputs.py



Note: The tensorflow.parse_example examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs and other platforms; the snippets were selected from open-source projects contributed by many developers. Copyright of the source code belongs to the original authors; for redistribution and use, please refer to the License of the corresponding project. Please do not repost without permission.

