
Python array_ops.ones_like Function Code Examples


This article collects typical usage examples of the tensorflow.python.ops.array_ops.ones_like function in Python. If you are wondering how to use ones_like, or looking for concrete examples of it in practice, the curated code samples below may help.



The following presents 20 code examples of the ones_like function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
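
Before the examples, here is a minimal sketch (added for orientation, not taken from any of the projects below) of what ones_like does, plus the ones_like(x) * y broadcasting idiom that recurs throughout the snippets. It uses the public tf.ones_like alias, which is backed by the same op as array_ops.ones_like, and assumes TensorFlow 2.x eager execution:

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])

# A tensor of ones with the same shape and dtype as x.
ones = tf.ones_like(x)           # [[1., 1.], [1., 1.]]

# The recurring idiom: multiplying ones_like(x) by a scalar or lower-rank
# tensor y broadcasts y up to the shape of x without changing its values.
y = tf.constant(5.0)
broadcast = tf.ones_like(x) * y  # [[5., 5.], [5., 5.]]

print(ones.numpy())
print(broadcast.numpy())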

Example 1: sample_n

  def sample_n(self, n, seed=None, name="sample_n"):
    """Sample `n` observations from the Beta Distributions.

    Args:
      n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
        observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self.a, self.b, n]):
        a = array_ops.ones_like(self._a_b_sum, dtype=self.dtype) * self.a
        b = array_ops.ones_like(self._a_b_sum, dtype=self.dtype) * self.b
        n = ops.convert_to_tensor(n, name="n")

        gamma1_sample = random_ops.random_gamma(
            [n,], a, dtype=self.dtype, seed=seed)
        gamma2_sample = random_ops.random_gamma(
            [n,], b, dtype=self.dtype, seed=seed)

        beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)

        n_val = tensor_util.constant_value(n)
        final_shape = tensor_shape.vector(n_val).concatenate(
            self._a_b_sum.get_shape())

        beta_sample.set_shape(final_shape)
        return beta_sample
Developer: JamesFysh | Project: tensorflow | Lines: 32 | Source: beta.py
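
The two random_gamma draws implement the standard Gamma-ratio construction: if G1 ~ Gamma(a, 1) and G2 ~ Gamma(b, 1), then G1 / (G1 + G2) ~ Beta(a, b). A quick standalone sanity check (a sketch using the public TensorFlow 2.x API, not part of the original snippet):

import tensorflow as tf

a, b = 2.0, 5.0
g1 = tf.random.gamma([100000], alpha=a, seed=1)
g2 = tf.random.gamma([100000], alpha=b, seed=2)
beta_sample = g1 / (g1 + g2)

# The mean of Beta(a, b) is a / (a + b) = 2/7, roughly 0.286.
print(float(tf.reduce_mean(beta_sample)))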


Example 2: _BiasAddGradGrad

def _BiasAddGradGrad(op, received_grad):
  """Gradient for the BiasAddGrad op.

  Args:
    op: BiasAddGrad op for which we are calculating gradients.
    received_grad: The gradients passed to the BiasAddGrad op.

  Returns:
    A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd)
  """

  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None

  shape = array_ops.shape(op.inputs[0])
  rank = array_ops.rank(op.inputs[0])
  bias_shape = array_ops.shape(received_grad)

  if data_format == b"NCHW":
    expanded_shape = array_ops.concat([
        array_ops.ones_like(shape[:-3]), bias_shape,
        array_ops.ones_like(shape[-2:])
    ], 0)
    tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
  else:
    expanded_shape = array_ops.concat(
        [array_ops.ones_like(shape[:-1]), bias_shape], 0)
    tile_mults = array_ops.concat([shape[:-1], [1]], 0)

  expanded_grad = array_ops.reshape(received_grad, expanded_shape)
  return array_ops.tile(expanded_grad, tile_mults)
Developer: Jackhuang945 | Project: tensorflow | Lines: 34 | Source: nn_grad.py
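
To make the shape arithmetic concrete, here is a hedged numeric sketch (public TF 2.x API, illustrative values only) of the default NHWC branch: for an input of shape [2, 3, 4, 5] and a bias gradient of shape [5], the gradient is reshaped to [1, 1, 1, 5] and then tiled 2*3*4 times across the leading dimensions.

import tensorflow as tf

shape = tf.constant([2, 3, 4, 5])  # shape of op.inputs[0] (NHWC)
bias_shape = tf.constant([5])      # shape of received_grad

expanded_shape = tf.concat([tf.ones_like(shape[:-1]), bias_shape], 0)
tile_mults = tf.concat([shape[:-1], [1]], 0)

print(expanded_shape.numpy())  # [1 1 1 5]
print(tile_mults.numpy())      # [2 3 4 1]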


Example 3: _sample_n

  def _sample_n(self, n, seed=None):
    n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
    k = self.event_shape_tensor()[0]

    # broadcast total_count and logits to the same shape
    n_draws = array_ops.ones_like(
        self.logits[..., 0], dtype=n_draws.dtype) * n_draws
    logits = array_ops.ones_like(
        n_draws[..., array_ops.newaxis], dtype=self.logits.dtype) * self.logits

    # flatten the total_count and logits
    flat_logits = array_ops.reshape(logits, [-1, k])  # [B1B2...Bm, k]
    flat_ndraws = n * array_ops.reshape(n_draws, [-1])  # [B1B2...Bm]

    # compute each (total_count, logits) combination via map_fn
    def _sample_single(args):
      logits, n_draw = args[0], args[1]  # [K], []
      x = random_ops.multinomial(logits[array_ops.newaxis, ...], n_draw,
                                 seed)  # [1, n*n_draw]
      x = array_ops.reshape(x, shape=[n, -1])  # [n, n_draw]
      x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k), axis=-2)  # [n, k]
      return x

    x = functional_ops.map_fn(
        _sample_single, [flat_logits, flat_ndraws],
        dtype=self.dtype)  # [B1B2...Bm, n, k]

    # reshape the results to the proper shape
    x = array_ops.transpose(x, perm=[1, 0, 2])
    final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
    x = array_ops.reshape(x, final_shape)  # [n, B1, B2,..., Bm, k]
    return x
Developer: AndrewTwinz | Project: tensorflow | Lines: 32 | Source: multinomial.py


Example 4: grad

 def grad(dmm, dr):
   return [
       math_ops.matmul(dmm, b, transpose_b=True) +
       math_ops.matmul(array_ops.ones_like(b * dr), b, transpose_b=True),
       math_ops.matmul(a, dmm, transpose_b=True) +
       math_ops.matmul(a, array_ops.ones_like(a) * dr, transpose_b=True)
   ]
Developer: DjangoPeng | Project: tensorflow | Lines: 7 | Source: tape_test.py


Example 5: _log_prob

 def _log_prob(self, x):
   x = self._assert_valid_sample(x)
   # broadcast logits or x if need be.
   logits = self.logits
   if (not x.get_shape().is_fully_defined() or
       not logits.get_shape().is_fully_defined() or
       x.get_shape() != logits.get_shape()):
     logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
     x = array_ops.ones_like(logits, dtype=x.dtype) * x
   logits_shape = array_ops.shape(math_ops.reduce_sum(logits, axis=[-1]))
   logits_2d = array_ops.reshape(logits, [-1, self.event_size])
   x_2d = array_ops.reshape(x, [-1, self.event_size])
   # compute the normalization constant
   k = math_ops.cast(self.event_size, x.dtype)
   log_norm_const = (math_ops.lgamma(k)
                     + (k - 1.)
                     * math_ops.log(self.temperature))
   # compute the unnormalized density
   log_softmax = nn_ops.log_softmax(logits_2d - x_2d * self._temperature_2d)
   log_unnorm_prob = math_ops.reduce_sum(log_softmax, [-1], keepdims=False)
   # combine unnormalized density with normalization constant
   log_prob = log_norm_const + log_unnorm_prob
   # Reshape log_prob to be consistent with the shape of the user-supplied logits
   ret = array_ops.reshape(log_prob, logits_shape)
   return ret
Developer: dananjayamahesh | Project: tensorflow | Lines: 25 | Source: relaxed_onehot_categorical.py


Example 6: full_exp_with_logits

  def full_exp_with_logits(name, labels=None, logits=None):
    """Computes exponential loss given `logits`.

    Args:
      name: A name for the operation (optional).
      labels: A `Tensor` of the same type and shape as `logits`.
      logits: A `Tensor` of type `float32` or `float64`.

    Returns:
      A `Tensor` of the same shape as `logits` with the componentwise
      exponential losses.

    Raises:
      ValueError: If `logits` and `labels` do not have the same shape.
    """
    with ops.name_scope(name, "exp_loss", [logits, labels]) as name:
      logits = ops.convert_to_tensor(logits, name="logits")
      labels = ops.convert_to_tensor(labels, name="labels")
      try:
        labels.get_shape().merge_with(logits.get_shape())
      except ValueError:
        raise ValueError("logits and labels must have the same shape (%s vs %s)"
                         % (logits.get_shape(), labels.get_shape()))

    # Default threshold of 0 to switch between classes
    zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
    ones = array_ops.ones_like(logits, dtype=logits.dtype)
    neg_ones = -array_ops.ones_like(logits, dtype=logits.dtype)

    # Convert labels to 1 and -1
    cond_labels = (labels > zeros)
    labels_converted = array_ops.where(cond_labels, ones, neg_ones)

    return math_ops.exp(-1.0 * logits * labels_converted)
Developer: Ajaycs99 | Project: tensorflow | Lines: 34 | Source: losses.py


Example 7: log_prob

  def log_prob(self, event, name="log_prob"):
    """Log of the probability mass function.

    Args:
      event: `int32` or `int64` binary Tensor.
      name: A name for this operation (optional).

    Returns:
      The log-probabilities of the events.
    """
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent behavior for logits = inf/-inf.
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self.logits, event]):
        event = ops.convert_to_tensor(event, name="event")
        event = math_ops.cast(event, self.logits.dtype)
        logits = self.logits
        # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
        # so we do this here.
        # TODO(b/30637701): Check dynamic shape, and don't broadcast if the
        # dynamic shapes are the same.
        if (not event.get_shape().is_fully_defined() or
            not logits.get_shape().is_fully_defined() or
            event.get_shape() != logits.get_shape()):
          logits = array_ops.ones_like(event) * logits
          event = array_ops.ones_like(logits) * event
        return -nn.sigmoid_cross_entropy_with_logits(logits, event)
Developer: JamesFysh | Project: tensorflow | Lines: 27 | Source: bernoulli.py


Example 8: _sample_n

 def _sample_n(self, n, seed=None):
     a = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.a
     b = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.b
     gamma1_sample = random_ops.random_gamma([n], a, dtype=self.dtype, seed=seed)
     gamma2_sample = random_ops.random_gamma([n], b, dtype=self.dtype, seed=seed)
     beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
     return beta_sample
Developer: caisq | Project: tensorflow | Lines: 7 | Source: beta.py


Example 9: _sample_n

 def _sample_n(self, n, seed=None):
   a = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.a
   b = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.b
   gamma1_sample = random_ops.random_gamma(
       [n,], a, dtype=self.dtype, seed=seed)
   gamma2_sample = random_ops.random_gamma(
       [n,], b, dtype=self.dtype,
       seed=distribution_util.gen_new_seed(seed, "beta"))
   beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
   return beta_sample
Developer: cg31 | Project: tensorflow | Lines: 10 | Source: beta.py


Example 10: grad_fn

 def grad_fn(inputs, trainable_variables, unused_outputs,
             unused_grad_outputs):
   grad_inputs = [
       array_ops.ones_like(t) * (i + 1.) for i, t in enumerate(inputs)
   ]
   grad_vars = [
       array_ops.ones_like(t) * (i + len(inputs) + 1.)
       for i, t in enumerate(trainable_variables)
   ]
   return grad_inputs, grad_vars
Developer: bikong2 | Project: tensorflow | Lines: 10 | Source: rev_block_lib_test.py


Example 11: exp_with_logits

  def exp_with_logits(name, eps, labels=None, logits=None):
    """Computes exponential loss given `logits`.

    The loss returned is exp(-targets * modified_predictions), where
    modified_predictions is 1 if sigmoid >= 0.5 + eps (i.e. we predict the
    positive class), -1 if sigmoid < 0.5 - eps (i.e. we predict the negative
    class), and a*x + b for sigmoid values in the interval
    (0.5 - eps, 0.5 + eps), where a = 1/eps and b = -1/(2*eps).

    Args:
      name: A name for the operation (optional).
      eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b.
      labels: A `Tensor` of the same type and shape as `logits`.
      logits: A `Tensor` of type `float32` or `float64`.

    Returns:
      A `Tensor` of the same shape as `logits` with the componentwise
      exponential losses.

    Raises:
      ValueError: If `logits` and `labels` do not have the same shape.
    """
    with ops.name_scope(name, "exp_loss", [logits, labels]) as name:
      logits = ops.convert_to_tensor(logits, name="logits")
      labels = ops.convert_to_tensor(labels, name="labels")
      try:
        labels.get_shape().merge_with(logits.get_shape())
      except ValueError:
        raise ValueError("logits and labels must have the same shape (%s vs %s)"
                         % (logits.get_shape(), labels.get_shape()))

    # Default threshold to switch between classes
    zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
    ones = array_ops.ones_like(logits, dtype=logits.dtype)
    neg_ones = -array_ops.ones_like(logits, dtype=logits.dtype)

    # Convert labels to 1 and -1
    cond_labels = (labels > zeros)
    labels_converted = array_ops.where(cond_labels, ones, neg_ones)

    # Convert predictions to 1 and -1
    # The loss we build is min(1, max(-1, a*x + b))
    # where a = 1/eps, b = -1/(2*eps).

    a = 1.0 / eps
    b = -1.0 / 2 / eps
    probs = math_ops.sigmoid(logits)
    y = a * probs + b
    # Build max(-1, ax+b)
    cond = (y < -1)
    max_res = array_ops.where(cond, neg_ones, y)
    # Build min part
    cond = (max_res > 1)
    min_res = array_ops.where(cond, ones, max_res)
    preds_converted = min_res
    return math_ops.exp(-preds_converted * labels_converted)
Developer: Ajaycs99 | Project: tensorflow | Lines: 55 | Source: losses.py
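
A worked check of the piecewise prediction in plain Python (a standalone sketch, not part of the original code): with eps = 0.1 we get a = 10 and b = -5, so the prediction ramps linearly from -1 to 1 as the sigmoid crosses the band (0.4, 0.6) and is clipped outside it.

import math

def modified_prediction(logit, eps=0.1):
    a = 1.0 / eps           # 10.0
    b = -1.0 / (2.0 * eps)  # -5.0
    p = 1.0 / (1.0 + math.exp(-logit))  # sigmoid(logit)
    return min(1.0, max(-1.0, a * p + b))

print(modified_prediction(0.0))   # sigmoid = 0.5   -> 10*0.5 - 5 = 0.0
print(modified_prediction(0.2))   # sigmoid ~ 0.550 -> ~ 0.498, inside the ramp
print(modified_prediction(3.0))   # sigmoid ~ 0.953 -> clipped to 1.0
print(modified_prediction(-3.0))  # sigmoid ~ 0.047 -> clipped to -1.0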


Example 12: _moment

 def _moment(self, n):
   """Compute the n'th (uncentered) moment."""
   expanded_concentration1 = array_ops.ones_like(
       self.total_concentration, dtype=self.dtype) * self.concentration1
   expanded_concentration0 = array_ops.ones_like(
       self.total_concentration, dtype=self.dtype) * self.concentration0
   beta_arg0 = 1 + n / expanded_concentration1
   beta_arg = array_ops.stack([beta_arg0, expanded_concentration0], -1)
   log_moment = math_ops.log(expanded_concentration0) + special_math_ops.lbeta(
       beta_arg)
   return math_ops.exp(log_moment)
Developer: ChengYuXiang | Project: tensorflow | Lines: 11 | Source: kumaraswamy.py


Example 13: _sample_n

  def _sample_n(self, n, seed=None):
    expanded_concentration1 = array_ops.ones_like(
        self.total_concentration, dtype=self.dtype) * self.concentration1
    expanded_concentration0 = array_ops.ones_like(
        self.total_concentration, dtype=self.dtype) * self.concentration0
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    uniform_sample = random_ops.random_uniform(
        shape=shape, minval=0.0, maxval=1.0, dtype=self.dtype, seed=seed)

    kumaraswamy_sample = (1 - uniform_sample**(1. / expanded_concentration0))**(
        1. / expanded_concentration1)
    return kumaraswamy_sample
Developer: ChengYuXiang | Project: tensorflow | Lines: 12 | Source: kumaraswamy.py
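
This is inverse-CDF sampling: the Kumaraswamy CDF is F(x) = 1 - (1 - x**a)**b with a = concentration1 and b = concentration0, so inverting a uniform draw u gives x = (1 - (1 - u)**(1/b))**(1/a); because 1 - u is itself uniform, the code drops the inner subtraction. A quick standalone check (a sketch with the public TF 2.x API; for a = 2, b = 3 the mean b*B(1 + 1/a, b) is roughly 0.457):

import tensorflow as tf

a, b = 2.0, 3.0  # concentration1, concentration0
u = tf.random.uniform([100000], minval=0.0, maxval=1.0, seed=0)
x = (1.0 - u ** (1.0 / b)) ** (1.0 / a)

print(float(tf.reduce_mean(x)))  # roughly 0.457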


Example 14: _log_prob

  def _log_prob(self, counts):
    if self.validate_args:
      counts = distribution_util.embed_check_nonnegative_discrete(
          counts, check_integer=True)
    counts *= array_ops.ones_like(self.probs)
    probs = self.probs * array_ops.ones_like(counts)

    safe_domain = array_ops.where(
        math_ops.equal(counts, 0.),
        array_ops.zeros_like(probs),
        probs)
    return counts * math_ops.log1p(-safe_domain) + math_ops.log(probs)
Developer: pcm17 | Project: tensorflow | Lines: 12 | Source: geometric.py


Example 15: _log_prob

 def _log_prob(self, x):
   if self.validate_args:
     x = distribution_util.embed_check_nonnegative_integer_form(x)
   else:
     # For consistency with cdf, we take the floor.
     x = math_ops.floor(x)
   x *= array_ops.ones_like(self.probs)
   probs = self.probs * array_ops.ones_like(x)
   safe_domain = array_ops.where(
       math_ops.equal(x, 0.),
       array_ops.zeros_like(probs),
       probs)
   return x * math_ops.log1p(-safe_domain) + math_ops.log(probs)
Developer: Ajaycs99 | Project: tensorflow | Lines: 13 | Source: geometric.py


Example 16: gradient_clipping

  def gradient_clipping(grads_and_vars):
    """Internal function for adaptive clipping."""
    grads, variables = zip(*grads_and_vars)

    norm = clip_ops.global_norm(grads)

    max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
                                            global_step, epsilon, name)

    # reports the max gradient norm for debugging
    if report_summary:
      summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)

    # factor will be 1. if norm is smaller than max_norm
    factor = array_ops.where(norm < max_norm,
                             array_ops.ones_like(norm),
                             math_ops.exp(log_mean) / norm)

    if static_max_norm is not None:
      factor = math_ops.minimum(static_max_norm / norm, factor)

    # apply factor
    clipped_grads = []
    for grad in grads:
      if grad is None:
        clipped_grads.append(None)
      elif isinstance(grad, ops.IndexedSlices):
        clipped_grads.append(
            ops.IndexedSlices(grad.values * factor, grad.indices,
                              grad.dense_shape))
      else:
        clipped_grads.append(grad * factor)

    return list(zip(clipped_grads, variables))
Developer: AlbertXiebnu | Project: tensorflow | Lines: 34 | Source: optimizers.py


Example 17: call

 def call(self, inputs, mask=None):
   self._validate_call_args(inputs=inputs, mask=mask)
   q = inputs[0]
   v = inputs[1]
   k = inputs[2] if len(inputs) > 2 else v
   q_mask = mask[0] if mask else None
   v_mask = mask[1] if mask else None
   scores = self._calculate_scores(query=q, key=k)
   if v_mask is not None:
     # Mask of shape [batch_size, 1, Tv].
     v_mask = array_ops.expand_dims(v_mask, axis=-2)
   if self.causal:
     # Creates a lower triangular mask, so position i cannot attend to
     # positions j>i. This prevents the flow of information from the future
     # into the past.
     scores_shape = array_ops.shape(scores)
     # causal_mask_shape = [1, Tq, Tv].
     causal_mask_shape = array_ops.concat(
         [array_ops.ones_like(scores_shape[:-2]), scores_shape[-2:]],
         axis=0)
     causal_mask = _lower_triangular_mask(causal_mask_shape)
   else:
     causal_mask = None
   scores_mask = _merge_masks(v_mask, causal_mask)
   result = self._apply_scores(scores=scores, value=v, scores_mask=scores_mask)
   if q_mask is not None:
     # Mask of shape [batch_size, Tq, 1].
     q_mask = array_ops.expand_dims(q_mask, axis=-1)
     result *= math_ops.cast(q_mask, dtype=result.dtype)
   return result
Developer: aritratony | Project: tensorflow | Lines: 30 | Source: dense_attention.py
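
_lower_triangular_mask is a private helper of this module; for illustration, a causal mask of the same [1, Tq, Tv] form can be sketched with the public tf.linalg.band_part (an assumption about the helper's output, not its actual implementation):

import tensorflow as tf

Tq, Tv = 3, 3
# band_part(x, -1, 0) keeps the lower triangle and the diagonal.
causal_mask = tf.cast(
    tf.linalg.band_part(tf.ones([1, Tq, Tv]), -1, 0), tf.bool)

print(causal_mask.numpy()[0].astype(int))
# [[1 0 0]
#  [1 1 0]
#  [1 1 1]]
# Query position i may only attend to key positions j <= i.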


Example 18: _num_present

def _num_present(losses, weights, per_batch=False):
  """Computes the number of elements in the loss function induced by `weights`.

  A given weights tensor induces different numbers of usable elements in the
  `losses` tensor. The `weights` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,
  in effect, tiled to match the shape of `losses`. Following this effective
  tile, the total number of present elements is the number of non-zero
  entries in the tiled weights.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: `Tensor` of shape `[]`, `[batch_size]` or
      `[batch_size, d1, ... dK]`, where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.

  Returns:
    The number of present (non-zero) elements in the losses tensor. If
      `per_batch` is `True`, the value is returned as a tensor of size
      `[batch_size]`. Otherwise, a single scalar tensor is returned.
  """
  with ops.name_scope(None, "num_present", (losses, weights)) as scope:
    weights = math_ops.to_float(weights)
    present = array_ops.where(
        math_ops.equal(weights, 0.0),
        array_ops.zeros_like(weights),
        array_ops.ones_like(weights))
    present = weights_broadcast_ops.broadcast_weights(present, losses)
    if per_batch:
      return math_ops.reduce_sum(
          present, axis=math_ops.range(1, array_ops.rank(present)),
          keep_dims=True, name=scope)
    return math_ops.reduce_sum(present, name=scope)
Developer: ChengYuXiang | Project: tensorflow | Lines: 34 | Source: losses_impl.py
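
A small worked example of the counting logic (a standalone sketch with the public TF 2.x API; tf.broadcast_to stands in for weights_broadcast_ops.broadcast_weights): weights of shape [2, 3] against losses of shape [2, 3, 4] give each non-zero weight an effective coverage of 4 elements.

import tensorflow as tf

losses = tf.zeros([2, 3, 4])
weights = tf.constant([[1.0, 0.0, 2.0],
                       [0.0, 0.0, 3.0]])

# 0/1 indicator of non-zero weights, as in _num_present above.
present = tf.where(tf.equal(weights, 0.0),
                   tf.zeros_like(weights),
                   tf.ones_like(weights))
# Broadcast the indicator up to the shape of losses: 3 non-zero weights,
# each tiled over the trailing dimension of size 4.
present = tf.broadcast_to(present[..., tf.newaxis], tf.shape(losses))

print(float(tf.reduce_sum(present)))  # 12.0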


Example 19: testContribSignalSTFT

  def testContribSignalSTFT(self):
    ws = 512
    hs = 128
    dims = (ws * 20,)
    shape = BATCH_DIMS + dims
    data = np.arange(np.prod(shape)) / np.prod(dims)
    np.random.seed(123)
    np.random.shuffle(data)
    data = np.reshape(data.astype(np.float32), shape)
    window = sps.get_window("hann", ws)
    expected = sps.stft(
        data, nperseg=ws, noverlap=ws - hs, boundary=None, window=window)[2]
    expected = np.swapaxes(expected, -1, -2)
    expected *= window.sum()  # scipy divides by window sum
    with self.test_session() as sess:
      with self.test_scope():
        ph = array_ops.placeholder(
            dtypes.as_dtype(data.dtype), shape=data.shape)
        out = signal.stft(ph, ws, hs)
        grad = gradients_impl.gradients(out, ph,
                                        grad_ys=array_ops.ones_like(out))

      # For gradients, we simply verify that they compile & execute.
      value, _ = sess.run([out, grad], {ph: data})
      self.assertAllClose(expected, value, rtol=RTOL, atol=ATOL)
Developer: Huoxubeiyin | Project: tensorflow | Lines: 25 | Source: fft_test.py


Example 20: mode

  def mode(self, name="mode"):
    """Mode of the distribution.

    Note that the mode for the Dirichlet distribution is only defined
    when `alpha > 1`. This returns the mode when `alpha > 1`,
    and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
    will be raised rather than returning `NaN`.

    Args:
      name: The name for this op.

    Returns:
      Mode of the Dirichlet distribution.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._alpha, self._alpha_0], name):
        one = constant_op.constant(1, self.dtype)
        mode = (self._alpha - 1) / (
            array_ops.expand_dims(self._alpha_0, -1) - math_ops.cast(
                self.event_shape()[0], self.dtype))

        if self.allow_nan_stats:
          return math_ops.select(
              math_ops.greater(self._alpha, 1),
              mode,
              (constant_op.constant(float("NaN"), dtype=self.dtype) *
               array_ops.ones_like(self._alpha, dtype=self.dtype)))
        else:
          return control_flow_ops.with_dependencies([
              check_ops.assert_less(
                  one, self._alpha,
                  message="mode not defined for components of alpha <= 1")
          ], mode)
Developer: 10imaging | Project: tensorflow | Lines: 33 | Source: dirichlet.py



Note: The tensorflow.python.ops.array_ops.ones_like examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. See each project's license before distributing or reusing the code; do not republish without permission.

