
Python tensorflow.dynamic_stitch Function Code Examples


This article collects typical usage examples of the tensorflow.dynamic_stitch function in Python. If you have been wondering what dynamic_stitch does, how to call it, or what it looks like in real code, the curated examples below should help.



The following presents 20 code examples of the dynamic_stitch function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
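Before diving into the examples, here is a minimal illustrative sketch of what tf.dynamic_stitch does (assuming TensorFlow 1.x graph mode, which all of the examples below use): it merges several data tensors into one tensor, placing data[k][i] at position indices[k][i] of the result.

# Minimal sketch (assumes TensorFlow 1.x, as in the examples below).
import tensorflow as tf

indices = [tf.constant([0, 2]), tf.constant([1, 3])]
data = [tf.constant([10, 30]), tf.constant([20, 40])]
merged = tf.dynamic_stitch(indices, data)

with tf.Session() as sess:
    print(sess.run(merged))  # expected output: [10 20 30 40]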

Example 1: testErrorDataDimSizeMismatch

 def testErrorDataDimSizeMismatch(self):
   indices = [tf.constant([0, 4, 5]),
              tf.constant([1, 6, 2, 3])]
   data = [tf.constant([[0], [40], [70]]),
           tf.constant([[10, 11], [60, 61], [20, 21], [30, 31]])]
   with self.assertRaises(ValueError):
     tf.dynamic_stitch(indices, data)
Developer ID: 821760408-sp, Project: tensorflow, Lines: 7, Source: dynamic_stitch_op_test.py


Example 2: testErrorDataAndIndicesSizeMismatch

 def testErrorDataAndIndicesSizeMismatch(self):
   indices = [tf.constant([0, 4, 7]),
              tf.constant([1, 6, 2, 3, 5])]
   data = [tf.constant([0, 40, 70]),
           tf.constant([10, 60, 20, 30])]
   with self.assertRaises(ValueError):
     tf.dynamic_stitch(indices, data)
Developer ID: 821760408-sp, Project: tensorflow, Lines: 7, Source: dynamic_stitch_op_test.py


Example 3: loop

    def loop(q_, mask, mass_, found_):
        q_list = tf.dynamic_partition(q_, mask, 2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(q_)[0]), mask, 2)  # partition 0: mask is False,
        # partition 1: mask is True

        p = q_list[1] * (1.0 - mass_) / tf.reduce_sum(q_list[1])
        p_new = tf.dynamic_stitch(condition_indices, [q_list[0], p])

        # condition verification and mask modification
        less_mask = tf.cast(tf.less(u, p_new), tf.int32)  # 0 when u is bigger than p, 1 when u is less than p
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(p_new)[0]), less_mask,
                                                 2)  # 0 when u is bigger than p, 1 when u is less than p

        split_p_new = tf.dynamic_partition(p_new, less_mask, 2)
        split_u = tf.dynamic_partition(u, less_mask, 2)

        alpha = tf.dynamic_stitch(condition_indices, [split_p_new[0], split_u[1]])
        mass_ += tf.reduce_sum(split_u[1])

        mask = mask * (tf.ones_like(less_mask) - less_mask)

        found_ = tf.cond(tf.equal(tf.reduce_sum(less_mask), 0),
                         lambda: False,
                         lambda: True)

        alpha = tf.reshape(alpha, q_.shape)

        return alpha, mask, mass_, found_
Developer ID: RileyShe, Project: DeepPavlov, Lines: 28, Source: tf_csoftmax_attention.py


Example 4: testErrorIndicesMultiDimensional

 def testErrorIndicesMultiDimensional(self):
   indices = [tf.constant([0, 4, 7]),
              tf.constant([[1, 6, 2, 3, 5]])]
   data = [tf.constant([[0, 40, 70]]),
           tf.constant([10, 60, 20, 30, 50])]
   with self.assertRaises(ValueError):
     tf.dynamic_stitch(indices, data)
Developer ID: 821760408-sp, Project: tensorflow, Lines: 7, Source: dynamic_stitch_op_test.py


Example 5: CircularConvolution

def CircularConvolution(vector, kernel):
    size = int(vector.get_shape()[0])
    kernel_size = int(kernel.get_shape()[0])
    kernel_shift = int(math.floor(kernel_size/2.0))
    output = tf.zeros_like(vector)

    def loop(idx):
        if idx < 0: return size + idx
        if idx >= size : return idx - size
        else: return idx

    kernels = []
    for i in xrange(size):
        indices = [loop(i+j) for j in xrange(kernel_shift, -kernel_shift-1, -1)]
        v = tf.gather(vector, indices)
        kernels.append(tf.reduce_sum(v * kernel, 0, keep_dims=True))

    output = tf.dynamic_stitch([[i] for i in xrange(size)], kernels)

    # # code with double loop
    # for i in xrange(size):
    #     for j in xrange(kernel_size):
    #         idx = i + kernel_shift - j + 1
    #         if idx < 0: idx = idx + size
    #         if idx >= size: idx = idx - size
    #         w = tf.gather(vector, int(idx)) * tf.gather(kernel, j)
    #         output = tf.scatter_add(output, [i], tf.reshape(w, [1, -1]))

    return output
Developer ID: ramtej, Project: NTM-tensorflow, Lines: 29, Source: layers.py


Example 6: testSumGradArgs

 def testSumGradArgs(self):
   with self.test_session(use_gpu=False):
     indices = [tf.convert_to_tensor([0, 1, 2, 3]),
                tf.convert_to_tensor([2, 3])]
     values = [tf.convert_to_tensor([2, 3, 5, 7]), tf.convert_to_tensor([1, 1])]
     self.assertAllEqual(
         tf.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])
Developer ID: CdricGmd, Project: tensorflow, Lines: 7, Source: embedding_ops_test.py


Example 7: indices_to_dense_vector

def indices_to_dense_vector(indices,
                            size,
                            indices_value=1.,
                            default_value=0,
                            dtype=tf.float32):
    """Creates dense vector with indices set to specific value and rest to zeros.

    This function exists because it is unclear if it is safe to use
      tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
    with indices which are not ordered.
    This function accepts a dynamic size (e.g. tf.shape(tensor)[0])

    Args:
      indices: 1d Tensor with integer indices which are to be set to
          indices_values.
      size: scalar with size (integer) of output Tensor.
      indices_value: values of elements specified by indices in the output vector
      default_value: values of other elements in the output vector.
      dtype: data type.

    Returns:
      dense 1D Tensor of shape [size] with indices set to indices_values and the
          rest set to default_value.
    """
    size = tf.to_int32(size)
    zeros = tf.ones([size], dtype=dtype) * default_value
    values = tf.ones_like(indices, dtype=dtype) * indices_value

    return tf.dynamic_stitch([tf.range(size), tf.to_int32(indices)],
                             [zeros, values])
Developer ID: Zumbalamambo, Project: deepcv, Lines: 30, Source: ops.py
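
A hypothetical usage sketch of the helper above (illustrative values only; assumes TensorFlow 1.x and that indices_to_dense_vector is defined as shown):

indices = tf.constant([1, 3])
dense = indices_to_dense_vector(indices, size=5)
# After evaluation, dense is expected to be [0., 1., 0., 1., 0.]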


Example 8: _create_regression_targets

  def _create_regression_targets(self, anchors, groundtruth_boxes, match):
    """Returns a regression target for each anchor.

    Args:
      anchors: a BoxList representing N anchors
      groundtruth_boxes: a BoxList representing M groundtruth_boxes
      match: a matcher.Match object

    Returns:
      reg_targets: a float32 tensor with shape [N, box_code_dimension]
    """
    matched_anchor_indices = match.matched_column_indices()
    unmatched_ignored_anchor_indices = (match.
                                        unmatched_or_ignored_column_indices())
    matched_gt_indices = match.matched_row_indices()
    matched_anchors = box_list_ops.gather(anchors,
                                          matched_anchor_indices)
    matched_gt_boxes = box_list_ops.gather(groundtruth_boxes,
                                           matched_gt_indices)
    matched_reg_targets = self._box_coder.encode(matched_gt_boxes,
                                                 matched_anchors)
    unmatched_ignored_reg_targets = tf.tile(
        self._default_regression_target(),
        tf.stack([tf.size(unmatched_ignored_anchor_indices), 1]))
    reg_targets = tf.dynamic_stitch(
        [matched_anchor_indices, unmatched_ignored_anchor_indices],
        [matched_reg_targets, unmatched_ignored_reg_targets])
    # TODO: summarize the number of matches on average.
    return reg_targets
Developer ID: GERASM1, Project: Semana-i-Equipo-Seat-Here, Lines: 29, Source: target_assigner.py


Example 9: _partition_and_stitch

    def _partition_and_stitch(self, args, func_name):
        """
        args is a list of tensors, to be passed to self.likelihoods.<func_name>

        args[-1] is the 'Y' argument, which contains the indexes to self.likelihoods.

        This function splits up the args using dynamic_partition, calls the
        relevant function on the likelihoods, and re-combines the result.
        """
        # get the index from Y
        Y = args[-1]
        ind = Y[:, -1]
        ind = tf.cast(ind, tf.int32)
        Y = Y[:, :-1]
        args[-1] = Y

        # split up the arguments into chunks corresponding to the relevant likelihoods
        args = zip(*[tf.dynamic_partition(X, ind, self.num_likelihoods) for X in args])

        # apply the likelihood-function to each section of the data
        with params_as_tensors_for(self, convert=False):
            funcs = [getattr(lik, func_name) for lik in self.likelihood_list]
        results = [f(*args_i) for f, args_i in zip(funcs, args)]

        # stitch the results back together
        partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind, self.num_likelihoods)
        results = tf.dynamic_stitch(partitions, results)

        return results
Developer ID: sanket-kamthe, Project: GPflow, Lines: 29, Source: likelihoods.py


Example 10: circular_convolution

def circular_convolution(v, k):
    """Computes circular convolution.

    Args:
        v: a 1-D `Tensor` (vector)
        k: a 1-D `Tensor` (kernel)
    """
    size = int(v.get_shape()[0])
    kernel_size = int(k.get_shape()[0])
    kernel_shift = int(math.floor(kernel_size/2.0))

    def loop(idx):
        if idx < 0: return size + idx
        if idx >= size : return idx - size
        else: return idx

    kernels = []
    for i in xrange(size):
        indices = [loop(i+j) for j in xrange(kernel_shift, -kernel_shift-1, -1)]
        v_ = tf.gather(v, indices)
        kernels.append(tf.reduce_sum(v_ * k, 0))

    # # code with double loop
    # for i in xrange(size):
    #     for j in xrange(kernel_size):
    #         idx = i + kernel_shift - j + 1
    #         if idx < 0: idx = idx + size
    #         if idx >= size: idx = idx - size
    #         w = tf.gather(v, int(idx)) * tf.gather(kernel, j)
    #         output = tf.scatter_add(output, [i], tf.reshape(w, [1, -1]))

    return tf.dynamic_stitch([i for i in xrange(size)], kernels)
Developer ID: PKUers, Project: NTM-tensorflow, Lines: 32, Source: ops.py
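
A hypothetical usage sketch for the function above (illustrative values only; the snippet comes from a Python 2 codebase, hence xrange): with a one-hot shift kernel, circular convolution rotates the vector by one position.

v = tf.constant([1., 2., 3., 4., 5.])
k = tf.constant([0., 0., 1.])  # one-hot kernel acting as a circular shift
shifted = circular_convolution(v, k)
# After evaluation, shifted is expected to be [5., 1., 2., 3., 4.]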


Example 11: _create_classification_targets

  def _create_classification_targets(self, groundtruth_labels, match):
    """Create classification targets for each anchor.

    Assign a classification target for each anchor to the matching
    groundtruth label that is provided by match.  Anchors that are not matched
    to anything are given the target self._unmatched_cls_target

    Args:
      groundtruth_labels:  a tensor of shape [num_gt_boxes, d_1, ... d_k]
        with labels for each of the ground_truth boxes. The subshape
        [d_1, ... d_k] can be empty (corresponding to scalar labels).
      match: a matcher.Match object that provides a matching between anchors
        and groundtruth boxes.

    Returns:
      cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
        where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
        which has shape [num_gt_boxes, d_1, d_2, ... d_k].
    """
    matched_anchor_indices = match.matched_column_indices()
    unmatched_ignored_anchor_indices = (match.
                                        unmatched_or_ignored_column_indices())
    matched_gt_indices = match.matched_row_indices()
    matched_cls_targets = tf.gather(groundtruth_labels, matched_gt_indices)

    ones = self._unmatched_cls_target.shape.ndims * [1]
    unmatched_ignored_cls_targets = tf.tile(
        tf.expand_dims(self._unmatched_cls_target, 0),
        tf.stack([tf.size(unmatched_ignored_anchor_indices)] + ones))

    cls_targets = tf.dynamic_stitch(
        [matched_anchor_indices, unmatched_ignored_anchor_indices],
        [matched_cls_targets, unmatched_ignored_cls_targets])
    return cls_targets
Developer ID: GERASM1, Project: Semana-i-Equipo-Seat-Here, Lines: 34, Source: target_assigner.py


Example 12: scheduled_sample_count

def scheduled_sample_count(ground_truth_x,
                           generated_x,
                           batch_size,
                           scheduled_sample_var):
  """Sample batch with specified mix of groundtruth and generated data points.

  Args:
    ground_truth_x: tensor of ground-truth data points.
    generated_x: tensor of generated data points.
    batch_size: batch size
    scheduled_sample_var: number of ground-truth examples to include in batch.
  Returns:
    New batch with num_ground_truth sampled from ground_truth_x and the rest
    from generated_x.
  """
  num_ground_truth = scheduled_sample_var
  idx = tf.random_shuffle(tf.range(batch_size))
  ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
  generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))

  ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
  generated_examps = tf.gather(generated_x, generated_idx)

  output = tf.dynamic_stitch([ground_truth_idx, generated_idx],
                             [ground_truth_examps, generated_examps])
  # if batch size is known set it.
  if isinstance(batch_size, int):
    output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
  return output
Developer ID: qixiuai, Project: tensor2tensor, Lines: 29, Source: common_video.py


Example 13: test_dynamic_stitch

def test_dynamic_stitch(sess):
    x = tf.zeros((1, 3))
    y = tf.dynamic_stitch([[0], [0]], [x, tf.ones((1, 3))])
    z = tf.gather(y, [0])

    with sess.as_default():
        analytic, numeric = tf.test.compute_gradient(x, (1, 3), z, (1, 3))

    assert np.allclose(analytic, numeric)
Developer ID: nengo, Project: nengo_deeplearning, Lines: 9, Source: test_tensorflow_patch.py


Example 14: testOneListOneDimensional

 def testOneListOneDimensional(self):
   with self.test_session():
     indices = [tf.constant([1, 6, 2, 3, 5, 0, 4, 7])]
     data = [tf.constant([10, 60, 20, 30, 50, 0, 40, 70])]
     stitched_t = tf.dynamic_stitch(indices, data)
     stitched_val = stitched_t.eval()
     self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
     # Dimension 0 is determined by the max index in indices, so we
     # can only infer that the output is a vector of some unknown
     # length.
     self.assertEqual([None], stitched_t.get_shape().as_list())
Developer ID: 821760408-sp, Project: tensorflow, Lines: 11, Source: dynamic_stitch_op_test.py


Example 15: testStitchOrder

 def testStitchOrder(self):
   with self.test_session():
     indices = []
     np_values = []
     values = []
     for _ in range(10):
       indices.extend([tf.convert_to_tensor(np.arange(100).astype(np.int32))])
       np_values.extend([np.random.uniform(size=100)])
       values.extend([tf.convert_to_tensor(np_values[-1])])
     stitched = tf.dynamic_stitch(indices, values).eval()
   self.assertAllEqual(np_values[-1], stitched)
Developer ID: CdricGmd, Project: tensorflow, Lines: 11, Source: embedding_ops_test.py


Example 16: testScalar

 def testScalar(self):
   with self.test_session():
     indices = [tf.constant(0), tf.constant(1)]
     data = [tf.constant(40), tf.constant(60)]
     for step in -1, 1:
       stitched_t = tf.dynamic_stitch(indices[::step], data)
       stitched_val = stitched_t.eval()
       self.assertAllEqual([40, 60][::step], stitched_val)
       # Dimension 0 is determined by the max index in indices, so we
       # can only infer that the output is a vector of some unknown
       # length.
       self.assertEqual([None], stitched_t.get_shape().as_list())
Developer ID: 821760408-sp, Project: tensorflow, Lines: 12, Source: dynamic_stitch_op_test.py


Example 17: __call__

    def __call__(self, X):
        ind = tf.gather(tf.transpose(X), tf.shape(X)[1]-1)  # ind = X[:,-1]
        ind = tf.cast(ind, tf.int32)
        X = tf.transpose(tf.gather(tf.transpose(X), tf.range(0, tf.shape(X)[1]-1)))  # X = X[:,:-1]

        # split up X into chunks corresponding to the relevant likelihoods
        x_list = tf.dynamic_partition(X, ind, len(self.meanfunction_list))
        # apply the likelihood-function to each section of the data
        results = [m(x) for x, m in zip(x_list, self.meanfunction_list)]
        # stitch the results back together
        partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind, len(self.meanfunction_list))
        return tf.dynamic_stitch(partitions, results)
Developer ID: sanket-kamthe, Project: GPflow, Lines: 12, Source: mean_functions.py


Example 18: split_apply_merge

def split_apply_merge(inp, partitions, fns):
    """Split input according to partitions.  Pass results through fns and merge.
  Args:
    inp: the input vector
    partitions: tensor of same length as input vector, having values 0, 1
    fns: the two functions.
  Returns:
    the vector routed, where routed[i] = fns[partitions[i]](inp[i])
  """
    new_inputs = tf.dynamic_partition(inp, partitions, len(fns))
    new_outputs = [fns[i](x) for i, x in enumerate(new_inputs)]
    new_indices = tf.dynamic_partition(tf.range(0, inp.get_shape()[0]), partitions, len(fns))
    return tf.dynamic_stitch(new_indices, new_outputs)
Developer ID: rudaoshi, Project: neuralmachines, Lines: 13, Source: reinforce.py
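
A hypothetical usage sketch for split_apply_merge (illustrative values only; assumes TensorFlow 1.x and the function defined above): elements with partition id 0 are routed through the first function, elements with id 1 through the second, and the results are stitched back into their original positions.

inp = tf.constant([1.0, 2.0, 3.0, 4.0])
partitions = tf.constant([0, 1, 0, 1])
fns = [lambda x: x * 10.0, lambda x: x + 0.5]
routed = split_apply_merge(inp, partitions, fns)
# After evaluation, routed is expected to be [10.0, 2.5, 30.0, 4.5]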


Example 19: replace_features

def replace_features(coarse_features, fine_features, replace_idxs):
    """ Replace fine features with the corresponding coarse features

        Trick.
            use tf.dynamic_stitch ops

    """
   
    # TODO: simplify indexing 
    def _convert_to_1d_idxs(src_idxs):
        """ Convert 2D idxs to 1D idxs 
            within 1D tensor whose shape is (b*h*w*c)
        """
        batch_idx_len = map_channel.value * map_width.value * map_height.value
        batch_idx_base = [i*batch_idx_len for i in xrange(batch_size.value)]

        batch_1d = map_channel.value * map_width.value * src_idxs[:,0] + \
                   map_channel.value * src_idxs[:,1]
        batch_1d = tf.add(batch_1d,batch_idx_base)
        
        flat_idxs = [batch_1d+i for i in xrange(map_channel.value)]
        flat_idxs = tf.reshape(tf.transpose(tf.pack(flat_idxs)), [-1])

        return flat_idxs

    batch_size, map_height, map_width, map_channel = coarse_features.get_shape()

    # flatten coarse features
    flat_coarse_features = tf.reshape(coarse_features, [batch_size.value,-1])
    flat_coarse_features = tf.reshape(flat_coarse_features, [-1])


    # flatten fine features
    flat_fine_features = [tf.reshape(i,[-1]) for i in fine_features]
    flat_fine_features = tf.concat(0,flat_fine_features)

    flat_fine_idxs = [_convert_to_1d_idxs(i) for i in replace_idxs]
    flat_fine_idxs = tf.concat(0,flat_fine_idxs)

    # extract coarse features to be replaced
    # this is required for hint-based training
    flat_coarse_replaced = tf.gather(flat_coarse_features, flat_fine_idxs, validate_indices=False)

    merged = tf.dynamic_stitch([tf.range(0,flat_coarse_features.get_shape()[0]),flat_fine_idxs],
            [flat_coarse_features,flat_fine_features])

    merged = tf.reshape(merged,coarse_features.get_shape())

    return merged, flat_coarse_replaced, flat_fine_features
Developer ID: jazzsaxmafia, Project: dcn.tf, Lines: 49, Source: dcn.py


Example 20: _match_when_rows_are_non_empty

    def _match_when_rows_are_non_empty():
      """Performs matching when the rows of similarity matrix are non empty.

      Returns:
        matches:  int32 tensor indicating the row each column matches to.
      """
      # Matches for each column
      matches = tf.argmax(similarity_matrix, 0)

      # Deal with matched and unmatched threshold
      if self._matched_threshold is not None:
        # Get logical indices of ignored and unmatched columns as tf.int64
        matched_vals = tf.reduce_max(similarity_matrix, 0)
        below_unmatched_threshold = tf.greater(self._unmatched_threshold,
                                               matched_vals)
        between_thresholds = tf.logical_and(
            tf.greater_equal(matched_vals, self._unmatched_threshold),
            tf.greater(self._matched_threshold, matched_vals))

        if self._negatives_lower_than_unmatched:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -1)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -2)
        else:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -2)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -1)

      if self._force_match_for_each_row:
        forced_matches_ids = tf.cast(tf.argmax(similarity_matrix, 1), tf.int32)

        # Set matches[forced_matches_ids] = [0, ..., R], R is number of rows.
        row_range = tf.range(tf.shape(similarity_matrix)[0])
        col_range = tf.range(tf.shape(similarity_matrix)[1])
        forced_matches_values = tf.cast(row_range, matches.dtype)
        keep_matches_ids, _ = tf.setdiff1d(col_range, forced_matches_ids)
        keep_matches_values = tf.gather(matches, keep_matches_ids)
        matches = tf.dynamic_stitch(
            [forced_matches_ids,
             keep_matches_ids], [forced_matches_values, keep_matches_values])

      return tf.cast(matches, tf.int32)
Developer ID: DaRealLazyPanda, Project: models, Lines: 48, Source: argmax_matcher.py



Note: The tensorflow.dynamic_stitch examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please follow each project's license when using or redistributing the code; do not reproduce without permission.


