Python tensorflow.unsorted_segment_sum Function Code Examples


This article collects typical usage examples of the tensorflow.unsorted_segment_sum function in Python. If you are wondering what unsorted_segment_sum does, how to call it, or what real-world uses look like, the curated code examples below should help.



The following presents 20 code examples of the unsorted_segment_sum function, ordered by popularity by default.
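
Before the examples, a minimal sketch (TF 1.x API, hypothetical inputs) of the op's semantics: each element of data is added into the output slot named by its segment id, the ids need not be sorted, and segments that receive no elements come out as zeros.

import tensorflow as tf

data = tf.constant([1.0, 2.0, 3.0, 4.0])
segment_ids = tf.constant([0, 1, 0, 1])
# Elements sharing a segment id are summed together; num_segments fixes
# the output length, so every id must lie in [0, num_segments).
result = tf.unsorted_segment_sum(data, segment_ids, num_segments=2)

with tf.Session() as sess:
    print(sess.run(result))  # [4. 6.]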

Example 1: _full_batch_training_op

  def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
    """Creates an op for training for full batch case.

    Args:
      inputs: list of input Tensors.
      cluster_idx_list: A vector (or list of vectors). Each element in the
        vector corresponds to an input row in 'inp' and specifies the cluster id
        corresponding to the input.
      cluster_centers: Tensor Ref of cluster centers.

    Returns:
      An op for doing an update of full-batch k-means.
    """
    cluster_sums = []
    cluster_counts = []
    epsilon = tf.constant(1e-6, dtype=inputs[0].dtype)
    for inp, cluster_idx in zip(inputs, cluster_idx_list):
      with ops.colocate_with(inp):
        cluster_sums.append(tf.unsorted_segment_sum(inp,
                                                    cluster_idx,
                                                    self._num_clusters))
        cluster_counts.append(tf.unsorted_segment_sum(
            tf.reshape(tf.ones(tf.reshape(tf.shape(inp)[0], [-1])), [-1, 1]),
            cluster_idx,
            self._num_clusters))
    with ops.colocate_with(cluster_centers):
      new_clusters_centers = tf.add_n(cluster_sums) / (
          tf.cast(tf.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
      if self._clusters_l2_normalized():
        new_clusters_centers = tf.nn.l2_normalize(new_clusters_centers, dim=1)
    return tf.assign(cluster_centers, new_clusters_centers)
Author: 2020zyc, Project: tensorflow, Lines: 31, Source: clustering_ops.py


Example 2: data_group_avg

def data_group_avg(group_ids, data):
    # Sum each group
    sum_total = tf.unsorted_segment_sum(data, group_ids, 3)
    # Count each group
    num_total = tf.unsorted_segment_sum(tf.ones_like(data), group_ids, 3)
    # Calculate average
    avg_by_group = sum_total/num_total
    return(avg_by_group)
Author: Bluebear171, Project: tensorflow_cookbook, Lines: 8, Source: 03_k_means.py
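
A quick, hypothetical use of data_group_avg (TF 1.x) to make the shapes concrete; note the function hard-codes 3 groups, and any empty group would divide by zero:

group_ids = tf.constant([0, 2, 0, 1])
data = tf.constant([1.0, 10.0, 3.0, 5.0])
avg = data_group_avg(group_ids, data)
with tf.Session() as sess:
    print(sess.run(avg))  # [2. 5. 10.]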


Example 3: get_eval

def get_eval(logits, labels):
    with tf.variable_scope('loss2') as scope:
        logits = tf.reshape(logits, [-1, 21], name='logits2d')
        labels = tf.reshape(labels, [-1], name='labels1d')
        y_softmax = tf.nn.softmax(logits, name='softmax1d')
        predictions = tf.argmax(y_softmax, 1)
        correct_pred = tf.to_float(tf.equal(labels, predictions))
        ones = tf.ones_like(labels)
        eval_count = tf.to_float(tf.unsorted_segment_sum(ones, labels, 21))
        eval_correct = tf.to_float(tf.unsorted_segment_sum(correct_pred, labels, 21))
    return eval_count, eval_correct
Author: dmancevo, Project: semantic_segmentation, Lines: 11, Source: sem_segm.py


Example 4: kMeansTF

def kMeansTF(data, center, nMaxIter, th): # data: nDim x nData, center: nDim x  nCenter
    """Clustering data using the kMeans method implemented with tensorflow.

    :param data: 2D matrix as data input with dimensions: nDim x nData.
    :type data: numpy array.
    :param center: 2D matrix with initial cluster centers with dimensions: nDim x nCenter.
    :type center: numpy array.
    :param nMaxIter: Maximum number of iterations.
    :type nMaxIter: int.
    :param th: Threshold applied to RMS error between prior and current cluster centers.
    :type th: float.
    :return: 2D matrix with computed cluster centers with dimensions: nDim x nCenter.
    """
    nData   = data.shape[1]
    nCenter = center.shape[1]
    center  = tf.Variable(center)

    # Replicate data to have the dimensions: nDim x nData x nCenter
    rData       = tf.tile(tf.expand_dims(data,-1),[1, 1, nCenter]) # replicate for nCenter
    rCenter     = tf.transpose(tf.tile(tf.expand_dims(center,-1),[1, 1, nData]),perm=[0, 2, 1]) # replicate for nData

    # Get the cluster center of minimum distance for each data point.
    ssq         = tf.reduce_sum(tf.square(rData - rCenter), 0, keep_dims=True) # over nDim
    index       = tf.squeeze(tf.argmin(ssq, 2)) # min index over nCenter and remove leading dimension

    # Compute the new cluster centers based on the closest data points.
    newSum      = tf.unsorted_segment_sum(tf.transpose(data,[1,0]), index, nCenter)
    count       = tf.unsorted_segment_sum(tf.transpose(tf.ones_like(data),[1,0]), index, nCenter)
    newCenter   = tf.transpose(newSum / count,[1,0])

    # Compute the differences between the new and old cluster centers and threshold them.
    rms             = tf.reduce_sum(tf.sqrt(tf.reduce_sum((center-newCenter)*(center-newCenter), 0)), 0)
    changeCenter    = rms > th

    # Update the cluster centers if they have changed by more than the threshold value.
    with tf.control_dependencies([changeCenter]):
        doUpdates = center.assign(newCenter)

    # Initialize the tensor variables.
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)

    # As long as there are enough changes in the cluster centers and we have not reached the maximum number of
    # iterations, repeat the steps from above.
    changed = True
    iter    = 0
    while changed and iter < nMaxIter:
        iter += 1
        [changed, _] = sess.run([changeCenter, doUpdates])

    return sess.run(center)
Author: kbrems, Project: opveclib, Lines: 52, Source: clustering.py
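
A hypothetical call, assuming two well-separated 2-D clusters stored column-wise as the docstring requires (and a TF version that still provides the deprecated ops used above):

import numpy as np

data = np.array([[0.0, 0.2, 5.0, 5.2],
                 [0.0, 0.2, 5.0, 5.2]])  # nDim x nData
init_center = data[:, [1, 2]].copy()     # nDim x nCenter
print(kMeansTF(data, init_center, nMaxIter=20, th=1e-4))
# expected: one center near (0.1, 0.1), one near (5.1, 5.1)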


Example 5: adloss

    def adloss(self, x, xt, y, global_step):
        with tf.variable_scope('reuse_inference') as scope:
            scope.reuse_variables()
            self.inference(x, training=True)
            source_feature = self.feature
            scope.reuse_variables()
            self.inference(xt, training=True)
            target_feature = self.feature
            target_pred = self.output
        with tf.variable_scope('reuse') as scope:
            source_logits, _ = D(source_feature)
            scope.reuse_variables()
            target_logits, _ = D(target_feature)

        self.source_feature = source_feature
        self.target_feature = target_feature
        self.concat_feature = tf.concat([source_feature, target_feature], 0)
        source_result = tf.argmax(y, 1)
        target_result = tf.argmax(target_pred, 1)
        ones = tf.ones_like(source_feature)
        current_source_count = tf.unsorted_segment_sum(ones, source_result, self.num_classes)
        current_target_count = tf.unsorted_segment_sum(ones, target_result, self.num_classes)

        current_positive_source_count = tf.maximum(current_source_count, tf.ones_like(current_source_count))
        current_positive_target_count = tf.maximum(current_target_count, tf.ones_like(current_target_count))

        current_source_centroid = tf.divide(
            tf.unsorted_segment_sum(data=source_feature, segment_ids=source_result, num_segments=self.num_classes),
            current_positive_source_count)
        current_target_centroid = tf.divide(
            tf.unsorted_segment_sum(data=target_feature, segment_ids=target_result, num_segments=self.num_classes),
            current_positive_target_count)

        decay = tf.constant(0.3)
        self.decay = decay

        target_centroid = decay * current_target_centroid + (1. - decay) * self.target_moving_centroid
        source_centroid = decay * current_source_centroid + (1. - decay) * self.source_moving_centroid

        self.Semanticloss = protoloss(source_centroid, target_centroid)
        tf.summary.scalar('semanticloss', self.Semanticloss)

        D_real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=target_logits, labels=tf.ones_like(target_logits)))
        D_fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=source_logits, labels=tf.zeros_like(source_logits)))
        self.D_loss = D_real_loss + D_fake_loss
        self.G_loss = -self.D_loss
        tf.summary.scalar('G_loss', self.G_loss)
        tf.summary.scalar('JSD', self.G_loss / 2 + math.log(2))

        self.G_loss = 0.1 * self.G_loss
        self.D_loss = 0.1 * self.D_loss
        return self.G_loss, self.D_loss, source_centroid, target_centroid
Author: slowbull, Project: Moving-Semantic-Transfer-Network, Lines: 48, Source: mstnmodel.py


Example 6: _grad_variance

  def _grad_variance(self):
    """Estimate of gradient Variance.

    Returns:
      C_t ops.
    """
    grad_var_ops = []
    tensor_to_avg = []
    for t, g in zip(self._vars, self._grad):
      if isinstance(g, tf.IndexedSlices):
        tensor_to_avg.append(
            tf.reshape(tf.unsorted_segment_sum(g.values,
                                               g.indices,
                                               g.dense_shape[0]),
                       shape=t.get_shape()))
      else:
        tensor_to_avg.append(g)
    avg_op = self._moving_averager.apply(tensor_to_avg)
    grad_var_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
      self._grad_avg = [self._moving_averager.average(val)
                        for val in tensor_to_avg]
      self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]

    # Compute Variance
    self._grad_var = tf.maximum(
        tf.constant(1e-6, dtype=self._grad_norm_squared_avg.dtype),
        self._grad_norm_squared_avg
        - tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared]))
    if self._sparsity_debias:
      self._grad_var *= self._sparsity_avg
    return grad_var_ops  # C_t
Author: qixiuai, Project: tensor2tensor, Lines: 32, Source: yellowfin.py
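
The IndexedSlices branch above densifies a sparse gradient; a small, hypothetical illustration of that one step:

g = tf.IndexedSlices(values=tf.constant([[1.0], [2.0]]),
                     indices=tf.constant([3, 3]),
                     dense_shape=tf.constant([4, 1]))
# Duplicate indices are summed and missing rows are zero-filled,
# yielding a dense tensor shaped like the variable.
dense = tf.unsorted_segment_sum(g.values, g.indices, g.dense_shape[0])
with tf.Session() as sess:
    print(sess.run(dense))  # [[0.] [0.] [0.] [3.]]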


Example 7: testGradientMatchesSegmentSum

 def testGradientMatchesSegmentSum(self):
   # Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
   # and compare the outputs, which should be identical.
   # NB: for this test to work, indices must be valid for SegmentSum, namely
   # it must be sorted, the indices must be contiguous, and num_segments
   # must be max(indices) + 1.
   indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
   n = len(indices)
   num_cols = 2
   shape = [n, num_cols]
   num_segments = max(indices) + 1
   with self.test_session():
     tf_x, np_x = self._input(shape, dtype=tf.float64)
     # Results from UnsortedSegmentSum
     unsorted_s = tf.unsorted_segment_sum(data=tf_x,
                                                segment_ids=indices,
                                                num_segments=num_segments)
     unsorted_jacob_t, unsorted_jacob_n = gradient_checker.ComputeGradient(
         tf_x, shape, unsorted_s, [num_segments, num_cols],
         x_init_value=np_x.astype(np.double),
         delta=1)
     # Results from SegmentSum
     sorted_s = tf.segment_sum(data=tf_x, segment_ids=indices)
     sorted_jacob_t, sorted_jacob_n = gradient_checker.ComputeGradient(
         tf_x, shape, sorted_s, [num_segments, num_cols],
         x_init_value=np_x.astype(np.double),
         delta=1)
   self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3)
   self.assertAllClose(unsorted_jacob_n, sorted_jacob_n, rtol=1e-3, atol=1e-3)
Author: adeelzaman, Project: tensorflow, Lines: 29, Source: segment_reduction_ops_test.py


Example 8: EmbeddingLookupFeatures

def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
  """Computes embeddings for each entry of sparse features sparse_features.

  Args:
    params: list of 2D tensors containing vector embeddings
    sparse_features: 1D tensor of strings. Each entry is a string encoding of
      dist_belief.SparseFeatures, and represents a variable length list of
      feature ids, and optionally, corresponding weights values.
    allow_weights: boolean to control whether the weights returned from the
      SparseFeatures are used to multiply the embeddings.

  Returns:
    A tensor representing the combined embeddings for the sparse features.
    For each entry s in sparse_features, the function looks up the embeddings
    for each id and sums them into a single tensor weighing them by the
    weight of each id. It returns a tensor with each entry of sparse_features
    replaced by this combined embedding.
  """
  if not isinstance(params, list):
    params = [params]
  # Lookup embeddings.
  sparse_features = tf.convert_to_tensor(sparse_features)
  indices, ids, weights = gen_parser_ops.unpack_sparse_features(sparse_features)
  embeddings = tf.nn.embedding_lookup(params, ids)

  if allow_weights:
    # Multiply by weights, reshaping to allow broadcast.
    broadcast_weights_shape = tf.concat(0, [tf.shape(weights), [1]])
    embeddings *= tf.reshape(weights, broadcast_weights_shape)

  # Sum embeddings by index.
  return tf.unsorted_segment_sum(embeddings, indices, tf.size(sparse_features))
Author: TrendonixNetwork, Project: ProjectCybonix, Lines: 32, Source: graph_builder.py
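
The unpack_sparse_features op is SyntaxNet-specific, but the pooling pattern it feeds can be shown with hypothetical stand-in tensors: look up one embedding per feature id, then sum the lookups back into one row per original entry.

params = tf.constant([[1.0, 0.0], [0.0, 1.0], [2.0, 2.0]])
ids = tf.constant([0, 2, 1])      # three feature ids...
indices = tf.constant([0, 0, 1])  # ...owned by entries 0, 0 and 1
embeddings = tf.nn.embedding_lookup(params, ids)
pooled = tf.unsorted_segment_sum(embeddings, indices, 2)
with tf.Session() as sess:
    print(sess.run(pooled))  # [[3. 2.] [0. 1.]]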


Example 9: testBadIndices

 def testBadIndices(self):
   with self.test_session():
     for bad in [[-1]], [[7]]:
       unsorted = tf.unsorted_segment_sum([[17]], bad, num_segments=2)
       with self.assertRaisesOpError(
           r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
         unsorted.eval()
Author: 0-T-0, Project: tensorflow, Lines: 7, Source: segment_reduction_ops_test.py


Example 10: testValues

 def testValues(self):
   dtypes = [tf.float32,
             tf.float64,
             tf.int64,
             tf.int32,
             tf.complex64,
             tf.complex128]
   indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
   num_segments = 12
   for indices in indices_flat, indices_flat.reshape(5, 2):
     shape = indices.shape + (2,)
     for dtype in dtypes:
       with self.test_session(use_gpu=False):
         tf_x, np_x = self._input(shape, dtype=dtype)
         np_ans = self._segmentReduce(indices,
                                      np_x,
                                      np.add,
                                      op2=None,
                                      num_out_rows=num_segments)
         s = tf.unsorted_segment_sum(data=tf_x,
                                     segment_ids=indices,
                                     num_segments=num_segments)
         tf_ans = s.eval()
       self._assertAllClose(indices, np_ans, tf_ans)
       self.assertShapeEqual(np_ans, s)
Author: 0ruben, Project: tensorflow, Lines: 25, Source: segment_reduction_ops_test.py


Example 11: testBadIndices

 def testBadIndices(self):
     # Note: GPU kernel does not return the out-of-range error needed for this
     # test, so this test is marked as cpu-only.
     with self.test_session(use_gpu=False):
         for bad in [[-1]], [[7]]:
             unsorted = tf.unsorted_segment_sum([[17]], bad, num_segments=2)
             with self.assertRaisesOpError(r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
                 unsorted.eval()
Author: paolodedios, Project: tensorflow, Lines: 8, Source: segment_reduction_ops_test.py


Example 12: testEmptySecondDimension

 def testEmptySecondDimension(self):
     dtypes = [np.float32, np.float64, np.int64, np.int32, np.complex64, np.complex128]
     with self.test_session(use_gpu=self.use_gpu):
         for dtype in dtypes:
             for itype in (np.int32, np.int64):
                 data = np.zeros((2, 0), dtype=dtype)
                 segment_ids = np.array([0, 1], dtype=itype)
                 unsorted = tf.unsorted_segment_sum(data, segment_ids, 2)
                 self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
Author: paolodedios, Project: tensorflow, Lines: 9, Source: segment_reduction_ops_test.py


Example 13: apply_factor

def apply_factor(tensor, *args, **kwargs):
    scope = kwargs.pop("scope", "")
    with tf.name_scope(scope):
        n_args = len(args)

        if n_args == 0:
            tensor, output_size, error_symbol = tensor
            return one_hot(tensor, output_size, scope=scope)
        else:
            tensor, args = slice_out_int_literals(tensor, list(args))
            args, is_batched = make_batch_consistent(args)
            tensor, output_size, error_symbol = tensor
            
            # handle the case where all arguments were int literals
            tensor_dim_sizes = [dim.value for dim in tensor.get_shape()]
            if not tensor_dim_sizes:
                return one_hot(tensor, output_size, scope=scope)

            # Each arg is batch size x arg dim. Add dimensions to enable broadcasting.
            for i, arg in enumerate(args):
                for j in xrange(n_args):
                    if j == i: continue
                    args[i] = tf.expand_dims(args[i], j + 1)

            # compute joint before tensor is applied
            joint = 1
            for arg in args:
                joint = joint * arg
            
            # prepare for unsorted_segment_sum
            joint = tf.reshape(joint, (-1, np.prod(tensor_dim_sizes)))
            joint = tf.transpose(joint, [1, 0])  # |tensor| x batch_size

            if error_symbol is not None:
                result = tf.unsorted_segment_sum(joint, tf.reshape(tensor, [-1]), output_size + 1)
                # assume error bin is last bin
                result = result[:output_size, :]
            else:
                result = tf.unsorted_segment_sum(joint, tf.reshape(tensor, [-1]), output_size)

            result = tf.transpose(result, [1, 0])
            if not is_batched: result = tf.squeeze(result)
            return result    
Author: ml-lab, Project: TerpreT, Lines: 43, Source: terpret_tf_runtime.py


Example 14: __init__

  def __init__(self, requests, expert_capacity):
    """Create a TruncatingDispatcher.

    Args:
      requests: a boolean `Tensor` of shape `[batch, length, num_experts]`.
        Alternatively, a float or int Tensor containing zeros and ones.
      expert_capacity: a Scalar - maximum number of examples per expert per
        batch element.

    Returns:
      a TruncatingDispatcher
    """
    self._requests = tf.to_float(requests)
    self._expert_capacity = expert_capacity
    expert_capacity_f = tf.to_float(expert_capacity)
    self._batch, self._length, self._num_experts = tf.unstack(
        tf.shape(self._requests), num=3)

    # [batch, length, num_experts]
    position_in_expert = tf.cumsum(self._requests, axis=1, exclusive=True)
    # [batch, length, num_experts]
    self._gates = self._requests * tf.to_float(
        tf.less(position_in_expert, expert_capacity_f))
    batch_index = tf.reshape(
        tf.to_float(tf.range(self._batch)), [self._batch, 1, 1])
    length_index = tf.reshape(
        tf.to_float(tf.range(self._length)), [1, self._length, 1])
    expert_index = tf.reshape(
        tf.to_float(tf.range(self._num_experts)), [1, 1, self._num_experts])
    # position in a Tensor with shape [batch * num_experts * expert_capacity]
    flat_position = (
        position_in_expert +
        batch_index * (tf.to_float(self._num_experts) * expert_capacity_f) +
        expert_index * expert_capacity_f)
    # Tensor of shape [batch * num_experts * expert_capacity].
    # each element is an integer in [0, length)
    self._indices = tf.unsorted_segment_sum(
        data=tf.reshape((length_index + 1.0) * self._gates, [-1]),
        segment_ids=tf.to_int32(tf.reshape(flat_position, [-1])),
        num_segments=self._batch * self._num_experts * expert_capacity)
    self._indices = tf.reshape(
        self._indices,
        [self._batch, self._num_experts, expert_capacity])
    # Tensors of shape [batch, num_experts, expert_capacity].
    # each element is 0.0 or 1.0
    self._nonpadding = tf.minimum(self._indices, 1.0)
    # each element is an integer in [0, length)
    self._indices = tf.nn.relu(self._indices - 1.0)
    # self._flat_indices is [batch, num_experts, expert_capacity], with values
    # in [0, batch * length)
    self._flat_indices = tf.to_int32(
        self._indices +
        (tf.reshape(tf.to_float(tf.range(self._batch)), [-1, 1, 1])
         * tf.to_float(self._length)))
    self._indices = tf.to_int32(self._indices)
Author: AranKomat, Project: tensor2tensor, Lines: 55, Source: expert_utils.py


Example 15: MoG_validation

def MoG_validation(K):
	MoG_valid = mog.MoG("data2D.npy")
	_, X_data, mu, _, sigma_2, log_pi, pi_np = MoG_valid.cluster(K, D, B, 1.0/3.0)
	# _, X_data, mu, _, sigma_2, log_pi, pi_np = MoG_valid.cluster(K, D, B)

	loss_valid = MoG_valid.cal_loss(MoG_valid.validation.astype(np.float32), mu, D, log_pi, sigma_2)
	min_idx = MoG_valid.cal_min_idx(X_data, mu, np.sqrt(sigma_2), pi_np, D)

	data = tf.ones(shape = [B,])
	division = tf.unsorted_segment_sum(data, min_idx, K, name=None)

	data_valid = tf.ones(shape = [(B - (1 - 1/3) * B), ])
	min_idx_valid = MoG_valid.cal_min_idx(MoG_valid.validation.astype(np.float32), mu, np.sqrt(sigma_2), pi_np, D)
	division_valid = tf.unsorted_segment_sum(data_valid, min_idx_valid, K, name = None)

	with tf.Session():
		print 'loss_validation:', loss_valid.eval()
		print 'Total Proportion:', division.eval()/10000

		plot.plot_cluster(min_idx.eval(), X_data, mu, K)
		plot.plot_valid_cluster(min_idx_valid.eval(), MoG_valid.validation, mu, K)
Author: cxxichen, Project: Machine-Learning, Lines: 21, Source: MoG_validation.py


Example 16: testGradient

 def testGradient(self):
     num_cols = 2
     indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
     num_segments = max(indices_flat) + 3
     for indices in indices_flat, indices_flat.reshape(5, 2):
         shape = indices.shape + (num_cols,)
         with self.test_session(use_gpu=self.use_gpu):
             tf_x, np_x = self._input(shape, dtype=tf.float64)
             s = tf.unsorted_segment_sum(data=tf_x, segment_ids=indices, num_segments=num_segments)
             jacob_t, jacob_n = tf.test.compute_gradient(
                 tf_x, shape, s, [num_segments, num_cols], x_init_value=np_x.astype(np.double), delta=1
             )
         self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
Author: paolodedios, Project: tensorflow, Lines: 13, Source: segment_reduction_ops_test.py


Example 17: k_comparison

def k_comparison(K):
    D = 2
    B = 10000
                    
    KM = km.k_mean("data2D.npy")
    _, segment_ids, X_data, mu= KM.cluster(K, D, B)
    
    
    data = tf.ones(shape = [B,])
    division = tf.unsorted_segment_sum(data, segment_ids, K, name=None)
    
    with tf.Session():
        print "K =",K,":",division.eval()/10000
        plot.plot_cluster(segment_ids, X_data, mu, K)
Author: haoyues, Project: ML_HWs, Lines: 14, Source: k_comparison.py


Example 18: model

def model(x, segmentinds, keep_prob, batchsize, neuronList, activationType,
          fplength, mask, name, dxdxik, tilederiv,element):
    """Generates a multilayer neural network with variable number
    of neurons, so that we have a template for each atom's NN."""

    nNeurons = neuronList[0]
    # Pass  the input tensors through the first soft-plus layer
    W_fc = weight_variable([fplength, nNeurons], name=name+element)
    b_fc = bias_variable([nNeurons], name=name)
    h_fc = activationType(tf.matmul(x, W_fc) + b_fc)
    #h_fc = tf.nn.dropout(activationType(tf.matmul(x, W_fc) + b_fc),keep_prob)

    if len(neuronList) > 1:
        for i in range(1, len(neuronList)):
            nNeurons = neuronList[i]
            nNeuronsOld = neuronList[i - 1]
            W_fc = weight_variable([nNeuronsOld, nNeurons], name=name)
            b_fc = bias_variable([nNeurons], name=name)
            h_fc = tf.nn.dropout(activationType(
                tf.matmul(h_fc, W_fc) + b_fc), keep_prob)

    W_fc_out = weight_variable([neuronList[-1], 1], name=name)
    b_fc_out = bias_variable([1], name=name)
    y_out = tf.matmul(h_fc, W_fc_out) + b_fc_out

    # Sum the predicted energy for each molecule
    reducedSum = tf.unsorted_segment_sum(y_out, segmentinds, batchsize)

    dEjdgj = tf.gradients(y_out, x)[0]
    dEjdgj1 = tf.expand_dims(dEjdgj, 1)
    dEjdgj2 = tf.expand_dims(dEjdgj1, 1)
    dEjdgjtile = tf.tile(dEjdgj2, tilederiv)
    dEdxik = tf.mul(dxdxik, dEjdgjtile)
    dEdxikReduce = tf.reduce_sum(dEdxik, 3)
    dEdxik_reduced = tf.unsorted_segment_sum(
        dEdxikReduce, segmentinds, batchsize)
    return tf.mul(reducedSum, mask), dEdxik_reduced
Author: AkshayTharval, Project: Atomistic-Machine-Learning-Potentials, Lines: 37, Source: tfAmpNN.py


Example 19: _rowwise_unsorted_segment_sum

def _rowwise_unsorted_segment_sum(values, indices, n):
  """UnsortedSegmentSum on each row.

  Args:
    values: a `Tensor` with shape `[batch_size, k]`.
    indices: an integer `Tensor` with shape `[batch_size, k]`.
    n: an integer.
  Returns:
    A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
  """
  batch, k = tf.unstack(tf.shape(indices), num=2)
  indices_flat = tf.reshape(indices, [-1]) + tf.div(tf.range(batch * k), k) * n
  ret_flat = tf.unsorted_segment_sum(
      tf.reshape(values, [-1]), indices_flat, batch * n)
  return tf.reshape(ret_flat, [batch, n])
Author: AranKomat, Project: tensor2tensor, Lines: 15, Source: expert_utils.py
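
A hypothetical check of the offset trick: each row's indices are shifted by row * n, so a single flat unsorted_segment_sum reduces every row independently.

values = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # batch_size=2, k=2
indices = tf.constant([[0, 0], [2, 1]])         # per-row target bins
out = _rowwise_unsorted_segment_sum(values, indices, 3)
with tf.Session() as sess:
    print(sess.run(out))
    # [[3. 0. 0.]
    #  [0. 4. 3.]]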


Example 20: accumulate_sparse_gradients

def accumulate_sparse_gradients(grad):
  """Accumulates repeated indices of a sparse gradient update.

  Args:
    grad: a tf.IndexedSlices gradient

  Returns:
    grad_indices: unique indices
    grad_values: gradient values corresponding to the indices
  """

  grad_indices, grad_segments = tf.unique(grad.indices)
  grad_values = tf.unsorted_segment_sum(grad.values, grad_segments,
                                        tf.shape(grad_indices)[0])
  return grad_indices, grad_values
Author: ALISCIFP, Project: models, Lines: 15, Source: utils.py
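
A hypothetical IndexedSlices gradient with a repeated index shows the deduplication: tf.unique maps each original index to its unique slot, and unsorted_segment_sum adds up the rows that collide.

grad = tf.IndexedSlices(values=tf.constant([[1.0], [2.0], [3.0]]),
                        indices=tf.constant([5, 5, 9]))
unique_idx, summed_vals = accumulate_sparse_gradients(grad)
with tf.Session() as sess:
    print(sess.run([unique_idx, summed_vals]))
    # [array([5, 9], dtype=int32), array([[3.], [3.]], dtype=float32)]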



Note: The tensorflow.unsorted_segment_sum examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's license before distributing or reusing the code; do not reproduce this article without permission.

