Python tensorflow.segment_sum Function Code Examples


This article collects typical usage examples of the Python function tensorflow.segment_sum. If you are unsure what segment_sum does, how to call it, or what real-world uses look like, the curated code examples below should help.



The following presents 20 code examples of the segment_sum function, ordered by popularity by default.
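
As a quick orientation before the examples, here is a minimal sketch of what segment_sum computes, written against the TF 1.x API used by every snippet in this article (in TF 2.x the same op is available as tf.math.segment_sum); the tensor values are purely illustrative.

import tensorflow as tf

data = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
# segment_ids must be sorted, start at 0, and assign every row of `data` to a segment
segment_ids = tf.constant([0, 0, 1])
summed = tf.segment_sum(data, segment_ids)  # rows 0 and 1 are summed; row 2 stands alone

with tf.Session() as sess:
    print(sess.run(summed))  # [[4. 6.]
                             #  [5. 6.]]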

Example 1: testGradientMatchesSegmentSum

 def testGradientMatchesSegmentSum(self):
   # Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
   # and compare the outputs, which should be identical.
   # NB: for this test to work, indices must be valid for SegmentSum, namely
   # it must be sorted, the indices must be contiguous, and num_segments
   # must be max(indices) + 1.
   indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
   n = len(indices)
   num_cols = 2
   shape = [n, num_cols]
   num_segments = max(indices) + 1
   with self.test_session():
     tf_x, np_x = self._input(shape, dtype=tf.float64)
     # Results from UnsortedSegmentSum
     unsorted_s = tf.unsorted_segment_sum(data=tf_x,
                                                segment_ids=indices,
                                                num_segments=num_segments)
     unsorted_jacob_t, unsorted_jacob_n = gradient_checker.ComputeGradient(
         tf_x, shape, unsorted_s, [num_segments, num_cols],
         x_init_value=np_x.astype(np.double),
         delta=1)
     # Results from SegmentSum
     sorted_s = tf.segment_sum(data=tf_x, segment_ids=indices)
     sorted_jacob_t, sorted_jacob_n = gradient_checker.ComputeGradient(
         tf_x, shape, sorted_s, [num_segments, num_cols],
         x_init_value=np_x.astype(np.double),
         delta=1)
   self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3)
   self.assertAllClose(unsorted_jacob_n, sorted_jacob_n, rtol=1e-3, atol=1e-3)
Developer: adeelzaman | Project: tensorflow | Lines: 29 | Source: segment_reduction_ops_test.py


Example 2: create_tensor

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, atom_split
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    outputs = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    if self.gaussian_expand:
      outputs = self.gaussian_histogram(outputs)

    output_molecules = tf.segment_sum(outputs, atom_split)

    if self.gaussian_expand:
      output_molecules = tf.matmul(output_molecules, self.W) + self.b
      output_molecules = self.activation(output_molecules)

    out_tensor = output_molecules
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Developer: AhlamMD | Project: deepchem | Lines: 26 | Source: graph_layers.py


Example 3: create_tensor

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, distance, distance_membership_i, distance_membership_j
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    atom_features = in_layers[0].out_tensor
    distance = in_layers[1].out_tensor
    distance_membership_i = in_layers[2].out_tensor
    distance_membership_j = in_layers[3].out_tensor
    distance_hidden = tf.matmul(distance, self.W_df) + self.b_df
    atom_features_hidden = tf.matmul(atom_features, self.W_cf) + self.b_cf
    outputs = tf.multiply(
        distance_hidden, tf.gather(atom_features_hidden, distance_membership_j))

    # For atom i in molecule m, this step multiplies the distance information of atom pair (i, j)
    # by the embedding of atom j (both have already passed through a hidden layer)
    outputs = tf.matmul(outputs, self.W_fc)
    outputs = self.activation(outputs)

    output_ii = tf.multiply(self.b_df, atom_features_hidden)
    output_ii = tf.matmul(output_ii, self.W_fc)
    output_ii = self.activation(output_ii)

    # For atom i, sum the influence from all other atoms j in the molecule
    outputs = tf.segment_sum(outputs,
                             distance_membership_i) - output_ii + atom_features
    out_tensor = outputs
    if set_tensors:
      self.trainable_variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Developer: ktaneishi | Project: deepchem | Lines: 35 | Source: graph_layers.py


Example 4: model

    def model(self, dataset, labels, isEval=None):

        with tf.variable_scope('softmax_linear', reuse=isEval):
            embeddings = tf.get_variable("embeddings", [self.vocabulary_size, self.embedding_size],
                initializer=tf.random_uniform_initializer(minval=-1.0, maxval=1.0, seed=self.SEED))
            segments = tf.constant([x // self.context_window for x in
                range(self.cbowbatch_size)])
            weights = tf.get_variable("weights", [self.vocabulary_size, self.embedding_size],
                initializer=tf.truncated_normal_initializer(0.0, 1.0 / math.sqrt(float(self.embedding_size)),
                          seed=self.SEED))
            biases = tf.get_variable("biases", [self.vocabulary_size],
                initializer=tf.constant_initializer(0.0))

            # Look up embeddings for inputs.
            embed = tf.nn.embedding_lookup(embeddings, dataset)
            compressed_embeddings = tf.segment_sum(embed, segments)  # merge the context-window word embeddings into a single input
            # Compute the softmax loss, using a sample of the negative labels each time.
            loss = tf.reduce_mean(
                tf.nn.sampled_softmax_loss(weights, biases,
                                           compressed_embeddings,
                               labels, self.num_sampled, self.vocabulary_size))

            similarity, normalized_embeddings, embeddings = self.similarity(embeddings, dataset)

            if isEval == None:
                return loss
            if isEval == True:
                return similarity, normalized_embeddings, embeddings
Developer: andreslechuga | Project: DeepLearning | Lines: 28 | Source: Assignment5.py


Example 5: testSegmentIdsInvalid2

 def testSegmentIdsInvalid2(self):
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [1, 1, 2, 2]
     s = tf.segment_sum(data=tf_x, segment_ids=indices)
     with self.assertRaisesOpError("segment ids do not start at 0"):
       s.eval()
Developer: 0-T-0 | Project: tensorflow | Lines: 8 | Source: segment_reduction_ops_test.py


Example 6: testSegmentIdsInvalid4

 def testSegmentIdsInvalid4(self):
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [0, 1, 0, 1]
     s = tf.segment_sum(data=tf_x, segment_ids=indices)
     with self.assertRaisesOpError("segment ids are not increasing by 1"):
       s.eval()
Developer: 0-T-0 | Project: tensorflow | Lines: 8 | Source: segment_reduction_ops_test.py


Example 7: testSegmentIdsInvalid7

 def testSegmentIdsInvalid7(self):
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [0, 0, 0, -2]
     s = tf.segment_sum(data=tf_x, segment_ids=indices)
     with self.assertRaisesOpError("segment ids must be >= 0"):
       s.eval()
Developer: 0-T-0 | Project: tensorflow | Lines: 8 | Source: segment_reduction_ops_test.py


Example 8: testSegmentIdsSize

 def testSegmentIdsSize(self):
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [0, 1]
     s = tf.segment_sum(data=tf_x, segment_ids=indices)
     with self.assertRaisesOpError("segment_ids should be the same size"):
       s.eval()
Developer: adeelzaman | Project: tensorflow | Lines: 8 | Source: segment_reduction_ops_test.py


Example 9: testSegmentIdsValid

 def testSegmentIdsValid(self):
   # This is a baseline for the following SegmentIdsInvalid* tests.
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [0, 0, 0, 1]
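     # Assuming self._input fills the 4x4 matrix with the values 1..16 in row-major order,
     # rows 0-2 sum to [15, 18, 21, 24] and row 3 stays [13, 14, 15, 16]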
     result = tf.segment_sum(data=tf_x, segment_ids=indices).eval()
     self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
Developer: 0-T-0 | Project: tensorflow | Lines: 8 | Source: segment_reduction_ops_test.py


Example 10: remap_keys

  def remap_keys(sparse_tensor):
    # Current indices of our SparseTensor that we need to fix
    bad_indices = sparse_tensor.indices
    # Current values of our SparseTensor that we need to fix
    bad_values = sparse_tensor.values 
  
    # Group by the batch_indices and get the count for each  
    size = tf.segment_sum(data = tf.ones_like(bad_indices[:,0], dtype = tf.int64), segment_ids = bad_indices[:,0]) - 1
    # The number of batch_indices (this should be batch_size unless it is a partially full batch)
    length = tf.shape(size, out_type = tf.int64)[0]
    # Finds the cumulative sum which we can use for indexing later
    cum = tf.cumsum(size)
    # The offsets between each example in the batch due to our concatenation of the keys in the decode_example method
    length_range = tf.range(start = 0, limit = length, delta = 1, dtype = tf.int64)
    # Indices of the SparseTensor's indices member for the rows we added by the concatenation of our keys in the decode_example method
    cum_range = cum + length_range

    # The keys that we have extracted back out of our concatenated SparseTensor
    gathered_indices = tf.squeeze(tf.gather(bad_indices, cum_range)[:,1])

    # The enumerated row indices of the SparseTensor's indices member
    sparse_indices_range = tf.range(tf.shape(bad_indices, out_type = tf.int64)[0], dtype = tf.int64)

    # Here we want the row indices of the SparseTensor's indices member that belong to our actual data, not to the concatenated key rows,
    # so we find the intersection of the two sets and then take its complement
    x = sparse_indices_range
    s = cum_range

    # Number of multiples we are going to tile x, which is our sparse_indices_range
    tile_multiples = tf.concat([tf.ones(tf.shape(tf.shape(x)), dtype=tf.int64), tf.shape(s, out_type = tf.int64)], axis = 0)
    # Expands x, our sparse_indices_range, into a rank 2 tensor and then multiplies the rows by 1 (no copying) and the columns by the number of examples in the batch
    x_tile = tf.tile(tf.expand_dims(x, -1), tile_multiples)
    # Essentially a vectorized logical or, that we then negate
    x_not_in_s = ~tf.reduce_any(tf.equal(x_tile, s), -1)

    # The SparseTensor's indices that are our actual data by using the boolean_mask we just made above applied to the entire indices member of our SparseTensor
    selected_indices = tf.boolean_mask(tensor = bad_indices, mask = x_not_in_s, axis = 0)
    # Apply the same boolean_mask to the entire values member of our SparseTensor to get the actual values data
    selected_values = tf.boolean_mask(tensor = bad_values, mask = x_not_in_s, axis = 0)

    # Need to replace the first column of our selected_indices with keys, so we first need to tile our gathered_indices
    tiling = tf.tile(input = tf.expand_dims(gathered_indices[0], -1), multiples = tf.expand_dims(size[0] , -1))
    
    # We have to repeatedly apply the tiling to each example in the batch
    # Since it is jagged we cannot use tf.map_fn due to the stacking of the TensorArray, so we have to create our own custom version
    def loop_body(i, tensor_grow):
      return i + 1, tf.concat(values = [tensor_grow, tf.tile(input = tf.expand_dims(gathered_indices[i], -1), multiples = tf.expand_dims(size[i] , -1))], axis = 0)

    _, result = tf.while_loop(lambda i, tensor_grow: i < length, loop_body, [tf.constant(1, dtype = tf.int64), tiling])
    
    # Concatenate tiled keys with the 2nd column of selected_indices
    selected_indices_fixed = tf.concat([tf.expand_dims(result, -1), tf.expand_dims(selected_indices[:, 1], -1)], axis = 1)
    
    # Combine everything together back into a SparseTensor
    remapped_sparse_tensor = tf.SparseTensor(indices = selected_indices_fixed, values = selected_values, dense_shape = sparse_tensor.dense_shape)
    return remapped_sparse_tensor
Developer: TarunBattula | Project: training-data-analyst | Lines: 56 | Source: model.py


Example 11: testSegmentIdsInvalid5

 def testSegmentIdsInvalid5(self):
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [0, 1, 2, 0]
     s = tf.segment_sum(data=tf_x, segment_ids=indices)
     with self.assertRaisesOpError(
         r"Segment id 1 out of range \[0, 1\), probably "
         "because 'segment_ids' input is not sorted."):
       s.eval()
Developer: 0-T-0 | Project: tensorflow | Lines: 10 | Source: segment_reduction_ops_test.py


Example 12: range

  
  # Variables.
  embeddings = tf.Variable(
    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
  softmax_weights = tf.Variable(
    tf.truncated_normal([vocabulary_size, embedding_size],
                         stddev=1.0 / math.sqrt(embedding_size)))
  softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
  
  # Model.
  # Look up embeddings for inputs.
  embed = tf.nn.embedding_lookup(embeddings, train_dataset)
  # sum every `context_window` word embedding into one
  segment_ids = tf.constant([i // context_window for i in range(batch_size)])
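  # e.g. with context_window = 2 and batch_size = 6 this gives segment_ids = [0, 0, 1, 1, 2, 2]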
  
  embed = tf.segment_sum(embed, segment_ids)
  
  # Compute the softmax loss, using a sample of the negative labels each time.
  loss = tf.reduce_mean(
    tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed,
                               train_labels, num_sampled, vocabulary_size))

  # Optimizer.
  optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
  
  # Compute the similarity between minibatch examples and all embeddings.
  # We use the cosine distance:
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
    normalized_embeddings, valid_dataset)
Developer: vincentqb | Project: udacity-deeplearning | Lines: 30 | Source: 5_word2vec_maxc01.py


Example 13: test_SegmentSum

 def test_SegmentSum(self):
     t = tf.segment_sum(self.random(4, 2, 3), np.array([0, 1, 1, 2]))
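     # Assuming self.random(4, 2, 3) returns a random (4, 2, 3) array, rows 1 and 2 are summed and the result has shape (3, 2, 3)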
     self.check(t)
Developer: kestrelm | Project: tfdeploy | Lines: 3 | Source: ops.py


Example 14: range

  
  # Model.
  # Look up embeddings for inputs.
  embed = tf.nn.embedding_lookup(embeddings, train_dataset)

  # seq_ids only needs to be generated once so do this as a numpy array rather than a tensor.
  seq_ids = np.zeros(batch_size, dtype=np.int32)
  cur_id = -1
  for i in range(batch_size):
    if i % context_window == 0:
      cur_id = cur_id + 1
    seq_ids[i] = cur_id
  print (seq_ids)
  
  # use segment_sum to add together the related words and reduce the output to be num_labels in size.
  final_embed = tf.segment_sum(embed, seq_ids)
  
  # Compute the softmax loss, using a sample of the negative labels each time.
  loss = tf.reduce_mean(
    tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, final_embed,
                               train_labels, num_sampled, vocabulary_size))

  # Optimizer.
  optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
  
  # Compute the similarity between minibatch examples and all embeddings.
  # We use the cosine distance:
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
    normalized_embeddings, valid_dataset)
Developer: qpham01 | Project: GitHub | Lines: 30 | Source: word2vect_cbow.py


Example 15: testSegmentIdsShape

 def testSegmentIdsShape(self):
   shape = [4, 4]
   tf_x, _ = self._input(shape)
   indices = tf.constant([0, 1, 2, 2], shape=[2, 2])
   with self.assertRaises(ValueError):
     tf.segment_sum(data=tf_x, segment_ids=indices)
Developer: adeelzaman | Project: tensorflow | Lines: 6 | Source: segment_reduction_ops_test.py


Example 16: print

import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

import tensorflow as tf


sess = tf.InteractiveSession() 
seg_ids = tf.constant([0,1,1,2,2]) # Group indexes : 0|1,2|3,4 


tens1 = tf.constant([[2, 5, 3, -5], 
                 [0, 3,-2,  5], 
                 [4, 3, 5,  3], 
                 [6, 1, 4,  0], 
                 [6, 1, 4,  0]])  # A sample constant matrix

print('\nseg_ids->', seg_ids.eval())
print('tens1->', tens1.eval())

print("\ntf.segment_sum(tens1, seg_ids).eval() ")   # Sum segmen
print(tf.segment_sum(tens1, seg_ids).eval() )   # Sum segmen

print("\ntf.segment_prod(tens1, seg_ids).eval() ") # Product segmen
print(tf.segment_prod(tens1, seg_ids).eval() ) # Product segmen

print(tf.segment_min(tens1, seg_ids).eval() ) # minimun value goes to
print(tf.segment_max(tens1, seg_ids).eval() ) # maximum value goes to
print(tf.segment_mean(tens1, seg_ids).eval() ) # mean value goes to group 
Developer: greatabel | Project: MachineLearning | Lines: 28 | Source: i4tensor+segmentation.py


Example 17: print

#!/usr/bin/env python

import tensorflow as tf

data = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
result = tf.segment_sum(data, tf.constant([0, 0, 1]))

with tf.Session() as sess:
    print(sess.run(result))
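    # Expected output: [[0 0 0 0], [5 6 7 8]]: the first two rows cancel out in segment 0, and row 2 alone forms segment 1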
Developer: tobegit3hub | Project: tensorflow_examples | Lines: 9 | Source: use_segmentation.py


Example 18:

# Segmentation Examples
import tensorflow as tf
sess = tf.InteractiveSession()
seg_ids = tf.constant([0,1,1,2,2]); # Group indexes : 0|1,2|3,4

tens1 = tf.constant([[2, 5, 3, -5],  
                    [0, 3,-2,  5], 
                    [4, 3, 5,  3], 
                    [6, 1, 4,  0],
                    [6, 1, 4,  0]])  # A sample constant matrix

tf.segment_sum(tens1, seg_ids).eval()   # Sum segmentation
tf.segment_prod(tens1, seg_ids).eval() # Product segmentation
tf.segment_min(tens1, seg_ids).eval() # minimum value goes to group
tf.segment_max(tens1, seg_ids).eval() # maximum value goes to group
tf.segment_mean(tens1, seg_ids).eval() # mean value goes to group
Developer: CeasarSS | Project: books | Lines: 16 | Source: segmentation.py


Example 19: range

tf_conv1 = tf.nn.conv2d(tf_input,ww,strides=[1,1,1,1],padding='VALID')
tf_deconv1 = tf.nn.conv2d_transpose(value=tf_conv1,filter=ww,output_shape=[batch_size,ny,nx,nl],strides=[1,1,1,1],padding='VALID')
tf_error = tf.reduce_mean(tf.square(tf_deconv1 - tf_input))
## tf_error = tf.nn.l2_loss(tf_deconv1 - tf_input)

qqq = tf.square(tf_conv1)
ooo = tf.reduce_sum(qqq,3,keep_dims=True)
rrr = qqq / (tf.tile(ooo,[1,1,1,nf_risa])+1e-16)
tf_local_entropy1 = tf.reduce_sum(rrr * (-tf.log(rrr+1e-16)),3)
tf_entropy = tf.reduce_mean(tf_local_entropy1)
                
tf_simple1 = tf.square(tf_conv1)
seg24 = tf.constant([0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11])
tf_t_simple1 = tf.transpose(tf_simple1)
tf_sparce1 = tf.reduce_mean(tf.sqrt(tf.segment_sum(tf_t_simple1,seg24)))

# tf_score = tf_error 
# tf_score = tf_error * lambda_s + tf_sparce1
tf_score = lambda_s * tf_error + tf_entropy

optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
train = optimizer.minimize(tf_score)

sess.run(tf.initialize_all_variables())

iii_bin = np.arange(batch_size,nn,batch_size)
iii_nn = np.arange(nn)
iii_batches = np.split(iii_nn,iii_bin)

for tt in range(tmax):
Developer: naono-git | Project: cnncancer | Lines: 30 | Source: risa.py


Example 20: forward

 def forward(self, atom_features, atom_to_pair):
   out = tf.expand_dims(tf.gather(atom_features, atom_to_pair[:, 1]), 2)
   out = tf.squeeze(tf.matmul(self.A, out), axis=2)
   out = tf.segment_sum(out, atom_to_pair[:, 0])
   return out
Developer: AhlamMD | Project: deepchem | Lines: 5 | Source: graph_layers.py



Note: the tensorflow.segment_sum examples above were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not republish without permission.

