Python tensorflow.count_nonzero Function Code Examples


This article collects typical usage examples of the Python function tensorflow.count_nonzero. If you have been wondering what count_nonzero does, how to call it, or what its usage looks like in practice, the curated examples below should help.



The following presents 20 code examples of the count_nonzero function, drawn from real open-source projects and sorted by popularity by default.
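Before the project examples, here is a minimal sketch of the API itself (TF 1.x; in TF 2.x the same op is exposed as tf.math.count_nonzero). Values are illustrative:

import tensorflow as tf

x = tf.constant([[0, 1, 2],
                 [0, 0, 3]])

total   = tf.count_nonzero(x)                          # scalar 3 (dtype tf.int64 by default)
per_row = tf.count_nonzero(x, axis=1)                  # [2, 1]
per_col = tf.count_nonzero(x, axis=0, dtype=tf.int32)  # [0, 1, 2]

with tf.Session() as sess:
    print(sess.run([total, per_row, per_col]))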

Example 1: polyphonic_rate

def polyphonic_rate(tensor, threshold=2):
    """Return the ratio of the number of time steps where the number of pitches
    being played is larger than `threshold` to the total number of time steps"""
    if tensor.get_shape().ndims != 5:
        raise ValueError("Input tensor must have 5 dimensions.")
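    # The inner count_nonzero (axis 3) gives the number of active pitches per
    # time step; the outer count (axis 2) tallies the time steps whose pitch
    # count exceeds `threshold`.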
    n_poly = tf.count_nonzero((tf.count_nonzero(tensor, 3) > threshold), 2)
    return tf.reduce_mean((n_poly / tensor.get_shape()[2]), [0, 1])
Developer: lucas19700000 | Project: musegan | Lines: 7 | Source: metrics.py


Example 2: init_training_graph

    def init_training_graph(self):

        with tf.name_scope('Evaluation'):
            logits = self.last
            prob_b = tf.squeeze(logits, squeeze_dims=[1,2])
            self.predictions = tf.argmax(prob_b, axis=1)
            
            with tf.name_scope('Loss'):
                
                self.loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prob_b,
                                                                          labels=tf.cast(self.train_labels_node, tf.int32),
                                                                          name="entropy")))
                tf.summary.scalar("entropy", self.loss)

            with tf.name_scope('Accuracy'):

                LabelInt = tf.cast(self.train_labels_node, tf.int64)
                CorrectPrediction = tf.equal(self.predictions, LabelInt)
                self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
                tf.summary.scalar("accuracy", self.accuracy)

            with tf.name_scope('Prediction'):

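                # For binary {0,1} labels and predictions, each product below is
                # nonzero only when both factors are nonzero, so count_nonzero
                # isolates one confusion-matrix cell at a time.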
                self.TP = tf.count_nonzero(self.predictions * LabelInt)
                self.TN = tf.count_nonzero((self.predictions - 1) * (LabelInt - 1))
                self.FP = tf.count_nonzero(self.predictions * (LabelInt - 1))
                self.FN = tf.count_nonzero((self.predictions - 1) * LabelInt)

            with tf.name_scope('Precision'):

                self.precision = tf.divide(self.TP, tf.add(self.TP, self.FP))
                tf.summary.scalar('Precision', self.precision)

            with tf.name_scope('Recall'):

                self.recall = tf.divide(self.TP, tf.add(self.TP, self.FN))
                tf.summary.scalar('Recall', self.recall)

            with tf.name_scope('F1'):

                num = tf.multiply(self.precision, self.recall)
                dem = tf.add(self.precision, self.recall)
                self.F1 = tf.scalar_mul(2, tf.divide(num, dem))
                tf.summary.scalar('F1', self.F1)

            with tf.name_scope('MeanAccuracy'):
                
                Nprecision = tf.divide(self.TN, tf.add(self.TN, self.FN))
                self.MeanAcc = tf.divide(tf.add(self.precision, Nprecision) ,2)

            #self.batch = tf.Variable(0, name = "batch_iterator")

            self.train_prediction = tf.nn.softmax(logits)

            self.test_prediction = tf.nn.softmax(logits)

        tf.global_variables_initializer().run()

        print('Computational graph initialised')
Developer: PeterJackNaylor | Project: PhD_Fabien | Lines: 59 | Source: vgg16.py


Example 3: rpn_losses

def rpn_losses(anchor_labels, anchor_boxes, label_logits, box_logits):
    """
    Args:
        anchor_labels: fHxfWxNA
        anchor_boxes: fHxfWxNAx4, encoded
        label_logits:  fHxfWxNA
        box_logits: fHxfWxNAx4

    Returns:
        label_loss, box_loss
    """
    with tf.device('/cpu:0'):
        valid_mask = tf.stop_gradient(tf.not_equal(anchor_labels, -1))
        pos_mask = tf.stop_gradient(tf.equal(anchor_labels, 1))
        nr_valid = tf.stop_gradient(tf.count_nonzero(valid_mask, dtype=tf.int32), name='num_valid_anchor')
        nr_pos = tf.count_nonzero(pos_mask, dtype=tf.int32, name='num_pos_anchor')

        valid_anchor_labels = tf.boolean_mask(anchor_labels, valid_mask)
    valid_label_logits = tf.boolean_mask(label_logits, valid_mask)

    with tf.name_scope('label_metrics'):
        valid_label_prob = tf.nn.sigmoid(valid_label_logits)
        summaries = []
        with tf.device('/cpu:0'):
            for th in [0.5, 0.2, 0.1]:
                valid_prediction = tf.cast(valid_label_prob > th, tf.int32)
                nr_pos_prediction = tf.reduce_sum(valid_prediction, name='num_pos_prediction')
                pos_prediction_corr = tf.count_nonzero(
                    tf.logical_and(
                        valid_label_prob > th,
                        tf.equal(valid_prediction, valid_anchor_labels)),
                    dtype=tf.int32)
                summaries.append(tf.truediv(
                    pos_prediction_corr,
                    nr_pos, name='recall_th{}'.format(th)))
                precision = tf.to_float(tf.truediv(pos_prediction_corr, nr_pos_prediction))
                precision = tf.where(tf.equal(nr_pos_prediction, 0), 0.0, precision, name='precision_th{}'.format(th))
                summaries.append(precision)
        add_moving_summary(*summaries)

    label_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.to_float(valid_anchor_labels), logits=valid_label_logits)
    label_loss = tf.reduce_mean(label_loss, name='label_loss')

    pos_anchor_boxes = tf.boolean_mask(anchor_boxes, pos_mask)
    pos_box_logits = tf.boolean_mask(box_logits, pos_mask)
    delta = 1.0 / 9
    box_loss = tf.losses.huber_loss(
        pos_anchor_boxes, pos_box_logits, delta=delta,
        reduction=tf.losses.Reduction.SUM) / delta
    box_loss = tf.div(
        box_loss,
        tf.cast(nr_valid, tf.float32), name='box_loss')

    add_moving_summary(label_loss, box_loss, nr_valid, nr_pos)
    return label_loss, box_loss
Developer: caiwenpu | Project: tensorpack | Lines: 56 | Source: model.py


Example 4: build_graph

  def build_graph(self):
    file_pattern = os.path.join(self.params['data_dir'],
                                self.params['file_pattern'])
    self.batched_dataset = _read_and_batch_from_files(
      file_pattern=file_pattern,
      batch_size=self.params['batch_size'],
      max_length=self.params['max_length'],
      num_cpu_cores=self.params.get('num_cpu_cores', 2),
      shuffle=self.params['shuffle'],
      repeat=self.params['repeat'],
      num_workers=self._num_workers,
      worker_id=self._worker_id)

    self._iterator = self.batched_dataset.make_initializable_iterator()
    x, y = self.iterator.get_next()

    if self.params.get('m_padding', False):
      # MAGIC PADDING
      x = tf.cond(tf.equal(tf.shape(x)[1] % 8, 0),
                  true_fn = lambda: x,
                  false_fn = lambda: tf.pad(x,
                                            paddings=[[0, 0],
                                                      [0, 8 - tf.shape(x)[1] % 8]]))

      y = tf.cond(tf.equal(tf.shape(y)[1] % 8, 0),
                  true_fn = lambda: y,
                  false_fn = lambda: tf.pad(y,
                                            paddings=[[0, 0],
                                                      [0, 8 - tf.shape(y)[1] % 8]]))

      x = tf.cond(tf.equal(tf.shape(x)[0] % 8, 0),
                  true_fn = lambda: x,
                  false_fn = lambda: tf.pad(x,
                                            paddings=[[0, 8 - tf.shape(x)[0] % 8],
                                                      [0, 0]]))

      y = tf.cond(tf.equal(tf.shape(y)[0] % 8, 0),
                  true_fn=lambda: y,
                  false_fn=lambda: tf.pad(y,
                                          paddings=[[0, 8 - tf.shape(y)[0] % 8],
                                                    [0, 0]]))
      # ENDOF MAGIC PADDING

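    # Sequence lengths: count the non-zero tokens per row (assumes id 0 is the pad token).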
    len_x = tf.count_nonzero(x, axis=1, dtype=tf.int32)
    len_y = tf.count_nonzero(y, axis=1, dtype=tf.int32)
    if self.params['mode'] == 'train' or self.params['mode'] == 'eval':
      self._input_tensors['source_tensors'] = [x, len_x]
      self._input_tensors['target_tensors'] = [y, len_y]
    else:
      self._input_tensors['source_tensors'] = [x, len_x]
Developer: fotwo | Project: OpenSeq2Seq | Lines: 50 | Source: text2text.py


Example 5: hard_negative_mining

      def hard_negative_mining():
        bboxes_per_batch = tf.unstack(bboxes)
        classification_loss_per_batch = tf.unstack(classification_loss)
        num_positives_per_batch = tf.unstack(tf.reduce_sum(positives, axis=-1))
        neg_class_loss_per_batch = tf.unstack(neg_class_loss_all)

        neg_class_losses = []
        total_negatives = []

        for bboxes_per_image, classification_loss_per_image, num_positives_per_image, neg_class_loss_per_image in \
            zip(bboxes_per_batch, classification_loss_per_batch, num_positives_per_batch, neg_class_loss_per_batch):
          min_negatives_keep = tf.maximum(self.neg_pos_ratio * num_positives_per_image, 3)
          num_negatives_keep = tf.minimum(min_negatives_keep,
                                          tf.count_nonzero(neg_class_loss_per_image, dtype=tf.float32))

          indices = tf.image.non_max_suppression(bboxes_per_image, classification_loss_per_image,
                                                 tf.to_int32(num_negatives_keep), iou_threshold=0.99)
          num_negatives = tf.size(indices)
          total_negatives.append(num_negatives)
          expanded_indexes = tf.expand_dims(indices, axis=1)  # shape: (num_negatives, 1)
          negatives_keep = tf.scatter_nd(expanded_indexes, updates=tf.ones_like(indices, dtype=tf.int32),
                                         shape=tf.shape(classification_loss_per_image))  # shape: (num_priors,)
          negatives_keep = tf.to_float(tf.reshape(negatives_keep, [num_priors]))  # shape: (num_priors,)
          neg_class_losses.append(tf.reduce_sum(classification_loss_per_image * negatives_keep, axis=-1))  # shape: (1,)

        return tf.stack(neg_class_losses), tf.reduce_sum(tf.stack(total_negatives))
Developer: undeadinu | Project: training_toolbox_tensorflow | Lines: 26 | Source: loss.py


Example 6: testSparseConstraint

 def testSparseConstraint(self):
   expected = [float(round(N * WEIGHT_SPARSITY))] * BATCH_SIZE
   constraint = htm.constraints.Sparse(sparsity=WEIGHT_SPARSITY)
   with self.test_session(config=CONFIG):
     actual = constraint(tf.ones([BATCH_SIZE, N]))
     tf.global_variables_initializer().run()
     self.assertAllEqual(tf.count_nonzero(actual, axis=1).eval(), expected)
Developer: rhyolight | Project: nupic.research | Lines: 7 | Source: sparse_mnist_net_test.py


Example 7: _get_testing

def _get_testing(rnn_logits,sequence_length,label,label_length):
    """Create ops for testing (all scalars): 
       loss: CTC loss function value, 
       label_error:  Batch-normalized edit distance on beam search max
       sequence_error: Batch-normalized sequence error rate
    """
    with tf.name_scope("train"):
        loss = model.ctc_loss_layer(rnn_logits,label,sequence_length) 
    with tf.name_scope("test"):
        predictions,_ = tf.nn.ctc_beam_search_decoder(rnn_logits, 
                                                   sequence_length,
                                                   beam_width=128,
                                                   top_paths=1,
                                                   merge_repeated=True)
        hypothesis = tf.cast(predictions[0], tf.int32) # for edit_distance
        label_errors = tf.edit_distance(hypothesis, label, normalize=False)
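        # A sequence counts as an error iff its edit distance is nonzero.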
        sequence_errors = tf.count_nonzero(label_errors,axis=0)
        total_label_error = tf.reduce_sum( label_errors )
        total_labels = tf.reduce_sum( label_length )
        label_error = tf.truediv( total_label_error, 
                                  tf.cast(total_labels, tf.float32 ),
                                  name='label_error')
        sequence_error = tf.truediv( tf.cast( sequence_errors, tf.int32 ),
                                     tf.shape(label_length)[0],
                                     name='sequence_error')
        tf.summary.scalar( 'loss', loss )
        tf.summary.scalar( 'label_error', label_error )
        tf.summary.scalar( 'sequence_error', sequence_error )

    return loss, label_error, sequence_error
Developer: trigrass2 | Project: cnn_lstm_ctc_ocr | Lines: 30 | Source: test.py


Example 8: _decode_and_resize

    def _decode_and_resize(image_tensor):
      """Decodes jpeg string, resizes it and returns a uint8 tensor."""

      # These constants are set by Inception v3's expectations.
      height = 299
      width = 299
      channels = 3

      image_tensor = tf.where(tf.equal(image_tensor, ''), IMAGE_DEFAULT_STRING, image_tensor)

      # Fork by whether image_tensor value is a file path, or a base64 encoded string.
      slash_positions = tf.equal(tf.string_split([image_tensor], delimiter="").values, '/')
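      # A nonzero count of '/' characters marks the string as a file path.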
      is_file_path = tf.cast(tf.count_nonzero(slash_positions), tf.bool)

      # The following two functions are required for tf.cond. Note that we can not replace them
      # with lambda. According to TF docs, if using inline lambda, both branches of condition
      # will be executed. The workaround is to use a function call.
      def _read_file():
        return tf.read_file(image_tensor)

      def _decode_base64():
        return tf.decode_base64(image_tensor)

      image = tf.cond(is_file_path, lambda: _read_file(), lambda: _decode_base64())
      image = tf.image.decode_jpeg(image, channels=channels)
      image = tf.expand_dims(image, 0)
      image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
      image = tf.squeeze(image, squeeze_dims=[0])
      image = tf.cast(image, dtype=tf.uint8)
      return image
Developer: googledatalab | Project: pydatalab | Lines: 30 | Source: feature_transforms.py


Example 9: buildGraph

    def buildGraph(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            # train_input, [batch_size * embed_size]; a batch holds multiple examples
            self.train_input = tf.placeholder(tf.float32,shape=[self.batch_size,self.embed_size],name='train_input')
            self.train_label = tf.placeholder(tf.int32,shape=[self.batch_size],name='train_label')
            label_float = tf.cast(self.train_label,tf.float32)

            # label_matrix = tf.Variable(tf.diag(tf.ones(self.label_size)),trainable=False)
            label_matrix = tf.diag(tf.ones(self.label_size))
            embed_label = tf.nn.embedding_lookup(label_matrix,self.train_label)

            hidden_unit = 50
            self.weight = tf.Variable(tf.truncated_normal(shape=[hidden_unit,self.embed_size],stddev=1.0/math.sqrt(self.embed_size)))
            self.biase = tf.Variable(tf.zeros([hidden_unit]))

            y1 = tf.matmul(self.train_input,self.weight,transpose_b=True) + self.biase
            g1 = tf.nn.sigmoid(y1) # batch_size * hidden_unit

            weight2 = tf.Variable(tf.truncated_normal(shape=[self.label_size,hidden_unit],stddev=1.0/math.sqrt(hidden_unit)))
            biase2 = tf.Variable(tf.zeros([self.label_size]))
            y2 = tf.matmul(g1,weight2,transpose_b=True) + biase2
            g2 = tf.nn.sigmoid(y2)

            self.predict = tf.cast(tf.argmax(g2,axis=1),tf.float32)
            self.error_num = tf.count_nonzero(label_float-self.predict)

            self.loss = tf.reduce_mean(-tf.reduce_sum(embed_label*tf.log(g2+0.0001)+(1-embed_label)*tf.log(1+0.0001-g2),axis=1))

            # self.train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(self.loss)
            self.train_op = tf.train.AdagradOptimizer(learning_rate=1).minimize(self.loss)
            self.init_op = tf.global_variables_initializer()
Developer: multiangle | Project: PyNLP | Lines: 32 | Source: simple_add_classifier.py


Example 10: calculate_reshape

def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
  batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
  if batch_shape_static.is_fully_defined():
    return np.int32(batch_shape_static.as_list()), batch_shape_static, []
  with tf.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
    original_size = tf.reduce_prod(original_shape)
    implicit_dim = tf.equal(new_shape, -1)
    size_implicit_dim = (
        original_size // tf.maximum(1, -tf.reduce_prod(new_shape)))
    new_ndims = tf.shape(new_shape)
    expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
        implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [
        tf.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        tf.assert_rank(new_shape, 1, message="New shape must be a vector."),
        tf.assert_less_equal(
            tf.count_nonzero(implicit_dim, dtype=tf.int32),
            1,
            message="At most one dimension can be unknown."),
        tf.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        tf.assert_equal(
            tf.reduce_prod(expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
Developer: lewisKit | Project: probability | Lines: 29 | Source: batch_reshape.py
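To make the implicit-dimension arithmetic above concrete, a small worked example (values illustrative):

# original_shape = [6, 4]           -> original_size = 24
# new_shape      = [-1, 8]          -> tf.reduce_prod(new_shape) = -8
# size_implicit_dim = 24 // 8 = 3   -> expanded_new_shape = [3, 8]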


Example 11: testDegenerate

 def testDegenerate(self):
   for use_gpu in False, True:
     with self.test_session(use_gpu=use_gpu):
       for dtype in (tf.bool,):
         # A large number is needed to get Eigen to die
         x = tf.zeros((0, 9938), dtype=dtype)
         y = tf.count_nonzero(x, [0])
         self.assertAllEqual(y.eval(), np.zeros(9938))
Developer: 821760408-sp | Project: tensorflow | Lines: 8 | Source: reduction_ops_test.py


Example 12: buildGraph

    def buildGraph(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.inputs = tf.placeholder(tf.float32,[self.batch_size,self.max_depth,self.embed_size]) # inputs: [batch_size, max_depth, embed_size]
            self.seq_len = tf.placeholder(tf.int32)
            self.label = tf.placeholder(tf.int32,[1])
            label_float = tf.cast(self.label,tf.float32)
            label_matrix = tf.diag(tf.ones(self.label_size))
            embed_label = tf.nn.embedding_lookup(label_matrix,self.label)
            print('pin2.1')
            # input_list = list(tf.split(0,self.max_depth,expand_inputs))
            input_list = tf.unpack(self.inputs,axis=1)    # max_depth tensors, each [1, embed_size]
            print('pin2.2')
            # BasicRNNCell: [num_units, input_size, ...]
            # self.rnn_cell = tf.nn.rnn_cell.BasicRNNCell(self.hidden_size,self.embed_size)
            # self.rnn_cell = tf.nn.rnn_cell.LSTMCell(self.hidden_size,self.embed_size,state_is_tuple=True)
            self.rnn_cell = LTMCell(self.hidden_size,self.embed_size,state_is_tuple=True)
            self.rnn_cell = tf.nn.rnn_cell.DropoutWrapper(self.rnn_cell,output_keep_prob=0.9)
            print('pin2.3')
            init_stat = self.rnn_cell.zero_state(1,tf.float32)
            output_embedding,states = tf.nn.rnn(self.rnn_cell,input_list,
                                                initial_state=init_stat,
                                                sequence_length=self.seq_len)

            # state = init_stat
            # states = []
            # with tf.variable_scope('RNN'):
            #     for time_step in range(max_depth):
            #         if tf.equal(time_step,self.seq_len):
            #             break
            #         if time_step>0:
            #             tf.get_variable_scope().reuse_variables()
            #         m,state = self.rnn_cell(input_list[time_step,:],state)
            #         states.append(state)
            # final_output = states[-1][0]

            print('pin2.4')
            final_output = states[-1] # final_output : [1,hidden_size]
            print(final_output.get_shape())

            weight = tf.Variable(tf.truncated_normal([self.label_size,self.hidden_size],
                                                     stddev=1.0/math.sqrt(self.hidden_size)))
            biase = tf.Variable(tf.zeros([self.label_size]))

            tmp_y = tf.matmul(final_output,weight,transpose_b=True) + biase
            tmp_g = tf.sigmoid(tmp_y)

            self.predict = tf.cast(tf.argmax(tmp_g,axis=1),tf.float32)
            self.error_num = tf.count_nonzero(label_float-self.predict)

            tiny_v = 0.0001
            self.loss =  -tf.reduce_mean(embed_label*tf.log(tmp_g+tiny_v) + (1-embed_label)*tf.log(1+tiny_v-tmp_g))

            self.train_op = tf.train.AdagradOptimizer(learning_rate=1).minimize(self.loss)
            self.init_op = tf.global_variables_initializer()
Developer: multiangle | Project: PyNLP | Lines: 55 | Source: rnn_classifier.py


Example 13: contrastive_loss

def contrastive_loss(left, right, y, margin, extra=False, scope="constrastive_loss"):
    r"""Loss for Siamese networks as described in the paper:
    `Learning a Similarity Metric Discriminatively, with Application to Face
    Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al.

    .. math::
        \frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2

    Args:
        left (tf.Tensor): left feature vectors of shape [Batch, N].
        right (tf.Tensor): right feature vectors of shape [Batch, N].
        y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar.
        margin (float): horizon for negative examples (y==0).
        extra (bool): also return distances for pos and neg.

    Returns:
        tf.Tensor: constrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist)
    """
    with tf.name_scope(scope):
        y = tf.cast(y, tf.float32)

        delta = tf.reduce_sum(tf.square(left - right), 1)
        delta_sqrt = tf.sqrt(delta + 1e-10)

        match_loss = delta
        missmatch_loss = tf.square(tf.nn.relu(margin - delta_sqrt))

        loss = tf.reduce_mean(0.5 * (y * match_loss + (1 - y) * missmatch_loss))

        if extra:
            num_pos = tf.count_nonzero(y)
            num_neg = tf.count_nonzero(1 - y)
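            # tf.where guards the division when a batch has no positive (or no
            # negative) pairs.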
            pos_dist = tf.where(tf.equal(num_pos, 0), 0.,
                                tf.reduce_sum(y * delta_sqrt) / tf.cast(num_pos, tf.float32),
                                name="pos-dist")
            neg_dist = tf.where(tf.equal(num_neg, 0), 0.,
                                tf.reduce_sum((1 - y) * delta_sqrt) / tf.cast(num_neg, tf.float32),
                                name="neg-dist")
            return loss, pos_dist, neg_dist
        else:
            return loss
Developer: tobyma | Project: tensorpack | Lines: 41 | Source: mnist-embeddings.py


Example 14: _grad_sparsity

 def _grad_sparsity(self):
   """Gradient sparsity."""
   # If the sparse minibatch gradient has 10 percent of its entries
   # non-zero, its sparsity is 0.1.
   # The norm of dense gradient averaged from full dataset
   # are roughly estimated norm of minibatch
   # sparse gradient norm * sqrt(sparsity)
   # An extension maybe only correct the sparse blob.
   non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
   all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
   self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
   self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
   avg_op = self._moving_averager.apply([self._sparsity,])
   with tf.control_dependencies([avg_op]):
     self._sparsity_avg = self._moving_averager.average(self._sparsity)
   return avg_op
Developer: qixiuai | Project: tensor2tensor | Lines: 16 | Source: yellowfin.py


Example 15: __init__

    def __init__(self, input_dim, lab_dim, learning_rate):
        self.input_feature = tf.placeholder(tf.float32, [None, input_dim])
        self.input_labels = tf.placeholder(tf.float32, [None, lab_dim])

        self.w = tf.Variable(tf.random_normal([input_dim, lab_dim]), name="weight")
        self.b = tf.Variable(tf.zeros([lab_dim]), name="bias")

        self.output = tf.matmul(self.input_feature, self.w) + self.b

        self.a1 = tf.argmax(tf.nn.softmax(self.output), axis=1)
        self.b1 = tf.argmax(self.input_labels, axis=1)

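        # Misclassification count: a1 - b1 is nonzero exactly where prediction
        # and label disagree.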
        self.err = tf.count_nonzero(self.a1 - self.b1)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_labels, logits=self.output)
        self.loss = tf.reduce_mean(cross_entropy)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        self.train = optimizer.minimize(self.loss)
Developer: bluecryolite | Project: LearningCoding | Lines: 17 | Source: tf_act_05.py


Example 16: MaskedCrossEntropyLoss

def MaskedCrossEntropyLoss(outputs, targets, lengths=None, mask=None, max_len=None):
	if lengths is None and mask is None:
		raise RuntimeError('Please provide either lengths or mask')

	#[batch_size, time_length]
	if mask is None:
		mask = sequence_mask(lengths, max_len, False)

	#One hot encode targets (outputs.shape[-1] = hparams.quantize_channels)
	targets_ = tf.one_hot(targets, depth=tf.shape(outputs)[-1])
	
	with tf.control_dependencies([tf.assert_equal(tf.shape(outputs), tf.shape(targets_))]):
		losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=outputs, labels=targets_)

	with tf.control_dependencies([tf.assert_equal(tf.shape(mask), tf.shape(losses))]):
		masked_loss = losses * mask

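	# Average over contributing positions only: count_nonzero gives the number
	# of nonzero masked losses rather than the padded length.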
	return tf.reduce_sum(masked_loss) / tf.count_nonzero(masked_loss, dtype=tf.float32)
Developer: duvtedudug | Project: Tacotron-2 | Lines: 18 | Source: modules.py


Example 17: _compare

 def _compare(self,
              x,
              reduction_axes,
              keep_dims,
              use_gpu=False,
              feed_dict=None):
   np_ans = (x != 0).astype(np.int32)
   if reduction_axes is None:
     np_ans = np.sum(np_ans, keepdims=keep_dims)
   else:
     reduction_axes = np.array(reduction_axes).astype(np.int32)
     for ra in reduction_axes.ravel()[::-1]:
       np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
   with self.test_session(use_gpu=use_gpu) as sess:
     tf_ans = tf.count_nonzero(x, reduction_axes, keep_dims)
     out = sess.run(tf_ans, feed_dict)
   self.assertAllClose(np_ans, out)
   self.assertShapeEqual(np_ans, tf_ans)
Developer: 821760408-sp | Project: tensorflow | Lines: 18 | Source: reduction_ops_test.py


Example 18: proposal_metrics

def proposal_metrics(iou):
    """
    Add summaries for RPN proposals.

    Args:
        iou: nxm, #proposal x #gt
    """
    # find best roi for each gt, for summary only
    best_iou = tf.reduce_max(iou, axis=0)
    mean_best_iou = tf.reduce_mean(best_iou, name='best_iou_per_gt')
    summaries = [mean_best_iou]
    with tf.device('/cpu:0'):
        for th in [0.3, 0.5]:
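            # Fraction of gt boxes whose best-matching proposal reaches this IoU.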
            recall = tf.truediv(
                tf.count_nonzero(best_iou >= th),
                tf.size(best_iou, out_type=tf.int64),
                name='recall_iou{}'.format(th))
            summaries.append(recall)
    add_moving_summary(*summaries)
Developer: tobyma | Project: tensorpack | Lines: 19 | Source: model.py


Example 19: count_nonzero_wrapper

def count_nonzero_wrapper(X, optype):
    """Wrapper for handling sparse and dense versions of `tf.count_nonzero`.

    Parameters
    ----------
    X : tf.Tensor (N, K)
    optype : str, {'dense', 'sparse'}

    Returns
    -------
    tf.Tensor (1,K)
    """
    with tf.name_scope('count_nonzero_wrapper') as scope:
        if optype == 'dense':
            return tf.count_nonzero(X, axis=0, keep_dims=True)
        elif optype == 'sparse':
            indicator_X = tf.SparseTensor(X.indices, tf.ones_like(X.values), X.dense_shape)
            return tf.sparse_reduce_sum(indicator_X, axis=0, keep_dims=True)
        else:
            raise NameError('Unknown input type in count_nonzero_wrapper')
Developer: geffy | Project: tffm | Lines: 20 | Source: utils.py
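A hedged usage sketch of the wrapper above (values illustrative; assumes count_nonzero_wrapper is in scope and TF 1.x):

import tensorflow as tf

X_dense = tf.constant([[1., 0., 2.],
                       [0., 0., 3.]])
nnz_dense = count_nonzero_wrapper(X_dense, 'dense')    # per-column counts: [[1, 0, 2]]

X_sparse = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1., 3.], dense_shape=[2, 3])
nnz_sparse = count_nonzero_wrapper(X_sparse, 'sparse') # [[1., 0., 1.]]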


Example 20: step

 def step(index, scores_sum, scores_num):
   """Single step."""
   index %= hparams.epoch_length  # Only needed in eval runs.
   # Note - the only way to ensure making a copy of tensor is to run simple
   # operation. We are waiting for tf.copy:
   # https://github.com/tensorflow/tensorflow/issues/11186
   obs_copy = batch_env.observ + 0
   actor_critic = policy_factory(tf.expand_dims(obs_copy, 0))
   policy = actor_critic.policy
   action = tf.cond(eval_phase,
                    policy.mode,
                    policy.sample)
   postprocessed_action = actor_critic.action_postprocessing(action)
   simulate_output = batch_env.simulate(postprocessed_action[0, ...])
   pdf = policy.prob(action)[0]
   with tf.control_dependencies(simulate_output):
     reward, done = simulate_output
     done = tf.reshape(done, (len(batch_env),))
     to_save = [obs_copy, reward, done, action[0, ...], pdf,
                actor_critic.value[0]]
     save_ops = [tf.scatter_update(memory_slot, index, value)
                 for memory_slot, value in zip(memory, to_save)]
     cumulate_rewards_op = cumulative_rewards.assign_add(reward)
     agent_indices_to_reset = tf.where(done)[:, 0]
   with tf.control_dependencies([cumulate_rewards_op]):
     scores_sum_delta = tf.reduce_sum(
         tf.gather(cumulative_rewards, agent_indices_to_reset))
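      # count_nonzero over the boolean `done` vector = episodes finished this step.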
     scores_num_delta = tf.count_nonzero(done, dtype=tf.int32)
   with tf.control_dependencies(save_ops + [scores_sum_delta,
                                            scores_num_delta]):
     reset_env_op = batch_env.reset(agent_indices_to_reset)
     reset_cumulative_rewards_op = tf.scatter_update(
         cumulative_rewards, agent_indices_to_reset,
         tf.zeros(tf.shape(agent_indices_to_reset)))
   with tf.control_dependencies([reset_env_op,
                                 reset_cumulative_rewards_op]):
     return [index + 1, scores_sum + scores_sum_delta,
             scores_num + scores_num_delta]
Developer: chqiwang | Project: tensor2tensor | Lines: 38 | Source: collect.py



Note: the tensorflow.count_nonzero examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and similar source-code and documentation platforms. The snippets were selected from open-source projects contributed by their developers; copyright remains with the original authors, and any use or redistribution is subject to each project's License. Do not repost without permission.

