Python tensorflow.logical_and Function Code Examples


This article collects typical usage examples of the tensorflow.logical_and function in Python. If you are wondering what logical_and does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following shows 20 code examples of the logical_and function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
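
Before working through the examples, here is a minimal sketch of the function itself, assuming a TensorFlow 1.x graph-mode environment (tf.Session), which matches the style of the snippets below. tf.logical_and computes the element-wise AND of two boolean tensors; in practice it is usually combined with tf.equal, tf.cast, and tf.reduce_sum to count matching elements, as many of the examples do.

import tensorflow as tf

# Element-wise AND of two boolean tensors.
a = tf.constant([True, True, False, False])
b = tf.constant([True, False, True, False])
both = tf.logical_and(a, b)  # -> [True, False, False, False]

# A common pattern from the examples below: count true positives by
# AND-ing "label == 1" with "prediction == 1" and summing the result.
labels = tf.constant([1, 0, 1, 1])
preds = tf.constant([1, 0, 0, 1])
true_pos = tf.reduce_sum(
    tf.cast(tf.logical_and(tf.equal(labels, 1), tf.equal(preds, 1)), tf.int32))

with tf.Session() as sess:
    print(sess.run(both))      # [ True False False False]
    print(sess.run(true_pos))  # 2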

Example 1: getRpRnTpTnForTrain0OrVal1

    def getRpRnTpTnForTrain0OrVal1(self, y, training0OrValidation1):
        # The returned list has (numberOfClasses)x4 integers: >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives< for each class (incl background).
        # Order in the list is the natural order of the classes (ie class-0 RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
        # param y: y = T.itensor4('y'). Dimensions [batchSize, r, c, z]
        
        yPredToUse = self.y_pred_train if  training0OrValidation1 == 0 else self.y_pred_val
        
        returnedListWithNumberOfRpRnTpTnForEachClass = []
        
        for class_i in range(0, self._numberOfOutputClasses) :
            #Number of Real Positive, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
            tensorOneAtRealPos = tf.equal(y, class_i)
            tensorOneAtRealNeg = tf.logical_not(tensorOneAtRealPos)

            tensorOneAtPredictedPos = tf.equal(yPredToUse, class_i)
            tensorOneAtPredictedNeg = tf.logical_not(tensorOneAtPredictedPos)
            tensorOneAtTruePos = tf.logical_and(tensorOneAtRealPos,tensorOneAtPredictedPos)
            tensorOneAtTrueNeg = tf.logical_and(tensorOneAtRealNeg,tensorOneAtPredictedNeg)
                    
            returnedListWithNumberOfRpRnTpTnForEachClass.append( tf.reduce_sum( tf.cast(tensorOneAtRealPos, dtype="int32")) )
            returnedListWithNumberOfRpRnTpTnForEachClass.append( tf.reduce_sum( tf.cast(tensorOneAtRealNeg, dtype="int32")) )
            returnedListWithNumberOfRpRnTpTnForEachClass.append( tf.reduce_sum( tf.cast(tensorOneAtTruePos, dtype="int32")) )
            returnedListWithNumberOfRpRnTpTnForEachClass.append( tf.reduce_sum( tf.cast(tensorOneAtTrueNeg, dtype="int32")) )
            
        return returnedListWithNumberOfRpRnTpTnForEachClass
Developer: Kamnitsask, Project: deepmedic, Lines of code: 25, Source: layers.py


Example 2: m_body

        def m_body(i, ta_tp, ta_fp, gmatch, n_ignored_det):
            # Jaccard score with groundtruth bboxes.
            rbbox = bboxes[i, :]
#             rbbox = tf.Print(rbbox, [rbbox])
            jaccard = bboxes_jaccard(rbbox, gxs, gys)

            # Best fit, checking it's above threshold.
            idxmax = tf.cast(tf.argmax(jaccard, axis=0), dtype = tf.int32)
            
            jcdmax = jaccard[idxmax]
            match = jcdmax > matching_threshold
            existing_match = gmatch[idxmax]
            not_ignored = tf.logical_not(gignored[idxmax])

            n_ignored_det = n_ignored_det + tf.cast(gignored[idxmax], tf.int32)
            # TP: match & no previous match and FP: previous match | no match.
            # If ignored: no record, i.e FP=False and TP=False.
            tp = tf.logical_and(not_ignored, tf.logical_and(match, tf.logical_not(existing_match)))
            ta_tp = ta_tp.write(i, tp)
            
            fp = tf.logical_and(not_ignored, tf.logical_or(existing_match, tf.logical_not(match)))
            ta_fp = ta_fp.write(i, fp)
            
            # Update groundtruth match.
            mask = tf.logical_and(tf.equal(grange, idxmax), tf.logical_and(not_ignored, match))
            gmatch = tf.logical_or(gmatch, mask)
            return [i+1, ta_tp, ta_fp, gmatch,n_ignored_det]
Developer: cvtower, Project: seglink, Lines of code: 27, Source: bboxes.py


Example 3: prune_outside_window

def prune_outside_window(keypoints, window, scope=None):
  """Prunes keypoints that fall outside a given window.

  This function replaces keypoints that fall outside the given window with nan.
  See also clip_to_window which clips any keypoints that fall outside the given
  window.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window outside of which the op should prune the keypoints.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)

    valid_indices = tf.logical_and(
        tf.logical_and(y >= win_y_min, y <= win_y_max),
        tf.logical_and(x >= win_x_min, x <= win_x_max))

    new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y))
    new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x))
    new_keypoints = tf.concat([new_y, new_x], 2)

    return new_keypoints
Developer: ALISCIFP, Project: models, Lines of code: 29, Source: keypoint_ops.py


Example 4: to_binary_tf

def to_binary_tf(bar_or_track_bar, threshold=0.0, track_mode=False, melody=False):
    """Return the binarize tensor of the input tensor (be careful of the channel order!)"""
    if track_mode:
        # melody track
        if melody:
            melody_is_max = tf.equal(bar_or_track_bar, tf.reduce_max(bar_or_track_bar, axis=2, keep_dims=True))
            melody_pass_threshold = (bar_or_track_bar > threshold)
            out_tensor = tf.logical_and(melody_is_max, melody_pass_threshold)
        # non-melody track
        else:
            out_tensor = (bar_or_track_bar > threshold)
        return out_tensor
    else:
        if len(bar_or_track_bar.get_shape()) == 4:
            melody_track = tf.slice(bar_or_track_bar, [0, 0, 0, 0], [-1, -1, -1, 1])
            other_tracks = tf.slice(bar_or_track_bar, [0, 0, 0, 1], [-1, -1, -1, -1])
        elif len(bar_or_track_bar.get_shape()) == 5:
            melody_track = tf.slice(bar_or_track_bar, [0, 0, 0, 0, 0], [-1, -1, -1, -1, 1])
            other_tracks = tf.slice(bar_or_track_bar, [0, 0, 0, 0, 1], [-1, -1, -1, -1, -1])
        # melody track
        melody_is_max = tf.equal(melody_track, tf.reduce_max(melody_track, axis=2, keep_dims=True))
        melody_pass_threshold = (melody_track > threshold)
        out_tensor_melody = tf.logical_and(melody_is_max, melody_pass_threshold)
        # other tracks
        out_tensor_others = (other_tracks > threshold)
        if len(bar_or_track_bar.get_shape()) == 4:
            return tf.concat([out_tensor_melody, out_tensor_others], 3)
        elif len(bar_or_track_bar.get_shape()) == 5:
            return tf.concat([out_tensor_melody, out_tensor_others], 4)
Developer: Hotim, Project: AI_For_Music_Composition, Lines of code: 29, Source: ops.py


Example 5: build_graph

    def build_graph(self, nn_im_w, nn_im_h, num_colour_channels=3, weights=None, biases=None):
        num_outputs = 1 #ofc
        self.nn_im_w = nn_im_w
        self.nn_im_h = nn_im_h

        if weights is None:
            weights = [None, None, None, None, None]
        if biases is None:
            biases = [None, None, None, None, None]

        with tf.device('/cpu:0'):
            # Placeholder variables for the input image and output images
            self.x = tf.placeholder(tf.float32, shape=[None, nn_im_w*nn_im_h*3])
            self.y_ = tf.placeholder(tf.float32, shape=[None, num_outputs])
            self.threshold = tf.placeholder(tf.float32)

            # Build the convolutional and pooling layers
            conv1_output_channels = 32
            conv2_output_channels = 16
            conv3_output_channels = 8

            conv_layer_1_input = tf.reshape(self.x, [-1, nn_im_h, nn_im_w, num_colour_channels]) #The resized input image
            self.build_conv_layer(conv_layer_1_input, num_colour_channels, conv1_output_channels, initial_weights=weights[0], initial_biases=biases[0]) # layer 1
            self.build_conv_layer(self.layers[0][0], conv1_output_channels, conv2_output_channels, initial_weights=weights[1], initial_biases=biases[1])# layer 2
            self.build_conv_layer(self.layers[1][0], conv2_output_channels, conv3_output_channels, initial_weights=weights[2], initial_biases=biases[2])# layer 3

            # Build the fully connected layer
            convnet_output_w = nn_im_w//8
            convnet_output_h = nn_im_h//8

            fully_connected_layer_input = tf.reshape(self.layers[2][0], [-1, convnet_output_w * convnet_output_h * conv3_output_channels])
            self.build_fully_connected_layer(fully_connected_layer_input, convnet_output_w, convnet_output_h, conv3_output_channels, initial_weights=weights[3], initial_biases=biases[3])

            # The dropout stage and readout layer
            self.keep_prob, self.h_drop = self.dropout(self.layers[3][0])
            self.y_conv,_,_ = self.build_readout_layer(self.h_drop, num_outputs, initial_weights=weights[4], initial_biases=biases[4])

            self.mean_error =  tf.sqrt(tf.reduce_mean(tf.square(self.y_ - self.y_conv)))
            self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.mean_error)

            self.accuracy = (1.0 - tf.reduce_mean(tf.abs(self.y_ - tf.round(self.y_conv))))


            positive_examples = tf.greater_equal(self.y_, 0.5)
            negative_examples = tf.logical_not(positive_examples)
            positive_classifications = tf.greater_equal(self.y_conv, self.threshold)
            negative_classifications = tf.logical_not(positive_classifications)

            self.true_positive = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, positive_classifications),tf.int32)) # count the examples that are positive and classified as positive
            self.false_positive = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, positive_classifications),tf.int32)) # count the examples that are negative but classified as positive

            self.true_negative = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, negative_classifications),tf.int32)) # count the examples that are negative and classified as negative
            self.false_negative = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, negative_classifications),tf.int32)) # count the examples that are positive but classified as negative

            self.positive_count = tf.reduce_sum(tf.cast(positive_examples, tf.int32)) # count the examples that are positive
            self.negative_count = tf.reduce_sum(tf.cast(negative_examples, tf.int32)) # count the examples that are negative

            self.confusion_matrix = tf.reshape(tf.pack([self.true_positive, self.false_positive, self.false_negative, self.true_negative]), [2,2])

        self.sess.run(tf.initialize_all_variables())
Developer: JTKBowers, Project: CNN-people-detect, Lines of code: 60, Source: Model.py


Example 6: m_body

        def m_body(i, ta_tp, ta_fp, gmatch):
            # Jaccard score with groundtruth bboxes.
            rbbox = bboxes[i]
            jaccard = bboxes_jaccard(rbbox, gbboxes)
            jaccard = jaccard * tf.cast(tf.equal(glabels, rlabel), dtype=jaccard.dtype)

            # Best fit, checking it's above threshold.
            idxmax = tf.cast(tf.argmax(jaccard, axis=0), tf.int32)
            jcdmax = jaccard[idxmax]
            match = jcdmax > matching_threshold
            existing_match = gmatch[idxmax]
            not_difficult = tf.logical_not(gdifficults[idxmax])

            # TP: match & no previous match and FP: previous match | no match.
            # If difficult: no record, i.e FP=False and TP=False.
            tp = tf.logical_and(not_difficult,
                                tf.logical_and(match, tf.logical_not(existing_match)))
            ta_tp = ta_tp.write(i, tp)
            fp = tf.logical_and(not_difficult,
                                tf.logical_or(existing_match, tf.logical_not(match)))
            ta_fp = ta_fp.write(i, fp)
            # Update groundtruth match.
            mask = tf.logical_and(tf.equal(grange, idxmax),
                                  tf.logical_and(not_difficult, match))
            gmatch = tf.logical_or(gmatch, mask)

            return [i+1, ta_tp, ta_fp, gmatch]
Developer: bowrian, Project: SSD-Tensorflow, Lines of code: 27, Source: bboxes.py


Example 7: check_convergence

    def check_convergence(self, new_T0, new_transition, new_emission):
        
        delta_T0 = tf.reduce_max(tf.abs(self.T0 - new_T0)) < self.epsilon
        delta_T = tf.reduce_max(tf.abs(self.T - new_transition)) < self.epsilon
        delta_E = tf.reduce_max(tf.abs(self.E - new_emission)) < self.epsilon

        return tf.logical_and(tf.logical_and(delta_T0, delta_T), delta_E)
Developer: aliziaei, Project: HiddenMarkovModel_TensorFlow, Lines of code: 7, Source: HiddenMarkovModel.py


Example 8: subsample

  def subsample(self, indicator, batch_size, labels, scope=None):
    """Returns subsampled minibatch.

    Args:
      indicator: boolean tensor of shape [N] whose True entries can be sampled.
      batch_size: desired batch size. If None, keeps all positive samples and
        randomly selects negative samples so that the positive sample fraction
        matches self._positive_fraction. It cannot be None if is_static is True.
      labels: boolean tensor of shape [N] denoting positive(=True) and negative
          (=False) examples.
      scope: name scope.

    Returns:
      sampled_idx_indicator: boolean tensor of shape [N], True for entries which
        are sampled.

    Raises:
      ValueError: if labels and indicator are not 1D boolean tensors.
    """
    if len(indicator.get_shape().as_list()) != 1:
      raise ValueError('indicator must be 1 dimensional, got a tensor of '
                       'shape %s' % indicator.get_shape())
    if len(labels.get_shape().as_list()) != 1:
      raise ValueError('labels must be 1 dimensional, got a tensor of '
                       'shape %s' % labels.get_shape())
    if labels.dtype != tf.bool:
      raise ValueError('labels should be of type bool. Received: %s' %
                       labels.dtype)
    if indicator.dtype != tf.bool:
      raise ValueError('indicator should be of type bool. Received: %s' %
                       indicator.dtype)
    with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'):
      if self._is_static:
        return self._static_subsample(indicator, batch_size, labels)

      else:
        # Only sample from indicated samples
        negative_idx = tf.logical_not(labels)
        positive_idx = tf.logical_and(labels, indicator)
        negative_idx = tf.logical_and(negative_idx, indicator)

        # Sample positive and negative samples separately
        if batch_size is None:
          max_num_pos = tf.reduce_sum(tf.to_int32(positive_idx))
        else:
          max_num_pos = int(self._positive_fraction * batch_size)
        sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
        num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))
        if batch_size is None:
          negative_positive_ratio = (
              1 - self._positive_fraction) / self._positive_fraction
          max_num_neg = tf.to_int32(
              negative_positive_ratio * tf.to_float(num_sampled_pos))
        else:
          max_num_neg = batch_size - num_sampled_pos
        sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)

        return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
Developer: ALISCIFP, Project: models, Lines of code: 58, Source: balanced_positive_negative_sampler.py


Example 9: tf_F1_score

def tf_F1_score(actuals, predictions):
    actuals = tf.reshape(actuals, [-1, 1])
    predictions = tf.reshape(predictions, [-1, 1])

    ones_like_actuals = tf.ones_like(actuals)
    zeros_like_actuals = tf.zeros_like(actuals)
    ones_like_predictions = tf.ones_like(predictions)
    zeros_like_predictions = tf.zeros_like(predictions)

    #true-positive
    tp_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, ones_like_actuals),
                tf.equal(predictions, ones_like_predictions)
            ),
            dtype=tf.float32
        )
    )
    #true-Negative
    tn_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, zeros_like_actuals),
                tf.equal(predictions, zeros_like_predictions)
            ),
            dtype=tf.float32
        )
    )
    #false-positive
    fp_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, zeros_like_actuals),
                tf.equal(predictions, ones_like_predictions)
            ),
            dtype=tf.float32
        )
    )
    #false_Neg
    fn_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, ones_like_actuals),
                tf.equal(predictions, zeros_like_predictions)
            ),
            dtype=tf.float32
        )
    )

    accuracy = (tp_op + tn_op) / (tp_op + tn_op + fp_op + fn_op)
    precision = tp_op / (tp_op + fp_op)
    recall = tp_op / (tp_op + fn_op)
    f1_score = (2 * (precision * recall)) / (precision + recall)

    return accuracy, [tp_op, tn_op, fp_op, fn_op, f1_score]
Developer: PistonY, Project: myTensorFlowTest, Lines of code: 56, Source: get_F1.py


Example 10: sensitivity

def sensitivity(logits, labels):
    predictions = tf.argmax(logits, axis=-1)
    actuals = tf.argmax(labels, axis=-1)


    nodule_actuals = tf.ones_like(actuals)
    non_nodule_actuals = tf.zeros_like(actuals)
    nodule_predictions = tf.ones_like(predictions)
    non_nodule_predictions = tf.zeros_like(predictions)

    tp_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, nodule_actuals),
                tf.equal(predictions, nodule_predictions)
            ),
            tf.float32
        )
    )

    tn_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, non_nodule_actuals),
                tf.equal(predictions, non_nodule_predictions)
            ),
            tf.float32
        )
    )

    fp_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, non_nodule_actuals),
                tf.equal(predictions, nodule_predictions)
            ),
            tf.float32
        )
    )

    fn_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, nodule_actuals),
                tf.equal(predictions, non_nodule_predictions)
            ),
            tf.float32
        )
    )

    false_positive_rate = fp_op / (fp_op + tn_op)

    recall = tp_op / (tp_op + fn_op)

    return recall, false_positive_rate
Developer: codealphago, Project: CASED-Tensorflow, Lines of code: 55, Source: utils.py


Example 11: do_eval

 def do_eval(self, f1 = False):
   """INTENT : Evaluate the CDBN as a classifier"""
   
   input_placeholder  = tf.placeholder(tf.float32, shape=self.input)
   
   eval = tf.reshape(self._get_input_level(self.number_layer,input_placeholder), [self.batch_size, -1])
   y = tf.nn.softmax(tf.matmul(eval, self.W) + self.b)
   y_ = tf.placeholder(tf.float32, [None,self.output_classes])
   
   if f1:
     predicted_class = tf.argmax(y,1)
     real_class = tf.argmax(y_,1)
     zeros = tf.zeros_like(predicted_class)
     ones  = tf.ones_like(predicted_class)
     
     true_positive = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(predicted_class, ones),tf.equal(real_class, ones)), tf.float32))
     tp_count = 0
     false_positive = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(predicted_class, ones),tf.equal(real_class, zeros)), tf.float32))
     fp_count = 0
     true_negative = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(predicted_class, zeros),tf.equal(real_class, zeros)), tf.float32))
     tn_count = 0
     false_negative = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(predicted_class, zeros),tf.equal(real_class, ones)), tf.float32))
     fn_count = 0
   else:
     correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
     correct_count = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))
     true_count = 0
     
   num_examples = self.data.num_test_example
   steps_per_epoch = num_examples // self.batch_size
   
   for step in range(steps_per_epoch):
     images_feed, labels_feed = self.data.next_batch(self.batch_size, 'test')
     visible                  = np.reshape(images_feed, self.input)
     if f1:
       a,b,c,d = self.session.run([true_positive,false_positive,true_negative,false_negative], feed_dict={input_placeholder: visible, y_: labels_feed})
       tp_count += a
       fp_count += b
       tn_count += c
       fn_count += d
     else:
       true_count += self.session.run(correct_count, feed_dict={input_placeholder: visible, y_: labels_feed})
       
   if self.verbosity > 0:
     print('--------------------------')
   if f1:
     precision = tp_count / (tp_count+fp_count)
     recall = tp_count / (tp_count+fn_count)
     f1_score = 2*precision*recall/(precision+recall)
     overall_precision = (tp_count + tn_count) / (fn_count+ fp_count + tp_count +tn_count)
     print('Successfully evaluated the CDBN : \n Precision is %0.02f percent \n Recall is %0.02f percent \n F1 score is %0.02f\n tp: %d ---  fp: %d ---  tn: %d ---  fn: %d\n Overall precision is %0.02f percent' %(precision*100, recall*100, f1_score, tp_count, fp_count, tn_count, fn_count, overall_precision * 100))
   else:
     precision = true_count / num_examples
     print('Successfully evaluated the CDBN : \n %d examples are correctly classified out of %d total examples\n Precision is %0.02f percent' %(true_count, num_examples, precision*100))
Developer: vittichi, Project: Convolutional_Deep_Belief_Network, Lines of code: 54, Source: cdbn_backup.py


Example 12: _noisy_identity_kernel_initializer

  def _noisy_identity_kernel_initializer(shape,
                                         dtype=tf.float32,
                                         partition_info=None):
    """Constructs a noisy identity kernel.

    Args:
      shape: List of integers. Represents shape of result.
      dtype: data type for values in result.
      partition_info: Partition information for initializer functions. Ignored.

    Returns:
      Tensor of desired shape and dtype such that applying it as a convolution
        kernel results in a noisy near-identity operation.

    Raises:
      ValueError: If shape does not define a valid kernel.
                  If filter width and height differ.
                  If filter width and height are not odd numbers.
                  If number of input and output channels are not multiples of
                    base_num_channels.
    """
    if len(shape) != 4:
      raise ValueError("Convolution kernels must be rank 4.")

    filter_height, filter_width, in_channels, out_channels = shape

    if filter_width != filter_height:
      raise ValueError(
          "Noisy identity initializer only works for square filters.")
    if filter_width % 2 != 1:
      raise ValueError(
          "Noisy identity initializer requires filters have odd height and "
          "width.")
    if (in_channels % base_num_channels != 0 or
        out_channels % base_num_channels != 0):
      raise ValueError("in_channels and out_channels must both be multiples of "
                       "base_num_channels.")

    middle_pixel = filter_height // 2
    is_middle_pixel = tf.logical_and(
        tf.equal(_range_along_dimension(0, shape), middle_pixel),
        tf.equal(_range_along_dimension(1, shape), middle_pixel))
    is_same_channel_multiple = tf.equal(
        tf.floordiv(
            _range_along_dimension(2, shape) * base_num_channels, in_channels),
        tf.floordiv(
            _range_along_dimension(3, shape) * base_num_channels, out_channels))
    noise = tf.truncated_normal(shape, stddev=stddev, dtype=dtype)
    return tf.where(
        tf.logical_and(is_same_channel_multiple, is_middle_pixel),
        tf.ones(
            shape, dtype=dtype) * (base_num_channels / out_channels),
        noise)
Developer: TianjiPang, Project: sonnet, Lines of code: 53, Source: dilation.py


Example 13: detection_loss

    def detection_loss(location, confidence, refine_ph, classes_ph, pos_mask):
        neg_mask = tf.logical_not(pos_mask)
        number_of_positives = tf.reduce_sum(tf.to_int32(pos_mask))
        true_number_of_negatives = tf.minimum(3 * number_of_positives,
                                              tf.shape(pos_mask)[1] - number_of_positives)
        # max is to avoid the case where no positive boxes were sampled
        number_of_negatives = tf.maximum(1, true_number_of_negatives)
        num_pos_float = tf.to_float(tf.maximum(1, number_of_positives))
        normalizer = tf.to_float(tf.add(number_of_positives, number_of_negatives))
        tf.summary.scalar('batch/size', normalizer)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=confidence,
                                                                       labels=classes_ph)
        pos_class_loss = tf.reduce_sum(tf.boolean_mask(cross_entropy, pos_mask))
        tf.summary.scalar('loss/class_pos', pos_class_loss / num_pos_float)

        top_k_worst, top_k_inds = tf.nn.top_k(tf.boolean_mask(cross_entropy, neg_mask),
                                              number_of_negatives)
        # multiplication is to avoid the case where no positive boxes were sampled
        neg_class_loss = tf.reduce_sum(top_k_worst) * \
                         tf.cast(tf.greater(true_number_of_negatives, 0), tf.float32)
        class_loss = (neg_class_loss + pos_class_loss) / num_pos_float
        tf.summary.scalar('loss/class_neg', neg_class_loss / tf.to_float(number_of_negatives))
        tf.summary.scalar('loss/class', class_loss)

        # cond is to avoid the case where no positive boxes were sampled
        bbox_loss = tf.cond(tf.equal(tf.reduce_sum(tf.cast(pos_mask, tf.int32)), 0),
                            lambda: 0.0,
                            lambda: tf.reduce_mean(smooth_l1(tf.boolean_mask(location, pos_mask),
                                                             tf.boolean_mask(refine_ph, pos_mask))))
        tf.summary.scalar('loss/bbox', bbox_loss)

        inferred_class = tf.cast(tf.argmax(confidence, 2), tf.int32)
        positive_matches = tf.equal(tf.boolean_mask(inferred_class, pos_mask),
                                    tf.boolean_mask(classes_ph, pos_mask))
        hard_matches = tf.equal(tf.boolean_mask(inferred_class, neg_mask),
                                tf.boolean_mask(classes_ph, neg_mask))
        hard_matches = tf.gather(hard_matches, top_k_inds)
        train_acc = ((tf.reduce_sum(tf.to_float(positive_matches)) +
                    tf.reduce_sum(tf.to_float(hard_matches))) / normalizer)
        tf.summary.scalar('accuracy/train', train_acc)

        recognized_class = tf.argmax(confidence, 2)
        tp = tf.reduce_sum(tf.to_float(tf.logical_and(recognized_class > 0, pos_mask)))
        fp = tf.reduce_sum(tf.to_float(tf.logical_and(recognized_class > 0, neg_mask)))
        fn = tf.reduce_sum(tf.to_float(tf.logical_and(tf.equal(recognized_class, 0), pos_mask)))
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = 2*(precision * recall)/(precision + recall)
        tf.summary.scalar('metrics/train/precision', precision)
        tf.summary.scalar('metrics/train/recall', recall)
        tf.summary.scalar('metrics/train/f1', f1)
        return class_loss, bbox_loss, train_acc, number_of_positives
Developer: heidongxianhau, Project: blitznet, Lines of code: 53, Source: training.py


Example 14: accuracy

def accuracy(logits, labels):
  def tf_count(t, val):
    elements_equal_to_value = tf.equal(t, val)
    as_ints = tf.cast(elements_equal_to_value, tf.int32)
    count = tf.reduce_sum(as_ints)
    return count
  
  labels = tf.cast(labels, tf.int64)
  label_shape = labels.get_shape().as_list()
  reshaped_labels = tf.reshape(labels,
                              [label_shape[0]*label_shape[1]*label_shape[2]])

  logits_shape = logits.get_shape().as_list()
  reshaped_logits = tf.reshape(logits,
                              [logits_shape[0]*logits_shape[1]*logits_shape[2],
                              logits_shape[3]])

  predictions = tf.argmax(reshaped_logits, dimension=1)
  shaped_predictions = tf.argmax(logits, dimension=3)
  correct_predictions = tf.equal(predictions, reshaped_labels)
  accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name='accuracy')
  tf.add_to_collection('accuracy', accuracy)
  tf.histogram_summary('predictions_hist', predictions)
  imgs_to_summarize = tf.expand_dims(tf.cast(shaped_predictions, 'float32'), -1)
  tf.image_summary('predictions', imgs_to_summarize)

  cat_names = CLASSES
  precision = []
  cat_acc = []
  for cat_id,cat in enumerate(cat_names):
    cat_pred = tf.equal(predictions, cat_id, name=cat+"_pred")
    cat_truth = tf.equal(reshaped_labels, cat_id, name=cat+"_truth")
    non_cat_truth = tf.not_equal(reshaped_labels, cat_id, name=cat+"_non_truth")
      
    tp = tf.logical_and(cat_pred, cat_truth, name=cat+"_tp")
    tp_count = tf.reduce_sum(tf.cast(tp, "float"), name=cat+"_tp_count")
    fp = tf.logical_and(cat_pred, non_cat_truth, name=cat+"_fp")
    fp_count = tf.reduce_sum(tf.cast(fp, "float"), name=cat+"_fp_count")

    tf.scalar_summary('cat_precisions/'+cat+'_fp_count', fp_count)
    tf.scalar_summary('cat_precisions/'+cat+'_tp_count', tp_count)
  
    precision.append( tp_count / (tp_count + fp_count) )

    cat_correct = tf.logical_and(cat_truth, cat_pred, name=cat+"_correct")
    cat_acc.append(tf.reduce_mean(tf.cast(cat_correct, "float"), name=cat+"_accuracy"))
  
  precisions = tf.pack(precision)  
  accuracies = tf.pack(cat_acc)
  tf.add_to_collection('precisions',precisions)

  return accuracy, precisions, accuracies
Developer: Thrasi, Project: thesis-project, Lines of code: 52, Source: cifar10.py


Example 15: _define_summaries

  def _define_summaries():
    """Reset the average score and duration, and return them as summary.

    Returns:
      Summary string.
    """
    score_summary = tf.cond(
        tf.logical_and(log, tf.cast(mean_score.count, tf.bool)),
        lambda: tf.summary.scalar('mean_score', mean_score.clear()), str)
    length_summary = tf.cond(
        tf.logical_and(log, tf.cast(mean_length.count, tf.bool)),
        lambda: tf.summary.scalar('mean_length', mean_length.clear()), str)
    return tf.summary.merge([score_summary, length_summary])
Developer: AndrewMeadows, Project: bullet3, Lines of code: 13, Source: simulate.py


Example 16: iou

def iou(x_true, y_true, w_true, h_true, x_pred, y_pred, w_pred, h_pred, t, pred_confid_tf):
    x_true = K.expand_dims(x_true, 2)
    y_true = K.expand_dims(y_true, 2)
    w_true = K.expand_dims(w_true, 2)
    h_true = K.expand_dims(h_true, 2)
    x_pred = K.expand_dims(x_pred, 2)
    y_pred = K.expand_dims(y_pred, 2)
    w_pred = K.expand_dims(w_pred, 2)
    h_pred = K.expand_dims(h_pred, 2)

    xoffset = K.expand_dims(tf.convert_to_tensor(np.asarray([0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7], dtype=np.float32)),1)
    yoffset = K.expand_dims(tf.convert_to_tensor(np.asarray([0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4], dtype=np.float32)),1)


    # xoffset = K.cast_to_floatx((np.tile(np.arange(side),side)))
    # yoffset = K.cast_to_floatx((np.repeat(np.arange(side),side)))
    x = tf.where(t, x_pred, K.zeros_like(x_pred))
    y = tf.where(t, y_pred, K.zeros_like(y_pred))
    w = tf.where(t, w_pred, K.zeros_like(w_pred))
    h = tf.where(t, h_pred, K.zeros_like(h_pred))

    ow = overlap(x + xoffset, w * 256. , x_true + xoffset, w_true * 256.)
    oh = overlap(y + yoffset, h * 160., y_true + yoffset, h_true * 256.)

    ow = tf.where(K.greater(ow, 0), ow, K.zeros_like(ow))
    oh = tf.where(K.greater(oh, 0), oh, K.zeros_like(oh))
    intersection = ow * oh
    union = w * 256. * h * 160. + w_true * 256. * h_true * 160.  - intersection + K.epsilon()  # prevent div 0

    #
    # find best iou among bboxs
    # iouall shape=(-1, bnum*gridcells)
    iouall = intersection / union
    obj_count = K.sum(tf.where(t, K.ones_like(x_true), K.zeros_like(x_true)))

    ave_iou = K.sum(iouall) / (obj_count + 0.0000001)
    recall_t = K.greater(iouall, 0.5)
    # recall_count = K.sum(tf.select(recall_t, K.ones_like(iouall), K.zeros_like(iouall)))

    fid_t = K.greater(pred_confid_tf, 0.3)
    recall_count_all = K.sum(tf.where(fid_t, K.ones_like(iouall), K.zeros_like(iouall)))

    #  
    obj_fid_t = tf.logical_and(fid_t, t)
    obj_fid_t = tf.logical_and(fid_t, recall_t)
    effevtive_iou_count = K.sum(tf.where(obj_fid_t, K.ones_like(iouall), K.zeros_like(iouall)))

    recall = effevtive_iou_count / (obj_count + 0.00000001)
    precision = effevtive_iou_count / (recall_count_all + 0.0000001)
    return ave_iou, recall, precision, obj_count, intersection, union, ow, oh, x, y, w, h
Developer: gabrielpreviato, Project: J-MOD2, Lines of code: 50, Source: ObstacleDetectionObjectives.py


Example 17: recall

    def recall(mod_y, ref_y, summary=True, name="recall"):
        with tf.name_scope(name):
                predictions = tf.argmax(mod_y, 1)
                actuals = tf.argmax(ref_y, 1)

                ones_likes = tf.ones_like(actuals)
                zeros_likes = tf.zeros_like(actuals)

                tp = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, zeros_likes), tf.equal(predictions, zeros_likes)), tf.float32))
                fn = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, zeros_likes), tf.equal(predictions, ones_likes)), tf.float32))
                recall = tf.div(tp, tf.add(tp, fn))
                if summary:
                        tf.summary.scalar('recall', recall)
                return recall
Developer: yy2261, Project: Deep-Learning-TensorFlow, Lines of code: 14, Source: layers.py


Example 18: precision

    def precision(mod_y, ref_y, summary=True, name="precision"):
    	with tf.name_scope(name):
    		predictions = tf.argmax(mod_y, 1)
    		actuals = tf.argmax(ref_y, 1)
    		
    		ones_likes = tf.ones_like(actuals)
    		zeros_likes = tf.zeros_like(actuals)

    		tp = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, zeros_likes), tf.equal(predictions, zeros_likes)), tf.float32))
    		fp = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, ones_likes), tf.equal(predictions, zeros_likes)), tf.float32))
    		precision = tf.div(tp, tf.add(tp, fp))
    		if summary:
    			tf.summary.scalar('precision', precision)
    		return precision
Developer: yy2261, Project: Deep-Learning-TensorFlow, Lines of code: 14, Source: layers.py


Example 19: _get_filled_box_idx

def _get_filled_box_idx(idx, top_left, bot_right):
    """Fill a box with top left and bottom right coordinates."""
    # [B, T, H, W]
    idx_y = idx[:, :, :, :, 0]
    idx_x = idx[:, :, :, :, 1]
    top_left_y = tf.expand_dims(tf.expand_dims(top_left[:, :, 0], 2), 3)
    top_left_x = tf.expand_dims(tf.expand_dims(top_left[:, :, 1], 2), 3)
    bot_right_y = tf.expand_dims(tf.expand_dims(bot_right[:, :, 0], 2), 3)
    bot_right_x = tf.expand_dims(tf.expand_dims(bot_right[:, :, 1], 2), 3)
    lower = tf.logical_and(idx_y >= top_left_y, idx_x >= top_left_x)
    upper = tf.logical_and(idx_y <= bot_right_y, idx_x <= bot_right_x)
    box = tf.to_float(tf.logical_and(lower, upper))

    return box
Developer: lrjconan, Project: img-count, Lines of code: 14, Source: ris_attn_model_old.py


Example 20: unwrap

def unwrap(p, discont=np.pi, axis=-1):
  """Unwrap a cyclical phase tensor.

  Args:
    p: Phase tensor.
    discont: Float, size of the cyclic discontinuity.
    axis: Axis of which to unwrap.

  Returns:
    unwrapped: Unwrapped tensor of same size as input.
  """
  dd = diff(p, axis=axis)
  ddmod = tf.mod(dd + np.pi, 2.0 * np.pi) - np.pi
  idx = tf.logical_and(tf.equal(ddmod, -np.pi), tf.greater(dd, 0))
  ddmod = tf.where(idx, tf.ones_like(ddmod) * np.pi, ddmod)
  ph_correct = ddmod - dd
  idx = tf.less(tf.abs(dd), discont)
  ddmod = tf.where(idx, tf.zeros_like(ddmod), dd)
  ph_cumsum = tf.cumsum(ph_correct, axis=axis)

  shape = p.get_shape().as_list()
  shape[axis] = 1
  ph_cumsum = tf.concat([tf.zeros(shape, dtype=p.dtype), ph_cumsum], axis=axis)
  unwrapped = p + ph_cumsum
  return unwrapped
Developer: cghawthorne, Project: magenta, Lines of code: 25, Source: spectral_ops.py



Note: The tensorflow.logical_and function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as Github/MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce without permission.

