
Python tensorflow.reduce_min Function Code Examples


This article collects typical usage examples of the tensorflow.reduce_min function in Python. If you have been wondering what exactly reduce_min does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.



Twenty code examples of reduce_min are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
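
Before diving into the examples, here is a minimal, self-contained sketch of what tf.reduce_min computes, written against the TF 1.x API that the examples on this page use (the tensor values are invented for illustration):

import tensorflow as tf

x = tf.constant([[3.0, 1.0, 2.0],
                 [6.0, 5.0, 4.0]])

global_min = tf.reduce_min(x)                            # scalar: 1.0
row_min = tf.reduce_min(x, axis=1)                       # shape (2,): [1.0, 4.0]
row_min_kept = tf.reduce_min(x, axis=1, keep_dims=True)  # shape (2, 1): [[1.0], [4.0]]

with tf.Session() as sess:
    print(sess.run([global_min, row_min, row_min_kept]))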

Example 1: init_data

def init_data(inputFile, K):
    global training_data, validation_data, centroids, training_num, data_dim, centroids_num 
    global tf_data_set, tf_centroids
    # initialize data and centroids
    data = np.float32( np.load(inputFile))
    data = (data - data.mean()) / data.std()
    # update data_num and centroids_num
    data_num, data_dim = data.shape
    centroids_num = K
    # training data and validation data
    training_num = int(2./3 * data_num)
    training_data = data[:training_num]
    validation_data = data[training_num:]
    centroids = tf.truncated_normal(shape=[centroids_num, data_dim])
    # update tf_data_set and tf_centroids
    tf_data_set = tf.placeholder(tf.float32, shape=[None, data_dim])
    tf_centroids = tf.Variable(tf.convert_to_tensor(centroids, dtype=tf.float32))
    ########### for the training cases #####################
    # get the euclidean distance
    tf_train_dist = euclidean_dist(tf_data_set, tf_centroids, training_num, centroids_num)
    # get the min index for data set
    tf_train_min_index = tf.argmin(tf_train_dist, dimension=1)
    # loss and optimizer
    tf_train_loss = tf.reduce_sum(tf.reduce_min(euclidean_dist(tf_data_set, tf_centroids, training_num, centroids_num), 
        1, keep_dims=True))
    tf_train_opt = tf.train.AdamOptimizer(learning_rate=0.1, beta1=0.9, beta2=0.99, epsilon=1e-5).minimize(tf_train_loss)
    ########### for the validation cases ####################
    tf_valid_dist = euclidean_dist(tf_data_set, tf_centroids, (data_num-training_num), centroids_num)
    tf_valid_min_index = tf.argmin(tf_valid_dist, dimension=1)
    tf_valid_loss = tf.reduce_sum(tf.reduce_min(euclidean_dist(tf_data_set, tf_centroids, (data_num-training_num), centroids_num), 
        1, keep_dims=True))
    return tf_train_min_index, tf_train_loss, tf_train_opt, tf_valid_loss
Developer: z23han, Project: ECE521-Inference-Algorithm-and-Machine-Learning, Lines: 32, Source: a3_q1_2_4.py
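
The reduce_min pattern at the heart of this example is worth isolating: given an (N, K) matrix of point-to-centroid distances, tf.reduce_min along axis 1 picks each point's distance to its nearest centroid, and tf.reduce_sum of those minima is the k-means objective being minimized. A minimal sketch (the hard-coded dist matrix below stands in for the euclidean_dist helper, which the excerpt does not include):

import tensorflow as tf

# Hypothetical distances from N=3 points to K=2 centroids.
dist = tf.constant([[4.0, 1.0],
                    [0.5, 9.0],
                    [2.0, 2.5]])

assignments = tf.argmin(dist, axis=1)              # nearest centroid per point: [1, 0, 0]
loss = tf.reduce_sum(tf.reduce_min(dist, axis=1))  # 1.0 + 0.5 + 2.0 = 3.5

with tf.Session() as sess:
    print(sess.run([assignments, loss]))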


Example 2: gen_debug_td_error_summaries

def gen_debug_td_error_summaries(
    target_q_values, q_values, td_targets, td_errors):
  """Generates debug summaries for critic given a set of batch samples.

  Args:
    target_q_values: set of predicted next stage values.
    q_values: current predicted value for the critic network.
    td_targets: discounted target_q_values with added next stage reward.
    td_errors: the difference between td_targets and q_values.
  """
  with tf.name_scope('td_errors'):
    tf.summary.histogram('td_targets', td_targets)
    tf.summary.histogram('q_values', q_values)
    tf.summary.histogram('target_q_values', target_q_values)
    tf.summary.histogram('td_errors', td_errors)
    with tf.name_scope('td_targets'):
      tf.summary.scalar('mean', tf.reduce_mean(td_targets))
      tf.summary.scalar('max', tf.reduce_max(td_targets))
      tf.summary.scalar('min', tf.reduce_min(td_targets))
    with tf.name_scope('q_values'):
      tf.summary.scalar('mean', tf.reduce_mean(q_values))
      tf.summary.scalar('max', tf.reduce_max(q_values))
      tf.summary.scalar('min', tf.reduce_min(q_values))
    with tf.name_scope('target_q_values'):
      tf.summary.scalar('mean', tf.reduce_mean(target_q_values))
      tf.summary.scalar('max', tf.reduce_max(target_q_values))
      tf.summary.scalar('min', tf.reduce_min(target_q_values))
    with tf.name_scope('td_errors'):
      tf.summary.scalar('mean', tf.reduce_mean(td_errors))
      tf.summary.scalar('max', tf.reduce_max(td_errors))
      tf.summary.scalar('min', tf.reduce_min(td_errors))
      tf.summary.scalar('mean_abs', tf.reduce_mean(tf.abs(td_errors)))
Developer: Exscotticus, Project: models, Lines: 32, Source: ddpg_agent.py


Example 3: coverage_box

 def coverage_box(bboxes):
   y_min, x_min, y_max, x_max = tf.split(
       value=bboxes, num_or_size_splits=4, axis=1)
   y_min_coverage = tf.reduce_min(y_min, axis=0)
   x_min_coverage = tf.reduce_min(x_min, axis=0)
   y_max_coverage = tf.reduce_max(y_max, axis=0)
   x_max_coverage = tf.reduce_max(x_max, axis=0)
   return tf.stack(
       [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
       axis=1)
Developer: NoPointExc, Project: models, Lines: 10, Source: box_list_ops.py
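
A quick usage sketch (box coordinates invented; this assumes coverage_box is in scope as a plain function, although the excerpt shows it nested inside other code): the coverage box takes the min of the min-corners and the max of the max-corners, i.e. the smallest box enclosing every input box.

import tensorflow as tf

boxes = tf.constant([[0.1, 0.2, 0.5, 0.6],
                     [0.3, 0.1, 0.7, 0.4]])

with tf.Session() as sess:
    print(sess.run(coverage_box(boxes)))  # [[0.1 0.1 0.7 0.6]]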


Example 4: __init__

  def __init__(self, reuse=False, trainable=True):
    # Placeholders for our input
    # Our input is a stack of 4 frames of shape 84, 84 each
    self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
    # The TD target value
    self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name="y")

    X = tf.to_float(self.states) / 255.0
    batch_size = tf.shape(self.states)[0]

    # Graph shared with Value Net
    with tf.variable_scope("shared", reuse=reuse):
      fc1 = build_shared_network(X, add_summaries=(not reuse))

    with tf.variable_scope("value_net"):
      self.logits = tf.contrib.layers.fully_connected(
        inputs=fc1,
        num_outputs=1,
        activation_fn=None)
      self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name="logits")

      self.losses = tf.squared_difference(self.logits, self.targets)
      self.loss = tf.reduce_sum(self.losses, name="loss")

      self.predictions = {
        "logits": self.logits
      }

      # Summaries
      prefix = tf.get_variable_scope().name
      tf.scalar_summary(self.loss.name, self.loss)
      tf.scalar_summary("{}/max_value".format(prefix), tf.reduce_max(self.logits))
      tf.scalar_summary("{}/min_value".format(prefix), tf.reduce_min(self.logits))
      tf.scalar_summary("{}/mean_value".format(prefix), tf.reduce_mean(self.logits))
      tf.scalar_summary("{}/reward_max".format(prefix), tf.reduce_max(self.targets))
      tf.scalar_summary("{}/reward_min".format(prefix), tf.reduce_min(self.targets))
      tf.scalar_summary("{}/reward_mean".format(prefix), tf.reduce_mean(self.targets))
      tf.histogram_summary("{}/reward_targets".format(prefix), self.targets)
      tf.histogram_summary("{}/values".format(prefix), self.logits)

      if trainable:
        # self.optimizer = tf.train.AdamOptimizer(1e-4)
        self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
        self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
        self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None]
        self.train_op = self.optimizer.apply_gradients(self.grads_and_vars,
          global_step=tf.contrib.framework.get_global_step())

    var_scope_name = tf.get_variable_scope().name
    summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
    sumaries = [s for s in summary_ops if "policy_net" in s.name or "shared" in s.name]
    sumaries = [s for s in summary_ops if var_scope_name in s.name]
    self.summaries = tf.merge_summary(sumaries)
Developer: Selimam, Project: reinforcement-learning, Lines: 53, Source: estimators.py


Example 5: conv

    def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, relu=True, padding=DEFAULT_PADDING, group=1, trainable=True):
        
        print(name)
        if isinstance(input, tuple):
            input = input[0]   

        self.validate_padding(padding)
        c_i = input.get_shape()[-1]
        print(c_i)
        print(input.get_shape().as_list())
        assert c_i%group==0
        assert c_o%group==0
        convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
        with tf.variable_scope(name) as scope:
            init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
            init_biases = tf.constant_initializer(0.0)
            kernel = self.make_var('weights', [k_h, k_w, c_i/group, c_o], init_weights, trainable)
            biases = self.make_var('biases', [c_o], init_biases, trainable)

            
            with tf.name_scope('summaries'):
                with tf.name_scope('weights'):
                    mean = tf.reduce_mean(kernel)
                    tf.summary.scalar('mean', mean)
                    with tf.name_scope('stddev'):
                        stddev = tf.sqrt(tf.reduce_mean(tf.square(kernel- mean)))
                    tf.summary.scalar('stddev', stddev)
                    tf.summary.scalar('max', tf.reduce_max(kernel))
                    tf.summary.scalar('min', tf.reduce_min(kernel))
                    tf.summary.histogram('histogram', kernel)
                with tf.name_scope('biases'):
                    mean = tf.reduce_mean(biases)
                    tf.summary.scalar('mean', mean)
                    with tf.name_scope('stddev'):
                        stddev = tf.sqrt(tf.reduce_mean(tf.square(biases- mean)))
                    tf.summary.scalar('stddev', stddev)
                    tf.summary.scalar('max', tf.reduce_max(biases))
                    tf.summary.scalar('min', tf.reduce_min(biases))
                    tf.summary.histogram('histogram', biases)


            if group==1:
                conv = convolve(input, kernel)
            else:
                input_groups = tf.split(3, group, input)
                kernel_groups = tf.split(3, group, kernel)
                output_groups = [convolve(i, k) for i,k in zip(input_groups, kernel_groups)]
                conv = tf.concat(3, output_groups)
            if relu:
                bias = tf.nn.bias_add(conv, biases)
                return tf.nn.relu(bias, name=scope.name)
            return tf.nn.bias_add(conv, biases, name=scope.name)
Developer: chsiyuan, Project: 542FinalProject, Lines: 52, Source: network.py


Example 6: fc

    def fc(self, input, num_out, name, relu=True, trainable=True):

        print(name)
        with tf.variable_scope(name) as scope:
            # only use the first input
            if isinstance(input, tuple):
                input = input[0]

            input_shape = input.get_shape()
            if input_shape.ndims == 4:
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(tf.transpose(input,[0,3,1,2]), [-1, dim])
            else:
                feed_in, dim = (input, int(input_shape[-1]))

            if name == 'bbox_pred':
                init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
                init_biases = tf.constant_initializer(0.0)
            else:
                init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
                init_biases = tf.constant_initializer(0.0)

            weights = self.make_var('weights', [dim, num_out], init_weights, trainable)
            biases = self.make_var('biases', [num_out], init_biases, trainable)

            with tf.name_scope('summaries'):
                with tf.name_scope('weights'):
                    mean = tf.reduce_mean(weights)
                    tf.summary.scalar('mean', mean)
                    with tf.name_scope('stddev'):
                        stddev = tf.sqrt(tf.reduce_mean(tf.square(weights- mean)))
                    tf.summary.scalar('stddev', stddev)
                    tf.summary.scalar('max', tf.reduce_max(weights))
                    tf.summary.scalar('min', tf.reduce_min(weights))
                    tf.summary.histogram('histogram', weights)
                with tf.name_scope('biases'):
                    mean = tf.reduce_mean(biases)
                    tf.summary.scalar('mean', mean)
                    with tf.name_scope('stddev'):
                        stddev = tf.sqrt(tf.reduce_mean(tf.square(biases- mean)))
                    tf.summary.scalar('stddev', stddev)
                    tf.summary.scalar('max', tf.reduce_max(biases))
                    tf.summary.scalar('min', tf.reduce_min(biases))
                    tf.summary.histogram('histogram', biases)

            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc
Developer: chsiyuan, Project: 542FinalProject, Lines: 50, Source: network.py


Example 7: print_act_stats

def print_act_stats(x, _str=""):
    if not do_print_act_stats:
        return x
    if hvd.rank() != 0:
        return x
    if len(x.get_shape()) == 1:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 2:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 4:
        x_mean, x_var = tf.nn.moments(x, [0, 1, 2], keep_dims=True)
    stats = [tf.reduce_min(x_mean), tf.reduce_mean(x_mean), tf.reduce_max(x_mean),
             tf.reduce_min(tf.sqrt(x_var)), tf.reduce_mean(tf.sqrt(x_var)), tf.reduce_max(tf.sqrt(x_var))]
    return tf.Print(x, stats, "["+_str+"] "+x.name)
Developer: chinatian, Project: glow, Lines: 14, Source: tfops.py


Example 8: compute_lookup_error

 def compute_lookup_error(self, val):
   # Computes the lookup error.
   cond = tf.equal(self.batch_print_answer, val)
   inter = tf.where(
       cond, self.init_print_error,
       tf.tile(
           tf.reshape(tf.constant(1e10, self.data_type), [1, 1, 1]), [
               self.batch_size, self.utility.FLAGS.max_word_cols +
               self.utility.FLAGS.max_number_cols,
               self.utility.FLAGS.max_elements
           ]))
   return tf.reduce_min(tf.reduce_min(inter, 1), 1) * tf.cast(
       tf.greater(
           tf.reduce_sum(tf.reduce_sum(tf.cast(cond, self.data_type), 1), 1),
           0.0), self.data_type)
Developer: Hukongtao, Project: models, Lines: 15, Source: model.py
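
Note the chained call tf.reduce_min(tf.reduce_min(inter, 1), 1): the inner reduction removes axis 1, so the outer call reduces what was originally axis 2, making the pair equivalent to a single min over both trailing axes. A quick sketch of that equivalence (shape chosen arbitrarily):

import tensorflow as tf

x = tf.random_normal([4, 5, 6])

chained = tf.reduce_min(tf.reduce_min(x, 1), 1)  # shape (4,)
direct = tf.reduce_min(x, axis=[1, 2])           # same values in a single call

with tf.Session() as sess:
    a, b = sess.run([chained, direct])
    print((a == b).all())  # True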


Example 9: conv2d_1

def conv2d_1(name, inputs, shape, strides=1):

    with tf.name_scope(name+"_conv"):
        W = tf.Variable(tf.random_normal(shape))
        tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(W)) 
        x1 = tf.nn.conv2d(inputs, W, strides=[1, strides, strides, 1], padding='SAME', name="conv1")
        if name=='layerM21' and is_training == True:
            tf.summary.scalar('w_mean',tf.reduce_mean(W))
            tf.summary.scalar('w_max',tf.reduce_max(W))
            tf.summary.scalar('w_min',tf.reduce_min(W))


    with tf.name_scope(name+"_bias"):
        B = tf.Variable(tf.random_normal([shape[-1]]))
        tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(B)) 
        x2 = tf.nn.bias_add(x1, B, name="bias1")
    
    with tf.name_scope(name+"_BN"):
        x3 = bn_layer(x2, is_training, name=name)

    with tf.name_scope(name+"_relu"):
        c1_out=leaky_relu(x3)
        #c1_out=tf.nn.leaky_relu(x3)

    return c1_out
Developer: chengyake, Project: karch, Lines: 25, Source: main.py


Example 10: _psd_mask

def _psd_mask(x):
  """Computes whether each square matrix in the input is positive semi-definite.

  Args:
    x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.

  Returns:
    mask: A floating-point `Tensor` of shape `[B1, ... Bn]`.  Each
      scalar is 1 if the corresponding matrix was PSD, otherwise 0.
  """
  # Allegedly
  # https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite
  # it is more efficient to test for positive semi-definiteness by
  # trying to compute the Cholesky decomposition -- the matrix is PSD
  # if you succeed and not PSD if you fail.  However, TensorFlow's
  # Cholesky raises an exception if _any_ of the input matrices are
  # not PSD, from which I don't know how to extract _which ones_, so I
  # proceed by explicitly computing all the eigenvalues and checking
  # whether they are all positive or not.
  #
  # Also, as was discussed in the answer, it is somewhat dangerous to
  # treat SPD-ness as binary in floating-point arithmetic. Cholesky
  # factorization can complete and 'look' like everything is fine
  # (e.g., O(1) entries and a diagonal of all ones) but the matrix can
  # have an exponential condition number.
  eigenvalues, _ = tf.self_adjoint_eig(x)
  return tf.cast(
      tf.reduce_min(eigenvalues, axis=-1) >= 0, dtype=x.dtype)
Developer: asudomoeva, Project: probability, Lines: 28, Source: correlation_matrix_volumes_lib.py
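
A short usage sketch for the function above: build a batch holding one PSD matrix and one indefinite matrix, then inspect the mask (matrix values chosen for illustration):

import tensorflow as tf

batch = tf.constant([[[1.0, 0.0],
                      [0.0, 1.0]],   # identity: PSD
                     [[1.0, 2.0],
                      [2.0, 1.0]]])  # eigenvalues 3 and -1: not PSD

mask = _psd_mask(batch)

with tf.Session() as sess:
    print(sess.run(mask))  # [1. 0.]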


Example 11: disjunction_of_literals

def disjunction_of_literals(literals, label="no_label"):
    list_of_literal_tensors = [lit.tensor for lit in literals]
    literals_tensor = tf.concat(1,list_of_literal_tensors)
    if default_tnorm == "product":
        result = 1.0-tf.reduce_prod(1.0-literals_tensor, 1, keep_dims=True)
    if default_tnorm == "yager2":
        result = tf.minimum(1.0, tf.sqrt(tf.reduce_sum(tf.square(literals_tensor), 1, keep_dims=True)))
    if default_tnorm == "luk":
        print "data aggregator is lukas"
        result = tf.minimum(1.0, tf.reduce_sum(literals_tensor, 1, keep_dims=True))
        PR(result)
    if default_tnorm == "goedel":
        result = tf.reduce_max(literals_tensor, 1, keep_dims=True, name=label)
    if default_aggregator == "product":
        return tf.reduce_prod(result, keep_dims=True)
    if default_aggregator == "mean":
        print "data aggregator is mean"
        return tf.reduce_mean(result, keep_dims=True, name=label)
    if default_aggregator == "gmean":
        return tf.exp(tf.mul(tf.reduce_sum(tf.log(result), keep_dims=True),
                             tf.inv(tf.to_float(tf.size(result)))), name=label)
    if default_aggregator == "hmean":
        print "data aggregator is hmean"
        return tf.div(tf.to_float(tf.size(result)), tf.reduce_sum(tf.inv(result), keep_dims=True))
    if default_aggregator == "min":
        print "data aggregator is min"
        return tf.reduce_min(result, keep_dims=True, name=label)
    if default_aggregator == "qmean":
        print "data aggregator is qmean"
        return tf.sqrt(tf.reduce_mean(tf.square(result), keep_dims=True), name=label)
    if default_aggregator == "cmean":
        print "data aggregator is cmean"
        return tf.pow(tf.reduce_mean(tf.pow(result, 3), keep_dims=True), tf.inv(tf.to_float(3)), name=label)
Developer: ivanDonadello, Project: knowPic, Lines: 33, Source: logictensornetworks.py


Example 12: __init__

 def __init__(self, label, clauses, save_path=""):
     print "defining the knowledge base", label
     self.label = label
     self.clauses = clauses
     self.parameters = [par for cl in self.clauses for par in cl.parameters]
     if not self.clauses:
         self.tensor = tf.constant(1.0)
     else:
         clauses_value_tensor = tf.concat(0, [cl.tensor for cl in clauses])
         if default_clauses_aggregator == "min":
             print "clauses aggregator is min"
             self.tensor = tf.reduce_min(clauses_value_tensor)
         if default_clauses_aggregator == "mean":
             print "clauses aggregator is mean"
             self.tensor = tf.reduce_mean(clauses_value_tensor)
         if default_clauses_aggregator == "hmean":
             print "clauses aggregator is hmean"
             self.tensor = tf.div(tf.to_float(tf.size(clauses_value_tensor)), tf.reduce_sum(tf.inv(clauses_value_tensor), keep_dims=True))
         if default_clauses_aggregator == "wmean":
             print "clauses aggregator is weighted mean"
             weights_tensor = tf.constant([cl.weight for cl in clauses])
             self.tensor = tf.div(tf.reduce_sum(tf.mul(weights_tensor, clauses_value_tensor)), tf.reduce_sum(weights_tensor))
     if default_positive_fact_penality != 0:
         self.loss = smooth(self.parameters) + \
                     tf.mul(default_positive_fact_penality, self.penalize_positive_facts()) - \
                     PR(self.tensor)
     else:
         self.loss = smooth(self.parameters) - PR(self.tensor)
     self.save_path = save_path
     self.train_op = train_op(self.loss, default_optimizer)
     self.saver = tf.train.Saver(max_to_keep=20)
     print "knowledge base", label, "is defined"
Developer: ivanDonadello, Project: knowPic, Lines: 32, Source: logictensornetworks.py


Example 13: get_losses

                def get_losses(obj_mask):
                  """Get motion constraint loss."""
                  # Find height of segment.
                  coords = tf.where(tf.greater(  # Shape (num_true, 2=yx)
                      obj_mask[:, :, 0], tf.constant(0.5, dtype=tf.float32)))
                  y_max = tf.reduce_max(coords[:, 0])
                  y_min = tf.reduce_min(coords[:, 0])
                  seg_height = y_max - y_min
                  f_y = self.intrinsic_mat[i, 0, 1, 1]
                  approx_depth = ((f_y * self.global_scale_var) /
                                  tf.to_float(seg_height))
                  reference_pred = tf.boolean_mask(
                      depth_pred, tf.greater(
                          tf.reshape(obj_mask[:, :, 0],
                                     (self.img_height, self.img_width, 1)),
                          tf.constant(0.5, dtype=tf.float32)))

                  # Establish loss on approx_depth, a scalar, and
                  # reference_pred, our dense prediction. Normalize both to
                  # prevent degenerative depth shrinking.
                  global_mean_depth_pred = tf.reduce_mean(depth_pred)
                  reference_pred /= global_mean_depth_pred
                  approx_depth /= global_mean_depth_pred
                  spatial_err = tf.abs(reference_pred - approx_depth)
                  mean_spatial_err = tf.reduce_mean(spatial_err)
                  return mean_spatial_err
Developer: pcm17, Project: models, Lines: 26, Source: model.py


Example 14: _summarize_vars_and_grads

def _summarize_vars_and_grads(grads_and_vars):
  tf.logging.info('Trainable variables:')
  tf.logging.info('-' * 60)
  for grad, var in grads_and_vars:
    tf.logging.info(var)

    def tag(name, v=var):
      return v.op.name + '_' + name

    # Variable summary
    mean = tf.reduce_mean(var)
    tf.summary.scalar(tag('mean'), mean)
    with tf.name_scope(tag('stddev')):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar(tag('stddev'), stddev)
    tf.summary.scalar(tag('max'), tf.reduce_max(var))
    tf.summary.scalar(tag('min'), tf.reduce_min(var))
    tf.summary.histogram(tag('histogram'), var)

    # Gradient summary
    if grad is not None:
      if isinstance(grad, tf.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad

      tf.summary.histogram(tag('gradient'), grad_values)
      tf.summary.scalar(tag('gradient_norm'), tf.global_norm([grad_values]))
    else:
      tf.logging.info('Var %s has no gradient', var.op.name)
Developer: hoysasulee, Project: models, Lines: 30, Source: layers.py


Example 15: histogram

  def histogram(self, x, value_range=None, nbins=None, name=None):
    """Return histogram of values.

    Given the tensor `x`, this operation returns a rank-1 histogram
    counting the number of entries in `x` that fell into every bin. The
    bins are equal width and determined by the arguments `value_range` and
    `nbins`.

    Args:
      x: 1D numeric `Tensor` of items to count.
      value_range:  Shape [2] `Tensor`. `new_values <= value_range[0]` will be
        mapped to `hist[0]`, `values >= value_range[1]` will be mapped to
        `hist[-1]`. Must be same dtype as `x`.
      nbins:  Scalar `int32 Tensor`.  Number of histogram bins.
      name: Python `str` name prefixed to Ops created by this class.

    Returns:
      counts: 1D `Tensor` of counts, i.e.,
        `counts[i] = sum{ edges[i-1] <= values[j] < edges[i] : j }`.
      edges: 1D `Tensor` characterizing intervals used for counting.
    """
    with tf.name_scope(name, "histogram", [x]):
      x = tf.convert_to_tensor(x, name="x")
      if value_range is None:
        value_range = [tf.reduce_min(x), 1 + tf.reduce_max(x)]
      value_range = tf.convert_to_tensor(value_range, name="value_range")
      lo = value_range[0]
      hi = value_range[1]
      if nbins is None:
        nbins = tf.to_int32(hi - lo)
      delta = (hi - lo) / tf.cast(nbins, dtype=value_range.dtype.base_dtype)
      edges = tf.range(
          start=lo, limit=hi, delta=delta, dtype=x.dtype.base_dtype)
      counts = tf.histogram_fixed_width(x, value_range=value_range, nbins=nbins)
      return counts, edges
Developer: asudomoeva, Project: probability, Lines: 35, Source: test_util.py
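
Here reduce_min only supplies the default value_range: [reduce_min(x), 1 + reduce_max(x)], whose top edge strictly exceeds every value so that the maximum lands inside the last bin. A self-contained sketch of that default (data invented for illustration):

import tensorflow as tf

x = tf.constant([0.0, 1.0, 1.0, 2.0, 4.0])
value_range = [tf.reduce_min(x), 1 + tf.reduce_max(x)]  # [0.0, 5.0]
counts = tf.histogram_fixed_width(x, value_range=value_range, nbins=5)

with tf.Session() as sess:
    print(sess.run(counts))  # [1 2 1 0 1]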


Example 16: kmeans

def kmeans(data, lr, K, epochs=800):
    """
    example of kmeans algorithm
    """
    M, D = data.shape
    train_data = data[:2 * M // 3]
    valid_data = data[2 * M // 3:]

    g = tf.Graph() 
    with g.as_default():
        x = tf.placeholder(tf.float32, shape=(None, D))
        mu = tf.Variable(tf.truncated_normal([K, D], dtype=tf.float32))

        cost = tf.reduce_sum(tf.reduce_min(utils.L2_dist(x, mu), 1))
        optimizer = tf.train.AdamOptimizer(lr, beta1=0.9, beta2=0.99, epsilon=1e-5).minimize(cost)


    with tf.Session(graph=g) as session:
        tf.initialize_all_variables().run()

        l = []
        for epoch in range(epochs):
            x_batch = train_data
            feed_dict = {x: x_batch}
            _, c = session.run([optimizer, cost], feed_dict=feed_dict)
            l.append(c)
            if epoch % 100 == 0:
                print "Epoch %03d, training loss: %.1f" % (epoch, c)
        feed_dict = {x:valid_data}
        c, mu = session.run([cost, mu], feed_dict=feed_dict)
        print "Validation loss: %.1f" % c

    return  {'training_loss': l,
             'validation_loss': c,
             'mu': mu}
Developer: tianrui, Project: labswithjay, Lines: 35, Source: lab2.py


Example 17: writeHistogramSummary

def writeHistogramSummary(label, tensor):
  with tf.name_scope(label):
    print "histogram ", label, " shape:", tensor.get_shape()
    tf.scalar_summary("%s max: " % label, tf.reduce_max(tensor))
    tf.scalar_summary("%s min: " % label, tf.reduce_min(tensor))
    tf.scalar_summary("%s mean: " % label, tf.reduce_mean(tensor))
    tf.histogram_summary(label, tensor)
Developer: vikram-r, Project: cifar-10-classification, Lines: 7, Source: trainer.py


Example 18: when_nonempty

    def when_nonempty():
      min_ = tf.reduce_min(data)
      max_ = tf.reduce_max(data)
      range_ = max_ - min_
      is_singular = tf.equal(range_, 0)

      def when_nonsingular():
        bucket_width = range_ / tf.cast(bucket_count, tf.float64)
        offsets = data - min_
        bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
                                 dtype=tf.int32)
        clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
        one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
        bucket_counts = tf.cast(tf.reduce_sum(one_hots, axis=0),
                                dtype=tf.float64)
        edges = tf.lin_space(min_, max_, bucket_count + 1)
        left_edges = edges[:-1]
        right_edges = edges[1:]
        return tf.transpose(tf.stack(
            [left_edges, right_edges, bucket_counts]))

      def when_singular():
        center = min_
        bucket_starts = tf.stack([center - 0.5])
        bucket_ends = tf.stack([center + 0.5])
        bucket_counts = tf.stack([tf.cast(tf.size(data), tf.float64)])
        return tf.transpose(
            tf.stack([bucket_starts, bucket_ends, bucket_counts]))

      return tf.cond(is_singular, when_singular, when_nonsingular)
Developer: jlewi, Project: tensorboard, Lines: 30, Source: summary.py


Example 19: model_train

def model_train(k):
    data = np.float32(np.load('data100D.npy'))
    sample_num = data.shape[0]
    dim = data.shape[1]
    cluster = k

    tf_data = tf.placeholder(tf.float32, shape=(sample_num, dim))
    tf_centroids = tf.Variable(tf.truncated_normal([k, dim], mean=0.0, stddev=1.0))
    tf_min_index = tf.argmin(eucl_distance(tf_data, tf_centroids), dimension = 1)
    tf_loss = tf.reduce_sum(tf.reduce_min(eucl_distance(tf_data, tf_centroids),1,keep_dims=True))
    optimizer = tf.train.AdamOptimizer(0.01,0.9,0.99,1e-5).minimize(tf_loss)

    sess = tf.InteractiveSession()

    init = tf.initialize_all_variables()
    init.run()

    epoch = 1000
    loss_list = []
    for i in range(epoch):
        feed_dict = {tf_data: data}
        _, loss, assignments, centroids = sess.run([optimizer, tf_loss, tf_min_index, tf_centroids], feed_dict = feed_dict)
        loss_list.append(loss)
        if i % 50 == 0:
            print("Loss at step %d: %f" % (i, loss))

    cal_percentage(assignments, k)

    plt.title('the loss vs the number of updates 100-D')
    plt.xlabel('the number of updates')
    plt.ylabel('the value of the loss')
    plt.plot(range(len(loss_list)), loss_list)
    plt.show()
    return loss
Developer: z23han, Project: ECE521-Inference-Algorithm-and-Machine-Learning, Lines: 34, Source: q_2_2_4_kmeans.py


Example 20: summary

def summary(tensor, summary_type=['mean', 'stddev', 'max', 'min', 'sparsity', 'histogram']):
    """ Attach a lot of summaries to a Tensor. """

    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % 'tower', '', tensor.name)
    tensor_name = re.sub(':', '-', tensor_name)

    with tf.name_scope('summary_' + tensor_name):
        summaries = []
        if len(tensor.get_shape()) == 0:
            summaries.append(tf.summary.scalar(tensor_name, tensor))
        else:
            if 'mean' in summary_type:
                mean = tf.reduce_mean(tensor)
                summaries.append(tf.summary.scalar(tensor_name + '/mean', mean))
            if 'stddev' in summary_type:
                mean = tf.reduce_mean(tensor)
                stddev = tf.sqrt(tf.reduce_mean(tf.square(tensor - mean)))
                summaries.append(tf.summary.scalar(tensor_name + '/stddev', stddev))
            if 'max' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/max', tf.reduce_max(tensor)))
            if 'min' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/min', tf.reduce_min(tensor)))
            if 'sparsity' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(tensor)))
            if 'histogram' in summary_type:
                summaries.append(tf.summary.histogram(tensor_name, tensor))
        return tf.summary.merge(summaries)
Developer: BenJamesbabala, Project: CycleGAN-Tensorflow-Simple, Lines: 29, Source: ops.py
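
A usage sketch, assuming the summary helper above is in scope (the helper itself also needs import re, which the excerpt omits); only the requested statistics are attached, and 'min' is the one backed by tf.reduce_min:

import tensorflow as tf

w = tf.Variable(tf.truncated_normal([64, 32]), name='weights')
merged = summary(w, summary_type=['mean', 'min', 'max'])  # merged summary op for w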



Note: The tensorflow.reduce_min examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or reuse should follow each project's License. Do not reproduce without permission.

