Python tensorflow.shape Function Code Examples


This article collects typical usage examples of the tensorflow.shape function in Python. If you have been wondering what tf.shape does, how to call it, or what it looks like in real code, the curated examples below may help.



The following presents 20 code examples of the shape function, ordered roughly by popularity.
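Before the examples, it helps to recall the distinction the snippets rely on: tf.shape returns the dynamic shape as an int32 tensor evaluated at run time, while Tensor.get_shape() returns the static shape known when the graph is built. A minimal sketch in the TF 1.x style used throughout (the placeholder x is purely illustrative):

import numpy as np
import tensorflow as tf  # TF 1.x, matching the examples below

x = tf.placeholder(tf.float32, shape=[None, 128])  # batch dimension unknown at graph-construction time
static_shape = x.get_shape().as_list()             # [None, 128] -- static, may contain None
dynamic_shape = tf.shape(x)                        # int32 tensor, resolved when the graph runs

with tf.Session() as sess:
    print(static_shape)                                                # [None, 128]
    print(sess.run(dynamic_shape, feed_dict={x: np.zeros((4, 128))}))  # [  4 128]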

Example 1: attention_bah_block

def attention_bah_block(hidden_for_sketch, hidden_for_attn_alignment, attention_depth):
    """ It is a implementation of the Bahdanau et al. attention mechanism with concat score and the constrained softmax (csoftmax).
        Based on the papers:
            https://arxiv.org/abs/1409.0473 "Neural Machine Translation by Jointly Learning to Align and Translate"
            https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers"
    Args:
        hidden_for_sketch: A tensorflow tensor for a sketch computing. This tensor have dimensionality [None, max_num_tokens, sketch_hidden_size]
        hidden_for_attn_alignment: A tensorflow tensor is aligned for output during a performing. This tensor have dimensionality [None, max_num_tokens, hidden_size_for_attn_alignment]
        key: A tensorflow tensor with dimensionality [None, None, key_size]
        attention_depth: Number of usage csoftmax
    Returns:
        final_aligned_hiddens: Tensor at the output with dimensionality [1, attention_depth, hidden_size_for_attn_alignment]
    """
    with tf.name_scope('attention_block'):
        sketch_dims = tf.shape(hidden_for_sketch)
        batch_size = sketch_dims[0]
        num_tokens = sketch_dims[1]
        hidden_size = sketch_dims[2]

        attn_alignment_dims = tf.shape(hidden_for_attn_alignment)
        attn_alignment_hidden_size = attn_alignment_dims[2]

        sketches = [tf.zeros(shape=[batch_size, hidden_size], dtype=tf.float32)]
        aligned_hiddens = []
        cum_att = tf.zeros(shape=[batch_size, num_tokens])  # cumulative attention
        for i in range(attention_depth):
            sketch, cum_att_, aligned_hidden = attention_bah_step(hidden_for_sketch, hidden_for_attn_alignment, sketches[-1], cum_att)
            sketches.append(sketch)  # keep the sketch history
            aligned_hiddens.append(aligned_hidden)  # collect aligned hidden states
            cum_att += cum_att_
        final_aligned_hiddens = tf.reshape(tf.transpose(tf.stack(aligned_hiddens), [1, 0, 2]),[1, attention_depth, attn_alignment_hidden_size])
    return final_aligned_hiddens
Author: RileyShe, Project: DeepPavlov, Lines: 32, Source: tf_csoftmax_attention.py


Example 2: dist_info_sym

 def dist_info_sym(self, obs_var, state_info_vars):
     n_batches = tf.shape(obs_var)[0]
     n_steps = tf.shape(obs_var)[1]
     obs_var = tf.reshape(obs_var, tf.pack([n_batches, n_steps, -1]))
     obs_var = tf.cast(obs_var, tf.float32)
     if self.state_include_action:
         prev_action_var = state_info_vars["prev_action"]
         prev_action_var = tf.cast(prev_action_var, tf.float32)
         all_input_var = tf.concat(2, [obs_var, prev_action_var])
     else:
         all_input_var = obs_var
     if self.feature_network is None:
         return dict(
             prob=L.get_output(
                 self.prob_network.output_layer,
                 {self.l_input: all_input_var}
             )
         )
     else:
         flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
         return dict(
             prob=L.get_output(
                 self.prob_network.output_layer,
                 {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}
             )
         )
Author: flyers, Project: rllab, Lines: 26, Source: categorical_lstm_policy.py
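Note that this snippet predates TensorFlow 1.0: tf.pack was later renamed tf.stack, and the argument order of tf.concat changed from (axis, values) to (values, axis). The same applies to Examples 3, 11, and 15 below. A sketch of the equivalent calls under the TF >= 1.0 API (illustrative, not from the rllab source):

# TF >= 1.0 equivalents of the deprecated calls above (illustrative sketch)
obs_var = tf.reshape(obs_var, tf.stack([n_batches, n_steps, -1]))  # tf.pack -> tf.stack
all_input_var = tf.concat([obs_var, prev_action_var], axis=2)      # axis is now passed second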


Example 3: __init__

    def __init__(self, name, input_shape, output_dim, hidden_dim, hidden_nonlinearity=tf.nn.relu,
                 lstm_layer_cls=L.LSTMLayer,
                 output_nonlinearity=None, input_var=None, input_layer=None, forget_bias=1.0, use_peepholes=False,
                 layer_args=None):
        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, None) + input_shape, input_var=input_var, name="input")
            else:
                l_in = input_layer
            l_step_input = L.InputLayer(shape=(None,) + input_shape, name="step_input")
            # contains previous hidden and cell state
            l_step_prev_state = L.InputLayer(shape=(None, hidden_dim * 2), name="step_prev_state")
            if layer_args is None:
                layer_args = dict()
            l_lstm = lstm_layer_cls(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity,
                                    hidden_init_trainable=False, name="lstm", forget_bias=forget_bias,
                                    cell_init_trainable=False, use_peepholes=use_peepholes, **layer_args)
            l_lstm_flat = L.ReshapeLayer(
                l_lstm, shape=(-1, hidden_dim),
                name="lstm_flat"
            )
            l_output_flat = L.DenseLayer(
                l_lstm_flat,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output_flat"
            )
            l_output = L.OpLayer(
                l_output_flat,
                op=lambda flat_output, l_input:
                tf.reshape(flat_output, tf.pack((tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
                shape_op=lambda flat_output_shape, l_input_shape:
                (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
                extras=[l_in],
                name="output"
            )
            l_step_state = l_lstm.get_step_layer(l_step_input, l_step_prev_state, name="step_state")
            l_step_hidden = L.SliceLayer(l_step_state, indices=slice(hidden_dim), name="step_hidden")
            l_step_cell = L.SliceLayer(l_step_state, indices=slice(hidden_dim, None), name="step_cell")
            l_step_output = L.DenseLayer(
                l_step_hidden,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                W=l_output_flat.W,
                b=l_output_flat.b,
                name="step_output"
            )

            self._l_in = l_in
            self._hid_init_param = l_lstm.h0
            self._cell_init_param = l_lstm.c0
            self._l_lstm = l_lstm
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_cell = l_step_cell
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim
Author: QuantCollective, Project: maml_rl, Lines: 60, Source: network.py


Example 4: fast_rcnn_minibatch

    def fast_rcnn_minibatch(self, reference_boxes):
        with tf.variable_scope('fast_rcnn_minibatch'):

            reference_boxes_mattached_gtboxes, object_mask, label = \
                self.fast_rcnn_find_positive_negative_samples(reference_boxes)

            positive_indices = tf.reshape(tf.where(tf.not_equal(object_mask, 0.)), [-1])

            num_of_positives = tf.minimum(tf.shape(positive_indices)[0],
                                          tf.cast(self.fast_rcnn_minibatch_size*self.fast_rcnn_positives_ratio, tf.int32))

            positive_indices = tf.random_shuffle(positive_indices)
            positive_indices = tf.slice(positive_indices, begin=[0], size=[num_of_positives])

            negative_indices = tf.reshape(tf.where(tf.equal(object_mask, 0.)), [-1])
            num_of_negatives = tf.minimum(tf.shape(negative_indices)[0],
                                          self.fast_rcnn_minibatch_size - num_of_positives)

            negative_indices = tf.random_shuffle(negative_indices)
            negative_indices = tf.slice(negative_indices, begin=[0], size=[num_of_negatives])

            minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0)
            minibatch_indices = tf.random_shuffle(minibatch_indices)

            minibatch_reference_boxes_mattached_gtboxes = tf.gather(reference_boxes_mattached_gtboxes,
                                                                    minibatch_indices)
            object_mask = tf.gather(object_mask, minibatch_indices)
            label = tf.gather(label, minibatch_indices)
            label_one_hot = tf.one_hot(label, self.num_classes + 1)

            return minibatch_indices, minibatch_reference_boxes_mattached_gtboxes, object_mask, label_one_hot
Author: mbossX, Project: RRPN_FPN_Tensorflow, Lines: 31, Source: build_fast_rcnn.py


Example 5: _conv_layer

	def _conv_layer(self, name, input_var, stride, in_channels, out_channels, options = {}):
		activation = options.get('activation', 'relu')
		dropout = options.get('dropout', None)
		padding = options.get('padding', 'SAME')
		batchnorm = options.get('batchnorm', True)
		transpose = options.get('transpose', False)

		with tf.variable_scope(name) as scope:
			if not transpose:
				filter_shape = [KERNEL_SIZE, KERNEL_SIZE, in_channels, out_channels]
			else:
				filter_shape = [KERNEL_SIZE, KERNEL_SIZE, out_channels, in_channels]
			kernel = tf.get_variable(
				'weights',
				shape=filter_shape,
				initializer=tf.truncated_normal_initializer(stddev=math.sqrt(2.0 / KERNEL_SIZE / KERNEL_SIZE / in_channels)),
				dtype=tf.float32
			)
			biases = tf.get_variable(
				'biases',
				shape=[out_channels],
				initializer=tf.constant_initializer(0.0),
				dtype=tf.float32
			)
			if not transpose:
				output = tf.nn.bias_add(
					tf.nn.conv2d(
						input_var,
						kernel,
						[1, stride, stride, 1],
						padding=padding
					),
					biases
				)
			else:
				batch = tf.shape(input_var)[0]
				side = tf.shape(input_var)[1]
				output = tf.nn.bias_add(
					tf.nn.conv2d_transpose(
						input_var,
						kernel,
						[batch, side * stride, side * stride, out_channels],
						[1, stride, stride, 1],
						padding=padding
					),
					biases
				)
			if batchnorm:
				output = tf.contrib.layers.batch_norm(output, center=True, scale=True, is_training=self.is_training, decay=0.99)
			if dropout is not None:
				output = tf.nn.dropout(output, keep_prob=1-dropout)

			if activation == 'relu':
				return tf.nn.relu(output, name=scope.name)
			elif activation == 'sigmoid':
				return tf.nn.sigmoid(output, name=scope.name)
			elif activation == 'none':
				return output
			else:
				raise Exception('invalid activation {} specified'.format(activation))
Author: happog, Project: roadtracer, Lines: 60, Source: model.py


Example 6: test_get_correct_box_encoding_and_class_prediction_shapes

  def test_get_correct_box_encoding_and_class_prediction_shapes(self):
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    proposal_boxes = tf.random_normal([4, 2, 4], dtype=tf.float32)
    rfcn_box_predictor = box_predictor.RfcnBoxPredictor(
        is_training=False,
        num_classes=2,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        num_spatial_bins=[3, 3],
        depth=4,
        crop_size=[12, 12],
        box_code_size=4
    )
    box_predictions = rfcn_box_predictor.predict(
        image_features, num_predictions_per_location=1, scope='BoxPredictor',
        proposal_boxes=proposal_boxes)
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    class_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       class_predictions_shape) = sess.run(
           [tf.shape(box_encodings),
            tf.shape(class_predictions_with_background)])
      self.assertAllEqual(box_encodings_shape, [8, 1, 2, 4])
      self.assertAllEqual(class_predictions_shape, [8, 1, 3])
Author: DaRealLazyPanda, Project: models, Lines: 28, Source: box_predictor_test.py


Example 7: test_get_boxes_for_five_aspect_ratios_per_location_fully_convolutional

  def test_get_boxes_for_five_aspect_ratios_per_location_fully_convolutional(
      self):
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features, num_predictions_per_location=5, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
      self.assertAllEqual(objectness_predictions_shape,
                          [4, expected_num_anchors, 1])
Author: DaRealLazyPanda, Project: models, Lines: 34, Source: box_predictor_test.py


Example 8: batch_displacement_warp2d

def batch_displacement_warp2d(imgs, vector_fields):
    """
    warp images by free form transformation

    Parameters
    ----------
    imgs : tf.Tensor
        images to be warped
        [n_batch, xlen, ylen, n_channel]
    vector_fields : tf.Tensor
        [n_batch, 2, xlen, ylen]

    Returns
    -------
    output : tf.Tensor
        warped images
        [n_batch, xlen, ylen, n_channel]
    """
    n_batch = tf.shape(imgs)[0]
    xlen = tf.shape(imgs)[1]
    ylen = tf.shape(imgs)[2]

    grids = batch_mgrid(n_batch, xlen, ylen)

    T_g = grids + vector_fields
    output = batch_warp2d(imgs, T_g)
    return output
Author: Ryo-Ito, Project: spatial_transformer_network, Lines: 27, Source: displacement.py


Example 9: cross_entropy

    def cross_entropy(u, label_u, alpha=0.5, normed=False):

        label_ip = tf.cast(
            tf.matmul(label_u, tf.transpose(label_u)), tf.float32)
        s = tf.clip_by_value(label_ip, 0.0, 1.0)

        # compute balance param
        # s_t \in {-1, 1}
        s_t = tf.multiply(tf.add(s, tf.constant(-0.5)), tf.constant(2.0))
        sum_1 = tf.reduce_sum(s)
        sum_all = tf.reduce_sum(tf.abs(s_t))
        balance_param = tf.add(tf.abs(tf.add(s, tf.constant(-1.0))),
                               tf.multiply(tf.div(sum_all, sum_1), s))

        if normed:
            # ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
            ip_1 = tf.matmul(u, tf.transpose(u))

            def reduce_shaper(t):
                return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])
            mod_1 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(u)),
                                      reduce_shaper(tf.square(u)), transpose_b=True))
            ip = tf.div(ip_1, mod_1)
        else:
            ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
        ones = tf.ones([tf.shape(u)[0], tf.shape(u)[0]])
        return tf.reduce_mean(tf.multiply(tf.log(ones + tf.exp(alpha * ip)) - s * alpha * ip, balance_param))
Author: AllenMao, Project: DeepHash, Lines: 27, Source: dhn.py


Example 10: autoencoder_5_5

def autoencoder_5_5(net_in, LATENT_DIMS):
    data_shape = net_in.get_shape().as_list()
    NUM_CHANNELS = data_shape[4]
    strides = [1, 2, 2, 2, 1]
    # Define all encoding layers
    c0 = 5
    c1 = 5
    padding="VALID"
    conv0, W0 = conv3d_layer("conv0",net_in,[c0, c0, c0, NUM_CHANNELS, 64], strides=strides, padding=padding)
    conv1, W1 = conv3d_layer("conv1",conv0,[c1, c1, c1, 64, 128], strides=strides, padding=padding)   

    # Resolve input dim into fc0 from pool1-output
    #LATENT_DIMS = 100
    shape = conv1.get_shape().as_list()
    print "conv1 shape: ", shape
    fc0_inputdim = shape[1]*shape[2]*shape[3]*shape[4]
    fc0 = fc_layer("fc0", conv1, [fc0_inputdim, LATENT_DIMS])

    # Start going back by first reshaping into 4D image data again
    # Then two sets of depooling and convolutions
    fc1 = fc_layer("fc1", fc0, [LATENT_DIMS,fc0_inputdim])
    fc1_reshaped = tf.reshape(fc1, conv1.get_shape().as_list())
    
    deconv0 = conv3d_layer_transpose("deconv0", fc1_reshaped, W1, output_shape=tf.shape(conv0), strides=strides, padding=padding)
    deconv1 = conv3d_layer_transpose("deconv1", deconv0, W0, output_shape=tf.shape(net_in), strides=strides, padding=padding)
    
    return deconv1
Author: hassnov, Project: Conv3D, Lines: 27, Source: autoencoderutils.py


Example 11: gauss_kl

def gauss_kl(q_mu, q_sqrt, K):
    """
    Compute the KL divergence from

          q(x) = N(q_mu, q_sqrt^2)
    to
          p(x) = N(0, K)

    We assume multiple independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.

    q_mu is a matrix, each column contains a mean.

    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
        matrix of the covariance of q.

    K is a positive definite matrix: the covariance of p.
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    num_latent = tf.cast(tf.shape(q_sqrt)[2], float_type)
    KL += num_latent * 0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.reduce_prod(tf.shape(q_sqrt)[1:]), float_type)  # constant term
    Lq = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # force lower triangle
    KL += -0.5*tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(Lq))))  # logdet
    L_tiled = tf.tile(tf.expand_dims(L, 0), tf.pack([tf.shape(Lq)[0], 1, 1]))
    LiLq = tf.matrix_triangular_solve(L_tiled, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    return KL
Author: GPflow, Project: GPflow, Lines: 30, Source: kullback_leiblers.py
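For reference, gauss_kl computes the standard closed-form KL divergence between multivariate Gaussians. With q = N(mu, S), p = N(0, K), and dimensionality d, each summand below corresponds to one line in the code above (Mahalanobis, prior log-det, constant, log-det of S, trace); Example 16 later specializes this to a diagonal S:

\mathrm{KL}\big(\mathcal{N}(\mu, S)\,\|\,\mathcal{N}(0, K)\big)
    = \tfrac{1}{2}\Big(\mu^\top K^{-1}\mu + \operatorname{tr}(K^{-1}S) - d + \log\det K - \log\det S\Big)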


Example 12: autoencoder_3_3_3

def autoencoder_3_3_3(net_in, LATENT_DIMS):
    data_shape = net_in.get_shape().as_list()
    NUM_CHANNELS = data_shape[4]
    strides = [1, 2, 2, 2, 1]
    # Define all encoding layers
    c0 = 3
    c1 = 3
    c2 = 3
    conv0, W0 = conv3d_layer("aeconv0",net_in,[c0, c0, c0, NUM_CHANNELS, 32], strides=strides)
    conv1, W1 = conv3d_layer("aeconv1",conv0,[c1, c1, c1, 32, 64], strides=strides)   
    conv2, W2 = conv3d_layer("aeconv2",conv1,[c2, c2, c2, 64, 64], strides=[1, 1, 1, 1, 1])
    
    #W0 = tf.Variable(xavier_conv3_init(W0_old.get_shape().as_list()), name='weights')
    #W1 = tf.Variable(xavier_conv3_init(W1_old.get_shape().as_list()), name='weights')
    #W2 = tf.Variable(xavier_conv3_init(W2_old.get_shape().as_list()), name='weights')
    
    # Resolve input dim into fc0 from pool1-output
    #LATENT_DIMS = 100
    shape = conv2.get_shape().as_list()
    print "conv2 shape: ", shape
    fc0_inputdim = shape[1]*shape[2]*shape[3]*shape[4]
    fc0 = fc_layer("aefc0", conv2, [fc0_inputdim, LATENT_DIMS])

    # Start going back by first reshaping into 4D image data again
    # Then two sets of depooling and convolutions
    fc1 = fc_layer("aefc1", fc0, [LATENT_DIMS,fc0_inputdim])
    fc1_reshaped = tf.reshape(fc1, conv2.get_shape().as_list())
    
    deconv0 = conv3d_layer_transpose("aedeconv0", fc1_reshaped, W2, output_shape=tf.shape(conv1), strides=[1, 1, 1, 1, 1])
    deconv1 = conv3d_layer_transpose("aedeconv1", deconv0, W1, output_shape=tf.shape(conv0), strides=strides)
    deconv2 = conv3d_layer_transpose("aedeconv2", deconv1, W0, output_shape=tf.shape(net_in), strides=strides)
    
    return deconv2
Author: hassnov, Project: Conv3D, Lines: 33, Source: autoencoderutils.py


Example 13: ae_latent_sample_beam

def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams):
  """Sample from the latent space in the autoencoder."""
  vocab_size = 2**hparams.z_size
  beam_size = 1  # TODO(lukaszkaiser): larger beam sizes seem to work badly.
  inputs = tf.tile(inputs, [beam_size, 1, 1])
  ed = tf.tile(ed, [beam_size, 1, 1, 1])

  def symbols_to_logits_fn(ids):
    """Go from ids to logits."""
    ids = tf.expand_dims(ids, axis=2)  # Ids start with added all-zeros.
    latents_discrete = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0]])

    with tf.variable_scope(tf.get_variable_scope(), reuse=False):
      latents_dense = embed(latents_discrete)
      latents_pred = decode_transformer(
          inputs, ed, latents_dense, hparams, "extra")
      logits = tf.layers.dense(latents_pred, vocab_size, name="extra_logits")
      current_output_position = common_layers.shape_list(ids)[1] - 1
      logits = logits[:, current_output_position, :, :]
    return tf.squeeze(logits, axis=[1])

  initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32)
  length = tf.shape(latents_dense_in)[1]
  ids, _ = beam_search.beam_search(
      symbols_to_logits_fn, initial_ids, beam_size, length,
      vocab_size, alpha=0.0, eos_id=-1, stop_early=False)

  res = tf.expand_dims(ids[:, 0, :], axis=2)  # Pick first beam.
  return res[:, 1:]  # Remove the added all-zeros from ids.
Author: kltony, Project: tensor2tensor, Lines: 29, Source: transformer_vae.py


Example 14: loop

    def loop(q_, mask, mass_, found_):
        q_list = tf.dynamic_partition(q_, mask, 2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(q_)[0]), mask, 2)  # partition 0 if False,
        #  partition 1 if True

        p = q_list[1] * (1.0 - mass_) / tf.reduce_sum(q_list[1])
        p_new = tf.dynamic_stitch(condition_indices, [q_list[0], p])

        # condition verification and mask modification
        less_mask = tf.cast(tf.less(u, p_new), tf.int32)  # 0 when u is bigger than p, 1 when u is less than p
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(p_new)[0]), less_mask,
                                                 2)  # 0 when u is bigger than p, 1 when u is less than p

        split_p_new = tf.dynamic_partition(p_new, less_mask, 2)
        split_u = tf.dynamic_partition(u, less_mask, 2)

        alpha = tf.dynamic_stitch(condition_indices, [split_p_new[0], split_u[1]])
        mass_ += tf.reduce_sum(split_u[1])

        mask = mask * (tf.ones_like(less_mask) - less_mask)

        found_ = tf.cond(tf.equal(tf.reduce_sum(less_mask), 0),
                         lambda: False,
                         lambda: True)

        alpha = tf.reshape(alpha, q_.shape)

        return alpha, mask, mass_, found_
Author: RileyShe, Project: DeepPavlov, Lines: 28, Source: tf_csoftmax_attention.py


Example 15: K

 def K(self, X, X2=None):
     if X2 is None:
         d = tf.fill(tf.pack([tf.shape(X)[0]]), tf.squeeze(self.variance))
         return tf.diag(d)
     else:
         shape = tf.pack([tf.shape(X)[0], tf.shape(X2)[0]])
         return tf.zeros(shape, tf.float64)
Author: fujiisoup, Project: GPflow, Lines: 7, Source: kernels.py


Example 16: gauss_kl_diag

def gauss_kl_diag(q_mu, q_sqrt, K,  num_latent):
    """
    Compute the KL divergence from

          q(x) = N(q_mu, q_sqrt^2)
    to
          p(x) = N(0, K)

    We assume num_latent independent distributions, given by the columns of
    q_mu and q_sqrt.

    q_mu is a matrix, each column contains a mean

    q_sqrt is a matrix, each column represents the diagonal of a square-root
        matrix of the covariance of q.

    K is a positive definite matrix: the covariance of p.

    num_latent is an integer: the number of independent distributions (equal to
        the columns of q_mu and q_sqrt).
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    KL += num_latent * 0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0] * num_latent, tf.float64)
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(q_sqrt)))  # Log-det of q-cov
    L_inv = tf.matrix_triangular_solve(L, eye(tf.shape(L)[0]), lower=True)
    K_inv = tf.matrix_triangular_solve(tf.transpose(L), L_inv, lower=False)
    KL += 0.5 * tf.reduce_sum(tf.expand_dims(tf.diag_part(K_inv), 1)
                              * tf.square(q_sqrt))  # Trace term.
    return KL
Author: blutooth, Project: dgp, Lines: 33, Source: kullback_leiblers.py


Example 17: _build_predict

 def _build_predict(self, Xnew, full_cov=False):
     """
     Compute the mean and variance of the latent function at some new points
     Xnew. For a derivation of the terms in here, see the associated SGPR
     notebook.
     """
     num_inducing = len(self.feature)
     err = self.Y - self.mean_function(self.X)
     Kuf = self.feature.Kuf(self.kern, self.X)
     Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
     Kus = self.feature.Kuf(self.kern, Xnew)
     sigma = tf.sqrt(self.likelihood.variance)
     L = tf.cholesky(Kuu)
     A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
     B = tf.matmul(A, A, transpose_b=True) + tf.eye(num_inducing, dtype=settings.float_type)
     LB = tf.cholesky(B)
     Aerr = tf.matmul(A, err)
     c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
     tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
     tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
     mean = tf.matmul(tmp2, c, transpose_a=True)
     if full_cov:
         var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) \
               - tf.matmul(tmp1, tmp1, transpose_a=True)
         shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
         var = tf.tile(tf.expand_dims(var, 2), shape)
     else:
         var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
               - tf.reduce_sum(tf.square(tmp1), 0)
         shape = tf.stack([1, tf.shape(self.Y)[1]])
         var = tf.tile(tf.expand_dims(var, 1), shape)
     return mean + self.mean_function(Xnew), var
Author: vincentadam87, Project: GPflow, Lines: 32, Source: sgpr.py


Example 18: one_hot_matrix

def one_hot_matrix(tensor_in, num_classes, on_value=1.0, off_value=0.0):
    """Encodes indices from given tensor as one-hot tensor.

    TODO(ilblackdragon): Ideally implementation should be
    part of TensorFlow with Eigen-native operation.

    Args:
        tensor_in: Input tensor of shape [N1, N2].
        num_classes: Number of classes to expand index into.
        on_value: Tensor or float, value to fill-in given index.
        off_value: Tensor or float, value to fill-in everything else.
    Returns:
        Tensor of shape [N1, N2, num_classes] with 1.0 for each id in original
        tensor.
    """
    tensor_in = tf.convert_to_tensor(tensor_in)
    sparse_values = tf.to_int64(tf.reshape(tensor_in, [-1, 1]))
    size = tf.shape(sparse_values)[0]
    dims = tf.shape(tensor_in)
    indices = tf.to_int64(tf.reshape(tf.range(0, size), [-1, 1]))
    indices_values = tf.concat(1, [indices, sparse_values])
    outshape = tf.to_int64(expand_concat(0, [size, num_classes]))
    one_hot_vector = tf.sparse_to_dense(indices_values, outshape, on_value, off_value)
    ret = tf.reshape(one_hot_vector, tf.concat(0, [dims, [num_classes]]))
    ret.set_shape(tensor_in.get_shape().concatenate(num_classes))
    return ret
Author: twinklestar93, Project: skflow, Lines: 26, Source: array_ops.py
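Since this snippet predates tf.one_hot, note that the same [N1, N2, num_classes] result can now be obtained in a single call (an illustrative sketch, not part of the skflow source):

# Sketch under the modern API: tf.one_hot handles the dynamic shapes directly
ret = tf.one_hot(tensor_in, depth=num_classes, on_value=on_value, off_value=off_value)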


Example 19: test_get_multi_class_predictions_for_five_aspect_ratios_per_location

  def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=num_classes_without_background,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features,
        num_predictions_per_location=5,
        scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    class_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape, class_predictions_with_background_shape
      ) = sess.run([
          tf.shape(box_encodings), tf.shape(class_predictions_with_background)])
      self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
      self.assertAllEqual(class_predictions_with_background_shape,
                          [4, 320, num_classes_without_background+1])
Author: DaRealLazyPanda, Project: models, Lines: 33, Source: box_predictor_test.py


Example 20: blend_images

def blend_images(data_folder1, data_folder2, out_folder, alpha=.5):
    filename_queue = tf.placeholder(dtype=tf.string)
    label = tf.placeholder(dtype=tf.int32)
    tensor_image = tf.read_file(filename_queue)

    image = tf.image.decode_jpeg(tensor_image, channels=3)

    multiplier = tf.div(tf.constant(224, tf.float32),
                        tf.cast(tf.maximum(tf.shape(image)[0], tf.shape(image)[1]), tf.float32))
    x = tf.cast(tf.round(tf.mul(tf.cast(tf.shape(image)[0], tf.float32), multiplier)), tf.int32)
    y = tf.cast(tf.round(tf.mul(tf.cast(tf.shape(image)[1], tf.float32), multiplier)), tf.int32)
    image = tf.image.resize_images(image, [x, y])

    image = tf.image.rot90(image, k=label)

    image = tf.image.resize_image_with_crop_or_pad(image, 224, 224)
    sess = tf.Session()
    sess.run(tf.local_variables_initializer())
    for root, folders, files in os.walk(data_folder1):
        for each in files:
            if each.find('.jpg') >= 0:
                img1 = Image.open(os.path.join(root, each))
                img2_path = os.path.join(root.replace(data_folder1, data_folder2), each.split("-")[-1])
                rotation = int(each.split("-")[1])
                img2 = sess.run(image, feed_dict={filename_queue: img2_path, label: rotation})
                imsave(os.path.join(os.getcwd(), "temp", "temp.jpg"), img2)
                img2 = Image.open(os.path.join(os.getcwd(), "temp", "temp.jpg"))
                out_image = Image.blend(img1, img2, alpha)
                outfile = os.path.join(root.replace(data_folder1, out_folder), each)
                if not os.path.exists(os.path.split(outfile)[0]):
                    os.makedirs(os.path.split(outfile)[0])
                out_image.save(outfile)
            else:
                print(each)
    sess.close()
Author: Sabrewarrior, Project: PhotoOrientation, Lines: 35, Source: misc.py
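As in Example 2, this snippet uses a pre-1.0 name: tf.mul was renamed tf.multiply in TensorFlow 1.0. A sketch of the resize arithmetic under the TF >= 1.0 API (illustrative, not from the source repository):

# TF >= 1.0 sketch: tf.mul -> tf.multiply; the logic is unchanged
x = tf.cast(tf.round(tf.multiply(tf.cast(tf.shape(image)[0], tf.float32), multiplier)), tf.int32)
y = tf.cast(tf.round(tf.multiply(tf.cast(tf.shape(image)[1], tf.float32), multiplier)), tf.int32)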



Note: the tensorflow.shape examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.

