Python tensorflow.tile Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.tile function in Python. If you have been asking yourself how tf.tile is used in practice, what its arguments mean, or what real code calling it looks like, the curated examples below should help.



The following presents 20 code examples of the tile function, ordered by popularity by default.
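
Before diving into the examples, here is a minimal sketch of the function's contract (assuming TensorFlow 1.x, which all of the snippets below use): tf.tile replicates a tensor along each axis, and the multiples argument must contain one entry per input dimension.

import tensorflow as tf

# Replicate a [1, 3] tensor twice along each axis, giving shape [2, 6].
x = tf.constant([[1, 2, 3]])
tiled = tf.tile(x, multiples=[2, 2])

with tf.Session() as sess:
    print(sess.run(tiled))
    # [[1 2 3 1 2 3]
    #  [1 2 3 1 2 3]]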

Example 1: tf_compute_distances

def tf_compute_distances(points, start_centers):
    """
    Given a set of points and some centroids, computes the distance from each point to each
    centroid.

    :param points: a 2d TF tensor of shape num_points x dim
    :param start_centers: a numpy array of shape num_centroid x dim
    :return: a TF tensor of shape num_points x num_centroids
    """
    with tf.variable_scope("distances"):
        # The dimensions in the problem
        (num_centroids, _) = np.shape(start_centers)
        # The shape of the block is extracted as a TF variable.
        num_points = tf.shape(points)[0]
        # The centers are embedded in the TF program.
        centers = tf.constant(start_centers)
        # Computation of the pairwise squared distances. This is a standard
        # implementation that follows what MLlib does.
        squares = tf.reduce_sum(tf.square(points), reduction_indices=1)
        center_squares = tf.reduce_sum(tf.square(centers), reduction_indices=1)
        prods = tf.matmul(points, centers, transpose_b = True)
        # This code simply expresses two outer products: center_squares * ones(num_points)
        # and ones(num_centroids) * squares
        t1a = tf.expand_dims(center_squares, 0)
        t1b = tf.stack([num_points, 1])
        t1 = tf.tile(t1a, t1b)
        t2a = tf.expand_dims(squares, 1)
        t2b = tf.stack([1, num_centroids])
        t2 = tf.tile(t2a, t2b)
        distances = t1 + t2 - 2 * prods
    return distances
Developer: databricks, Project: tensorframes, Lines: 31, Source: kmeans_demo.py
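
A hypothetical usage sketch (the data and sizes here are invented for illustration): with four 2-D points and two centers, the result is a 4 x 2 matrix of squared Euclidean distances, since ||p - c||^2 = ||p||^2 + ||c||^2 - 2 p·c. The two tf.tile calls could equivalently be replaced by broadcasting; tiling just makes the outer-product structure explicit.

import numpy as np
import tensorflow as tf

points = tf.constant(np.random.rand(4, 2))  # num_points x dim
centers = np.random.rand(2, 2)              # num_centroids x dim, a plain numpy array
distances = tf_compute_distances(points, centers)

with tf.Session() as sess:
    print(sess.run(distances).shape)  # (4, 2)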


Example 2: testShapeFunctionEdgeCases

  def testShapeFunctionEdgeCases(self):
    # Unknown multiples shape.
    inp = tf.constant(0.0, shape=[4, 4, 4, 4])
    tiled = tf.tile(inp, tf.placeholder(tf.int32))
    self.assertEqual([None, None, None, None], tiled.get_shape().as_list())

    # Unknown input shape.
    inp = tf.placeholder(tf.float32)
    tiled = tf.tile(inp, [2, 2, 2, 2])
    self.assertEqual([None, None, None, None], tiled.get_shape().as_list())

    # Unknown input and multiples shape.
    inp = tf.placeholder(tf.float32)
    tiled = tf.tile(inp, tf.placeholder(tf.int32))
    self.assertIs(None, tiled.get_shape().ndims)

    # Known input and partially known multiples.
    inp = tf.constant(0.0, shape=[1, 1])
    tiled = tf.tile(inp, [tf.placeholder(tf.int32), 7])
    self.assertEqual([None, 7], tiled.get_shape().as_list())

    # Mismatched input rank and multiples length.
    inp = tf.placeholder(tf.float32, shape=[None, None])
    with self.assertRaises(ValueError):
      tiled = tf.tile(inp, tf.placeholder(tf.int32, shape=[3]))
Developer: BloodD, Project: tensorflow, Lines: 25, Source: shape_ops_test.py
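
A brief companion sketch (assuming TF 1.x): even when static shape inference degrades to None, the runtime shape is fully determined once the placeholders are fed.

import numpy as np
import tensorflow as tf

inp = tf.placeholder(tf.float32)
multiples = tf.placeholder(tf.int32)
tiled = tf.tile(inp, multiples)
print(tiled.get_shape().ndims)  # None: rank unknown at graph-construction time

with tf.Session() as sess:
    out = sess.run(tiled, {inp: np.ones([2, 3], np.float32), multiples: [2, 2]})
    print(out.shape)  # (4, 6)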


Example 3: encode_coordinates_alt

  def encode_coordinates_alt(self, net):
    """An alternative implemenation for the encoding coordinates.

    Args:
      net: a tensor of shape=[batch_size, height, width, num_features]

    Returns:
      a list of tensors with encoded image coordinates in them.
    """
    batch_size, h, w, _ = net.shape.as_list()
    h_loc = [
      tf.tile(
          tf.reshape(
              tf.contrib.layers.one_hot_encoding(
                  tf.constant([i]), num_classes=h), [h, 1]), [1, w])
      for i in xrange(h)
    ]
    h_loc = tf.concat([tf.expand_dims(t, 2) for t in h_loc], 2)
    w_loc = [
      tf.tile(
          tf.contrib.layers.one_hot_encoding(tf.constant([i]), num_classes=w),
          [h, 1]) for i in xrange(w)
    ]
    w_loc = tf.concat([tf.expand_dims(t, 2) for t in w_loc], 2)
    loc = tf.concat([h_loc, w_loc], 2)
    loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1])
    return tf.concat([net, loc], 3)
Developer: banjocui, Project: models, Lines: 27, Source: model_test.py


Example 4: __init__

    def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0):
        self.num_layers = num_layers
        self.grus = []
        self.inits = []
        self.dropout_mask = []
        for layer in range(num_layers):
            input_size_ = input_size if layer == 0 else 2 * num_units
            gru_fw = tf.nn.rnn_cell.MultiRNNCell([
                tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(num_units=num_units)])

            gru_bw = tf.nn.rnn_cell.MultiRNNCell([
                tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(num_units=num_units)])

            init_fw = tf.Variable(tf.zeros([num_units]))
            init_fw = tf.expand_dims(tf.tile(tf.expand_dims(init_fw, axis=0), [batch_size, 1]), axis=0)
            init_bw = tf.Variable(tf.zeros([num_units]))
            init_bw = tf.expand_dims(tf.tile(tf.expand_dims(init_bw, axis=0), [batch_size, 1]), axis=0)

            mask_fw = tf.nn.dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
                                    keep_prob=keep_prob)
            mask_bw = tf.nn.dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
                                    keep_prob=keep_prob)

            self.grus.append((gru_fw, gru_bw,))
            self.inits.append((init_fw, init_bw,))
            self.dropout_mask.append((mask_fw, mask_bw,))
Developer: RileyShe, Project: DeepPavlov, Lines: 26, Source: utils.py
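
The init_fw/init_bw pattern above is a common way to give every batch element the same trainable initial state: expand a [num_units] vector and tile it along a new batch axis. A standalone sketch (toy sizes, invented for illustration):

import tensorflow as tf

num_units, batch_size = 4, 3
init = tf.Variable(tf.zeros([num_units]))
# [num_units] -> [1, num_units] -> [batch_size, num_units] -> [1, batch_size, num_units]
init_state = tf.expand_dims(tf.tile(tf.expand_dims(init, 0), [batch_size, 1]), 0)
print(init_state.get_shape().as_list())  # [1, 3, 4]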


Example 5: routing

def routing(input, b_IJ):
    ''' The routing algorithm.
        Args:
            input: A Tensor with shape [batch_size, num_caps_l=1152, 1, length(u_i)=8, 1],
                where num_caps_l is the number of capsules in layer l.
        Returns:
            A Tensor of shape [batch_size, num_caps_l_plus_1, length(v_j)=16, 1]
            representing the vector output `v_j` in layer l+1.
        Notes:
            u_i represents the vector output of capsule i in layer l, and
            v_j the vector output of capsule j in layer l+1.
        '''

    # W: [num_caps_j, num_caps_i, len_u_i, len_v_j]
    W = tf.get_variable('Weight', shape=(1, 1152, 10, 8, 16), dtype=tf.float32,
                        initializer=tf.random_normal_initializer(stddev=cfg.stddev))

    # Eq. 2, calc u_hat.
    # Do tiling for input and W before matmul:
    # input => [batch_size, 1152, 10, 8, 1]
    # W     => [batch_size, 1152, 10, 8, 16]
    input = tf.tile(input, [1, 1, 10, 1, 1])
    W = tf.tile(W, [cfg.batch_size, 1, 1, 1, 1])
    assert input.get_shape() == [cfg.batch_size, 1152, 10, 8, 1]

    # In the last 2 dims:
    # [8, 16].T x [8, 1] => [16, 1] => [batch_size, 1152, 10, 16, 1]
    u_hat = tf.matmul(W, input, transpose_a=True)
    assert u_hat.get_shape() == [cfg.batch_size, 1152, 10, 16, 1]

    # Line 3: for r iterations do
    for r_iter in range(cfg.iter_routing):
        with tf.variable_scope('iter_' + str(r_iter)):
            # Line 4:
            # => [1, 1152, 10, 1, 1]
            c_IJ = tf.nn.softmax(b_IJ, dim=2)
            c_IJ = tf.tile(c_IJ, [cfg.batch_size, 1, 1, 1, 1])
            assert c_IJ.get_shape() == [cfg.batch_size, 1152, 10, 1, 1]

            # Line 5:
            # weight u_hat with c_IJ, element-wise in the last two dims
            # => [batch_size, 1152, 10, 16, 1]
            s_J = tf.multiply(c_IJ, u_hat)
            # then sum over the second dim, resulting in [batch_size, 1, 10, 16, 1]
            s_J = tf.reduce_sum(s_J, axis=1, keep_dims=True)
            assert s_J.get_shape() == [cfg.batch_size, 1, 10, 16, 1]

            # Line 6:
            # squash using Eq. 1
            v_J = squash(s_J)
            assert v_J.get_shape() == [cfg.batch_size, 1, 10, 16, 1]

            # Line 7:
            # tile v_J from [batch_size, 1, 10, 16, 1] to [batch_size, 1152, 10, 16, 1],
            # then matmul in the last two dims: [16, 1].T x [16, 1] => [1, 1], and
            # reduce-sum over the batch_size dim, resulting in [1, 1152, 10, 1, 1]
            v_J_tiled = tf.tile(v_J, [1, 1152, 1, 1, 1])
            u_produce_v = tf.matmul(u_hat, v_J_tiled, transpose_a=True)
            assert u_produce_v.get_shape() == [cfg.batch_size, 1152, 10, 1, 1]
            b_IJ += tf.reduce_sum(u_produce_v, axis=0, keep_dims=True)
Developer: SrGrace, Project: Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials, Lines: 60, Source: capsLayer.py
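
The two tf.tile calls in the example can be checked in isolation (a standalone sketch with batch_size fixed to 32; cfg and squash are not needed here). Tiling aligns the capsule inputs and the shared weights so that a single batched matmul produces all prediction vectors u_hat at once:

import tensorflow as tf

batch_size = 32
u = tf.zeros([batch_size, 1152, 1, 8, 1])  # lower-layer capsule outputs
W = tf.zeros([1, 1152, 10, 8, 16])         # weights, shared across the batch

u_tiled = tf.tile(u, [1, 1, 10, 1, 1])          # repeat per output capsule
W_tiled = tf.tile(W, [batch_size, 1, 1, 1, 1])  # repeat per batch element
u_hat = tf.matmul(W_tiled, u_tiled, transpose_a=True)
print(u_hat.get_shape().as_list())  # [32, 1152, 10, 16, 1]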


Example 6: fztloss

        def fztloss( f, pVecs, nVecs ):
            """
            Tensorized cost function from Fast Zero-Shot Learning paper

            Args:
                f: The output from the network, a tensor of shape (# images, word embedding size)
                pVecs: The vector embeddings of the ground truth tags, a tensor
                    of shape (# images, # positive tags, word embedding size)
                nVecs: The vector embeddings of negatively sampled tags, a tensor
                    of shape (# images, # negative samples, word embedding size)

            Returns:
                Scalar tensor representing the batch cost
            """
            posmul = tf.mul(pVecs, f)
            negmul = tf.mul(nVecs, f)

            tfpos = tf.reduce_sum(posmul, reduction_indices=2)
            tfneg = tf.reduce_sum(negmul, reduction_indices=2)

            tfpos = tf.transpose(tfpos, [1,0])
            tfneg = tf.transpose(tfneg, [1,0])

            negexpan = tf.tile( tf.expand_dims(tfneg, -1), [1, 1, tf.shape(tfpos)[1]] )
            posexpan = tf.tile( tf.transpose(tf.expand_dims(tfpos, -1), [0,2,1]), [1, tf.shape(tfneg)[1], 1])
            differences = tf.sub(negexpan, posexpan)  

            return tf.reduce_sum(tf.reduce_sum(tf.log(1 + tf.exp(differences)), reduction_indices=[1,2]))
Developer: agude, Project: attalos, Lines: 28, Source: fast0tag.py


Example 7: ae_latent_sample_beam

def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams):
  """Sample from the latent space in the autoencoder."""
  vocab_size = 2**hparams.z_size
  beam_size = 1  # TODO(lukaszkaiser): larger beam sizes seem to work bad.
  inputs = tf.tile(inputs, [beam_size, 1, 1])
  ed = tf.tile(ed, [beam_size, 1, 1, 1])

  def symbols_to_logits_fn(ids):
    """Go from ids to logits."""
    ids = tf.expand_dims(ids, axis=2)  # Ids start with added all-zeros.
    latents_discrete = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0]])

    with tf.variable_scope(tf.get_variable_scope(), reuse=False):
      latents_dense = embed(latents_discrete)
      latents_pred = decode_transformer(
          inputs, ed, latents_dense, hparams, "extra")
      logits = tf.layers.dense(latents_pred, vocab_size, name="extra_logits")
      current_output_position = common_layers.shape_list(ids)[1] - 1
      logits = logits[:, current_output_position, :, :]
    return tf.squeeze(logits, axis=[1])

  initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32)
  length = tf.shape(latents_dense_in)[1]
  ids, _ = beam_search.beam_search(
      symbols_to_logits_fn, initial_ids, beam_size, length,
      vocab_size, alpha=0.0, eos_id=-1, stop_early=False)

  res = tf.expand_dims(ids[:, 0, :], axis=2)  # Pick first beam.
  return res[:, 1:]  # Remove the added all-zeros from ids.
Developer: kltony, Project: tensor2tensor, Lines: 29, Source: transformer_vae.py


Example 8: build_loss

 def build_loss(self, logits, labels, lambs):
     # apply sig_func to the logits, then transpose
     logits = tf.transpose(framwork.sig_func(logits))
     # according to the labels, drop rows that are not in labels
     labels_unique = tf.constant(range(self.image_classes), dtype=tf.int32)
     labels_num = self.image_classes
     logits = tf.gather(logits, indices=labels_unique)
     lambs = tf.gather(lambs, indices=labels_unique)
     # set the value of each row to True when it occurs in labels
     template = tf.tile(tf.expand_dims(labels_unique, dim=1), [1, self.batch_size])
     labels_expand = tf.tile(tf.expand_dims(labels, dim=0), [labels_num, 1])
     indict_logic = tf.equal(labels_expand, template)
     # split the tensor along rows
     logit_list = tf.split(0, labels_num, logits)
     indict_logic_list = tf.split(0, labels_num, indict_logic)
     lambda_list = tf.split(0, self.image_classes, lambs)
     # loss_list = list()
     # for i in range(self.image_classes):
     #     loss_list.append(framwork.loss_func(logit_list[i], indict_logic_list[i], lambda_list[i]))
     loss_list = map(framwork.loss_func, logit_list, indict_logic_list, lambda_list)
     loss = tf.add_n(loss_list)
     tensors_dict = {'labels_unique': labels_unique, 'template': template, 'logits_sig_trans': logits,
                     'loss': loss, 'indict_logic': indict_logic}
     self.tensors_names.extend(tensors_dict.keys())
     self.net_tensors.update(tensors_dict)
Developer: chengyang317, Project: information_pursuit, Lines: 25, Source: infor_net.py


Example 9: call

    def call(self, inputs):
        # print("in call")
# TODO: check input dtype

        # Tile kb_inputs
        kb_inputs = self.kb_inputs
        for i in range(inputs.shape.ndims - 1):
            kb_inputs = tf.expand_dims(kb_inputs, 0)
        kb_inputs = tf.tile(kb_inputs, tf.concat((tf.shape(inputs)[:-1], [1, 1]), 0))

        # Expand kb_mask
        kb_mask = self.kb_mask
        for i in range(inputs.shape.ndims - 2):
            kb_mask = tf.expand_dims(kb_mask, 1)
        kb_mask = tf.expand_dims(kb_mask, -1)

        # Tile inputs
        kb_size = tf.shape(self.kb_inputs)[0]
        tiling = tf.concat(([1] * (inputs.shape.ndims - 1), [kb_size], [1]), 0)
        cell_inputs = tf.tile(tf.expand_dims(inputs, -2), tiling)

        outputs = tf.concat([kb_inputs, cell_inputs], -1)
        outputs = tf.multiply(outputs, kb_mask)
        for layer in self.layers:
            outputs = layer.call(outputs)
        # outputs = tf.Print(outputs, [outputs], "KB attention pre-last layer output =")
        outputs = tf.squeeze(outputs, [-1])
        # print("inputs shape =", inputs.shape)
        # print("outputs shape =", outputs.shape)
        outputs = tf.concat([self.output_layer(inputs), outputs], -1)
        # print("out of call")
        return outputs
Developer: RileyShe, Project: DeepPavlov, Lines: 32, Source: kb_attn_layer.py


Example 10: build_predict

    def build_predict(self, Xnew, full_cov=False):
        """
        Compute the mean and variance of the latent function at some new points
        Xnew.
        """
        _, _, Luu, L, _, _, gamma = self.build_common_terms()
        Kus = self.kern.K(self.Z, Xnew)  # size  M x Xnew

        w = tf.matrix_triangular_solve(Luu, Kus, lower=True)  # size M x Xnew

        tmp = tf.matrix_triangular_solve(tf.transpose(L), gamma, lower=False)
        mean = tf.matmul(tf.transpose(w), tmp) + self.mean_function(Xnew)
        intermediateA = tf.matrix_triangular_solve(L, w, lower=True)

        if full_cov:
            var = (
                self.kern.K(Xnew)
                - tf.matmul(tf.transpose(w), w)
                + tf.matmul(tf.transpose(intermediateA), intermediateA)
            )
            var = tf.tile(tf.expand_dims(var, 2), tf.pack([1, 1, tf.shape(self.Y)[1]]))
        else:
            var = (
                self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(w), 0) + tf.reduce_sum(tf.square(intermediateA), 0)
            )  # size Xnew,
            var = tf.tile(tf.expand_dims(var, 1), tf.pack([1, tf.shape(self.Y)[1]]))

        return mean, var
Developer: GPflow, Project: GPflow, Lines: 28, Source: sgpr.py


Example 11: build_network

    def build_network(self):
        net_tensors = self.net_tensors
        with self.net_graph.as_default(), tf.device(self.net_device):
            logits = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, self.image_classes))
            labels = tf.placeholder(dtype=tf.int32, shape=(self.batch_size,))
            lambs = tf.placeholder(dtype=tf.float32, shape=(self.image_classes,))
            # apply sig_func to the logits, then transpose
            logits = tf.transpose(framwork.sig_func(logits))
            # according to the labels, drop rows that are not in labels

            labels_unique = tf.constant(range(self.image_classes), dtype=tf.int32)
            labels_num = self.image_classes
            logits = tf.gather(logits, indices=labels_unique)
            lambs = tf.gather(lambs, indices=labels_unique)
            # set the value of each row to True when it occurs in labels
            template = tf.tile(tf.expand_dims(labels_unique, dim=1), [1, self.batch_size])
            labels_expand = tf.tile(tf.expand_dims(labels, dim=0), [labels_num, 1])
            indict_logic = tf.equal(labels_expand, template)
            # split the tensor along rows
            logit_list = tf.split(0, labels_num, logits)
            indict_logic_list = tf.split(0, labels_num, indict_logic)
            lamb_list = tf.split(0, self.image_classes, lambs)
            logit_list = [tf.squeeze(item) for item in logit_list]
            indict_logic_list = [tf.squeeze(item) for item in indict_logic_list]
            left_right_tuples = list()
            for i in range(self.image_classes):
                left_right_tuples.append(framwork.lamb_func(logit_list[i], indict_logic_list[i], lamb=lamb_list[i]))
            # func = framwork.lamb_func()
            # left_right_tuples = map(func, logit_list, indict_logic_list, lamb_list)
            net_tensors.update({'left_right_tuples': left_right_tuples, 'logits': logits, 'labels': labels,
                                'lambs': lambs})
Developer: chengyang317, Project: information_pursuit, Lines: 31, Source: infor_net.py


Example 12: _tf_sample_generator

 def _tf_sample_generator(self):
     archit = self.network_architecture
     depth = len(archit) - 1
     self.samp_prob1_tfhl_list = [tf.tile(self.transfer_fun(self.bias_list[depth]), [1, self.batch_size])] # top layer is just the bias
     self.sample_tfhl_list = [sampleInt(self.samp_prob1_tfhl_list[0])]
     self.samp_w_tfhl_list = [tf.ones([1, self.batch_size])]
     sample_handle = [self.samp_var_list[depth].assign(self.sample_tfhl_list[0]),\
         self.samp_w_var_list[depth].assign(self.samp_w_tfhl_list[0]),\
         self.samp_prob1_var_list[depth].assign(self.samp_prob1_tfhl_list[0])]
     # sample from top to the bottom
     for i in range(depth-1, -1, -1): # not include top one
         n = archit[i]
         m = archit[i+1]
         spb = self.transfer_fun(tf.matmul(self.weights_list[i], self.sample_tfhl_list[0]) +\
               tf.tile(self.bias_list[i], [1, self.batch_size]))
         # we need to save the prob of sample
         sp = sampleInt(spb)
         spb_assign_handle = self.samp_prob1_var_list[i].assign(spb)
         sp_assign_handle = self.samp_var_list[i].assign(sp)
         #compute_importance_weight(Hi+1, Hi, H_wi+1, W, b)
         spw = compute_importance_weight(self.sample_tfhl_list[0],
                                         sp,
                                         self.samp_w_tfhl_list[0],
                                         self.weights_list[i],
                                         self.bias_list[i],
                                         self.batch_size)
         spw_assign_handle = self.samp_w_var_list[i].assign(spw)
         sample_handle.extend([sp_assign_handle, spw_assign_handle, spb_assign_handle])
         self.samp_prob1_tfhl_list.insert(0, spb)
         self.sample_tfhl_list.insert(0, sp)
         self.samp_w_tfhl_list.insert(0, spw)
     return sample_handle
Developer: hanhongsun, Project: tensorflow_script, Lines: 32, Source: rl_dgn.py


Example 13: compute_attention

 def compute_attention(self, image, text): 
     with tf.variable_scope("attention") as scope:
         if self.reuse:
             scope.reuse_variables()
         text_replicated = self._replicate_features(text, (1, 14, 14, 1), 
                                                    project=self.project)
         
         # Now both the features from the resnet and lstm are concatenated along the depth axis
         features = tf.nn.dropout(tf.concat([image, text_replicated], axis=3), 
                                  keep_prob=self.dropout_prob)
         conv1 = tf.nn.dropout(self.conv2d_layer(features, filters=512, 
                                            kernel_size=(1,1), 
                                            name="attention_conv1"),
                               keep_prob=self.dropout_prob)
         conv2 = self.conv2d_layer(conv1, filters=2, kernel_size=(1,1), name="attention_conv2")
         
         # Flattening each attention map to perform softmax
         attention_map = tf.reshape(conv2, (self.batch_size, 14*14, 2))
         attention_map = tf.nn.softmax(attention_map, axis=1, name = "attention_map")
         image = tf.reshape(image, (self.batch_size, 196, 2048, 1))
         attention = tf.tile(tf.expand_dims(attention_map, 2), (1, 1, 2048, 1))
         image = tf.tile(image,(1,1,1,2))
         weighted = image * attention
         weighted_average = tf.reduce_mean(weighted, 1)
         
         # Flatten both glimpses into a single vector
         weighted_average = tf.reshape(weighted_average, (self.batch_size, 2048*2))
         attention_output = tf.nn.dropout(tf.concat([weighted_average, text], 1), self.dropout_prob)
     return attention_output
Developer: momih, Project: vqa_tensorflow, Lines: 29, Source: model.py


Example 14: loss

def loss(logits, labels, lambs):
    # apply sig_func to the logits, then transpose
    logits = tf.transpose(framwork.sig_func(logits))
    # according to the labels, drop rows that are not in labels
    labels_unique = tf.constant(range(NUM_CLASSES), dtype=tf.int32)
    labels_num = NUM_CLASSES
    # logits = tf.gather(logits, indices=labels_unique)
    # lambs = tf.gather(lambs, indices=labels_unique)
    # set the value of each row to True when it occurs in labels
    template = tf.tile(tf.expand_dims(labels_unique, dim=1), [1, BATCH_SIZE])
    labels_expand = tf.tile(tf.expand_dims(labels, dim=0), [labels_num, 1])
    indict_logic = tf.equal(labels_expand, template)
    # split the tensor along rows
    logit_list = tf.split(0, labels_num, logits)
    indict_logic_list = tf.split(0, labels_num, indict_logic)
    lambda_list = tf.split(0, NUM_CLASSES, lambs)
    # loss_list = list()
    # for i in range(self.image_classes):
    #     loss_list.append(framwork.loss_func(logit_list[i], indict_logic_list[i], lambda_list[i]))
    loss_list = map(framwork.loss_func, logit_list, indict_logic_list, lambda_list)
    losses = tf.add_n(loss_list)
    tf.add_to_collection('losses', losses)
    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Developer: chengyang317, Project: cifar10, Lines: 25, Source: infor.py


Example 15: build_predict

 def build_predict(self, Xnew, full_cov=False):
     """
     Compute the mean and variance of the latent function at some new points
     Xnew. For a derivation of the terms in here, see the associated SGPR
     notebook.
     """
     num_inducing = tf.shape(self.Z)[0]
     err = self.Y - self.mean_function(self.X)
     Kuf = self.kern.K(self.Z, self.X)
     Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
     Kus = self.kern.K(self.Z, Xnew)
     sigma = tf.sqrt(self.likelihood.variance)
     L = tf.cholesky(Kuu)
     A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
     B = tf.matmul(A, tf.transpose(A)) + eye(num_inducing)
     LB = tf.cholesky(B)
     Aerr = tf.matmul(A, err)
     c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
     tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
     tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
     mean = tf.matmul(tf.transpose(tmp2), c)
     if full_cov:
         var = self.kern.K(Xnew) + tf.matmul(tf.transpose(tmp2), tmp2)\
             - tf.matmul(tf.transpose(tmp1), tmp1)
         shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
         var = tf.tile(tf.expand_dims(var, 2), shape)
     else:
         var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0)\
             - tf.reduce_sum(tf.square(tmp1), 0)
         shape = tf.pack([1, tf.shape(self.Y)[1]])
         var = tf.tile(tf.expand_dims(var, 1), shape)
     return mean + self.mean_function(Xnew), var
Developer: gbohner, Project: GPflow, Lines: 32, Source: sgpr.py


Example 16: q_zt

 def q_zt(self, unused_observation, prev_state, t):
   batch_size = tf.shape(prev_state)[0]
   q_mu = tf.tile(self.mus[t][tf.newaxis, :], [batch_size, 1])
   q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min)
   q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1])
   q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma))
   return q_zt
Developer: 812864539, Project: models, Lines: 7, Source: models.py


Example 17: tf_format_mnist_images

def tf_format_mnist_images(X, Y, Y_, n=100, lines=10):
    correct_prediction = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))
    correctly_recognised_indices = tf.squeeze(tf.where(correct_prediction), [1])  # indices of correctly recognised images
    incorrectly_recognised_indices = tf.squeeze(tf.where(tf.logical_not(correct_prediction)), [1]) # indices of incorrectly recognised images
    everything_incorrect_first = tf.concat([incorrectly_recognised_indices, correctly_recognised_indices], 0) # images reordered with indices of unrecognised images first
    everything_incorrect_first = tf.slice(everything_incorrect_first, [0], [n]) # compute first 100 only - no space to display more anyway
    # compute n=100 digits to display only
    Xs = tf.gather(X, everything_incorrect_first)
    Ys = tf.gather(Y, everything_incorrect_first)
    Ys_ = tf.gather(Y_, everything_incorrect_first)
    correct_prediction_s = tf.gather(correct_prediction, everything_incorrect_first)

    digits_left = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_left())
    correct_tags = tf.gather(digits_left, tf.argmax(Ys_, 1)) # correct digits to be printed on the images
    digits_right = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_right())
    computed_tags = tf.gather(digits_right, tf.argmax(Ys, 1)) # computed digits to be printed on the images
    #superimposed_digits = correct_tags+computed_tags
    superimposed_digits = tf.where(correct_prediction_s, tf.zeros_like(correct_tags), correct_tags+computed_tags) # only print the correct and computed digits on unrecognised images
    correct_bkg   = tf.reshape(tf.tile([1.3,1.3,1.3], [28*28]), [1, 28,28,3]) # white background
    incorrect_bkg = tf.reshape(tf.tile([1.3,1.0,1.0], [28*28]), [1, 28,28,3]) # red background
    recognised_bkg = tf.gather(tf.concat([incorrect_bkg, correct_bkg], 0), tf.cast(correct_prediction_s, tf.int32)) # pick either the red or the white background depending on recognised status

    I = tf.image.grayscale_to_rgb(Xs)
    I = ((1-(I+superimposed_digits))*recognised_bkg)/1.3 # stencil extra data on top of images and reorder them unrecognised first
    I = tf.image.convert_image_dtype(I, tf.uint8, saturate=True)
    Islices = [] # 100 images => 10x10 image block
    for imslice in range(lines):
        Islices.append(tf.concat(tf.unstack(tf.slice(I, [imslice*n//lines,0,0,0], [n//lines,28,28,3])), 1))
    I = tf.concat(Islices, 0)
    return I
Developer: Spandyie, Project: tensorflow-mnist-tutorial, Lines: 30, Source: tensorflowvisu.py
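
The background construction above is a compact tf.tile idiom: tiling a 3-element RGB triple 28*28 times and reshaping yields a constant-color image tensor. Isolated (a minimal sketch):

import tensorflow as tf

# Tile one RGB triple 28*28 times, then reshape into a single 28x28 color image.
bkg = tf.reshape(tf.tile([1.3, 1.0, 1.0], [28 * 28]), [1, 28, 28, 3])
print(bkg.get_shape().as_list())  # [1, 28, 28, 3]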


Example 18: r_xn

  def r_xn(self, z_t, t):
    """Computes a distribution over the future observations given current latent
    state.

    The indexing in these messages is 1 indexed and inclusive. This is
    consistent with the latex documents.

    Args:
      z_t: [batch_size, state_size] Tensor
      t: Current timestep
    """
    tf.logging.info(
        "r(x_{start}:{end} | z_{t}) ~ N(z_{t}, sigma_{t})".format(
            **{"t": t,
               "start": (self.first_future_obs_index(t)+1)*self.steps_per_obs,
               "end": self.num_timesteps-1}))
    batch_size = tf.shape(z_t)[0]
    # the mean for all future observations is the same.
    # this tiling results in a [batch_size, num_future_obs, state_size] Tensor
    r_mu = tf.tile(z_t[:,tf.newaxis,:], [1, self.num_future_obs(t), 1])
    # compute the variance
    r_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min)
    # the variance is the same across all state dimensions, so we only have to
    # tile sigma to be [batch_size, num_future_obs].
    r_sigma = tf.tile(r_sigma[tf.newaxis,:, tf.newaxis], [batch_size, 1, self.state_size])
    return tf.contrib.distributions.Normal(
        loc=r_mu, scale=tf.sqrt(r_sigma))
Developer: 812864539, Project: models, Lines: 27, Source: models.py


Example 19: while_step

 def while_step(t, rnn_state, tas, accs):
   """Implements one timestep of FIVO computation."""
   log_weights_acc, log_p_hat_acc, kl_acc = accs
   cur_inputs, cur_mask = nested.read_tas([inputs_ta, mask_ta], t)
   # Run the cell for one step.
   log_q_z, log_p_z, log_p_x_given_z, kl, new_state = cell(
       cur_inputs,
       rnn_state,
       cur_mask,
   )
   # Compute the incremental weight and use it to update the current
   # accumulated weight.
   kl_acc += kl * cur_mask
   log_alpha = (log_p_x_given_z + log_p_z - log_q_z) * cur_mask
   log_alpha = tf.reshape(log_alpha, [num_samples, batch_size])
   log_weights_acc += log_alpha
   # Calculate the effective sample size.
   ess_num = 2 * tf.reduce_logsumexp(log_weights_acc, axis=0)
   ess_denom = tf.reduce_logsumexp(2 * log_weights_acc, axis=0)
   log_ess = ess_num - ess_denom
   # Calculate the ancestor indices via resampling. Because we maintain the
   # log unnormalized weights, we pass the weights in as logits, allowing
   # the distribution object to apply a softmax and normalize them.
   resampling_dist = tf.contrib.distributions.Categorical(
       logits=tf.transpose(log_weights_acc, perm=[1, 0]))
   ancestor_inds = tf.stop_gradient(
       resampling_dist.sample(sample_shape=num_samples, seed=random_seed))
   # Because the batch is flattened and laid out as discussed
   # above, we must modify ancestor_inds to index the proper samples.
   # The particles in the ith filter are distributed every batch_size rows
   # in the batch, and offset i rows from the top. So, to correct the indices
   # we multiply by the batch_size and add the proper offset. Crucially,
   # when ancestor_inds is flattened the layout of the batch is maintained.
   offset = tf.expand_dims(tf.range(batch_size), 0)
   ancestor_inds = tf.reshape(ancestor_inds * batch_size + offset, [-1])
   noresample_inds = tf.range(num_samples * batch_size)
   # Decide whether or not we should resample; don't resample if we are past
   # the end of a sequence.
   should_resample = resampling_criterion(num_samples, log_ess, t)
   should_resample = tf.logical_and(should_resample,
                                    cur_mask[:batch_size] > 0.)
   float_should_resample = tf.to_float(should_resample)
   ancestor_inds = tf.where(
       tf.tile(should_resample, [num_samples]),
       ancestor_inds,
       noresample_inds)
   new_state = nested.gather_tensors(new_state, ancestor_inds)
   # Update the TensorArrays before we reset the weights so that we capture
   # the incremental weights and not zeros.
   ta_updates = [log_weights_acc, log_ess, float_should_resample]
   new_tas = [ta.write(t, x) for ta, x in zip(tas, ta_updates)]
   # For the particle filters that resampled, update log_p_hat and
   # reset weights to zero.
   log_p_hat_update = tf.reduce_logsumexp(
       log_weights_acc, axis=0) - tf.log(tf.to_float(num_samples))
   log_p_hat_acc += log_p_hat_update * float_should_resample
   log_weights_acc *= (1. - tf.tile(float_should_resample[tf.newaxis, :],
                                    [num_samples, 1]))
   new_accs = (log_weights_acc, log_p_hat_acc, kl_acc)
   return t + 1, new_state, new_tas, new_accs
Developer: ALISCIFP, Project: models, Lines: 60, Source: bounds.py
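
The ancestor-index correction in the example is easy to verify with small numbers (a standalone numpy sketch; batch_size=2 and num_samples=3 are invented for illustration). Particle j of filter i sits at row j * batch_size + i of the flattened state, which is exactly what multiplying by batch_size and adding the offset produces:

import numpy as np

batch_size, num_samples = 2, 3
# Column i holds the resampled particle indices for filter i.
ancestor_inds = np.array([[2, 0], [1, 1], [0, 2]])  # [num_samples, batch_size]
offset = np.arange(batch_size)[None, :]
flat = (ancestor_inds * batch_size + offset).reshape(-1)
print(flat)  # [4 1 2 3 0 5] -> rows of the flattened [num_samples * batch_size] state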


Example 20: _meshgrid

  def _meshgrid(depth, height, width, z_near, z_far):
    with tf.variable_scope('_meshgrid'):
      x_t = tf.reshape(
          tf.tile(tf.linspace(-1.0, 1.0, width), [height * depth]),
          [depth, height, width])
      y_t = tf.reshape(
          tf.tile(tf.linspace(-1.0, 1.0, height), [width * depth]),
          [depth, width, height])
      y_t = tf.transpose(y_t, [0, 2, 1])
      sample_grid = tf.tile(
          tf.linspace(float(z_near), float(z_far), depth), [width * height])
      z_t = tf.reshape(sample_grid, [height, width, depth])
      z_t = tf.transpose(z_t, [2, 0, 1])

      z_t = 1 / z_t
      d_t = 1 / z_t
      x_t /= z_t
      y_t /= z_t

      x_t_flat = tf.reshape(x_t, (1, -1))
      y_t_flat = tf.reshape(y_t, (1, -1))
      d_t_flat = tf.reshape(d_t, (1, -1))

      ones = tf.ones_like(x_t_flat)
      grid = tf.concat([d_t_flat, y_t_flat, x_t_flat, ones], 0)
      return grid
Developer: ALISCIFP, Project: models, Lines: 26, Source: perspective_transform.py
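
The tile/reshape/transpose combination plays the role of a 3-D meshgrid. A sanity check with toy sizes (a sketch, not part of the original code):

import tensorflow as tf

width, height, depth = 3, 2, 2
x_t = tf.reshape(
    tf.tile(tf.linspace(-1.0, 1.0, width), [height * depth]),
    [depth, height, width])

with tf.Session() as sess:
    print(sess.run(x_t))  # every row along the last axis is [-1., 0., 1.]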



Note: The tensorflow.tile examples in this article were compiled from source code and documentation on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.

