
Python tensorflow.pack Function Code Examples


This article collects typical usage examples of the tensorflow.pack function in Python. If you are wondering how pack is used in practice, how to call it, or what real-world examples look like, the curated code examples here may help.



Below are 20 code examples of the pack function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
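Before diving in, here is a minimal sketch of what pack does (my own illustration, not drawn from any of the projects below): it stacks a list of equal-shaped tensors along a new leading axis. Note that tf.pack was renamed tf.stack in TensorFlow 1.0; the examples in this article target the pre-1.0 API.

import tensorflow as tf

# tf.pack stacks N tensors of shape [...] into one tensor of shape [N, ...].
a = tf.constant([1, 2])
b = tf.constant([3, 4])
packed = tf.pack([a, b])  # shape (2, 2)

with tf.Session() as sess:
    print(sess.run(packed))  # [[1 2]
                             #  [3 4]]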

Example 1: FixedUnPooling

def FixedUnPooling(x, shape, unpool_mat=None):
    """
    Unpool the input with a fixed mat to perform kronecker product with.

    :param x: NHWC tensor
    :param shape: int or [h, w]
    :param unpool_mat: a tf/np matrix with size=shape. If None, will use a mat
        with 1 at top-left corner.
    :returns: NHWC tensor
    """
    shape = shape2d(shape)
    input_shape = tf.shape(x)
    if unpool_mat is None:
        mat = np.zeros(shape, dtype='float32')
        mat[0][0] = 1
        unpool_mat = tf.Variable(mat, trainable=False, name='unpool_mat')
    elif isinstance(unpool_mat, np.ndarray):
        unpool_mat = tf.Variable(unpool_mat, trainable=False, name='unpool_mat')
    assert unpool_mat.get_shape().as_list() == list(shape)

    # perform a tensor-matrix kronecker product
    fx = flatten(tf.transpose(x, [0, 3, 1, 2]))
    fx = tf.expand_dims(fx, -1)       # (bchw) x 1
    mat = tf.expand_dims(flatten(unpool_mat), 0)    # 1 x (sh x sw)
    prod = tf.matmul(fx, mat)    # (bchw) x (sh x sw)
    prod = tf.reshape(prod, tf.pack(
        [-1, input_shape[3], input_shape[1], input_shape[2], shape[0], shape[1]]))
    prod = tf.transpose(prod, [0, 2, 4, 3, 5, 1])
    prod = tf.reshape(prod, tf.pack(
        [-1, input_shape[1] * shape[0], input_shape[2] * shape[1], input_shape[3]]))
    return prod
Developer: Jothecat, Project: tensorpack, Lines: 31, Source: pool.py
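The "kronecker product" in Example 1's comment is literal: per channel, the unpooled map equals the Kronecker product of the feature map with unpool_mat. A minimal NumPy sketch of that reading (an illustration, not part of tensorpack):

import numpy as np

# Unpool a 2x2 map by a factor of 2 with a mat holding 1 at the top-left,
# which copies each value to the top-left of its 2x2 output block.
x = np.array([[1., 2.],
              [3., 4.]])
mat = np.array([[1., 0.],
                [0., 0.]])
print(np.kron(x, mat))
# [[1. 0. 2. 0.]
#  [0. 0. 0. 0.]
#  [3. 0. 4. 0.]
#  [0. 0. 0. 0.]]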


Example 2: iou

  def iou(self, boxes1, boxes2):
    """calculate ious
    Args:
      boxes1: 4-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4]  ====> (x_center, y_center, w, h)
      boxes2: 1-D tensor [4] ===> (x_center, y_center, w, h)
    Return:
      iou: 3-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    """
    boxes1 = tf.pack([boxes1[:, :, :, 0] - boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] - boxes1[:, :, :, 3] / 2,
                      boxes1[:, :, :, 0] + boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] + boxes1[:, :, :, 3] / 2])
    boxes1 = tf.transpose(boxes1, [1, 2, 3, 0])
    boxes2 = tf.pack([boxes2[0] - boxes2[2] / 2, boxes2[1] - boxes2[3] / 2,
                      boxes2[0] + boxes2[2] / 2, boxes2[1] + boxes2[3] / 2])

    # upper-left and lower-right corners of the intersection
    lu = tf.maximum(boxes1[:, :, :, 0:2], boxes2[0:2])
    rd = tf.minimum(boxes1[:, :, :, 2:], boxes2[2:])

    # intersection
    intersection = rd - lu

    inter_square = intersection[:, :, :, 0] * intersection[:, :, :, 1]

    mask = tf.cast(intersection[:, :, :, 0] > 0, tf.float32) * tf.cast(intersection[:, :, :, 1] > 0, tf.float32)

    inter_square = mask * inter_square

    # calculate the areas of boxes1 and boxes2
    square1 = (boxes1[:, :, :, 2] - boxes1[:, :, :, 0]) * (boxes1[:, :, :, 3] - boxes1[:, :, :, 1])
    square2 = (boxes2[2] - boxes2[0]) * (boxes2[3] - boxes2[1])
    
    return inter_square/(square1 + square2 - inter_square + 1e-6)
Developer: yyf013932, Project: tensormsa, Lines: 32, Source: yolo_net.py
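The corner conversion and max/min intersection above are easy to sanity-check numerically. A small NumPy version of the same arithmetic for two single boxes (my own sketch, not from yolo_net.py):

import numpy as np

def iou(b1, b2):
    # convert (x_center, y_center, w, h) to (x1, y1, x2, y2) corners
    c1 = np.array([b1[0] - b1[2]/2, b1[1] - b1[3]/2, b1[0] + b1[2]/2, b1[1] + b1[3]/2])
    c2 = np.array([b2[0] - b2[2]/2, b2[1] - b2[3]/2, b2[0] + b2[2]/2, b2[1] + b2[3]/2])
    lu = np.maximum(c1[:2], c2[:2])              # upper-left of the overlap
    rd = np.minimum(c1[2:], c2[2:])              # lower-right of the overlap
    inter = np.prod(np.maximum(rd - lu, 0.0))    # clamp at 0, as the mask does above
    union = b1[2]*b1[3] + b2[2]*b2[3] - inter
    return inter / (union + 1e-6)

print(iou([0, 0, 2, 2], [1, 1, 2, 2]))  # overlap 1, union 7 -> ~0.1429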


Example 3: _define_distance_to_clusters

  def _define_distance_to_clusters(self, data):
    """Defines the Mahalanobis distance to the assigned Gaussian."""
    # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
    # mean) from log probability function.
    self._all_scores = []
    for shard in data:
      all_scores = []
      shard = tf.expand_dims(shard, 0)
      for c in xrange(self._num_classes):
        if self._covariance_type == FULL_COVARIANCE:
          cov = self._covs[c, :, :]
        elif self._covariance_type == DIAG_COVARIANCE:
          cov = tf.diag(self._covs[c, :])
        inverse = tf.matrix_inverse(cov + self._min_var)
        inv_cov = tf.tile(
            tf.expand_dims(inverse, 0),
            tf.pack([self._num_examples, 1, 1]))
        diff = tf.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
        m_left = tf.batch_matmul(diff, inv_cov)
        all_scores.append(tf.sqrt(tf.batch_matmul(
            m_left, tf.transpose(diff, perm=[0, 2, 1])
        )))
      self._all_scores.append(tf.reshape(
          tf.concat(1, all_scores),
          tf.pack([self._num_examples, self._num_classes])))

    # Distance to the associated class.
    self._all_scores = tf.concat(0, self._all_scores)
    assignments = tf.concat(0, self.assignments())
    rows = tf.to_int64(tf.range(0, self._num_examples))
    indices = tf.concat(1, [tf.expand_dims(rows, 1),
                            tf.expand_dims(assignments, 1)])
    self._scores = tf.gather_nd(self._all_scores, indices)
Developer: DavidNemeskey, Project: tensorflow, Lines: 33, Source: gmm_ops.py


Example 4: build_reparam_loss_kl

    def build_reparam_loss_kl(self):
        """Build loss function. Its automatic differentiation
        is a stochastic gradient of

        .. math::

            -ELBO = - ( E_{q(z; \lambda)} [ \log p(x | z) ]
                        - KL(q(z; \lambda) || p(z)) )

        based on the reparameterization trick. (Kingma and Welling, 2014)

        It assumes the KL is analytic.

        It assumes the prior is :math:`p(z) = \mathcal{N}(z; 0, 1)`

        Computed by sampling from :math:`q(z;\lambda)` and evaluating the
        expectation using Monte Carlo sampling.
        """
        x = self.data
        z = self.variational.sample(self.n_samples)

        mu = tf.pack([layer.loc for layer in self.variational.layers])
        sigma = tf.pack([layer.scale for layer in self.variational.layers])
        self.loss = tf.reduce_mean(self.model.log_lik(x, z)) - \
                    kl_multivariate_normal(mu, sigma)
        return -self.loss
Developer: leezqcst, Project: edward, Lines: 26, Source: inferences.py


Example 5: inputs

def inputs(path):
  whole = read_csv(FLAGS.batch_size, path)
  features = tf.transpose(tf.pack(whole[0:FLAGS.max_sentence_len]))
  label = tf.one_hot(
      tf.transpose(tf.pack(whole[FLAGS.max_sentence_len])),
      depth=2)
  return features, label
Developer: koth, Project: kcws, Lines: 7, Source: train_embedding.py


Example 6: build_model

    def build_model(self):
        video = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps, self.dim_image])
        video_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])

        HLness = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
        HLness_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])

        video_flat = tf.reshape(video, [-1, self.dim_image])
        image_emb = tf.nn.xw_plus_b( video_flat, self.encode_image_W, self.encode_image_b) # (batch_size*n_lstm_steps, dim_hidden)
        image_emb = tf.reshape(image_emb, [self.batch_size, self.n_lstm_steps, self.dim_hidden])
        image_emb = tf.transpose(image_emb, [1,0,2]) # n x b x h

        state2 = tf.zeros([self.batch_size, self.lstm2.state_size])

        loss_HL = 0.0
        _X = tf.reshape(image_emb, [-1, self.dim_hidden]) # (n x b) x h
        _X = tf.split(0, self.n_lstm_steps, _X) # n x (b x h)
        [output2, state2] = rnn.rnn(self.lstm_HL_net, _X, dtype=tf.float32) # n x (b x h)
        output2 = tf.transpose(tf.pack(output2), [1,0,2]) # b x n x h
        onehot_labels = []
        logit_words = []
        indices = tf.expand_dims(tf.range(0, self.n_lstm_steps, 1), 1) # n x 1
        for ii in xrange(10):
            labels = tf.expand_dims(HLness[ii,:], 1) # n x 1
            concated = tf.concat(1, [indices, labels]) # n x 2
            onehot_labels = tf.sparse_to_dense(concated, tf.pack([self.n_lstm_steps, 2]), 1.0, 0.0) # n x 2
            logit_words = tf.nn.xw_plus_b(output2[ii,:,:], self.embed_HL_W, self.embed_HL_b) # n x 2
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logit_words, onehot_labels) # n x 1
            cross_entropy = tf.mul(cross_entropy, HLness_mask[ii,:]) # n x 1
            loss_HL += tf.reduce_sum(cross_entropy) # 1

        loss_HL = loss_HL / tf.reduce_sum(HLness_mask)
        loss = loss_HL
        return loss, video, video_mask, HLness, HLness_mask
Developer: KuoHaoZeng, Project: VH, Lines: 34, Source: HL.py


Example 7: log_prob

    def log_prob(self, xs, zs):
        """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        x = xs['x']
        pi, mus, sigmas = zs
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)

        # Loop over each sample zs[s, :].
        log_lik = []
        N = get_dims(x)[0]
        n_samples = get_dims(pi)[0]
        for s in range(n_samples):
            # log-likelihood is
            # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
            # Create a K x N matrix, whose entry (k, n) is
            # log pi_k + log N(x_n; mu_k, sigma_k).
            matrix = []
            for k in range(self.K):
                matrix += [tf.ones(N)*tf.log(pi[s, k]) +
                           multivariate_normal.logpdf(x,
                               mus[s, (k*self.D):((k+1)*self.D)],
                               sigmas[s, (k*self.D):((k+1)*self.D)])]

            matrix = tf.pack(matrix)
            # log_sum_exp() along the rows is a vector, whose nth
            # element is the log-likelihood of data point x_n.
            vector = log_sum_exp(matrix, 0)
            # Sum over data points to get the full log-likelihood.
            log_lik_z = tf.reduce_sum(vector)
            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Developer: TalkingData, Project: edward, Lines: 33, Source: mixture_gaussian.py
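The log_sum_exp over the K rows is the standard numerically stable way to get each point's mixture log-likelihood, log sum_k pi_k N(x_n; mu_k, sigma_k). A quick NumPy/SciPy check of just that step (illustrative; I am assuming edward's log_sum_exp behaves like scipy's logsumexp along an axis):

import numpy as np
from scipy.special import logsumexp

# Rows are components k, columns are data points n; entries hold
# log(pi_k * N(x_n; mu_k, sigma_k)). Each column below sums to 1 in
# probability space, so the per-point log-likelihoods come out as 0.
matrix = np.log(np.array([[0.3, 0.6],
                          [0.7, 0.4]]))
print(logsumexp(matrix, axis=0))  # [0. 0.]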


Example 8: _composition_function

 def _composition_function(self, inputs, length, init_state=None):
     if self._composition == "GRU":
         cell = GRUCell(self._size)
         return dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                            initial_state=init_state, dtype=tf.float32)[0]
     elif self._composition == "LSTM":
         cell = BasicLSTMCell(self._size)
         init_state = tf.concat(1, [tf.zeros_like(init_state, tf.float32), init_state]) if init_state else None
         outs = dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                            initial_state=init_state, dtype=tf.float32)[0]
         return outs
     elif self._composition == "BiGRU":
         cell = GRUCell(self._size // 2, self._size)
         init_state_fw, init_state_bw = tf.split(1, 2, init_state) if init_state else (None, None)
         with tf.variable_scope("forward"):
             fw_outs = dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                                   initial_state=init_state_fw, dtype=tf.float32)[0]
         with tf.variable_scope("backward"):
             rev_inputs = tf.reverse_sequence(tf.pack(inputs), length, 0, 1)
             rev_inputs = [tf.reshape(x, [-1, self._size]) for x in tf.split(0, len(inputs), rev_inputs)]
             bw_outs = dynamic_rnn(cell, rev_inputs, sequence_length=length, time_major=True,
                                   initial_state=init_state_bw, dtype=tf.float32)[0]
             bw_outs = tf.reverse_sequence(tf.pack(bw_outs), length, 0, 1)
             bw_outs = [tf.reshape(x, [-1, self._size]) for x in tf.split(0, len(inputs), bw_outs)]
         return [tf.concat(1, [fw_out, bw_out]) for fw_out, bw_out in zip(fw_outs, bw_outs)]
     else:
         raise NotImplementedError("Other compositions not implemented yet.")
Developer: MorLong, Project: qa_network, Lines: 27, Source: qa_network.py


Example 9: compute_loss

    def compute_loss(self,emb_batch,curr_batch_size=None):
        outloss=[]
        prediction=[]
        for idx_batch in range(self.config.batch_size):

            tree_states=self.compute_states(emb_batch,idx_batch)
            logits = self.create_output(tree_states)

            labels1=tf.gather(self.labels,idx_batch)
            labels2=tf.reduce_sum(tf.to_int32(tf.not_equal(labels1,-1)))
            labels=tf.gather(labels1,tf.range(labels2))
            loss = self.calc_loss(logits,labels)


            pred = tf.nn.softmax(logits)

            pred_root=tf.gather(pred,labels2-1)


            prediction.append(pred_root)
            outloss.append(loss)

        batch_loss=tf.pack(outloss)
        self.pred = tf.pack(prediction)

        return batch_loss
Developer: Chelz, Project: RecursiveNN, Lines: 26, Source: tf_tree_lstm.py


Example 10: inference1

def inference1(data):
    data_shape_l = data.get_shape().as_list()
    with tf.variable_scope('conv1') as scope:
        weights = _variable_with_weight_decay('weights', shape=[3, 3, 3, 32],wd=0.0)
        biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.0))
        h_conv1 = _conv2d(data, weights, biases, [1,2,2,1])
      
    with tf.variable_scope('conv2') as scope:
        weights = _variable_with_weight_decay('weights', shape=[3, 3, 32, 32],wd=0.0)
        biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.0))
        h_conv2 = _conv2d(h_conv1, weights, biases, [1,1,1,1])

    with tf.variable_scope('deconv1') as scope:
        weights = _variable_with_weight_decay('weights', shape=[3, 3, 32, 32],wd=0.0)
        biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.0))
        output_shape = tf.pack(h_conv1.get_shape().as_list())
        h_dconv1 = _dconv2d(h_conv2, weights, biases, output_shape, [1,1,1,1])

    with tf.variable_scope('deconv2') as scope:
        weights = _variable_with_weight_decay('weights', shape=[3, 3, 3, 32],wd=0.0)
        biases = _variable_on_cpu('biases', [3], tf.constant_initializer(0.0))
        output_shape = tf.pack(data_shape_l)
        h_dconv2 = _dconv2d(h_dconv1, weights, biases, output_shape, [1,2,2,1])

    # with tf.variable_scope('deconv1') as scope:
    #     weights = _variable_with_weight_decay('weights', shape=[3, 3, 3, 32],
    #                                        stddev=1e-4, wd=0.0)
    #     biases = _variable_on_cpu('biases', [3], tf.constant_initializer(0.0))
    #     output_shape = tf.pack(data_shape_l)
    #     h_dconv1 = _dconv2d(h_conv1, weights, biases, output_shape, [1,2,2,1])
    return h_dconv2
Developer: polltooh, Project: CNN_LSTM, Lines: 31, Source: nt.py


Example 11: lstm_cell

 def lstm_cell(i, o, state):
   """
   Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
   Note that in this formulation, we omit the various connections between the
   previous state and the gates.
   """                   
   i_list = tf.pack([i, i, i, i])
   #print i_list.get_shape().as_list()
   o_list = tf.pack([o, o, o, o])
                         
   ins = tf.batch_matmul(i_list, fico_x)
   outs = tf.batch_matmul(o_list, fico_m)
   
   h_x = ins + outs + fico_b
   #print h_x.get_shape().as_list()
   
   #forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
   forget_gate = tf.sigmoid(h_x[0,:,:])
   
   #input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
   input_gate = tf.sigmoid(h_x[1,:,:])
   
   #update = tf.tanh(tf.matmul(i, cx) + tf.matmul(o, cm) + cb)
   update = tf.tanh(h_x[2,:,:])
   
   state = forget_gate*state + input_gate*update
   
   #output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
   output_gate = tf.sigmoid(h_x[3,:,:])
   
   h = output_gate * tf.tanh(state)
   #print 'h', h.get_shape().as_list()
   return h, state
Developer: kcbighuge, Project: tensorflow-deeplearning, Lines: 33, Source: 6_lstm.py


Example 12: _build_annealed_losses

 def _build_annealed_losses(self, outputs, labels, anneal_factors):
     sequence_length = len(outputs)
     packed_outputs = tf.pack(outputs)
     tiled_labels = tf.pack([labels for i in range(sequence_length)])
     accumulated_losses = -tf.reduce_sum(tiled_labels * tf.log(packed_outputs), [1, 2])
     annealed_losses = tf.mul(anneal_factors, tf.concat(0, accumulated_losses))
     return annealed_losses
Developer: dennybritz, Project: sentiment-analysis, Lines: 7, Source: char_rnn.py


Example 13: build_predict

    def build_predict(self, Xnew, full_cov=False):
        """
        Compute the mean and variance of the latent function at some new points
        Xnew. Note that this is very similar to the SGPR prediction, for which
        there are notes in the SGPR notebook.
        """
        num_inducing = tf.shape(self.Z)[0]
        psi0, psi1, psi2 = ke.build_psi_stats(self.Z, self.kern, self.X_mean, self.X_var)
        Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
        Kus = self.kern.K(self.Z, Xnew)
        sigma2 = self.likelihood.variance
        sigma = tf.sqrt(sigma2)
        L = tf.cholesky(Kuu)

        A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
        tmp = tf.matrix_triangular_solve(L, psi2, lower=True)
        AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
        B = AAT + eye(num_inducing)
        LB = tf.cholesky(B)
        c = tf.matrix_triangular_solve(LB, tf.matmul(A, self.Y), lower=True) / sigma
        tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
        tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
        mean = tf.matmul(tf.transpose(tmp2), c)
        if full_cov:
            var = self.kern.K(Xnew) + tf.matmul(tf.transpose(tmp2), tmp2)\
                - tf.matmul(tf.transpose(tmp1), tmp1)
            shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
            var = tf.tile(tf.expand_dims(var, 2), shape)
        else:
            var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0)\
                - tf.reduce_sum(tf.square(tmp1), 0)
            shape = tf.pack([1, tf.shape(self.Y)[1]])
            var = tf.tile(tf.expand_dims(var, 1), shape)
        return mean + self.mean_function(Xnew), var
Developer: blutooth, Project: dgp, Lines: 34, Source: gplvm.py


Example 14: _rnn_template

def _rnn_template(incoming, cell, dropout=None, return_seq=False,
                  return_state=False, initial_state=None, dynamic=False,
                  scope=None, name="LSTM"):
    """ RNN Layer Template. """
    sequence_length = None
    if dynamic:
        sequence_length = retrieve_seq_length_op(
            incoming if isinstance(incoming, tf.Tensor) else tf.pack(incoming))

    input_shape = utils.get_incoming_shape(incoming)

    with tf.variable_op_scope([incoming], scope, name) as scope:
        name = scope.name

        _cell = cell
        # Apply dropout
        if dropout:
            if type(dropout) in [tuple, list]:
                in_keep_prob = dropout[0]
                out_keep_prob = dropout[1]
            elif isinstance(dropout, float):
                in_keep_prob, out_keep_prob = dropout, dropout
            else:
                raise Exception("Invalid dropout type (must be a 2-D tuple of "
                                "float)")
            cell = DropoutWrapper(cell, in_keep_prob, out_keep_prob)

        inference = incoming
        # If a tensor given, convert it to a per timestep list
        if type(inference) not in [list, np.ndarray]:
            ndim = len(input_shape)
            assert ndim >= 3, "Input dim should be at least 3."
            axes = [1, 0] + list(range(2, ndim))
            inference = tf.transpose(inference, (axes))
            inference = tf.unpack(inference)

        outputs, state = _rnn(cell, inference, dtype=tf.float32,
                              initial_state=initial_state, scope=name,
                              sequence_length=sequence_length)

        # Retrieve RNN Variables
        c = tf.GraphKeys.LAYER_VARIABLES + '/' + scope.name
        for v in [_cell.W, _cell.b]:
            if hasattr(v, "__len__"):
                for var in v: tf.add_to_collection(c, var)
            else:
                tf.add_to_collection(c, v)
        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, outputs[-1])

    if dynamic:
        outputs = tf.transpose(tf.pack(outputs), [1, 0, 2])
        o = advanced_indexing_op(outputs, sequence_length)
    else:
        o = outputs if return_seq else outputs[-1]

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, o)

    return (o, state) if return_state else o
Developer: CharlesShang, Project: tflearn, Lines: 60, Source: recurrent.py


Example 15: total_variation_loss

def total_variation_loss(layer):
    shape = tf.shape(layer)
    height = shape[1]
    width = shape[2]
    y = tf.slice(layer, [0,0,0,0], tf.pack([-1,height-1,-1,-1])) - tf.slice(layer, [0,1,0,0], [-1,-1,-1,-1])
    x = tf.slice(layer, [0,0,0,0], tf.pack([-1,-1,width-1,-1])) - tf.slice(layer, [0,0,1,0], [-1,-1,-1,-1])
    return tf.nn.l2_loss(x) / tf.to_float(tf.size(x)) + tf.nn.l2_loss(y) / tf.to_float(tf.size(y))
Developer: DenisSergeevitch, Project: fast-neural-style, Lines: 7, Source: fast_neural_style.py
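Spelled out, this is the mean squared difference between vertically and horizontally adjacent pixels (tf.nn.l2_loss contributes a factor of 1/2). A NumPy sketch of the same quantity for one single-channel image (my own illustration, not from the project above):

import numpy as np

img = np.array([[0., 1.],
                [2., 3.]])
dy = img[1:, :] - img[:-1, :]   # vertical neighbor differences
dx = img[:, 1:] - img[:, :-1]   # horizontal neighbor differences
tv = (dx**2).sum() / 2 / dx.size + (dy**2).sum() / 2 / dy.size
print(tv)  # 0.5 + 2.0 = 2.5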


Example 16: build_predict

    def build_predict(self, Xnew, full_cov=False):
        """
        Compute the mean and variance of the latent function at some new points
        Xnew.
        """
        _, _, Luu, L, _, _, gamma = self.build_common_terms()
        Kus = self.kern.K(self.Z, Xnew)  # size  M x Xnew

        w = tf.matrix_triangular_solve(Luu, Kus, lower=True)  # size M x Xnew

        tmp = tf.matrix_triangular_solve(tf.transpose(L), gamma, lower=False)
        mean = tf.matmul(tf.transpose(w), tmp) + self.mean_function(Xnew)
        intermediateA = tf.matrix_triangular_solve(L, w, lower=True)

        if full_cov:
            var = (
                self.kern.K(Xnew)
                - tf.matmul(tf.transpose(w), w)
                + tf.matmul(tf.transpose(intermediateA), intermediateA)
            )
            var = tf.tile(tf.expand_dims(var, 2), tf.pack([1, 1, tf.shape(self.Y)[1]]))
        else:
            var = (
                self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(w), 0) + tf.reduce_sum(tf.square(intermediateA), 0)
            )  # size Xnew,
            var = tf.tile(tf.expand_dims(var, 1), tf.pack([1, tf.shape(self.Y)[1]]))

        return mean, var
Developer: GPflow, Project: GPflow, Lines: 28, Source: sgpr.py


Example 17: build_generator

    def build_generator(self):
        tf.get_variable_scope().reuse_variables()
        video = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps, self.dim_image])
        video_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])

        video_flat = tf.reshape(video, [-1, self.dim_image])
        image_emb = tf.nn.xw_plus_b(video_flat, self.encode_image_W, self.encode_image_b)
        image_emb = tf.reshape(image_emb, [self.batch_size, self.n_lstm_steps, self.dim_hidden])
        image_emb = tf.transpose(image_emb, [1,0,2])

        state2 = tf.zeros([self.batch_size, self.lstm2.state_size])

        generated_HL = []

        _X = tf.reshape(image_emb, [-1, self.dim_hidden]) # (n x b) x h
        _X = tf.split(0, self.n_lstm_steps, _X) # n x (b x h)
        [output2, state2] = rnn.rnn(self.lstm_HL_net, _X, dtype=tf.float32) # n x (b x h)
        output2 = tf.transpose(tf.pack(output2), [1,0,2]) # b x n x h
        for ii in range(self.batch_size):
            logit_words = tf.nn.xw_plus_b(output2[ii,:,:], self.embed_HL_W, self.embed_HL_b) # n x 2
            logit_words = tf.nn.softmax(logit_words) # n x 2
            generated_HL.append(logit_words[:,1]) # n x 1

        generated_HL = tf.pack(generated_HL) # b x n
        generated_HL = tf.mul(generated_HL, video_mask) # b x n
        with tf.variable_scope("RNN") as vs:
            lstmRNN_variables = [v for v in tf.all_variables() if v.name.startswith(vs.name)]
        return video, video_mask, generated_HL, lstmRNN_variables
Developer: KuoHaoZeng, Project: VH, Lines: 28, Source: HL.py


Example 18: _sample_forward

    def _sample_forward(self, back_filtered, eps):
        samples = []

        epses = tf.unpack(eps)

        sampling_dist = back_filtered[0]
        z_i = sampling_dist.sample(epses[0])
        samples.append(z_i)

        sampling_dists = [sampling_dist]        
        entropies = [sampling_dist.entropy()]
        for t in np.arange(1, self.T):
            pred_mean = tf.matmul(self._transition_mat(t-1), z_i)
            noise = self._gaussian_noise(t-1)

            #new_prec_mean = noise.prec_mean() + tf.matmul(noise.prec(), pred_mean)
            #incoming = MVGaussianNatural(new_prec_mean, noise.prec())
            incoming = MVGaussianMeanCov(noise.mean() + pred_mean, noise.cov())
            
            sampling_dist = back_filtered[t].multiply_density(incoming)
            sampling_dists.append(sampling_dist)
            
            z_i = sampling_dist.sample(epses[t])
            entropies.append(sampling_dist.entropy())            
            samples.append(z_i)

        self.sampling_dists = sampling_dists
        self.entropies = entropies

        entropy = tf.reduce_sum(tf.pack(entropies))
        sample = tf.reshape(tf.squeeze(tf.pack(samples)), self.output_shape)
        return sample, entropy
Developer: BenJamesbabala, Project: bayesflow, Lines: 32, Source: q_distributions.py


Example 19: read_record

def read_record(filename_queue):
    class FCNRecord(object):
        pass
    result = FCNRecord()
    result.mask_height = int(420/DOWNSAMPLE_FACTOR)
    result.mask_width = int(580/DOWNSAMPLE_FACTOR)
    result.mask_depth = 1
    result.img_depth = 1
    img_len = result.mask_height*result.mask_width*result.img_depth
    mask_len = result.mask_height*result.mask_width*result.mask_depth
    record_len = img_len + mask_len

    reader = tf.FixedLengthRecordReader(record_bytes=record_len)
    result.key, value = reader.read(filename_queue)
    record_bytes = tf.decode_raw(value, tf.uint8)
    #print(record_bytes.get_shape())
    int_image = tf.reshape(tf.slice(record_bytes, [0], [img_len]),[result.mask_height, result.mask_width])
    rgb_image = tf.pack([int_image,int_image,int_image])
    rgb_img = tf.transpose(rgb_image,(1,2,0))
    result.image = tf.cast(rgb_img,tf.float32)
    bool_mask = tf.cast( tf.reshape(tf.slice(record_bytes, [img_len], [mask_len]),[result.mask_height, result.mask_width]), tf.bool)
    hot_mask= tf.pack( [bool_mask, tf.logical_not(bool_mask)])
    h_mask = tf.transpose(hot_mask,(1,2,0))
    result.mask = tf.cast(h_mask, tf.float32)
    return result
Developer: vassiliou, Project: unstoo, Lines: 25, Source: aws_fcn_input.py


Example 20: get_inference

def get_inference(images_ph, dropout_keep_prob_ph):
    #subtract average image
    with tf.variable_scope('centering') as scope:
        mean = tf.constant(vgg.average_image, dtype=tf.float32, name='avg_image')
        images_ph = tf.sub(images_ph, mean, name='subtract_avg')

    #get layers from vgg19
    vgg_layers = vgg.get_VGG_layers(images_ph, dropout_keep_prob_ph, train_fc_layers=True)

    #################################################
    ### Add more layers for semantic segmentation ###
    #################################################

    # convolution on top of pool4 to 21 channels (to make coarse predictions)
    with tf.variable_scope('conv9') as scope:
        conv9 = conv_layer(vgg_layers['pool4'], 21, 1, 'conv9')

    # convolution on top of conv7 (fc7) to 21 channels (to make coarse predictions)
    with tf.variable_scope('conv8') as scope:
        conv8 = conv_layer(vgg_layers['dropout2'], 21, 1, 'conv8')

    # 2x upsampling from last layer
    with tf.variable_scope('deconv1') as scope:
        shape = tf.shape(conv8)
        out_shape = tf.pack([shape[0], shape[1]*2, shape[2]*2, 21])
        weights = tf.Variable(tf.truncated_normal(mean=MEAN, stddev=0.1, shape=(4, 4, 21, 21)), name='weights')
        deconv1 = tf.nn.conv2d_transpose( value=conv8,
                                          filter=weights,
                                          output_shape=out_shape,
                                          strides=(1, 2, 2, 1),
                                          padding='SAME',
                                          name='deconv1')

        # slice 2x upsampled tensor in the last layer to fit pool4
        shape = tf.shape(conv9)
        size = tf.pack([-1, shape[1], shape[2], -1])
        deconv1 = tf.slice(deconv1, begin=[0,0,0,0], size=size, name="deconv1_slice")

    # combine predictions from the last layer and pool4
    with tf.variable_scope('combined_pred') as scope:
        combined_pred = tf.add(deconv1, conv9, name="combined_pred")

    # 16x upsampling
    with tf.variable_scope('deconv2') as scope:
        shape = tf.shape(combined_pred)
        out_shape = tf.pack([shape[0], shape[1]*16, shape[2]*16, 21])
        weights = tf.Variable(tf.truncated_normal(mean=MEAN, stddev=0.1, shape=(32, 32, 21, 21)), name='weights')
        deconv2 = tf.nn.conv2d_transpose(value=combined_pred,
                                          filter=weights,
                                          output_shape=out_shape,
                                          strides=(1, 16, 16, 1),
                                          padding='SAME',
                                          name='deconv2')

        # slice upsampled tensor to original shape
        orig_shape = tf.shape(images_ph)
        size = tf.pack([-1, orig_shape[1], orig_shape[2], -1])
        logits = tf.slice(deconv2, begin=[0,0,0,0], size=size, name='logits')

    return logits
Developer: dmancevo, Project: semantic_segmentation, Lines: 60, Source: sem_segm.py



Note: The tensorflow.pack examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.

