Python tensor.tril Function Code Examples


This article collects typical usage examples of Python's theano.tensor.tril function. If you are unsure what tril does, how to call it, or what it looks like in real code, the curated examples below should help.



Twenty code examples of the tril function are shown below, sorted by popularity by default.
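Before the examples, a quick orientation: `theano.tensor.tril(m, k=0)` returns a copy of the matrix `m` with all entries above the k-th diagonal set to zero, mirroring `numpy.tril`. A minimal sketch (assuming a standard Theano installation) that compiles the op and checks it against NumPy:

    import numpy as np
    import theano
    import theano.tensor as T

    x = T.matrix('x')
    # k=0 keeps the main diagonal and everything below it;
    # negative k shifts the boundary below the diagonal, positive k above.
    f = theano.function([x], T.tril(x, k=0))

    a = np.arange(9, dtype=theano.config.floatX).reshape(3, 3)
    assert np.allclose(f(a), np.tril(a))

The companion `theano.tensor.triu` keeps the upper triangle instead; several of the examples below use the two together.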

Example 1: __init__

    def __init__(self, rng, input1, input2, n_in1, n_in2, n_hidden_layers, d_hidden, W1=None, W2=None):
        self.input1 = input1
        self.input2 = input2
        
        CouplingFunc = WarpNetwork(rng, input1, n_hidden_layers, d_hidden, n_in1, n_in2)  
        
        if W1 is None:
            bin = numpy.sqrt(6. / (n_in1 + n_in1))
            W1_values = numpy.identity(n_in1, dtype=theano.config.floatX)            
            W1 = theano.shared(value=W1_values, name='W1')

        if W2 is None:
            bin = numpy.sqrt(6. / (n_in2 + n_in2))
            W2_values = numpy.identity(n_in2, dtype=theano.config.floatX)
            W2 = theano.shared(value=W2_values, name='W2')

        V1u = T.triu(W1)
        V1l = T.tril(W1)
        V1l = T.extra_ops.fill_diagonal(V1l, 1.)
        V1 = T.dot(V1u, V1l) 
            
        V2u = T.triu(W2)
        V2l = T.tril(W2)
        V2l = T.extra_ops.fill_diagonal(V2l, 1.)
        V2 = T.dot(V2u, V2l) 
            
        self.output1 = T.dot(input1, V1)
        self.output2 = T.dot(input2, V2) + CouplingFunc.output

        self.log_jacobian = T.log(T.abs_(T.nlinalg.ExtractDiag()(V1u))).sum() \
            + T.log(T.abs_(T.nlinalg.ExtractDiag()(V2u))).sum() 

        self.params = CouplingFunc.params
Author: amarshah | Project: theano_fun | Lines: 33 | Source: NICE_warped.py
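Examples 1 and 18 share the same trick: an unconstrained square matrix W is split into an upper-triangular factor `T.triu(W)` and a unit-diagonal lower factor, so that their product is invertible whenever diag(W) has no zeros and its log-determinant falls out of the upper factor's diagonal. A minimal sketch of that pattern, under the same Theano assumptions as above:

    import theano
    import theano.tensor as T

    W = T.matrix('W')
    U = T.triu(W)                                  # upper factor, keeps W's diagonal
    L = T.extra_ops.fill_diagonal(T.tril(W), 1.)   # unit-diagonal lower factor
    V = T.dot(U, L)                                # invertible when diag(W) has no zeros
    # det(V) = det(U) * det(L) = prod(diag(U)) * 1, hence:
    log_abs_det = T.log(T.abs_(T.nlinalg.ExtractDiag()(U))).sum()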


Example 2: lower_lower

    def lower_lower(self):
        '''Evaluates the intractable term in the lower bound which itself
         must be lower bounded'''

        a = self.get_aux_mult()

        reversed_cum_probs = T.extra_ops.cumsum(a[:,::-1],1)
        dot_prod_m   = T.dot(reversed_cum_probs, self.digams_1p2)
        dot_prod_mp1 = T.dot(T.concatenate((reversed_cum_probs[:,1:],T.zeros((self.K,1))),1), self.digams[:,0])
        # final entropy term
        triu_ones = T.triu(T.ones_like(a)) - T.eye(self.K)
        aloga = T.sum(T.tril(a)*T.log(T.tril(a)+triu_ones),1)
        return T.dot(a, self.digams[:,1]) + dot_prod_m + dot_prod_mp1 - aloga
Author: roryjbeard | Project: GP-LVM-Conditional-MF | Lines: 13 | Source: IBP_factor_model.py


Example 3: grad

    def grad(self, inputs, output_gradients):
        """
        Reverse-mode gradient updates for matrix solve operation c = A \ b.

        Symbolic expression for updates taken from [1]_.

        References
        ----------
        ..[1] M. B. Giles, "An extended collection of matrix derivative results
          for forward and reverse mode automatic differentiation",
          http://eprints.maths.ox.ac.uk/1079/

        """
        A, b = inputs
        c = self(A, b)
        c_bar = output_gradients[0]
        trans_map = {
            'lower_triangular': 'upper_triangular',
            'upper_triangular': 'lower_triangular'
        }
        trans_solve_op = Solve(
            # update A_structure and lower to account for a transpose operation
            A_structure=trans_map.get(self.A_structure, self.A_structure),
            lower=not self.lower
        )
        b_bar = trans_solve_op(A.T, c_bar)
        # force outer product if vector second input
        A_bar = -tensor.outer(b_bar, c) if c.ndim == 1 else -b_bar.dot(c.T)
        if self.A_structure == 'lower_triangular':
            A_bar = tensor.tril(A_bar)
        elif self.A_structure == 'upper_triangular':
            A_bar = tensor.triu(A_bar)
        return [A_bar, b_bar]
Author: HapeMask | Project: Theano | Lines: 33 | Source: slinalg.py


Example 4: logp

    def logp(self, S):
        l = 0.0

        # add prior
        pi = self.pi
        # get time 0 states
        zeroIndices = np.roll(self.T.cumsum(), 1)
        zeroIndices[0] = 0
        zeroIndices = zeroIndices.astype('int32')
        l += TT.sum(TT.log(pi[S[zeroIndices]]))
        #l += TT.sum(TT.log(pi[S[:,0]]))

        # add likelihood
        Q = self.Q
        step_sizes = self.step_sizes

        #import pdb; pdb.set_trace()
        C = self.computeC(S)

        n_step_sizes = len(self.step_sizes)
        for i in range(0, n_step_sizes):
            tau = step_sizes[i]
            P = TT.slinalg.expm(tau*Q)

            stabilizer = TT.tril(TT.alloc(0.0, *P.shape) + 0.1, k=-1)
            logP = TT.log(P + stabilizer)

            # compute likelihood in terms of P(tau)
            l += TT.sum(C[i,:,:]*logP)

        return l
Author: clinicalml | Project: ContinuousTimeMarkovModel | Lines: 31 | Source: distributions.py


Example 5: L_op

    def L_op(self, inputs, outputs, gradients):
        # Modified from theano/tensor/slinalg.py
        # No handling for on_error = 'nan'
        dz = gradients[0]
        chol_x = outputs[0]

        # this is for nan mode
        #
        # ok = ~tensor.any(tensor.isnan(chol_x))
        # chol_x = tensor.switch(ok, chol_x, 1)
        # dz = tensor.switch(ok, dz, 1)

        # deal with upper triangular by converting to lower triangular
        if not self.lower:
            chol_x = chol_x.T
            dz = dz.T

        def tril_and_halve_diagonal(mtx):
            """Extracts lower triangle of square matrix and halves diagonal."""
            return tensor.tril(mtx) - tensor.diag(tensor.diagonal(mtx) / 2.)

        def conjugate_solve_triangular(outer, inner):
            """Computes L^{-T} P L^{-1} for lower-triangular L."""
            return gpu_solve_upper_triangular(
                outer.T, gpu_solve_upper_triangular(outer.T, inner.T).T)

        s = conjugate_solve_triangular(
            chol_x, tril_and_halve_diagonal(chol_x.T.dot(dz)))

        if self.lower:
            grad = tensor.tril(s + s.T) - tensor.diag(tensor.diagonal(s))
        else:
            grad = tensor.triu(s + s.T) - tensor.diag(tensor.diagonal(s))

        return [grad]
Author: Theano | Project: Theano | Lines: 35 | Source: linalg.py


Example 6: log_prob

    def log_prob(self, X, Y):
        """ Evaluate the log-probability for the given samples.

        Parameters
        ----------
        Y:      T.tensor
            samples from the upper layer
        X:      T.tensor
            samples from the lower layer

        Returns
        -------
        log_p:  T.tensor
            log-probabilities for the samples in X and Y
        """
        n_X, n_Y = self.get_hyper_params(['n_X', 'n_Y'])
        b, W, U  = self.get_model_params(['b', 'W', 'U'])
        
        W = T.tril(W, k=-1)

        prob_X = self.sigmoid(T.dot(X, W) + T.dot(Y, U) + T.shape_padleft(b))
        log_prob = X*T.log(prob_X) + (1-X)*T.log(1-prob_X)
        log_prob = T.sum(log_prob, axis=1)

        return log_prob
Author: Riashat | Project: reweighted-ws | Lines: 25 | Source: darn.py
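A recurring idiom in Examples 6, 12-14, and 17 is `T.tril(W, k=-1)`: with k=-1 only entries strictly below the diagonal survive, so in `T.dot(x, W)` output unit i receives contributions only from inputs x_j with j > i, which is exactly the autoregressive mask that DARN relies on. A small sketch, again assuming a standard Theano setup:

    import numpy as np
    import theano
    import theano.tensor as T

    W = T.matrix('W')
    # k=-1 zeroes the diagonal and everything above it, so W[j, i] survives
    # only when j > i: output i mixes only inputs that come after position i.
    f = theano.function([W], T.tril(W, k=-1))

    w = np.ones((4, 4), dtype=theano.config.floatX)
    assert np.allclose(f(w), np.tril(w, k=-1))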


Example 7: rank_loss

def rank_loss(scores):
    # Images
    diag = T.diag(scores)
    diff_img = scores - diag.dimshuffle(0, 'x') + 1
    max_img = T.maximum(0, diff_img)
    triu_img = T.triu(max_img, 1)
    tril_img = T.tril(max_img, -1)
    res_img = T.sum(triu_img) + T.sum(tril_img)

    # Sentences
    diff_sent = scores.T - diag.dimshuffle(0, 'x') + 1
    max_sent = T.maximum(0, diff_sent)
    triu_sent = T.triu(max_sent, 1)
    tril_sent = T.tril(max_sent, -1)
    res_sent = T.sum(triu_sent) + T.sum(tril_sent)

    # total hinge penalty over the off-diagonal image and sentence margins
    return res_img + res_sent
Author: flipvrijn | Project: Dedicon-Thesis | Lines: 17 | Source: customloss.py


Example 8: get_aux_mult

    def get_aux_mult(self):
        a_first_row_unnorm = (self.digams_1_cumsum - self.digams_1p2_cumsum + self.digams[:,1]).reshape((1,self.K))

        a_first_row_unnorm_rep = t_repeat(a_first_row_unnorm, self.K, axis=0).reshape((self.K,self.K))

        a = T.exp(a_first_row_unnorm_rep) * T.tril(T.ones((self.K, self.K)))

        return a / T.sum(a, 1).reshape((self.K,1))
Author: roryjbeard | Project: GP-LVM-Conditional-MF | Lines: 8 | Source: IBP_factor_model.py


Example 9: check_l

    def check_l(m, k=0):
        m_symb = T.matrix(dtype=m.dtype)
        k_symb = T.iscalar()

        f = theano.function([m_symb, k_symb],
                            T.tril(m_symb, k_symb),
                            mode=mode_with_gpu)
        result = f(m, k)
        assert np.allclose(result, np.tril(m, k))
        assert result.dtype == np.dtype(dtype)
        assert any([isinstance(node.op, GpuTri)
                    for node in f.maker.fgraph.toposort()])
Author: Theano | Project: Theano | Lines: 12 | Source: test_basic_ops.py


Example 10: grad

    def grad(self, inputs, gradients):
        """
        Cholesky decomposition reverse-mode gradient update.

        Symbolic expression for reverse-mode Cholesky gradient taken from [0]_

        References
        ----------
        .. [0] I. Murray, "Differentiation of the Cholesky decomposition",
           http://arxiv.org/abs/1602.07527

        """

        x = inputs[0]
        dz = gradients[0]
        chol_x = self(x)
        ok = tt.all(tt.nlinalg.diag(chol_x) > 0)
        chol_x = tt.switch(ok, chol_x, tt.fill_diagonal(chol_x, 1))
        dz = tt.switch(ok, dz, floatX(1))

        # deal with upper triangular by converting to lower triangular
        if not self.lower:
            chol_x = chol_x.T
            dz = dz.T

        def tril_and_halve_diagonal(mtx):
            """Extracts lower triangle of square matrix and halves diagonal."""
            return tt.tril(mtx) - tt.diag(tt.diagonal(mtx) / 2.)

        def conjugate_solve_triangular(outer, inner):
            """Computes L^{-T} P L^{-1} for lower-triangular L."""
            solve = tt.slinalg.Solve(A_structure="upper_triangular")
            return solve(outer.T, solve(outer.T, inner.T).T)

        s = conjugate_solve_triangular(
            chol_x, tril_and_halve_diagonal(chol_x.T.dot(dz)))

        if self.lower:
            grad = tt.tril(s + s.T) - tt.diag(tt.diagonal(s))
        else:
            grad = tt.triu(s + s.T) - tt.diag(tt.diagonal(s))
        return [tt.switch(ok, grad, floatX(np.nan))]
Author: qinghsui | Project: pymc | Lines: 42 | Source: dist_math.py


Example 11: __init__

 def __init__(self, weights_init, biases_init, lower=False,
              weights_prec=0., biases_prec=0., weights_mean=None,
              biases_mean=None):
     assert weights_init.ndim == 2, 'weights_init must be 2D array.'
     assert biases_init.ndim == 1, 'biases_init must be 1D array.'
     assert weights_init.shape[0] == biases_init.shape[0], \
         'Dimensions of weights_init and biases_init must be consistent.'
     self.lower = lower
     self.weights = th.shared(weights_init, name='W')
     self.weights_tri = (tt.tril(self.weights)
                         if lower else tt.triu(self.weights))
     self.biases = th.shared(biases_init, name='b')
     self.weights_prec = weights_prec
     self.biases_prec = biases_prec
     if weights_mean is None:
         weights_mean = np.eye(weights_init.shape[0])
     if biases_mean is None:
         biases_mean = np.zeros_like(biases_init)
     self.weights_mean = (np.tril(weights_mean)
                          if lower else np.triu(weights_mean))
     self.biases_mean = biases_mean
     super(TriangularAffineLayer, self).__init__(
         [self.weights, self.biases])
Author: matt-graham | Project: differentiable-generator-networks | Lines: 23 | Source: invertible_layers.py


Example 12: test_autoregressor

def test_autoregressor(dim=3, n_samples=5):
    ar = AutoRegressor(dim)
    ar.params['b'] += 0.1
    tparams = ar.set_tparams()

    X = T.matrix('X', dtype=floatX)
    nlp = ar.neg_log_prob(X)
    p = ar.get_prob(X, *ar.get_params())
    W = T.tril(ar.W, k=-1)
    z = T.dot(X, W) + ar.b

    x = np.random.randint(0, 2, size=(n_samples, dim)).astype(floatX)

    f = theano.function([X], [nlp, p, z, W])
    nlp_t, p_t, z_t, W_t = f(x)
    print x.shape, nlp_t.shape
    z_np = np.zeros((n_samples, dim)).astype(floatX) + ar.params['b'][None, :]

    for i in xrange(dim):
        print i
        for j in xrange(i + 1, dim):
            print i, j
            z_np[:, i] += ar.params['W'][j, i] * x[:, j]

    assert np.allclose(z_t, z_np), (z_t, z_np)
    p_np = sigmoid(z_np)
    assert np.allclose(p_t, p_np, atol=1e-4), (p_t - p_np)

    p_np = np.clip(p_np, 1e-7, 1 - 1e-7)
    nlp_np = (- x * np.log(p_np) - (1 - x) * np.log(1 - p_np)).sum(axis=1)

    assert np.allclose(nlp_t, nlp_np, atol=1e-3), (nlp_t - nlp_np)

    samples, updates = ar.sample(n_samples=n_samples)

    f = theano.function([], samples, updates=updates)
    print f()
Author: Jeremy-E-Johnson | Project: cortex | Lines: 37 | Source: test_darn.py


Example 13: step_neg_log_prob

 def step_neg_log_prob(self, x, c, War, bar):
     W = T.tril(War, k=-1)
     p = T.nnet.sigmoid(T.dot(x, W) + bar + c)
     return self.f_neg_log_prob(x, p)
Author: Jeremy-E-Johnson | Project: cortex | Lines: 4 | Source: darn.py


Example 14: neg_log_prob

 def neg_log_prob(self, x, c):
     W = T.tril(self.War, k=-1)
     p = T.nnet.sigmoid(T.dot(x, W) + self.bar + c)
     return self.f_neg_log_prob(x, p)
Author: Jeremy-E-Johnson | Project: cortex | Lines: 4 | Source: darn.py


Example 15: __init__

    def __init__(self, params,correct,Xinfo, samples = 500,batch_size=None):
        ker = kernel()
        mmd = MMD()
        self.samples = samples
        self.params =  params
        self.batch_size=batch_size
        self.Xlabel_value=Xinfo["Xlabel_value"]
        self.Weight_value=Xinfo["Weight_value"]
        
        # file in which to save the model
        model_file_name = 'model_MMD_kernel' + '.save'
        # load a previously built model if one exists
        try:
            print ('Trying to load model...')
            with open(model_file_name, 'rb') as file_handle:
                obj = pickle.load(file_handle)
                self.f, self.g= obj
                print ('Loaded!')
            return
        except:
            print ('Failed. Creating a new model...')
        
        X,Y,X_test,m,S_b,mu,Sigma_b,Z,eps_NQ,eps_M =\
        T.dmatrices('X','Y','X_test','m','S_b','mu','Sigma_b','Z','eps_NQ','eps_M')
        
        Xlabel=T.dmatrix('Xlabel')
        Zlabel=T.dmatrix('Zlabel')
        
        Zlabel_T=T.exp(Zlabel)/T.sum(T.exp(Zlabel),1)[:,None]  # labels are probabilities, so they must be positive and normalized
        
        Weight=T.dmatrix('Weight')
        
        lhyp = T.dvector('lhyp')
        ls=T.dvector('ls')
        ga=T.dvector('ga')
        
        (M, D), N, Q = Z.shape, X.shape[0], X.shape[1]

        
        # constraints keeping the variables positive
        beta = T.exp(ls)
        gamma=T.exp(ga[0])
        #beta=T.exp(lhyp[0])
        sf2, l = T.exp(lhyp[0]), T.exp(lhyp[1:1+Q])
        
        S=T.exp(S_b)
        #Sigma=T.exp(self.Sigma_b)
        
        # x needs no square root since its covariance is diagonal
        # u's covariance is not diagonal, so a triangular factor must be built, e.g. via a Cholesky-style construction
        Sigma = T.tril(Sigma_b - T.diag(T.diag(Sigma_b)) + T.diag(T.exp(T.diag(Sigma_b))))
        
        # scale transformation
        mu_scaled, Sigma_scaled = sf2**0.5 * mu, sf2**0.5 * Sigma
        
        Xtilda = m + S * eps_NQ
        U = mu_scaled+Sigma_scaled.dot(eps_M)

        print ('Setting up cache...')
        
        Kmm = ker.RBF(sf2, l, Z)
        Kmm=mmd.MMD_kenel_Xonly(gamma,Zlabel_T,Kmm,Weight)
        KmmInv = sT.matrix_inverse(Kmm) 
        #KmmDet=theano.sandbox.linalg.det(Kmm)
        
        #KmmInv_cache = sT.matrix_inverse(Kmm)
        #self.fKmm = theano.function([Z, lhyp], Kmm, name='Kmm')
        #self.f_KmmInv = theano.function([Z, lhyp], KmmInv_cache, name='KmmInv_cache')
        # note: this compiles KmmInv_cache as a function of Z and lhyp, so the inverse matrix becomes a function of Z and the hyperparameters
        #self.update_KmmInv_cache()  # fills in actual values to compute KmmInv
        # builds the derivative functions of the inverse matrix
        
        #self.dKmm_d = {'Z': theano.function([Z, lhyp], T.jacobian(Kmm.flatten(), Z), name='dKmm_dZ'),
        #               'lhyp': theano.function([Z, lhyp], T.jacobian(Kmm.flatten(), lhyp), name='dKmm_dlhyp')}

        
        print ('Modeling...')
        
        Kmn = ker.RBF(sf2,l,Z,Xtilda)
        Kmn=mmd.MMD_kenel_ZX(gamma,Zlabel_T,Xlabel,Kmn,Weight)
        
        Knn = ker.RBF(sf2,l,Xtilda,Xtilda)
        Knn=mmd.MMD_kenel_Xonly(gamma,Xlabel,Knn,Weight)
        
        Ktilda=Knn-T.dot(Kmn.T,T.dot(KmmInv,Kmn))
        
        Kinterval=T.dot(KmmInv,Kmn)
              
        mean_U=T.dot(Kinterval.T,U)
        betaI=T.diag(T.dot(Xlabel,beta))
        Covariance = betaI       
        
        LL = (self.log_mvn(X, mean_U, Covariance) - 0.5*T.sum(T.dot(betaI,Ktilda)))*correct              
        KL_X = -self.KLD_X(m,S)*correct
        KL_U = -self.KLD_U(mu_scaled , Sigma_scaled , Kmm,KmmInv)
        
        print ('Compiling model ...')        


        inputs = {'X': X, 'Z': Z, 'm': m, 'S_b': S_b, 'mu': mu, 'Sigma_b': Sigma_b, 'lhyp': lhyp, 'ls': ls, 
#......... part of the code omitted here .........
Author: futoshi-futami | Project: GP-and-GPLVM | Lines: 101 | Source: DGPLVM_theano_model.py


Example 16: constructL

 def constructL(ltri):
     tmp = T.transpose(T.tril(T.ones((d,d)),-1))
     lower_tril_indices = tmp.nonzero()
     L = T.transpose(T.set_subtensor(tmp[lower_tril_indices], ltri))
     return L
Author: awf | Project: autodiff | Lines: 5 | Source: Theano_vector.py


Example 17: get_prob

 def get_prob(self, x, W, b):
     W = T.tril(W, k=-1)
     p = T.nnet.sigmoid(T.dot(x, W) + b) * 0.9999 + 0.000005
     return p
Author: Jeremy-E-Johnson | Project: cortex | Lines: 4 | Source: darn.py


Example 18: __init__

    def __init__(self, rng, input, n_in, n_batch, d_bucket, activation, activation_deriv,
                 w=None, index_permute=None, index_permute_reverse=None):
        srng = RandomStreams(seed=234)
        
        n_bucket = n_in / d_bucket + 1
        self.input = input

        # randomly permute input space
        if index_permute is None:
            index_permute = srng.permutation(n=n_in)#numpy.random.permutation(n_in)
            index_permute_reverse = T.argsort(index_permute)
            self.index_permute = index_permute
            self.index_permute_reverse = index_permute_reverse

        permuted_input = input[:, index_permute]
        self.permuted_input = permuted_input

        # initialize reflection parameters
        if w is None:
            bound = numpy.sqrt(3. / d_bucket)
            w_values = numpy.asarray(rng.uniform(low=-bound,
                                                 high=bound,
                                                 size=(n_bucket, d_bucket, d_bucket)),
                                     dtype=theano.config.floatX)
            w = theano.shared(value=w_values, name='w')
            
        self.w = w
        
        
        # compute outputs and Jacobians
        
        log_jacobian = T.alloc(0, n_batch)
        for b in xrange(n_bucket):
            bucket_size = d_bucket
            if b == n_bucket - 1:
                bucket_size = n_in - b * d_bucket
            
            x_b = self.permuted_input[:, b*d_bucket:b*d_bucket + bucket_size]

            
            w_b = self.w[b, :bucket_size, :bucket_size]

#            W = T.slinalg.Expm()(w_b)
#            log_jacobian = log_jacobian + T.alloc(T.nlinalg.trace(w_b), n_batch)

            Upper = T.triu(w_b)
#            Upper = T.extra_ops.fill_diagonal(Upper, 1.)
            Lower = T.tril(w_b)
            Lower = T.extra_ops.fill_diagonal(Lower, 1.)
            log_det_Upper = T.log(T.abs_(T.nlinalg.ExtractDiag()(Upper))).sum() 
#            log_det_Lower = T.log(T.abs_(T.nlinalg.ExtractDiag()(Lower))).sum() 


            W = T.dot(Upper, Lower)
            log_jacobian = log_jacobian + T.alloc(log_det_Upper, n_batch)

            
#            W = T.dot(T.transpose(w_b), w_b) + 0.001*T.eye(bucket_size)
#            log_jacobian = log_jacobian + T.alloc(T.log(T.abs_(T.nlinalg.Det()(W))), n_batch)

#            diag = T.nlinalg.diag(W)
#            div = T.tile(T.reshape(T.sqrt(diag), [1, bucket_size]), (bucket_size, 1))
            
#            W = W / div / T.transpose(div)
            #import pdb; pdb.set_trace()

            lin_output_b = T.dot(x_b, W)
            if b>0:
                lin_output = T.concatenate([lin_output, lin_output_b], axis=1)
            else:
                lin_output = lin_output_b
            if activation is not None:
                derivs = activation_deriv(lin_output_b)     
                #import pdb; pdb.set_trace()
                log_jacobian = log_jacobian + T.log(T.abs_(derivs)).sum(axis=1)

                
#                for n in xrange(n_batch):                    
#                    mat = T.tile(T.reshape(derivs[n], [1, bucket_size]), (bucket_size, 1))
#                    mat = mat * W                   
#                    T.inc_subtensor(log_jacobian[n], T.log(T.abs_(T.nlinalg.Det()(mat))))
                    
        self.log_jacobian = log_jacobian        

        self.output = (
            lin_output if activation is None
            else activation(lin_output)
        )


        self.params = [w]
Author: amarshah | Project: theano_fun | Lines: 91 | Source: AE_perfect2.py


Example 19: __init__

    def __init__(self,input, D_in, D_out,num_MC,inducing_number,fixed_z,Domain_number=None,Domain_consideration=True,number="1",kernel_name='X'):

        Xtilda=input        
        
        self.N=Xtilda.shape[1]
        D=D_out
        Q=D_in
        M=inducing_number
        ################################################################################
        #set_initial_value
        ker=kernel(Q,kernel_name)
        self.kern=ker
        
        mu_value = np.random.randn(M,D)* 1e-2
        Sigma_b_value = np.zeros((M,M))
        
        if Domain_consideration:
            ls_value=np.zeros(Domain_number)+np.log(0.1)
        else:
            ls_value=np.zeros(1)+np.log(0.1)
        
        self.mu = theano.shared(value=mu_value, name='mu'+number, borrow=True)
        self.Sigma_b = theano.shared(value=Sigma_b_value, name='Sigma_b'+number, borrow=True)
        self.Z = theano.shared(value=fixed_z, name='Z'+number, borrow=True)
        self.ls = theano.shared(value=ls_value, name='ls'+number, borrow=True)
        
        ##############################################################################
        #param list
        self.params = [self.mu,self.Sigma_b,self.ls]
        self.params.extend(ker.params)
        
        self.hyp_params_list=[self.mu,self.Sigma_b,self.ls,ker.params]
        self.Z_params_list=[self.Z]        
        self.global_params_list=self.params
        #############################################################################
        #set random seed
        from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
        srng = RandomStreams(seed=234)
        
        eps_M = srng.normal((num_MC,M,D))  # mean and variance need different random draws, so each gets its own stream
        eps_ND = srng.normal((num_MC,self.N,D))
        #################################################################
        #set constraints                          
        self.beta = T.exp(self.ls)
        # u's covariance is not diagonal, so a triangular factor must be built, e.g. via a Cholesky-style construction

        Sigma = T.tril(self.Sigma_b - T.diag(T.diag(self.Sigma_b)) + T.diag(T.exp(T.diag(self.Sigma_b))))
        ##################################################################
        # scale transformation
        mu_scaled, Sigma_scaled = ker.sf2**0.5 * self.mu, ker.sf2**0.5 * Sigma
        
        ##################################################################
        # if the model is a latent variable model, we draw MC samples of the latent X
        
        #Xtilda = eps_NQ * S[None,:,:] + m[None,:,:]
        
        #Xtilda, updates = theano.scan(fn=lambda a: m+S*a,
        #                      sequences=[eps_NQ])
        
        ###############################
        #U is the posterior samples
        self.U, updates = theano.scan(fn=lambda a: mu_scaled+Sigma_scaled.dot(a),
                              sequences=[eps_M])
        ################################
        #inducing point prior
        Kmm = ker.RBF(self.Z)
        KmmInv = sT.matrix_inverse(Kmm) 
        
        ###############################
        #For the MC calculation, we copy the input X
        Knn, updates = theano.scan(fn=lambda a: self.kern.RBF(a),
                              sequences=[Xtilda])
        
        Kmn, updates = theano.scan(fn=lambda a: self.kern.RBF(self.Z,a),
                              sequences=[Xtilda])
        ########################################
        # build the posterior p(F|U) and its variance
        Ktilda, updates = theano.scan(fn=lambda a,b: a-T.dot(b.T,T.dot(KmmInv,b)),
                              sequences=[Knn,Kmn])
        ##################################################
        #get the posterior samples form (p(F|U))
        #MC*N*D_out
        #F, updates = theano.scan(fn=lambda a,b,c,d: T.dot(a.T,T.dot(KmmInv,b)) + T.dot(T.maximum(c, 1e-16)**0.5,d),
        #                      sequences=[Kmn,self.U,Ktilda,eps_ND])
        F, updates = theano.scan(fn=lambda a,c,d: T.dot(a.T,T.dot(KmmInv,mu_scaled)) + T.dot(T.maximum(c, 1e-16)**0.5,d),
                              sequences=[Kmn,Ktilda,eps_ND])
        ##################################################        
        #Kinterval=T.dot(KmmInv,Kmn)

        self.mean_U=F
        #mean_U=T.dot(Kinterval.T,self.U)
        
        #A=Kinterval.T      
        #Sigma_tilda=Ktilda+T.dot(A,T.dot(Sigma_scaled,A.T))
        #mean_tilda=T.dot(A,mu_scaled)        
        #self.mean_U=mean_tilda + T.dot(T.maximum(Sigma_tilda, 1e-16)**0.5,eps_ND)

        ###################################################################
        eps_ND_F = srng.normal((num_MC,self.N,D))

#......... part of the code omitted here .........
Author: futoshi-futami | Project: GP-and-GPLVM | Lines: 101 | Source: kernel_layer_inducing_fix.py


Example 20: __init__

    def __init__(self,D, M,Q,Domain_number):
        
        self.Xlabel=T.matrix('Xlabel')

        
        self.X=T.matrix('X')
        N=self.X.shape[0]
        
        self.Weight=T.matrix('Weight')

        ker=kernel(Q)
        mmd=MMD(M,Domain_number)
        
        mu_value = np.random.randn(M,D)
        Sigma_b_value = np.zeros((M,M)) + np.log(0.01)

        Z_value = np.random.randn(M,Q)
        self.test=Z_value
        ls_value=np.zeros(Domain_number)+np.log(0.1)
        
        self.mu = theano.shared(value=mu_value, name='mu', borrow=True)
        self.Sigma_b = theano.shared(value=Sigma_b_value, name='Sigma_b', borrow=True)
        self.Z = theano.shared(value=Z_value, name='Z', borrow=True)
        self.ls = theano.shared(value=ls_value, name='ls', borrow=True)
        
        self.params = [self.mu,self.Sigma_b,self.Z,self.ls]
        
        self.hiddenLayer_x = HiddenLayer(rng=rng,input=self.X,n_in=D,n_out=20,activation=T.nnet.relu,number='_x')
        self.hiddenLayer_m = HiddenLayer(rng=rng,input=self.hiddenLayer_x.output,n_in=20,n_out=Q,activation=T.nnet.relu,number='_m')
        self.hiddenLayer_S = HiddenLayer(rng=rng,input=self.hiddenLayer_x.output,n_in=20,n_out=Q,activation=T.nnet.relu,number='_S')
        
        self.loc_params= []
        self.loc_params.extend(self.hiddenLayer_x.params)
        self.loc_params.extend(self.hiddenLayer_m.params)
        self.loc_params.extend(self.hiddenLayer_S.params)

        self.local_params={}
        for i in self.loc_params:
            self.local_params[str(i)]=i
        
        self.params.extend(ker.params)
        self.params.extend(mmd.params)
        
        self.global_params={}
        for i in self.params:
            self.global_params[str(i)]=i
        
        self.params.extend(self.hiddenLayer_x.params)
        self.params.extend(self.hiddenLayer_m.params)
        self.params.extend(self.hiddenLayer_S.params)
        
        self.wrt={}
        for i in self.params:
            self.wrt[str(i)]=i
        
        m=self.hiddenLayer_m.output
        S_0=self.hiddenLayer_S.output
        S_1=T.exp(S_0)
        S=T.sqrt(S_1)
        
        from theano.tensor.shared_randomstreams import RandomStreams
        srng = RandomStreams(seed=234)
        eps_NQ = srng.normal((N,Q))
        eps_M = srng.normal((M,D))  # mean and variance need different random draws, so each gets its own stream

        beta = T.exp(self.ls)
        # u's covariance is not diagonal, so a triangular factor must be built, e.g. via a Cholesky-style construction

        Sigma = T.tril(self.Sigma_b - T.diag(T.diag(self.Sigma_b)) + T.diag(T.exp(T.diag(self.Sigma_b))))
        
        # scale transformation
        mu_scaled, Sigma_scaled = ker.sf2**0.5 * self.mu, ker.sf2**0.5 * Sigma
        
        Xtilda = m + S * eps_NQ
        self.U = mu_scaled+Sigma_scaled.dot(eps_M)
        
        Kmm = ker.RBF(self.Z)
        Kmm=mmd.MMD_kenel_Xonly(mmd.Zlabel_T,Kmm,self.Weight)
        KmmInv = sT.matrix_inverse(Kmm) 
        
        Kmn = ker.RBF(self.Z,Xtilda)
        Kmn=mmd.MMD_kenel_ZX(self.Xlabel,Kmn,self.Weight)
        
        Knn = ker.RBF(Xtilda)
        Knn=mmd.MMD_kenel_Xonly(self.Xlabel,Knn,self.Weight)
        
        Ktilda=Knn-T.dot(Kmn.T,T.dot(KmmInv,Kmn))
        
        Kinterval=T.dot(KmmInv,Kmn)
              
        mean_U=T.dot(Kinterval.T,self.U)
        betaI=T.diag(T.dot(self.Xlabel,beta))
        Covariance = betaI       
        
        self.LL = (self.log_mvn(self.X, mean_U, Covariance) - 0.5*T.sum(T.dot(betaI,Ktilda)))            
        self.KL_X = -self.KLD_X(m,S)
        self.KL_U = -self.KLD_U(mu_scaled , Sigma_scaled , Kmm,KmmInv)
Author: futoshi-futami | Project: GP-and-GPLVM | Lines: 97 | Source: DGPLVM_model.py



Note: The theano.tensor.tril function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.

