
Python tensor.dmatrices Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.dmatrices. If you are wondering what dmatrices does, how to call it, or what real-world usage looks like, the curated examples below should help.



Twenty code examples of the dmatrices function are shown below, sorted by popularity by default.
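Before the collected examples, here is a minimal sketch of the pattern they all share (it is not taken from any of the projects cited below): T.dmatrices creates several symbolic float64 matrices in one call, these are combined into an expression graph, and theano.function compiles the graph into a callable.

    import numpy as np
    import theano
    import theano.tensor as T

    # One call creates two symbolic float64 ("double") matrices.
    x, y = T.dmatrices('x', 'y')

    # Build a symbolic expression and compile it into a callable function.
    f = theano.function([x, y], T.dot(x, y.T))

    # The compiled function accepts ordinary NumPy arrays.
    print(f(np.ones((2, 3)), np.ones((4, 3))))  # prints a (2, 4) matrix of 3.0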

Example 1: build_ann

    def build_ann(self, weights, biases, layer_sizes=[784, 400, 10],
                  activation=[Tann.sigmoid, Tann.sigmoid, Tann.sigmoid]):
        """
        Builds a neural network with topology from the layer_sizes.
        :parameter activation is the activation function for the network
        :parameter rand_limit_min is the minimum limit for random initialization of weights for all layers
        :parameter rand_limit_max is the maximum limit for random initialization of weights for all layers
        """
        params = []
        inputs, answers = T.dmatrices('input', 'answers')
        assert len(layer_sizes) >= 2

        # Builds the layers
        for i in range(len(layer_sizes) - 1):
            layer = HiddenLayer(inputs, layer_sizes[i], layer_sizes[i + 1], weights[i], biases[i],
                                activation=activation[i])
            params.append(layer.W)
            params.append(layer.b)
            self.layers.append(layer)

        # Sets up the activation functions through the network
        layer = self.layers[0]
        previous_out = layer.activation(T.dot(layer.input, layer.W) + layer.b)
        x_h_out = previous_out
        for i in range(len(self.layers) - 1):
            layer = self.layers[i + 1]
            x_h_out = layer.activation(T.dot(previous_out, layer.W) + layer.b)
            previous_out = x_h_out
        self.predictor = theano.function([inputs], [x_h_out])  # Activate
Author: mrminy | Project: GomokuPython | Lines: 29 | Source: new_ann.py


Example 2: build_ann

 def build_ann(self, layer_sizes=[784, 24, 10], activation=Tann.sigmoid, rand_limit_min=-.1, rand_limit_max=.1):
     """
     Builds a neural network with topology from the layer_sizes.
     :parameter activation is the activation function for the network
     :parameter rand_limit_min is the minimum limit for random initialization of weights for all layers
     :parameter rand_limit_max is the maximum limit for random initialization of weights for all layers
     """
     params = []
     inputs, answers = T.dmatrices('input', 'answers')
     assert len(layer_sizes) >= 2
     for i in range(len(layer_sizes) - 1):
         layer = HiddenLayer(inputs, layer_sizes[i], layer_sizes[i + 1], activation=activation, rand_limit_min=rand_limit_min, rand_limit_max=rand_limit_max)
         # outputs.append(layer.output)
         params.append(layer.W)
         params.append(layer.b)
         self.layers.append(layer)
     previous_out = self.layers[0].output
     x_h_out = self.layers[0].output
     for i in range(len(self.layers)-1):
         layer = self.layers[i+1]
         x_h_out = Tann.sigmoid(T.dot(previous_out, layer.W) + layer.b)
         previous_out = x_h_out
     error = T.sum((answers - x_h_out) ** 2)
     gradients = T.grad(error, params)
     backprop_acts = [(p, p - self.lrate * g) for p, g in zip(params, gradients)]
     self.predictor = theano.function([inputs], [x_h_out])
     self.trainer = theano.function([inputs, answers], error, updates=backprop_acts)
Author: mrminy | Project: NeuralNetwork2048 | Lines: 27 | Source: ANN.py


Example 3: createGradientFunctions

    def createGradientFunctions(self):
        # Create the Theano variables
        X = T.dmatrices("X")
        mu, logSigma, u, v, f, R = T.dcols("mu", "logSigma", "u", "v", "f", "R")
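        # Note: mu and logSigma are re-bound to shared variables just below, and
        # logLambd is first created as a shared variable and then replaced by a
        # broadcastable symbolic matrix, so several of these bindings are discarded.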
        mu = sharedX( np.random.normal(10, 10, (self.dimTheta, 1)), name='mu') 
        logSigma = sharedX(np.random.uniform(0, 4, (self.dimTheta, 1)), name='logSigma')
        logLambd = sharedX(np.matrix(np.random.uniform(0, 10)),name='logLambd')
        logLambd = T.patternbroadcast(T.dmatrix("logLambd"),[1,1])
        negKL = 0.5 * T.sum(1 + 2*logSigma - mu ** 2 - T.exp(logSigma) ** 2)
        theta = mu+T.exp(logSigma)*v
        W=theta
        y=X[:,0]
        X_sim=X[:,1:]
        f = (T.dot(X_sim,W)+u).flatten()
        
        gradvariables = [mu, logSigma, logLambd]
        
        
        logLike = T.sum(-(0.5 * np.log(2 * np.pi) + logLambd) - 0.5 * ((y-f)/(T.exp(logLambd)))**2)

        logp = (negKL + logLike)/self.m

        optimizer = -logp
        
        self.negKL = th.function([mu, logSigma], negKL, on_unused_input='ignore')
        self.f = th.function(gradvariables + [X,u,v], f, on_unused_input='ignore')
        self.logLike = th.function(gradvariables + [X, u, v], logLike,on_unused_input='ignore')
        derivatives = T.grad(logp,gradvariables)
        derivatives.append(logp)

        self.gradientfunction = th.function(gradvariables + [X, u, v], derivatives, on_unused_input='ignore')
        self.lowerboundfunction = th.function(gradvariables + [X, u, v], logp, on_unused_input='ignore')

        self.optimizer = BatchGradientDescent(objective=optimizer, params=gradvariables,inputs = [X,u,v],conjugate=True,max_iter=1)
Author: onenoc | Project: lfvbae | Lines: 34 | Source: lfvbaeold.py


Example 4: __init__

 def __init__(self, n_x, n_h, n_y, lr=0, nonlinear='softplus', valid_x=None, valid_y=None):
     print 'PL', n_x, n_h, n_y, lr, nonlinear
     if lr == 0: lr = 10. / n_h
     self.lr = lr
     self.fitted = False
     self.n_x = n_x
     self.n_h = n_h
     self.n_y = n_y
     self.nonlinear = nonlinear
     self.valid_x = valid_x
     self.valid_y = valid_y
     
     if self.nonlinear == 'softplus':
         def g(_x): return T.log(T.exp(_x) + 1)
     else:
         raise Exception()
     
     # Define Theano computational graph
     x, y, w1, b1, w2, b2, A = T.dmatrices('x', 'y', 'w1', 'b1', 'w2', 'b2', 'A')
     h1 = g(T.dot(w1, x) + T.dot(b1, A))
     h2 = g(T.dot(w2, h1) + T.dot(b2, A))
     p = T.nnet.softmax(h2.T).T
     logpy = (- T.nnet.categorical_crossentropy(p.T, y.T).T).reshape((1,-1))
     dlogpy_dw = T.grad(logpy.sum(), [w1, b1, w2, b2])
     H = T.nnet.categorical_crossentropy(p.T, p.T).T #entropy
     dH_dw = T.grad(H.sum(), [w1, b1, w2, b2])
     
     # Define functions to call
     self.f_p = theano.function([x, w1, b1, w2, b2, A], p)
     self.f_dlogpy_dw = theano.function([x, y, w1, b1, w2, b2, A], [logpy] + dlogpy_dw)
     self.f_dH_dw = theano.function([x, w1, b1, w2, b2, A], [H] + dH_dw)
Author: Beronx86 | Project: anglepy | Lines: 31 | Source: PL.py


Example 5: compile_theano_functions

def compile_theano_functions():
    """
    Returns compiled theano functions.  
    
    Notes
    -----
    Originally used to speed up multiplication of large matrices and vectors. Caused a strange
    issue in nipype, where nipype unnecessarily reran nodes that use these compiled functions.
    Not used in the current implementation.
    """
    import theano.tensor as T
    import theano
    
    def TnormCols(X):
        """
        Theano expression which centers and normalizes columns of X `||x_i|| = 1`
        """
        Xc = X - X.mean(0)
        return Xc/T.sqrt( (Xc**2.).sum(0) )
    
    def TzscorrCols(Xn):
        """
        Theano expression which returns Fisher transformed correlation values between columns of a
        normalized input, `X_n`.  Diagonal is set to zero.
        """
        C_X = T.dot(Xn.T, Xn)-T.eye(Xn.shape[1])
        return 0.5*T.log((1+C_X)/(1-C_X))
    
    X,Y = T.dmatrices('X','Y')
    tdot = theano.function([X,Y], T.dot(X,Y))
    tnormcols = theano.function([X], TnormCols(X))

    return tdot, tnormcols
Author: JohnGriffiths | Project: C-PAC | Lines: 33 | Source: cwas.py


Example 6: compute_more_than_one

def compute_more_than_one():
    a,b = T.dmatrices('a','b')
    diff = a - b
    abs_diff = abs(diff)
    diff_sq = diff**2
    f = theano.function([a,b],[diff, abs_diff, diff_sq])
    print f([[0,0],[1,2]], [[2,3],[4,1]])
Author: maximus009 | Project: PyNN | Lines: 7 | Source: DL_1.py


Example 7: createObjectiveFunction

    def createObjectiveFunction(self):
        '''
        @description: initialize objective function and minimization function
        @X,y data matrix/vector
        @u random noise for simulator
        @v standard normal for reparametrization trick
        '''
        X,u = T.dmatrices("X","u")
        f, y, v = T.dcols("f", "y", "v")
        
        mu = self.params[0]
        logSigma = self.params[1]
        logLambda = sharedX(np.log(self.sigma_e),name='logLambda')
        #logLambda = self.params[2]

        negKL = 0.5*self.dimTheta+0.5*T.sum(2*logSigma - mu ** 2 - T.exp(logSigma) ** 2)
        f = self.regression_simulator(X,u,v,mu,logSigma)

        logLike = -self.m*(0.5 * np.log(2 * np.pi) + logLambda)-0.5*T.sum((y-f)**2)/(T.exp(logLambda)**2)/self.Lu

        elbo = (negKL + logLike)
        obj = -elbo
        self.lowerboundfunction = th.function([X, y, u, v], obj, on_unused_input='ignore')
        derivatives = T.grad(obj,self.params)
        self.gradientfunction = th.function([X,y,u,v], derivatives, on_unused_input='ignore')
Author: onenoc | Project: lfvbae | Lines: 25 | Source: lfvbae.py


Example 8: multipleThingAtTheSameTime

def multipleThingAtTheSameTime(a, b):
    x, y = T.dmatrices('x', 'y')
    diff = x - y
    abs_diff = abs(diff)
    diff_squared = diff**2
    summ = x + y
    f = th.function([x,y], [diff, abs_diff, diff_squared, summ])
    print(f(a, b))
Author: thbeucher | Project: DQN | Lines: 8 | Source: theanoL.py


Example 9: createGradientFunctions

    def createGradientFunctions(self):
        #Create the Theano variables
        W1,W2,W3,W4,W5,W6,x,eps = T.dmatrices("W1","W2","W3","W4","W5","W6","x","eps")
        #Create biases as cols so they can be broadcasted for minibatches
        b1,b2,b3,b4,b5,b6 = T.dcols("b1","b2","b3","b4","b5","b6")
        z1 = T.col("z1")
        if self.continuous:
            #convolve x
            # no_filters = 100, stride = 4, filter_size = 50

            h_encoder = T.tanh(T.dot(W1,x) + b1)
            #h_encoder = T.dot(W1,x) + b1
        else:   
            h_encoder = T.tanh(T.dot(W1,x) + b1)
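        # (Both branches of the if/else above are currently identical; the
        # commented-out line preserves a purely linear encoder alternative.)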

        mu_encoder = T.dot(W2,h_encoder) + b2
        log_sigma_encoder = 0.5*(T.dot(W3,h_encoder) + b3)

        #Find the hidden variable z
        z = mu_encoder + T.exp(log_sigma_encoder)*eps

        prior = 0.5* T.sum(1 + 2*log_sigma_encoder - mu_encoder**2 - T.exp(2*log_sigma_encoder))


        #Set up decoding layer
        if self.continuous:
            h_decoder = T.nnet.softplus(T.dot(W4,z) + b4)
            h_dec = T.nnet.softplus(T.dot(W4,z1) + b4)

            #h_decoder = T.dot(W4,z) + b4
            #h_dec = T.dot(W4,z1) + b4

            mu_decoder = T.tanh(T.dot(W5,h_decoder) + b5)
            mu_dec = T.tanh(T.dot(W5,h_dec) + b5)
            log_sigma_decoder = 0.5*(T.dot(W6,h_decoder) + b6)
            logpxz = T.sum(-(0.5 * np.log(2 * np.pi) + log_sigma_decoder) - 0.5 * ((x - mu_decoder) / T.exp(log_sigma_decoder))**2)
            gradvariables = [W1,W2,W3,W4,W5,W6,b1,b2,b3,b4,b5,b6]
        else:
            h_decoder = T.tanh(T.dot(W4,z) + b4)
            y = T.nnet.sigmoid(T.dot(W5,h_decoder) + b5)
            logpxz = -T.nnet.binary_crossentropy(y,x).sum()
            gradvariables = [W1,W2,W3,W4,W5,b1,b2,b3,b4,b5]
        logp = logpxz + prior

        #Compute all the gradients
        derivatives = T.grad(logp,gradvariables)

        #Add the lowerbound so we can keep track of results
        derivatives.append(logp)
        
        self.get_z = th.function(gradvariables+[x,eps],z,on_unused_input='ignore')
        self.generate = th.function(gradvariables+[z1,x,eps],mu_dec,on_unused_input='ignore')
        self.predict = th.function(gradvariables+[x,eps],mu_decoder,on_unused_input='ignore')
        self.gradientfunction = th.function(gradvariables + [x,eps], derivatives, on_unused_input='ignore')
        self.lowerboundfunction = th.function(gradvariables + [x,eps], logp, on_unused_input='ignore')
Author: KyriacosShiarli | Project: Variational-Autoencoder | Lines: 58 | Source: VariationalAutoencoder.py


Example 10: test_examples_3

 def test_examples_3(self):
     a, b = T.dmatrices('a', 'b')
     diff         = a - b
     abs_diff     = abs(diff)
     diff_squared = diff**2
     f = function([a, b], [diff, abs_diff, diff_squared])
     elems = f([[1, 1], [1, 1]], [[0, 1], [2, 3]])
     assert numpy.all( elems[0] == array([[ 1.,  0.],[-1., -2.]]))
     assert numpy.all( elems[1] == array([[ 1.,  0.],[ 1.,  2.]]))
     assert numpy.all( elems[2] == array([[ 1.,  0.],[ 1.,  4.]]))
Author: AI-Cdrone | Project: Theano | Lines: 10 | Source: test_tutorial.py


Example 11: variables

    def variables(self):
        
        # Define parameters 'w'
        v = {}
        v['w0x'], v['w0y'] = T.dmatrices('w0x','w0y')
        v['b0'] = T.dmatrix('b0')
        for i in range(1, len(self.n_hidden_q)):
            v['w'+str(i)] = T.dmatrix('w'+str(i))
            v['b'+str(i)] = T.dmatrix('b'+str(i))
        v['mean_w'] = T.dmatrix('mean_w')
        v['mean_b'] = T.dmatrix('mean_b')
        if self.type_qz in ['gaussian','gaussianmarg']:
            v['logvar_w'] = T.dmatrix('logvar_w')
        v['logvar_b'] = T.dmatrix('logvar_b')
        
        w = {}
        w['w0y'], w['w0z'] = T.dmatrices('w0y','w0z')
        w['b0'] = T.dmatrix('b0')
        for i in range(1, len(self.n_hidden_p)):
            w['w'+str(i)] = T.dmatrix('w'+str(i))
            w['b'+str(i)] = T.dmatrix('b'+str(i))
        w['out_w'] = T.dmatrix('out_w')
        w['out_b'] = T.dmatrix('out_b')
        
        if self.type_px == 'sigmoidgaussian' or self.type_px == 'gaussian':
            w['out_logvar_w'] = T.dmatrix('out_logvar_w')
            w['out_logvar_b'] = T.dmatrix('out_logvar_b')
        
        w['logpy'] = T.dmatrix('logpy')
        
        if self.type_pz == 'studentt':
            w['logv'] = T.dmatrix('logv')

        # Define latent variables 'z'
        z = {'eps': T.dmatrix('eps')}
        
        # Define observed variables 'x'
        x = {}
        x['x'] = T.dmatrix('x')
        x['y'] = T.dmatrix('y')
        
        return v, w, x, z
Author: 2020zyc | Project: nips14-ssl | Lines: 42 | Source: VAE_YZ_X.py


Example 12: test_1_examples_compute_more_than_1_return_value

def test_1_examples_compute_more_than_1_return_value():
    a, b = T.dmatrices('a', 'b')
    diff = a - b
    abs_diff = abs(diff)
    diff_squared = diff**2
    f = theano.function([a, b], [diff, abs_diff, diff_squared])

    diff_res, abs_res, diff_squared_res = f([[1, 1], [1, 1]], [[0, 0], [2, 2]])
    np.testing.assert_array_almost_equal(diff_res, [[1, 1], [-1, -1]])
    np.testing.assert_array_almost_equal(abs_res, [[1, 1], [1, 1]])
    np.testing.assert_array_almost_equal(diff_squared_res, [[1, 1], [1, 1]])
Author: consciousnesss | Project: learn_theano | Lines: 11 | Source: test_1_examples.py


Example 13: createGradientFunctions

    def createGradientFunctions(self):
        #Create the Theano variables
        W1,W2,W3,W4,W5,W6,x,eps = T.dmatrices("W1","W2","W3","W4","W5","W6","x","eps")

        #Create biases as cols so they can be broadcasted for minibatches
        b1,b2,b3,b4,b5,b6,pi = T.dcols("b1","b2","b3","b4","b5","b6","pi")
        
        if self.continuous:
            h_encoder = T.nnet.softplus(T.dot(W1,x) + b1)
        else:   
            h_encoder = T.tanh(T.dot(W1,x) + b1)
        print type(pi)    
        rng = T.shared_randomstreams.RandomStreams(seed=124)
        i = rng.choice(size=(1,), a=self.num_model, p=T.nnet.softmax(pi.T).T.flatten())

        mu_encoder = T.dot(W2[i[0]*self.dimZ:(1+i[0])*self.dimZ],h_encoder) + b2[i[0]*self.dimZ:(1+i[0])*self.dimZ]
        log_sigma_encoder = (0.5*(T.dot(W3[i[0]*self.dimZ:(1+i[0])*self.dimZ],h_encoder)))+ b3[i[0]*self.dimZ:(1+i[0])*self.dimZ]

        z = mu_encoder + T.exp(log_sigma_encoder)*eps
     
        
        prior = 0
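        # NB: the loop variable i below shadows the sampled mixture index i
        # drawn from rng.choice above.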
        for i in range(self.num_model):
            prior += T.exp(pi[i][0])*0.5* T.sum(1 + 2*log_sigma_encoder[int(i)*self.dimZ:(1+int(i))*self.dimZ] - mu_encoder[int(i)*self.dimZ:(1+int(i))*self.dimZ]**2 - T.exp(2*log_sigma_encoder[int(i)*self.dimZ:(1+int(i))*self.dimZ]))
        prior /= T.sum(T.exp(pi))
        #Set up decoding layer
        if self.continuous:
            h_decoder = T.nnet.softplus(T.dot(W4,z) + b4)
            mu_decoder = T.nnet.sigmoid(T.dot(W5,h_decoder) + b5)
            log_sigma_decoder = 0.5*(T.dot(W6,h_decoder) + b6)
            logpxz = T.sum(-(0.5 * np.log(2 * np.pi) + log_sigma_decoder) - 0.5 * ((x - mu_decoder) / T.exp(log_sigma_decoder))**2)
            gradvariables = [W1,W2,W3,W4,W5,W6,b1,b2,b3,b4,b5,b6,pi]
        else:
            h_decoder = T.tanh(T.dot(W4,z) + b4)
            y = T.nnet.sigmoid(T.dot(W5,h_decoder) + b5)
            logpxz = -T.nnet.binary_crossentropy(y,x).sum()
            gradvariables = [W1,W2,W3,W4,W5,b1,b2,b3,b4,b5,pi]


        logp = logpxz + prior

        #Compute all the gradients
        derivatives = T.grad(logp,gradvariables)

        #Add the lowerbound so we can keep track of results
        derivatives.append(logpxz)
        
        self.gradientfunction = th.function(gradvariables + [x,eps], derivatives, on_unused_input='ignore')
        self.lowerboundfunction = th.function(gradvariables + [x,eps], logp, on_unused_input='ignore')
        self.hiddenstatefunction = th.function(gradvariables + [x,eps], z, on_unused_input='ignore')
Author: amartya18x | Project: VariationalAutoencoderMixtureGaussian | Lines: 50 | Source: VariationalAutoencoder_mixture.py


Example 14: calc2elements

def calc2elements():
    """
    Compute several outputs from two inputs in one call.
    http://deeplearning.net/software/theano/tutorial/examples.html
    Given two input matrices, returns their elementwise difference, absolute
    difference, and squared difference.
    """
    import theano.tensor as T
    from theano import pp
    a, b = T.dmatrices('a', 'b')
    diff = a - b
    abs_diff = abs(diff)
    diff_square = diff ** 2
    f = function([a, b], [diff, abs_diff, diff_square])
    diff, abs_diff, diff_square = f([[1, 1], [1, 1]], [[0, 1], [2, 3]])

    print (diff)
    print (abs_diff)
    print (diff_square)
Author: yonglei | Project: code | Lines: 18 | Source: theano_tutorial.py


Example 15: __init__

    def __init__(self,
            initial_params=None):
        print 'Setting up variables ...'
        # Parameters
        if initial_params is None:
            initial_params = {'mean':None,
                              'sigma_n':0.+np_uniform_scalar(0),
                              'sigma_f':0.+np_uniform_scalar(0),
                              'l_k':0.+np_uniform_scalar(0)}
        if initial_params['mean'] is None:
            self.mean = shared_scalar(0.)
            self.meanfunc = 'zero'
        else:
            self.mean = shared_scalar(initial_params['mean'])
            self.meanfunc = 'const'
        self.sigma_n = shared_scalar(initial_params['sigma_n'])
        self.sigma_f = shared_scalar(initial_params['sigma_f'])
        self.l_k = shared_scalar(initial_params['l_k'])
        
        # Variables
        X,Y,x_test = T.dmatrices('X','Y','x_test')

        print 'Setting up model ...'
        K, Ks, Kss, y_test_mu, y_test_var, log_likelihood,L,alpha,V,fs2,sW = self.get_model(X, Y, x_test)

        print 'Compiling model ...'
        inputs = {'X': X, 'Y': Y, 'x_test': x_test}
        # solve a bug with derivative wrt inputs not in the graph
        z = 0.0*sum([T.sum(v) for v in inputs.values()])
        f = zip(['K', 'Ks', 'Kss', 'y_test_mu', 'y_test_var', 'log_likelihood',
                 'L','alpha','V','fs2','sW'],
                [K, Ks, Kss, y_test_mu, y_test_var, log_likelihood,
                 L, alpha,V,fs2,sW])
        self.f = {n: theano.function(inputs.values(), f+z, name=n, on_unused_input='ignore')
                     for n, f in f}

        if self.meanfunc == 'zero':
            wrt = {'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
        else:
            wrt = {'mean':self.mean,'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
        
        self.g = {vn: theano.function(inputs.values(), T.grad(log_likelihood,vv),
                                      name=vn,on_unused_input='ignore')
                                      for vn, vv in wrt.iteritems()}
Author: shenxudeu | Project: gp_theano | Lines: 44 | Source: gptheano_model.py


Example 16: JacobiTimesVector

def JacobiTimesVector():
    W, V = T.dmatrices(['W', 'V'])
    x = T.dvector('x')
    y = T.dot(x, W)
    JV = T.Rop(y, W, V)
    f = function([W, V, x], JV)
    print(f(
        [[1, 1], [1, 1]],
        [[2, 2], [2, 2]],
        [0, 1]
    ))

    v = T.dvector('v')
    VJ = T.Lop(y, W, v)
    fL = function([v, x], VJ)
    print(fL(
        [2, 2],
        [0, 1]
    ))
Author: fyabc | Project: TheanoProject | Lines: 19 | Source: derivatives.py
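A note on this example: T.Rop builds the Jacobian-times-vector product J·V of y with respect to W (the R-operator), while T.Lop builds the vector-times-Jacobian product vᵀ·J (the L-operator); both evaluate the product directly without ever materializing the full Jacobian.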


Example 17: _getModel

def _getModel():
    s1, s2 = T.dvectors('s1', 's2')
    t1, t2 = T.dmatrices('t1', 't2')
    gw = T.dvector('gw')
    prank = T.dvector('prank')

    r1 = T.dot(t1, prank)
    r2 = T.dot(t2, prank)

    erd = T.exp(r2 - r1)
    p = erd / (erd + 1)

    loglterms = gw * ((s1 * T.log(1 - p)) + (s2 * T.log(p)))

    logl = -T.sum(loglterms)

    gradf = T.grad(logl, prank)
    hessf = theano.gradient.hessian(logl, prank)

    return s1, s2, t1, t2, gw, prank, loglterms, logl, gradf, hessf
Author: HenryBarnett | Project: foosbot | Lines: 20 | Source: theanorank.py


Example 18: createObjectiveFunction

    def createObjectiveFunction(self):
        '''
        @description: initialize objective function and minimization function
        @X,y data matrix/vector
        @u random noise for simulator
        @v standard normal for reparametrization trick
        '''
        y = T.dmatrices("y")
        i = T.iscalar("i")
        v = T.dscalar("v")
        xStart = T.dvector("xStart")

        mu = self.params[0]
        #logSigma = sharedX(np.random.uniform(0, 1, (self.dimTheta, 1)), name='logSigma')
        logSigma = self.params[1]
        #logLambda = sharedX(np.random.uniform(0, 10), name='logLambda')
        logLambda = self.params[2]

        negKL = 0.5*self.dimTheta+0.5*T.sum(2*logSigma - mu ** 2 - T.exp(logSigma) ** 2)
        self.k = mu+T.exp(logSigma)*v
        V1 = T.dvector("V1")
        V2 = T.dvector("V2")
        results, updates = th.scan(fn=self.fisher_wright_normal_approx, outputs_info=[{'initial':xStart,'taps':[-1]}],sequences=[V1,V2], n_steps=i)
        f = results

        logLike = -self.m*(0.5 * np.log(2 * np.pi) + logLambda)-0.5*T.sum((y-f)**2)/(T.exp(logLambda)**2)
        part2 = f
        #0.5*T.sum((y-f)**2)
        #/(T.exp(logLambda)**2)
        elbo = (negKL + logLike)
        obj = -elbo
        test1 = y[0:self.i/4,:].sum(axis=0)/(self.i/4)
        test2 = y[self.i/4:self.i/2].sum(axis=0)/(self.i/4)
        self.test = th.function([xStart, i, y, v, V1, V2], [test1, test2], on_unused_input='ignore')
        self.part2 = th.function([xStart, i, y, v, V1, V2], part2, updates=updates, on_unused_input='ignore')
        self.logLike = th.function([xStart, i, y, v, V1, V2], logLike, updates=updates, on_unused_input='ignore')
        self.lowerboundfunction = th.function([xStart, i, y, v, V1, V2], obj, updates=updates, on_unused_input='ignore')
        derivatives = T.grad(obj, self.params)
        self.gradientfunction = th.function([xStart, i, y, v, V1, V2], derivatives, updates=updates, on_unused_input='ignore')
Author: onenoc | Project: lfvbae | Lines: 39 | Source: lfvbaeFisherWright.py


Example 19: test

def test():
    # multiple inputs, multiple outputs
    a, b = T.dmatrices('a', 'b')
    diff = a - b
    abs_diff = T.abs_(diff)
    sqr_diff = diff ** 2
    f = function([a, b], [diff, abs_diff, sqr_diff])
    h, i, j = f([[0, 1], [2, 3]], [[4, 5], [6, 7]])

    # default value for function arguments
    a, b = T.dscalars('a', 'b')
    z = a + b
    f = function([a, Param(b, default=1)], z)
    print f(1, b=2)
    print f(1)
    print f(1, 2)

    # shared variable
    state = shared(0)
    inc = T.lscalar('inc') # state is int64 by default
    accumulator = function([inc], state, updates=[(state, state + inc)])
    print accumulator(300)
    print state.get_value()
Author: ZiangYan | Project: learn-new-tools | Lines: 23 | Source: test.py


Example 20:

import theano.tensor as T

a,b = T.dmatrices('a','b')
x,y = T.dmatrices('x','y')

is_train=1

#1=training,2=test
z= T.switch(T.neq(is_train, 0), 1, 2)

print z.eval()

Author: Seleucia | Project: v3d | Lines: 11 | Source: klstm.py
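A note on this last snippet: because is_train is the plain Python constant 1, T.neq(is_train, 0) is always true and z.eval() simply returns 1. In real code, is_train would typically be a symbolic scalar supplied as an input, or substituted via the givens argument when compiling a function.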



Note: The theano.tensor.dmatrices function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors, and copyright remains with the original authors. Consult the corresponding project's License before redistributing or using the code; do not reproduce without permission.

