
Python tensor.jacobian Function Code Examples


This article collects typical usage examples of the theano.tensor.jacobian function in Python. If you have been wondering what exactly the jacobian function does, how to call it, or what real-world uses look like, the curated code samples below should help.



Below are 20 code examples of the jacobian function, sorted by popularity by default.
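Before diving into the examples, here is a minimal usage sketch (an illustrative snippet of my own, not one of the collected examples): T.jacobian takes a vector expression and a variable to differentiate against, and returns the symbolic Jacobian matrix.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')             # symbolic input vector
y = x ** 2                     # elementwise square, so the Jacobian is diagonal
J = T.jacobian(y, x)           # symbolic Jacobian of shape (y.shape[0], x.shape[0])
jac = theano.function([x], J)  # compile into a callable

print(jac(np.array([1.0, 2.0, 3.0])))
# prints a diagonal matrix with 2, 4, 6 on the diagonal,
# since d(x_i**2)/dx_j = 2*x_i when i == j and 0 otherwise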

Example 1: test_dot_not_output

    def test_dot_not_output(self):
        """
        Test the case where the vector input to the dot is not already an
        output of the inner function.
        """

        v = T.vector()
        m = T.matrix()
        output = T.dot(v, m)

        # Compile the function twice, once with the optimization and once
        # without
        opt_mode = mode.including("scan")
        f_opt = theano.function([v, m], T.jacobian(output, v), mode=opt_mode)

        no_opt_mode = mode.excluding("scanOp_pushout_output")
        f_no_opt = theano.function([v, m], T.jacobian(output, v), mode=no_opt_mode)

        # Ensure that the optimization was performed correctly in f_opt
        # The inner function of scan should have only one output and it should
        # not be the result of a Dot
        scan_node = [node for node in f_opt.maker.fgraph.toposort()
                     if isinstance(node.op, Scan)][0]
        assert len(scan_node.op.outputs) == 1
        assert not isinstance(scan_node.op.outputs[0], T.Dot)

        # Ensure that the function compiled with the optimization produces
        # the same results as the function compiled without
        v_value = numpy.random.random((4)).astype(config.floatX)
        m_value = numpy.random.random((4, 5)).astype(config.floatX)

        output_opt = f_opt(v_value, m_value)
        output_no_opt = f_no_opt(v_value, m_value)

        utt.assert_allclose(output_opt, output_no_opt)
Author: Faruk-Ahmed, Project: Theano, Lines: 35, Source: test_scan_opt.py


Example 2: compute_output

    def compute_output(self, network, h_vw, x_vw):
        batch_axis = network.find_hyperparameter(["batch_axis"])
        if batch_axis is None:
            # NOTE: this code path is not tested!
            jacobian = T.jacobian(h_vw.variable.ravel(), x_vw.variable)
            res = (jacobian ** 2).mean()
            res_shape = ()
        else:
            batch_size = h_vw.symbolic_shape()[batch_axis]
            # sum across batch to avoid disconnected input error
            # ravel to be a vector
            h_var = h_vw.variable.sum(axis=batch_axis).ravel()
            x_var = x_vw.variable
            # shape of result = h_var.shape + x_var.shape
            jacobian = T.jacobian(h_var, x_var)
            # put batch axis as first dimension
            # adding 1 to batch axis, because len(h_var.shape) == 1
            swapped_jacobian = jacobian.swapaxes(0, batch_axis + 1)
            # convert to a matrix and mean over elements in a batch
            reshaped_jacobian = swapped_jacobian.reshape((batch_size, -1))
            res = (reshaped_jacobian ** 2).mean(axis=1)
            res_shape = (h_vw.shape[batch_axis],)
        network.create_variable(
            "default",
            variable=res,
            shape=res_shape,
            tags={"output"},
        )
Author: btbasham, Project: treeano, Lines: 28, Source: contraction_penalty.py


Example 3: _grad_single

    def _grad_single(self, ct, s, lnC2, GAMMI2):
        lnC = lnC2
        GAMMI = GAMMI2
        v = self.v  # T.as_tensor(self.v)[:,ct:]
        v0 = T.as_tensor(v[v[:,0]==0, :])
        v1 = T.as_tensor(v[v[:,0]==1, :])

        cnp = v.shape[0]

        # Gradient of fE wrt the priors over final state
        [ofE, oxS], upd_fE_single = th.scan(fn=self._free_energy,
                                   sequences=v,
                                   non_sequences=[s,self.h,lnC,self.b])
        ofE0 = ofE[v0].sum()
        ofE1 = ofE[v1].sum()

        dFE0dlnC = T.jacobian(ofE0, lnC)
        dFE1dlnC = T.jacobian(ofE1, lnC)
        dFEdlnC  = T.jacobian(ofE,  lnC)
        ofE_ = T.vector()
        ofE_.tag.test_value = ofE.tag.test_value

        # Gradient of Gamma with respect to its initial condition:
        GAMMA, upd_GAMMA = th.scan(fn=self._upd_gamma,
               outputs_info=[GAMMI],
               non_sequences=[ofE, self.lambd, self.alpha, self.beta, cnp],
               n_steps=4)
        dGdg = T.grad(GAMMA[-1], GAMMI)

        dGdfE = T.jacobian(GAMMA[-1], ofE)
        dGdlnC = dGdfE.dot(dFEdlnC)

        out1 = ofE0
        out2 = ofE1
        maxout = T.max([out1, out2])

        exp_out1 = T.exp(GAMMA[-1]*(out1 - maxout))
        exp_out2 = T.exp(GAMMA[-1]*(out2 - maxout))
        norm_const = exp_out1 + exp_out2

        # Derivative wrt the second output (gammi):
        Jac1_gammi = (-(out1-out2)*dGdg*
                T.exp(GAMMA[-1]*(out1+out2 - 2*maxout))/(norm_const**2))
        Jac2_gammi = -Jac1_gammi
#        dfd1_tZ = Jac1_gammi*dCdf[1][0]+ Jac2_gammi*dCdf[1][1]

        # Derivative wrt first input (lnc)
        Jac1_lnC = (T.exp(GAMMA[-1]*(out1 + out2 - 2*maxout))/(norm_const**2)*
                  (-dGdlnC*(out1 - out2) - GAMMA[-1]*(dFE0dlnC - dFE1dlnC)))
        Jac2_lnC = -Jac1_lnC

        Jac1 = T.concatenate([T.stack(Jac1_gammi), Jac1_lnC])
        Jac2 = T.concatenate([T.stack(Jac2_gammi), Jac2_lnC])
        self.debug = [Jac1_lnC, Jac2_lnC, Jac2_gammi, Jac1_gammi, dFE0dlnC,
                      dFE1dlnC, dGdg, out1, out2, v0, v1, v, ct]
        return Jac1, Jac2
Author: dcuevasr, Project: actinf, Lines: 56, Source: actinfThClass.py


Example 4: auto4check2

def auto4check2(input, dataset):
    a = theano.shared(value=dataset[0], name="a")
    b = theano.shared(value=dataset[1], name="b")
    c = theano.shared(value=dataset[2], name="c")
    x = T.vector('x')
    u = x[0] - 0.8
    v = x[1] - (a[0] + a[1] * u ** 2 * (1 - u) ** 0.5 - a[2] * u)
    alpha = -b[0] + b[1] * u ** 2 * (1 + u) ** 0.5 + b[2] * u
    beta = c[0] * v ** 2 * (1 - c[1] * v) / (1 + c[2] * u ** 2)
    fx = alpha * np.e ** (-beta)
    g_f_x = T.jacobian(fx, x)
    grad = theano.function([x], g_f_x)
    Hessian = theano.function([x], T.hessian(fx, x))
    H_alpha_x = theano.function([x], T.hessian(alpha, x))
    H_beta_x = theano.function([x], T.hessian(beta, x))
    J_f_alpha = theano.function([x], T.grad(fx, alpha))
    J_f_beta = theano.function([x], T.grad(fx, beta))
    J_alpha_x = theano.function([x], T.grad(alpha, x))

    J_beta_x = theano.function([x], T.grad(beta, x))

    J_f_y = [J_f_alpha(input), J_f_beta(input)]
    J_y_x = [J_alpha_x(input), J_beta_x(input)]
    # print "H_alpha_x"
    # print H_alpha_x(input)
    # print "H_beta_x"
    # print H_beta_x(input)
    # print "J_f_y"
    # print J_f_y
    # print "J_y_x"
    # print J_y_x
    # print grad(input)

    return Hessian(input)
Author: v-shinc, Project: Buaa, Lines: 34, Source: two_uncon.py


Example 5: compile_tan_force

    def compile_tan_force(self, u_np, s_np, *args, **kargs):
        grid = u_np.grid
        grid_math = grid._math
        grid._math = T

        tensor_dim = u_np.ndim + 2
        input_data = T.TensorType('float64', (False,) * tensor_dim)()

        tensor_dim = s_np.ndim
        param = T.TensorType('float64', (False,) * tensor_dim)()
        #param = T.dvector('s')

        u_theano = grid.array(input_data.copy(), u_np.shape)
        s_theano = np.array(param.copy(), s_np.shape)
        
        ret = self._function(u_theano, s_theano, *args, **kargs)

        out_tan = T.jacobian(ret._data, param)

        if _VERBOSE_: print('tangent derived in theano mode, compiling')
        f = theano.function([input_data, param], [out_tan])
        if _VERBOSE_: print('tangent successfully compiled')

        grid._math = grid_math
        return f
Author: pjb236, Project: pascal_lss, Lines: 25, Source: psarray_local.py


Example 6: compute_jacobian

def compute_jacobian(errors, parameters):
    """
    Compute jacobian.

    Parameters
    ----------
    errors : Theano variable
        Computed MSE for each sample separately.

    parameters : list of Theano variable
        Neural network parameters (e.g. weights, biases).

    Returns
    -------
    Theano variable
    """
    n_samples = errors.shape[0]
    J = T.jacobian(errors, wrt=parameters)

    jacobians = []
    for jacobian, parameter in zip(J, parameters):
        jacobian = jacobian.reshape((n_samples, parameter.size))
        jacobians.append(jacobian)

    return T.concatenate(jacobians, axis=1)
Author: itdxer, Project: neupy, Lines: 25, Source: lev_marq.py
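A hypothetical usage sketch for compute_jacobian above (the toy model, names, and shapes are illustrative assumptions, not taken from the neupy source): compute a per-sample error vector, then ask for the stacked Jacobian with respect to the parameters.

import numpy as np
import theano
import theano.tensor as T

W = theano.shared(np.random.randn(3, 2), name='W')    # hypothetical parameter
x = T.matrix('x')                                     # (n_samples, 3) inputs
target = T.matrix('target')                           # (n_samples, 2) targets

prediction = T.dot(x, W)
errors = T.mean((prediction - target) ** 2, axis=1)   # one MSE per sample

J = compute_jacobian(errors, [W])                     # shape (n_samples, W.size)
f = theano.function([x, target], J)
print(f(np.random.randn(5, 3), np.random.randn(5, 2)).shape)  # (5, 6)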


Example 7: hessian

def hessian(objective, argument):
    """
    Compute the directional derivative of the gradient
    (which is equal to the hessian multiplied by direction).
    """
    g = T.grad(objective, argument)

    # Create a new tensor A, which has the same type (i.e. same dimensionality)
    # as argument.
    A = argument.type()

    try:
        # First attempt efficient 'R-op', this directly calculates the
        # directional derivative of the gradient, rather than explicitly
        # calculating the hessian and then multiplying.
        R = T.Rop(g, argument, A)
    except NotImplementedError:
        shp = T.shape(argument)
        H = T.jacobian(g.flatten(), argument).reshape(
                                        T.concatenate([shp, shp]), 2*A.ndim)
        R = T.tensordot(H, A, A.ndim)

    try:
        hess = theano.function([argument, A], R, on_unused_input='raise')
    except theano.compile.UnusedInputError:
        warn('Theano detected unused input - suggests hessian may be zero or '
             'constant.')
        hess = theano.function([argument, A], R, on_unused_input='ignore')
    return hess
Author: gitter-badger, Project: pymanopt, Lines: 29, Source: _theano.py
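A quick sanity check of the identity this function relies on (illustrative toy objective, not from the pymanopt source): the R-op of the gradient in direction v should agree with the explicit Jacobian of the gradient, i.e. the Hessian, multiplied by v.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
v = T.dvector('v')                    # direction
obj = T.sum(x ** 3)                   # toy objective; gradient is 3 * x**2

g = T.grad(obj, x)
hv_rop = T.Rop(g, x, v)               # directional derivative of the gradient
hv_jac = T.dot(T.jacobian(g, x), v)   # explicit Hessian, diag(6*x), times v

f = theano.function([x, v], [hv_rop, hv_jac])
a, b = f(np.ones(4), np.arange(4.0))
assert np.allclose(a, b)              # both equal 6 * x * v elementwise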


Example 8: test_flow_det

def test_flow_det(flow_spec):
    z0 = tt.arange(0, 20).astype('float32')
    flow = flow_spec(dim=20, z0=z0.dimshuffle('x', 0))
    with change_flags(compute_test_value='off'):
        z1 = flow.forward.flatten()
        J = tt.jacobian(z1, z0)
        logJdet = tt.log(tt.abs_(tt.nlinalg.det(J)))
        det = flow.logdet[0]
    np.testing.assert_allclose(logJdet.eval(), det.eval(), atol=0.0001)
Author: zaxtax, Project: pymc3, Lines: 9, Source: test_variational_inference.py


Example 9: test002_jacobian_matrix

def test002_jacobian_matrix():
    x = tensor.matrix()
    y = 2 * x.sum(axis=0)
    rng = numpy.random.RandomState(seed=utt.fetch_seed())
    ev = numpy.zeros((10, 10, 10))
    for dx in xrange(10):
        ev[dx, :, dx] = 2.

    # test when the jacobian is called with a tensor as wrt
    Jx = tensor.jacobian(y, x)
    f = theano.function([x], Jx)
    vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
    assert numpy.allclose(f(vx), ev)

    # test when the jacobian is called with a tuple as wrt
    Jx = tensor.jacobian(y, (x,))
    assert isinstance(Jx, tuple)
    f = theano.function([x], Jx[0])
    vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
    assert numpy.allclose(f(vx), ev)

    # test when the jacobian is called with a list as wrt
    Jx = tensor.jacobian(y, [x])
    assert isinstance(Jx, list)
    f = theano.function([x], Jx[0])
    vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
    assert numpy.allclose(f(vx), ev)

    # test when the jacobian is called with a list of two elements
    z = tensor.matrix()
    y = (x * z).sum(axis=1)
    Js = tensor.jacobian(y, [x, z])
    f = theano.function([x, z], Js)
    vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
    vz = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
    vJs = f(vx, vz)
    evx = numpy.zeros((10, 10, 10))
    evz = numpy.zeros((10, 10, 10))
    for dx in xrange(10):
        evx[dx, dx, :] = vx[dx, :]
        evz[dx, dx, :] = vz[dx, :]
    assert numpy.allclose(vJs[0], evz)
    assert numpy.allclose(vJs[1], evx)
Author: 317070, Project: Theano, Lines: 43, Source: test_2nd_order_grads.py


Example 10: get_gradients

    def get_gradients(self, model, data, ** kwargs):

        space,  sources = self.get_data_specs(model)
        space.validate(data)
        X, Y = data


        theano_rng = RandomStreams(seed = model.rng.randint(2 ** 15))
        noise = theano_rng.random_integers(size = (X.shape[0] * model.k,), low=0, high = model.dict_size - 1)


        delta = model.delta(data)
        p = model.score(X, Y)
        params = model.get_params()

        pos_ = T.jacobian(model.score(X, Y), params, disconnected_inputs='ignore')
        pos_coeff = 1 - T.nnet.sigmoid(model.delta(data))
        pos = []
        for param in pos_:
            axes = [0]
            axes.extend(['x' for item in range(param.ndim - 1)])
            pos.append(pos_coeff.dimshuffle(axes) * param)
        del pos_, pos_coeff

        noise_x = T.tile(X, (model.k, 1))
        neg_ = T.jacobian(model.score(noise_x, noise), params, disconnected_inputs='ignore')
        neg_coeff = T.nnet.sigmoid(model.delta((noise_x, noise)))
        neg = []
        for param in neg_:
            axes = [0]
            axes.extend(['x' for item in range(param.ndim - 1)])
            tmp = neg_coeff.dimshuffle(axes) * param
            new_shape = [X.shape[0], model.k]
            new_shape.extend([tmp.shape[i] for i in range(1, tmp.ndim)])
            neg.append(tmp.reshape(new_shape).sum(axis=1))
        del neg_, neg_coeff


        grads = [(pos_ - neg_).mean(axis=0) for pos_, neg_ in zip(pos, neg)]
        gradients = OrderedDict(izip(params, grads))
        updates = OrderedDict()

        return gradients, updates
Author: Sandy4321, Project: lisa_intern, Lines: 43, Source: cost.py


Example 11: get_stat

def get_stat(f, thetahat):
    fhat = theano.function([theta], f)(thetahat)
    dfhat = theano.function([theta], T.jacobian(f, [theta])[0])(thetahat)
    fhatcov = np.dot(np.dot(dfhat, covhat), dfhat.transpose())
    try:
        fse = np.sqrt(np.diag(fhatcov))
    except:
        fse = np.sqrt(fhatcov)
    ftstat = fhat/fse
    return fhat, fse, ftstat
Author: 9mat, Project: pymnp-logit, Lines: 10, Source: het-probit.py


Example 12: estimate_fisher

def estimate_fisher(outputs, n_outputs, parameters):
    # shape (sample_size, n_outputs, #parameters)
    grads = T.stack(*[util.batched_flatcat(
        T.jacobian(outputs[:, j], parameters))
        for j in xrange(n_outputs)])
    # ravel the batch and output axes so that the product will sum
    # over the outputs *and* over the batch. divide by the batch
    # size to get the batch mean.
    grads = grads.reshape((grads.shape[0] * grads.shape[1], grads.shape[2]))
    fisher = T.dot(grads.T, grads) / grads.shape[0]
    return fisher
Author: cooijmanstim, Project: organic-neural-networks, Lines: 11, Source: main.py


Example 13: test001_jacobian_vector

def test001_jacobian_vector():
    x = tensor.vector()
    y = x * 2
    rng = numpy.random.RandomState(seed=utt.fetch_seed())

    # test when the jacobian is called with a tensor as wrt
    Jx = tensor.jacobian(y, x)
    f = theano.function([x], Jx)
    vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
    assert numpy.allclose(f(vx), numpy.eye(10) * 2)

    # test when the jacobian is called with a tuple as wrt
    Jx = tensor.jacobian(y, (x,))
    assert isinstance(Jx, tuple)
    f = theano.function([x], Jx[0])
    vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
    assert numpy.allclose(f(vx), numpy.eye(10) * 2)

    # test when the jacobian is called with a list as wrt
    Jx = tensor.jacobian(y, [x])
    assert isinstance(Jx, list)
    f = theano.function([x], Jx[0])
    vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
    assert numpy.allclose(f(vx), numpy.eye(10) * 2)

    # test when the jacobian is called with a list of two elements
    z = tensor.vector()
    y = x * z
    Js = tensor.jacobian(y, [x, z])
    f = theano.function([x, z], Js)
    vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
    vz = rng.uniform(size=(10,)).astype(theano.config.floatX)
    vJs = f(vx, vz)
    evx = numpy.zeros((10, 10))
    evz = numpy.zeros((10, 10))
    numpy.fill_diagonal(evx, vx)
    numpy.fill_diagonal(evz, vz)
    assert numpy.allclose(vJs[0], evz)
    assert numpy.allclose(vJs[1], evx)
Author: 317070, Project: Theano, Lines: 39, Source: test_2nd_order_grads.py


Example 14: test_vectors

    def test_vectors(self):
        
        try:
            import theano.tensor as T
            from theano import function            
        except:
            return
            
        for MT in [False, True]:

            # Set up variables and function
            vals = [np.random.randn(20) for i in range(5)]
            f = lambda a, b, c, d, e : a + (b * c) - d ** e

            # Set up our objects
            Cs = [ch.Ch(v) for v in vals]
            C_result = f(*Cs)
            C_result.MT = MT

            # Set up Theano equivalents
            Ts = T.dvectors('T1', 'T2', 'T3', 'T4', 'T5')
            TF = f(*Ts)
            T_result = function(Ts, TF)        

            if False:
                import theano.gradient
                which = 1
                theano_sse = (TF**2.).sum()
                theano_grad = theano.gradient.grad(theano_sse, Ts[which])
                theano_fn = function(Ts, theano_grad)
                print theano_fn(*vals)
                C_result_grad = ch.SumOfSquares(C_result).dr_wrt(Cs[which])
                print C_result_grad
                
                # if True:
                #     aaa = np.linalg.solve(C_result_grad.T.dot(C_result_grad), C_result_grad.dot(np.zeros(C_result_grad.shape[1])))
                #     theano_hes = theano.R_obbb = theano.R_op()
                
                import pdb; pdb.set_trace()

            # Make sure values and derivatives are equal
            np.testing.assert_array_equal(C_result.r, T_result(*vals))
            for k in range(len(vals)):
                theano_derivative = function(Ts, T.jacobian(TF, Ts[k]))(*vals)
                our_derivative = np.array(C_result.dr_wrt(Cs[k]).todense())
                #print theano_derivative, our_derivative   
            
                # Theano produces more NaNs than we do during exponentiation,
                # so we test only on entries where Theano's result is NaN-free.
                without_nans = np.nonzero(np.logical_not(np.isnan(theano_derivative.flatten())))[0]
                np.testing.assert_array_equal(theano_derivative.flatten()[without_nans], our_derivative.flatten()[without_nans])
Author: MPI-IS, Project: chumpy, Lines: 51, Source: test_ch.py


Example 15: test_flow_det_local

def test_flow_det_local(flow_spec):
    z0 = tt.arange(0, 12).astype('float32')
    spec = flow_spec.cls.get_param_spec_for(d=12)
    params = dict()
    for k, shp in spec.items():
        params[k] = np.random.randn(1, *shp).astype('float32')
    flow = flow_spec(dim=12, z0=z0.reshape((1, 1, 12)), **params)
    assert flow.batched
    with change_flags(compute_test_value='off'):
        z1 = flow.forward.flatten()
        J = tt.jacobian(z1, z0)
        logJdet = tt.log(tt.abs_(tt.nlinalg.det(J)))
        det = flow.logdet[0]
    np.testing.assert_allclose(logJdet.eval(), det.eval(), atol=0.0001)
Author: zaxtax, Project: pymc3, Lines: 14, Source: test_variational_inference.py


Example 16: test003_jacobian_scalar

def test003_jacobian_scalar():
    x = tensor.scalar()
    y = x * 2
    rng = numpy.random.RandomState(seed=utt.fetch_seed())

    # test when the jacobian is called with a tensor as wrt
    Jx = tensor.jacobian(y, x)
    f = theano.function([x], Jx)
    vx = numpy.cast[theano.config.floatX](rng.uniform())
    assert numpy.allclose(f(vx), 2)

    # test when the jacobian is called with a tuple as wrt
    Jx = tensor.jacobian(y, (x,))
    assert isinstance(Jx, tuple)
    f = theano.function([x], Jx[0])
    vx = numpy.cast[theano.config.floatX](rng.uniform())
    assert numpy.allclose(f(vx), 2)

    # test when the jacobian is called with a list as wrt
    Jx = tensor.jacobian(y, [x])
    assert isinstance(Jx, list)
    f = theano.function([x], Jx[0])
    vx = numpy.cast[theano.config.floatX](rng.uniform())
    assert numpy.allclose(f(vx), 2)

    # test when the jacobian is called with a list of two elements
    z = tensor.scalar()
    y = x * z
    Jx = tensor.jacobian(y, [x, z])
    f = theano.function([x, z], Jx)
    vx = numpy.cast[theano.config.floatX](rng.uniform())
    vz = numpy.cast[theano.config.floatX](rng.uniform())
    vJx = f(vx, vz)

    assert numpy.allclose(vJx[0], vz)
    assert numpy.allclose(vJx[1], vx)
Author: 317070, Project: Theano, Lines: 36, Source: test_2nd_order_grads.py


Example 17: grad

    def grad(self, inputs, dCdf):
        """ Gradient MTF
        """
        MU = inputs[0][0]
        SD = inputs[0][1]
#        Y = self._normal(just_return = True, MU=MU, SD=SD)
        Y, Y_upd = th.scan(fn=self.norm_fun,
                               sequences=self.counter, non_sequences=[MU, SD])


        dYdMIn = T.jacobian(Y.sum(axis=0), inputs[0])
#        dYdSD = T.jacobian(Y, SD)
#        return dYdMIn[0]*dCdf[0][0] + dYdMIn[1]*dCdf[0][1],
#        return T.as_tensor([dCdf[0][0]*dYdMIn[0][0] + dCdf[0][1]*dYdMIn[1][0],
#                dCdf[0][0]*dYdMIn[0][1] + dCdf[0][1]*dYdMIn[1][1]]),
        return T.as_tensor([dCdf[0].dot(dYdMIn[:,0]), dCdf[0].dot(dYdMIn[:,1])]),
Author: dcuevasr, Project: actinf, Lines: 16, Source: actinfThClass.py


Example 18: compute_hessian

    def compute_hessian(self, objective, argument):
        """
        Computes the directional derivative of the gradient (which is equal to
        the Hessian multiplied by direction).
        """
        g = T.grad(objective, argument)

        # Create a new tensor A, which has the same type (i.e. same
        # dimensionality) as argument.
        try:
            A = argument.type()
        except AttributeError:
            # Assume we are on the product manifold
            A = [arg.type() for arg in argument]

        try:
            # First attempt efficient 'R-op', this directly calculates the
            # directional derivative of the gradient, rather than explicitly
            # calculating the Hessian and then multiplying.
            R = T.Rop(g, argument, A)
        except NotImplementedError:
            # TODO: fix this fallback for the product manifold.
            shp = T.shape(argument)
            H = T.jacobian(g.flatten(), argument).reshape(
                T.concatenate([shp, shp]), 2 * A.ndim)
            R = T.tensordot(H, A, A.ndim)

        try:
            hess = theano.function([argument, A], R, on_unused_input="warn")
        except TypeError:
            hess_prod = theano.function(argument + A, R,
                                        on_unused_input="warn")

            def hess(x, a):
                return hess_prod(*(x + a))

        return hess
Author: j-towns, Project: pymanopt, Lines: 37, Source: _theano.py


Example 19: grad_hess

def grad_hess(objective, argument):
    """
    Compute both the gradient and the directional derivative of the gradient
    (which is equal to the hessian multiplied by direction).
    """
    # TODO: Check that the hessian calculation is correct!
    # TODO: Make this compatible with non-matrix manifolds.
    g = T.grad(objective, argument)
    grad = compile(g, argument)

    # Create a new tensor A, which has the same type (i.e. same dimensionality)
    # as argument.
    A = argument.type()

    try:
        # First attempt efficient 'R-op', this directly calculates the
        # directional derivative of the gradient, rather than explicitly
        # calculating the hessian and then multiplying.
        print("begins")
        sys.stdout.flush()
        R = T.Rop(g, argument, A)
        print("ends")
        sys.stdout.flush()
    except NotImplementedError:
        # This will break if the manifold is not a matrix.
        n, p = T.shape(argument)
        H = T.jacobian(g.flatten(), argument).reshape([n, p, n, p], 4)
        R = T.tensordot(H, A)

    try:
        hess = theano.function([argument, A], R)
    except theano.compile.UnusedInputError:
        warn('Theano detected unused input - suggests hessian may be zero or '
             'constant.')
        hess = theano.function([argument, A], R, on_unused_input='ignore')
    return grad, hess
Author: Nehoroshiy, Project: logmat_riemannian, Lines: 36, Source: theano_functions.py


Example 20: auto4check

def auto4check(dataset, x, tol=1e-9, maxiter=1000):

    t0 = theano.shared(value=dataset[0], name="t0")
    a0 = theano.shared(value=dataset[1], name="a0")
    b0 = theano.shared(value=dataset[2], name="b0")
    c0 = theano.shared(value=dataset[3], name="c0")
    k = T.vector('k')
    a_t = np.e ** (-(k[0] + k[1]) * t0)
    b_t = k[0] / (k[0] + k[1]) * (1 - a_t)
    c_t = k[1] / (k[0] + k[1]) * (1 - a_t)
    f = T.sum((a0 - a_t) ** 2 + (b0 - b_t) ** 2 + (c0 - c_t) ** 2)
    F = theano.function([k], f)
    g_f_k = T.jacobian(f, k)
    j_f_k = theano.function([k], g_f_k)
    H_f_k = T.hessian(f, k)
    Hessian = theano.function([k], H_f_k)


    track, f_val = [], []
    track.append(array(x))
    f_val.append(F(x))
    g = j_f_k(x)
    i = 0
    print "Step =", i, "g=", g, "x=", x, "loss=", F(x)
    while norm(g) > tol:
        i += 1
        if i > maxiter:
            break
        G = Hessian(x)
        s = -np.linalg.solve(G, g)
        x += s
        track.append(array(x))
        f_val.append(F(x))
        g = j_f_k(x)
        print "step =", i, "g=", g, "x=", x, "loss=", F(x), "G=", G
    return x, F(x), track, f_val
Author: v-shinc, Project: Buaa, Lines: 36, Source: two_uncon.py



Note: the theano.tensor.jacobian examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms; the snippets were selected from open-source projects contributed by their respective authors. Copyright of the source code remains with the original authors; consult each project's license before redistributing or using it. Do not reproduce without permission.

