
Python theano.function Code Examples


This article collects typical usage examples of Python's theano.function. If you have been wondering what theano.function does, how to call it, or what real uses of it look like, the curated code examples here should help.



Twenty code examples of theano.function are presented below, sorted by popularity by default.
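As a quick orientation before the examples: theano.function compiles a symbolic expression graph into an ordinary Python callable. The minimal sketch below is ours, not taken from any of the quoted projects, and shows the basic pattern; the examples that follow layer the optional updates and givens arguments on top of it.

import theano
import theano.tensor as T

x = T.dscalar('x')           # declare a symbolic scalar input
y = x ** 2 + 1               # build a symbolic expression from it
f = theano.function([x], y)  # compile the graph into a callable
print(f(3.0))                # -> 10.0

In the examples, updates=... takes pairs of shared variables and their new values, applied on every call (typically a training step), while givens=... substitutes values into the graph at compile time (typically flags such as is_train).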

Example 1: test_in_transit

def test_in_transit():
    t = np.linspace(-20, 20, 1000)
    m_planet = np.array([0.3, 0.5])
    m_star = 1.45
    r_star = 1.5
    orbit = KeplerianOrbit(
        m_star=m_star,
        r_star=r_star,
        t0=np.array([0.5, 17.4]),
        period=np.array([10.0, 5.3]),
        ecc=np.array([0.1, 0.8]),
        omega=np.array([0.5, 1.3]),
        m_planet=m_planet,
    )

    r_pl = np.array([0.1, 0.03])
    coords = theano.function([], orbit.get_relative_position(t))()
    r2 = coords[0]**2 + coords[1]**2
    inds = theano.function([], orbit.in_transit(t, r=r_pl))()

    m = np.isin(np.arange(len(t)), inds)
    in_ = r2[inds] <= ((r_star + r_pl)**2)[None, :]
    in_ &= coords[2][inds] > 0
    assert np.all(np.any(in_, axis=1))

    out = r2[~m] > ((r_star + r_pl)**2)[None, :]
    out |= coords[2][~m] <= 0
    assert np.all(out)
Author: dfm | Project: exoplanet | Lines: 28 | Source: keplerian_test.py


Example 2: define_train_test_funcs

    def define_train_test_funcs(self):
        activation = self.layers[len(self.layers) - 1].activation
        self.Y = T.matrix("Y")
        pYs = T.reshape(activation, (self.maskY.shape[0] * self.batch_size, self.out_size))
        tYs =  T.reshape(self.Y, (self.maskY.shape[0] * self.batch_size, self.out_size))
        cost = self.categorical_crossentropy(pYs, tYs)
        
        gparams = []
        for param in self.params:
            #gparam = T.grad(cost, param)
            gparam = T.clip(T.grad(cost, param), -10, 10)
            gparams.append(gparam)

        lr = T.scalar("lr")
        # eval(): string to function
        optimizer = eval(self.optimizer)
        updates = optimizer(self.params, gparams, lr)

        #updates = sgd(self.params, gparams, lr)
        #updates = momentum(self.params, gparams, lr)
        #updates = rmsprop(self.params, gparams, lr)
        #updates = adagrad(self.params, gparams, lr)
        #updates = adadelta(self.params, gparams, lr)
        #updates = adam(self.params, gparams, lr)
        
        self.train = theano.function(inputs = [self.X, self.maskX, self.Y, self.maskY, lr, self.batch_size],
                                               givens = {self.is_train : np.cast['int32'](1)},
                                               outputs = cost,
                                               updates = updates)
        self.predict = theano.function(inputs = [self.X, self.maskX, self.batch_size],
                                                 givens = {self.is_train : np.cast['int32'](0)},
                                                 outputs = activation)
Author: BinbinBian | Project: rnn-theano | Lines: 32 | Source: rnn.py


Example 3: test_bilinear_kernel_1D

    def test_bilinear_kernel_1D(self):
        """Test 1D kernels used in bilinear upsampling

        This method tests the correctness of the
        1D kernel values used in bilinear upsampling
        for some upsampling ratios.

        """
        rat = tensor.iscalar()
        kernel_ten = bilinear_kernel_1D(ratio=rat, normalize=False)
        f_ten = theano.function([rat], kernel_ten)

        kernel_ten_norm = bilinear_kernel_1D(ratio=rat, normalize=True)
        f_ten_norm = theano.function([rat], kernel_ten_norm)

        for ratio in [2, 3, 4, 5, 6, 7, 8, 9]:
            # getting the un-normalized kernel
            kernel = bilinear_kernel_1D(ratio=ratio, normalize=False)
            f = theano.function([], kernel)
            kernel_1D = self.numerical_kernel_1D(ratio)
            utt.assert_allclose(kernel_1D, f())
            utt.assert_allclose(kernel_1D, f_ten(ratio))

            # getting the normalized kernel
            kernel = bilinear_kernel_1D(ratio=ratio, normalize=True)
            f = theano.function([], kernel)
            kernel_1D = kernel_1D / float(ratio)
            utt.assert_allclose(kernel_1D, f())
            utt.assert_allclose(kernel_1D, f_ten_norm(ratio))
Author: 5730279821-TA | Project: Theano | Lines: 29 | Source: test_abstract_conv.py


Example 4: adam

def adam(lr, tparams, grads, inp, cost):
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]

    f_grad_shared = theano.function(inp, cost, updates=gsup)

    lr0 = 0.0002
    b1 = 0.1
    b2 = 0.001
    e = 1e-8

    updates = []

    i = theano.shared(numpy.float32(0.))
    i_t = i + 1.
    fix1 = 1. - b1**(i_t)
    fix2 = 1. - b2**(i_t)
    lr_t = lr0 * (tensor.sqrt(fix2) / fix1)

    for p, g in zip(tparams.values(), gshared):
        m = theano.shared(p.get_value() * 0.)
        v = theano.shared(p.get_value() * 0.)
        m_t = (b1 * g) + ((1. - b1) * m)
        v_t = (b2 * tensor.sqr(g)) + ((1. - b2) * v)
        g_t = m_t / (tensor.sqrt(v_t) + e)
        p_t = p - (lr_t * g_t)
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    updates.append((i, i_t))

    f_update = theano.function([lr], [], updates=updates, on_unused_input='ignore')

    return f_grad_shared, f_update
Author: ChuyuHsu | Project: SentenceRepresentation | Lines: 34 | Source: desent.py


Example 5: test_copy_random_state

    def test_copy_random_state(self):

        class Graph():
            def __init__(self, seed=123):
                self.rng = RandomStreams(seed)
                self.y = self.rng.uniform(size=(1,))

        g1 = Graph(seed=123)
        f1 = theano.function([], g1.y)

        g2 = Graph(seed=987)
        f2 = theano.function([], g2.y)

        #print 'By default, the two functions are out of sync.'
        v1 =  f1()
        v2 =  f2()

        def copy_random_state(g1, g2):
            if isinstance(g1.rng, MRG_RandomStreams):
                g2.rng.rstate = g1.rng.rstate
            for (su1, su2) in zip(g1.rng.state_updates, g2.rng.state_updates):
                su2[0].set_value(su1[0].get_value())

        #print 'We now copy the state of the theano random number generators.'
        copy_random_state(g1, g2)
        v3 = f1()
        v4 = f2()
        assert numpy.allclose(v1, 0.72803009)
        assert numpy.allclose(v2, 0.55056769)
        assert numpy.allclose(v3, 0.59044123)
        assert numpy.allclose(v4, 0.59044123)
Author: AI-Cdrone | Project: Theano | Lines: 31 | Source: test_tutorial.py


Example 6: test_pooling_opt

def test_pooling_opt():
    if not dnn.dnn_available(test_ctx_name):
        raise SkipTest(dnn.dnn_available.msg)

    x = T.fmatrix()

    f = theano.function(
        [x],
        pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                ignore_border=True),
        mode=mode_with_gpu)

    assert any([isinstance(n.op, dnn.GpuDnnPool)
                for n in f.maker.fgraph.toposort()])

    f(numpy.zeros((10, 10), dtype='float32'))

    f = theano.function(
        [x],
        T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                       ignore_border=True).sum(),
               x),
        mode=mode_with_gpu.including("cudnn"))

    assert any([isinstance(n.op, dnn.GpuDnnPoolGrad)
                for n in f.maker.fgraph.toposort()])

    f(numpy.zeros((10, 10), dtype='float32'))
Author: nke001 | Project: Theano | Lines: 28 | Source: test_dnn.py


Example 7: test_flatten

def test_flatten():
    m = theano.tensor.fmatrix()
    f = theano.function([m], m.flatten(), mode=mode_with_gpu)
    val = numpy.random.rand(10, 11).astype("float32")
    res = f(val)
    utt.assert_allclose(res, val.flatten())
    assert res.shape == val.flatten().shape
    assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]
    val = numpy.random.rand(10, 11).astype("float32")
    res = f(val)
    utt.assert_allclose(res, val.flatten())
    assert res.shape == val.flatten().shape
    assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]

    f = theano.function([m], m.flatten(ndim=2), mode=mode_with_gpu)
    val = numpy.random.rand(10, 11).astype("float32")
    res = f(val)
    utt.assert_allclose(res, val)
    assert res.shape == val.shape
    assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]

    m = theano.tensor.tensor3()
    f = theano.function([m], m.flatten(ndim=2), mode=mode_with_gpu)
    val = numpy.random.rand(10, 11, 12).astype("float32")
    res = f(val)
    utt.assert_allclose(res, val.reshape(10, -1))
    assert res.shape == val.reshape(10, -1).shape
    assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]
Author: ip01 | Project: Theano | Lines: 28 | Source: test_opt.py


Example 8: time_linker

    def time_linker(name, linker):
        steps_a = 5
        steps_b = 100
        x = tensor.vector()
        a = build_graph(x,steps_a)
        b = build_graph(x,steps_b)


        f_a = function([x], a,
                mode=Mode(optimizer=None, linker=linker()),
                #profile='f_a speed test %s'%name,
                )
        f_b = function([x], b,
                mode=Mode(optimizer=None, linker=linker()),
                #profile='f_b speed test %s'%name,
                )

        print(f_a([2.0, 3.0]))
        t0 = time.time()
        print(f_a([2.0, 3.0]))
        t1 = time.time()

        print(f_b([2.0, 3.0]))

        t2 = time.time()
        print(f_b([2.0, 3.0]))
        t3 = time.time()

        t_a = t1 - t0
        t_b = t3 - t2

        print("%s takes %f s/Kop" % (
                name,
                (1000 * (t_b - t_a) / (steps_b - steps_a))))
Author: olivierverdier | Project: Theano | Lines: 34 | Source: test_vm.py


Example 9: test_pattern_output

    def test_pattern_output(self):
        #print (self.phi.W.get_value(borrow=True))
        assert (self.phi.W.get_value(borrow=True).shape == (self.n,self.d))
        self.phi.W.set_value ( np.array([[1,0]]).T )
        assert (self.phi.W.get_value(borrow=True).shape == (self.n,self.d))

        assert (self.psi.W.get_value(borrow=True).shape == (self.d,self.num_classes))
        self.psi.W.set_value ( np.array([[1,]]) )
        assert (self.psi.W.get_value(borrow=True).shape == (self.d,self.num_classes))

#        assert (self.beta.W.get_value(borrow=True).shape == (self.d, self.m))
#        # [1,1] means that we will project the intermediate representation
#        # onto both dimensions of the output representation
#        self.beta.W.set_value ( np.array([[1,1]]) )
#        assert (self.beta.W.get_value(borrow=True).shape == (self.d, self.m))
        
        test_prediction = lasagne.layers.get_output(self.pattern, deterministic=True)
        test_fn = theano.function([self.input_var], test_prediction)
        X_hat = test_fn(self.X)
        assert ( np.all(X_hat == self.S) )
        
#        self.phi1 = test_prediction
#        self.phi2 = lasagne.layers.get_output(self.pattern, self.side_var, deterministic=True)
        beta_prediction = self.pattern.get_beta_output_for(self.input_var, self.side_var, deterministic=True)
        beta_fn = theano.function([self.input_var, self.side_var], beta_prediction)
        C_hat = beta_fn(self.X, self.CX)
        assert ( np.all(C_hat == self.Cy) )
Author: Beerkay | Project: concarne | Lines: 27 | Source: pattern_linear_test.py


Example 10: test_opt_gpujoin_onlyajoin

def test_opt_gpujoin_onlyajoin():
    # from a bug in normal sampling
    _a = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float32')
    a = cuda.shared_constructor(_a)
    b = cuda.shared_constructor(_b)

    c = tensor.join(1, a, b)

    f = theano.function([], c, mode=mode_with_gpu)

    f()

    graph_nodes = f.maker.fgraph.toposort()

    assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
    assert isinstance(graph_nodes[-2].op, cuda.GpuJoin)

    assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))

    # test mixed dtype
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float64')
    b = theano.tensor.constant(_b)

    c = tensor.join(1, a, b)

    f = theano.function([], c, mode=mode_with_gpu)

    f()

    graph_nodes = f.maker.fgraph.toposort()
    assert isinstance(graph_nodes[-1].op, theano.tensor.Join)

    assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
Author: Abioy | Project: Theano | Lines: 34 | Source: test_opt.py


Example 11: architecture

    def architecture(self, cons, code_layer):
        """Build up the architecture by theano"""
        for i in range(len(self.layers)-1):
            # Initialize shared variables
            init_w = cons*np.random.randn(self.layers[i], self.layers[i+1])
            self.weights.append(th.shared(init_w))
            init_bias = cons*np.random.randn(self.layers[i+1])
            self.biases.append(th.shared(init_bias))

            # Building architecture
            a_before = T.dot(self.a_n[i], self.weights[i]) + \
                self.biases[i].dimshuffle('x', 0)
            a_next = self.activ(a_before)
            self.a_n.append(a_next)

        # help the optimization
        for param in (self.weights+self.biases):
            self.auxiliary.append(th.shared(np.zeros(param.get_value().shape)))

        self.encode = th.function([self.x], self.a_n[code_layer])
        self.decode = th.function([self.a_n[code_layer]], self.a_n[-1])

        # Calculate the cost and gradients
        Cost = (T.sum((self.a_n[-1]-self.y_hat)**2))/self.batch
        params = self.weights + self.biases
        grads = T.grad(Cost, params, disconnected_inputs='ignore')

        # Update parameters
        update_query = self.update(params, grads, self.auxiliary)
        self.gradient_2 = th.function(inputs=[self.x, self.y_hat],
                                      updates=update_query, outputs=Cost)
Author: AaronYALai | Project: Machine_Learning_Techniques | Lines: 31 | Source: autoencoder.py


Example 12: test_alloc_memset_0

def test_alloc_memset_0():
    i = tensor.iscalar()
    z = numpy.zeros((1,), dtype='float32')
    o = numpy.ones((1,), dtype='float32')
    ones = numpy.ones((2,), dtype='float32')

    # Test with 0
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(z)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc) and topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 0).all()

    # Test with 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(o)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 1).all()

    # Test with 1, 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(ones)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(2)) == 1).all()
Author: Abioy | Project: Theano | Lines: 31 | Source: test_opt.py


Example 13: test_local_assert_no_cpu_op

def test_local_assert_no_cpu_op():
    numpy.random.seed(1)
    m = numpy.random.uniform(-1, 1, (10, 10)).astype("float32")
    ms = cuda.shared_constructor(m, name="m_shared")
    out = theano.tensor.tanh(ms).dot(ms.T)

    mode_local_assert = mode_with_gpu.including("assert_no_cpu_op")
    mode_local_assert = mode_local_assert.excluding("local_gpu_elemwise_0")
    mode_local_assert = mode_local_assert.excluding("local_gpu_elemwise_1")

    old = config.assert_no_cpu_op
    old2 = config.on_opt_error
    # If the flag is raise
    try:
        config.assert_no_cpu_op = 'raise'
        config.on_opt_error = 'ignore'

        assert_raises(AssertionError, theano.function,
                        [], out, mode=mode_local_assert)
    finally:
        config.assert_no_cpu_op = old
        config.on_opt_error = old2

    # If the flag is ignore
    try:
        config.assert_no_cpu_op = 'ignore'
        theano.function([], out, mode=mode_local_assert)
    finally:
        config.assert_no_cpu_op = old
Author: Abioy | Project: Theano | Lines: 29 | Source: test_opt.py


Example 14: test_in_transit_circ

def test_in_transit_circ():
    t = np.linspace(-20, 20, 1000)
    m_planet = np.array([0.3, 0.5])
    m_star = 1.45
    r_star = 1.5
    orbit = KeplerianOrbit(
        m_star=m_star,
        r_star=r_star,
        t0=np.array([0.5, 17.4]),
        period=np.array([10.0, 5.3]),
        ecc=np.array([0.0, 0.0]),
        omega=np.array([0.0, 0.0]),
        m_planet=m_planet,
    )
    orbit_circ = KeplerianOrbit(
        m_star=m_star,
        r_star=r_star,
        t0=np.array([0.5, 17.4]),
        period=np.array([10.0, 5.3]),
        m_planet=m_planet,
    )

    r_pl = np.array([0.1, 0.03])
    inds = theano.function([], orbit.in_transit(t, r=r_pl))()
    inds_circ = theano.function([], orbit_circ.in_transit(t, r=r_pl))()
    assert np.all(inds == inds_circ)
Author: dfm | Project: exoplanet | Lines: 26 | Source: keplerian_test.py


Example 15: adadelta

def adadelta(lr,tparams,grads,x,mask,y,cost):
    zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.items()]
    running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
                                 name='%s_rup2' % k)
                   for k, p in tparams.items()]
    running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.items()]

    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]

    f_grad_shared = theano.function([x, mask, y], cost, updates=zgup + rg2up,
                                    name='adadelta_f_grad_shared')

    updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in zip(zipped_grads,
                                     running_up2,
                                     running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
             for ru2, ud in zip(running_up2, updir)]
    # parameter update pairs: apply the adadelta step to each parameter
    param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]

    f_update = theano.function([lr], [], updates=ru2up + param_up,
                               on_unused_input='ignore',
                               name='adadelta_f_update')

    return f_grad_shared, f_update
Author: LibCorner | Project: Theano_note | Lines: 32 | Source: optimizer.py


Example 16: adadelta

def adadelta(lr, tparams, grads, inp, cost, extra_ups=[], extra_outs=[],
             exclude_params=set([])):
    '''Adadelta'''
    zipped_grads = [theano.shared(p.get_value() * np.float32(0.), name='%s_grad'%k)
                    for k, p in tparams.iteritems()]
    running_up2 = [theano.shared(p.get_value() * np.float32(0.), name='%s_rup2'%k)
                   for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * np.float32(0.), name='%s_rgrad2'%k)
                      for k, p in tparams.iteritems()]

    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
        for rg2, g in zip(running_grads2, grads)]

    f_grad_shared = theano.function(
        inp, [cost]+extra_outs, updates=zgup+rg2up+extra_ups, profile=profile)

    updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
        for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(tools.itemlist(tparams), updir)
        if p.name not in exclude_params]

    if not isinstance(lr, list): lr = [lr]
    f_update = theano.function(lr, [], updates=ru2up+param_up,
        on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
Author: Jeremy-E-Johnson | Project: cortex | Lines: 29 | Source: op.py


Example 17: test_local_gpualloc_memset_0

def test_local_gpualloc_memset_0():
    i = theano.tensor.iscalar()
    z = numpy.zeros((1,), dtype='float32')
    o = numpy.ones((1,), dtype='float32')
    ones = numpy.ones((2,), dtype='float32')

    # Test with 0
    a = gpu_alloc(z, i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, GpuAlloc) and topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 0).all()

    # Test with 1
    a = gpu_alloc(o, i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 1).all()

    # Test with 1, 1
    a = gpu_alloc(ones, i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(2)) == 1).all()
Author: Eileen0909 | Project: Theano | Lines: 31 | Source: test_opt.py


Example 18: __init__

  def __init__(self, kernel, max_iter = 10, max_diff = None):
    """

    :param kernel: a function with signature (expected, observed) -> similarity measure
    that accepts symbolic theano expressions and returns one accordingly.
    See `crayimage.hotornot.em.kernels` for examples.
    :param max_iter: maximal number of iterations.
    :param max_diff: stop iterating if the maximal difference in weights from the
    previous iteration is smaller than `max_diff`. If None, the check is not performed.
    """
    self.original_shape = None

    self.kernel = kernel
    self.max_iter = max_iter
    self.max_diff = max_diff

    self.X = theano.shared(
      np.zeros(shape=(0, 0), dtype='float32')
    )

    self.weights = theano.shared(
      np.ones(shape=(0, ), dtype='float32')
    )

    canonical = T.sum(self.weights[:, None] * self.X, axis=0) / T.sum(self.weights)

    weights_updates = self.kernel(canonical, self.X)
    weights_diff = T.max(abs(weights_updates - self.weights))

    upd = {
      self.weights : weights_updates
    }

    self.iteration = theano.function([], weights_diff if max_diff is not None else [], updates=upd)
    self.get_canonical = theano.function([], canonical)
Author: maxim-borisyak | Project: crayimage | Lines: 35 | Source: fast_em.py


Example 19: test_dnn_conv_merge

def test_dnn_conv_merge():
    # This test that we merge correctly multiple dnn_conv.
    if not dnn.dnn_available(test_ctx_name):
        raise SkipTest(dnn.dnn_available.msg)
    img_shp = [2, 5, 6, 8]
    kern_shp = [3, 5, 5, 6]
    img = T.ftensor4('img')
    kern = T.ftensor4('kern')
    out = T.ftensor4('out')
    desc = dnn.GpuDnnConvDesc(
        border_mode='valid')(kern.shape)

    # Test forward op
    o1 = dnn.dnn_conv(img, kern)
    o2 = dnn.dnn_conv(img, kern)
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),
               numpy.random.rand(*kern_shp).astype('float32'))
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]) == 1

    # Test grad w op
    o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc)
    o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc)
    f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]) == 1

    # Test grad i op
    o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc)
    o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc)
    f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]) == 1
Author: nke001 | Project: Theano | Lines: 34 | Source: test_dnn.py


Example 20: initialise_model

 def initialise_model(self, X_train, y_train):
     print('Initialising model...')
     self.input_shape = X_train.shape[1]
     input_var = T.matrix('inputs')
     target_var = T.matrix('targets')
     
     if self.normalise:
         y_train = self.normalise_y(y_train, reset = True)
         X_train = self.normalise_X(X_train, reset = True)
 
     # Create neural network model
     self.network = self.build_custom_mlp(input_var)
     prediction = lasagne.layers.get_output(self.network)
     loss = lasagne.objectives.squared_error(prediction, target_var)
     loss = loss.mean()
     params = lasagne.layers.get_all_params(self.network, trainable=True)
     updates = lasagne.updates.nesterov_momentum(loss, params, 
                                         learning_rate=self.learning_rate, 
                                         momentum=self.momentum)
     test_prediction = lasagne.layers.get_output(self.network,
                                                 deterministic=True)
     test_loss = lasagne.objectives.squared_error(test_prediction,
                                                  target_var)
     test_loss = test_loss.mean()
     self.train_fn = theano.function([input_var, target_var], loss, 
                                updates=updates, allow_input_downcast=True)
     self.predict_output = theano.function([input_var],
                                           outputs=test_prediction,
                                           allow_input_downcast=True)
     self.initialised = True
Author: MBleeker | Project: Data-Mining | Lines: 30 | Source: theano_NN_model.py



Note: the theano.function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms; the snippets were selected from open-source projects contributed by many programmers. Copyright of the source code remains with the original authors; consult each project's license before redistributing or using it. Do not republish without permission.

