
Python tensor.verify_grad Function Code Examples


This article collects and summarizes typical usage examples of the theano.tensor.verify_grad function in Python. If you have been wondering exactly how to use verify_grad, what it does, or what calling it looks like in practice, the hand-picked code examples below should help.



The following presents 20 code examples of the verify_grad function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
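
Before diving into the examples, here is a minimal sketch of what a typical call looks like. This snippet is illustrative only; the quadratic function and the 3x4 input shape are assumptions, not taken from any example below:

import numpy
import theano.tensor as T

def fun(x):
    # Any callable that builds a differentiable symbolic output
    # from its symbolic inputs will do.
    return (x ** 2).sum()

# verify_grad compares the symbolic gradient of `fun` against a
# finite-difference estimate at the given numeric point(s); it raises
# an error on mismatch and returns None on success.
rng = numpy.random.RandomState(42)
T.verify_grad(fun, [rng.rand(3, 4)], rng=rng)

As the examples below show, the first argument may be an Op instance, a plain function, or a lambda, provided it builds a differentiable Theano graph.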

Example 1: test_maxpool

    def test_maxpool():
        # generate flattened images
        maxpoolshps = ((2, 2), (3, 3), (4, 4), (5, 5), (6, 6))
        imval = N.random.rand(4, 5, 10, 10)

        images = T.dmatrix()
        for maxpoolshp in maxpoolshps:

            # symbolic stuff
            output, outshp = sp.max_pool(images, imval.shape[1:], maxpoolshp)
            f = function([images], [output])
            output_val = f(imval.reshape(imval.shape[0], -1))

            # numeric verification (floor division keeps the indices integral)
            my_output_val = N.zeros((imval.shape[0], imval.shape[1],
                                     imval.shape[2] // maxpoolshp[0],
                                     imval.shape[3] // maxpoolshp[1]))
            assert N.prod(my_output_val.shape[1:]) == N.prod(N.r_[imval.shape[1], outshp])

            for n in range(imval.shape[0]):
                for k in range(imval.shape[1]):
                    for i in range(imval.shape[2] // maxpoolshp[0]):
                        for j in range(imval.shape[3] // maxpoolshp[1]):
                            ii, jj = i * maxpoolshp[0], j * maxpoolshp[1]
                            patch = imval[n, k, ii:ii + maxpoolshp[0], jj:jj + maxpoolshp[1]]
                            my_output_val[n, k, i, j] = N.max(patch)
            my_output_val = my_output_val.reshape(imval.shape[0], -1)
            assert N.all(output_val == my_output_val)

            def mp(input):
                output, outshp = sp.max_pool(input, imval.shape[1:], maxpoolshp)
                return output
            T.verify_grad(None, mp, [imval.reshape(imval.shape[0], -1)])
Developer: AlexArgus, Project: pylearn2, Lines of code: 33, Source: test_spconv.py


Example 2: test_col_scale

def test_col_scale():
    x = theano.sparse.csc_dmatrix()
    s = theano.tensor.dvector()

    def d(x, s):
        return sp.sp_sum(sp.col_scale(x, s), sparse_grad=True)

    rng = numpy.random.RandomState(8723)
    R = 5
    C = 8

    x_val_dense = numpy.zeros((R, C), dtype="d")
    for idx in [(0, 0), (4, 1), (2, 1), (3, 3), (4, 4), (3, 7), (2, 7)]:
        x_val_dense[idx] = rng.randn()
    x_val = scipy.sparse.csc_matrix(x_val_dense)

    s_val = rng.randn(C)

    f = theano.function([x, s], sp.col_scale(x, s))

    print "A", f(x_val, s_val).toarray()
    print "B", (x_val_dense * s_val)

    assert numpy.all(f(x_val, s_val).toarray() == (x_val_dense * s_val))

    if 0:
        tensor.verify_grad(None, d, [x_val, s_val], mode=theano.Mode(linker="py", optimizer="fast_compile"))
    else:
        print("WARNING: skipping gradient test because verify_grad doesn't support sparse arguments",
              file=sys.stderr)
Developer: huiwenhan, Project: Theano, Lines of code: 29, Source: test_sp.py


Example 3: test_eigvalsh_grad

def test_eigvalsh_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    a = rng.randn(5, 5)
    a = a + a.T
    b = 10 * numpy.eye(5, 5) + rng.randn(5, 5)
    tensor.verify_grad(lambda a, b: eigvalsh(a, b).dot([1, 2, 3, 4, 5]),
                       [a, b], rng=numpy.random)
Developer: Panzermann, Project: Theano, Lines of code: 7, Source: test_linalg.py


Example 4: test_det_grad

def test_det_grad():
    # If scipy is not available, this test will fail, thus we skip it.
    if not use_scipy:
        raise SkipTest('Scipy is not available')
    rng = numpy.random.RandomState(utt.fetch_seed())

    r = rng.randn(5,5)
    tensor.verify_grad(det, [r], rng=numpy.random)
Developer: hamelphi, Project: Theano, Lines of code: 8, Source: test_linalg.py


Example 5: test_expm_grad_3

def test_expm_grad_3():
    # with non-symmetric matrix (complex eigenvectors)
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    A = rng.randn(5, 5).astype(config.floatX)

    tensor.verify_grad(expm, [A,], rng=rng)
Developer: Eileen0909, Project: Theano, Lines of code: 8, Source: test_slinalg.py


Example 6: test_expm_grad_3

def test_expm_grad_3():
    # with non-symmetric matrix (complex eigenvectors)
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    # Always test in float64 for better numerical stability.
    A = rng.randn(5, 5)

    tensor.verify_grad(expm, [A], rng=rng)
Developer: gyenney, Project: Tools, Lines of code: 9, Source: test_slinalg.py


Example 7: test_inverse_grad

def test_inverse_grad():
    rng = np.random.RandomState(utt.fetch_seed())
    r = rng.randn(4, 4)
    tensor.verify_grad(matrix_inverse, [r], rng=np.random)

    rng = np.random.RandomState(utt.fetch_seed())

    r = rng.randn(4, 4)
    tensor.verify_grad(matrix_inverse, [r], rng=np.random)
Developer: EugenePY, Project: Theano, Lines of code: 9, Source: test_nlinalg.py


Example 8: verify_grad

def verify_grad(op, pt, n_tests=2, rng=None, *args, **kwargs):
    """
    Wrapper for tensor/basic.py:verify_grad
    Takes care of seeding the random number generator if None is given
    """
    if rng is None:
        seed_rng()
        rng = numpy.random
    T.verify_grad(op, pt, n_tests, rng, *args, **kwargs)
Developer: gotomypc, Project: Theano, Lines of code: 9, Source: unittest_tools.py
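
A hypothetical call to this wrapper, mirroring Example 7 (matrix_inverse here is just a stand-in for any differentiable op):

import numpy
from theano.tensor.nlinalg import matrix_inverse

# rng is omitted, so the wrapper seeds the global generator itself and
# delegates to T.verify_grad with numpy.random as the rng.
r = numpy.random.RandomState(0).randn(4, 4)
verify_grad(matrix_inverse, [r])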


Example 9: test_fractional_max_pooling_numeric_gradient

    def test_fractional_max_pooling_numeric_gradient():
        def fun(x):
            return fmp.DisjointPseudorandomFractionalMaxPooling2DOp(
                alpha=1.414,
                u=0.5
            )(x)

        T.verify_grad(fun,
                      [np.arange(25).reshape(1, 1, 5, 5).astype(fX)],
                      rng=np.random)
Developer: diogo149, Project: treeano, Lines of code: 10, Source: fractional_max_pooling_test.py


Example 10: test_eigvalsh_grad

def test_eigvalsh_grad():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the geigvalsh op.")
    import scipy.linalg

    rng = numpy.random.RandomState(utt.fetch_seed())
    a = rng.randn(5, 5)
    a = a + a.T
    b = 10 * numpy.eye(5, 5) + rng.randn(5, 5)
    tensor.verify_grad(lambda a, b: eigvalsh(a, b).dot([1, 2, 3, 4, 5]),
                       [a, b], rng=numpy.random)
Developer: computer-whisperer, Project: Theano, Lines of code: 10, Source: test_slinalg.py


Example 11: test_expm_grad_2

def test_expm_grad_2():
    # with non-symmetric matrix with real eigenspectra
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    A = rng.randn(5, 5).astype(config.floatX)
    w = (rng.randn(5).astype(config.floatX))**2
    A = (numpy.diag(w**0.5)).dot(A + A.T).dot(numpy.diag(w**(-0.5)))
    assert not numpy.allclose(A, A.T)

    tensor.verify_grad(expm, [A,], rng=rng)
Developer: Eileen0909, Project: Theano, Lines of code: 11, Source: test_slinalg.py


Example 12: test_expm_grad_2

def test_expm_grad_2():
    # with non-symmetric matrix with real eigenspectra
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    # Always test in float64 for better numerical stability.
    A = rng.randn(5, 5)
    w = rng.randn(5)**2
    A = (numpy.diag(w**0.5)).dot(A + A.T).dot(numpy.diag(w**(-0.5)))
    assert not numpy.allclose(A, A.T)

    tensor.verify_grad(expm, [A], rng=rng)
Developer: gyenney, Project: Tools, Lines of code: 12, Source: test_slinalg.py


Example 13: test_softmax_grad

    def test_softmax_grad(self):
        def cmp(n, m, f, f_gpu):
            data = numpy.arange(n * m, dtype="float32").reshape(n, m)
            gdata = numpy.asarray(data)[:, :, None, None]

            out = f(data)
            gout = numpy.asarray(f_gpu(gdata))[:, :, 0, 0]
            utt.assert_allclose(out, gout)

        x = T.matrix("x", "float32")
        x_gpu = T.tensor4("x_gpu", "float32")
        f_z = T.nnet.softmax_op
        f_gpu = dnn.GpuDnnSoftmax("accurate", "channel")

        # Verify the grad operation
        dims = (2, 3, 4, 5)
        gdata = numpy.arange(numpy.prod(dims), dtype="float32").reshape(dims)
        T.verify_grad(f_gpu, [gdata], rng=numpy.random, mode=mode_with_gpu)

        # Verify that the CPU and GPU implementations return the same results
        # up to a tolerance.

        self._test_softmax(x, x_gpu, f_z, f_gpu, cmp)

        self._test_softmax(x, x, f_z, f_z, self._cmp)

        # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad
        # optimization is applied when cudnn is required
        y = T.fvector("y")
        f = theano.function([y], T.grad(T.nnet.softmax(y).mean(), y), mode=mode_with_gpu)
        sorted_f = f.maker.fgraph.toposort()
        assert len([i for i in sorted_f if isinstance(i.op, self.gpu_grad_op)]) == 1
        assert len([i for i in sorted_f if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0

        # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad
        # optimization is not applied when cudnn is excluded or not
        # available
        mode_wo_cudnn = mode_with_gpu.excluding("cudnn")
        y = T.fvector("y")
        f = theano.function([y], T.grad(T.nnet.softmax(y).mean(), y), mode=mode_wo_cudnn)
        sorted_f = f.maker.fgraph.toposort()
        assert len([i for i in sorted_f if isinstance(i.op, self.gpu_grad_op)]) == 0
        assert len([i for i in sorted_f if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 1

        # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad do not
        # crash with manual graph
        y = T.fvector("y")
        o = theano.tensor.nnet.SoftmaxGrad()(y, y * 2)
        f = theano.function([y], o, mode=mode_with_gpu)
        sorted_f = f.maker.fgraph.toposort()
        assert len([i for i in sorted_f if isinstance(i.op, self.gpu_grad_op)]) == 1
        assert len([i for i in sorted_f if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0
Developer: huamichaelchen, Project: Theano, Lines of code: 52, Source: test_dnn.py


Example 14: test_verify_exprgrad

def test_verify_exprgrad():
    from theano import tensor
    import numpy
    x = tensor.scalar()
    f = theano.function([x], x)
    #def f(x):
    #    return x

    x_val = numpy.asarray([0.1, 0.2])

    rng = numpy.random.RandomState(42)

    print('going')
    print(tensor.verify_grad(f, [x_val], rng=rng))
Developer: ishalyminov, Project: xtrack2, Lines of code: 14, Source: layers_test.py


Example 15: test_grad

    def test_grad(self):
        x = tensor.matrix('x')
        one_of_n = tensor.lvector('one_of_n')
        op = crossentropy_categorical_1hot
        xe = op(x, one_of_n)
        f = theano.function([x, one_of_n], xe)
        x_val = numpy.asarray([[.4, .6, .0], [.1, .8, .1]],
                dtype=config.floatX)
        xe_val = f(x_val, [0, 1])
        assert numpy.allclose(xe_val, -numpy.log([.4, .8]))

        def oplike(x):
            return op(x, [0, 1])

        tensor.verify_grad(oplike, [x_val], rng=numpy.random)
Developer: repos-python, Project: Theano, Lines of code: 15, Source: test_nnet.py


Example 16: test_other_grad_tests

    def test_other_grad_tests(self):
        x = theano.tensor.dmatrix()
        x_val1 = numpy.array([[1,2,3],[0,5,6],[0,0,9]], dtype='float32')
        x_val2 = numpy.array([[1,2,0],[0,5,6],[7,8,9],[9,10,0]], dtype='float32')
        rng = numpy.random.RandomState(43)

        p = Prod(axis=1)
        grad_p = theano.tensor.grad(p(x).sum(), x)
        grad_fn = theano.function([x], grad_p, mode=self.mode)
        assert numpy.allclose(grad_fn(x_val1), [[6.,3.,2.],[30.,0.,0.],[0.,0.,0.]])
        assert numpy.allclose(grad_fn(x_val2), [[0., 0., 2.], [30., 0., 0.], [72., 63., 56.], [0., 0., 90.]])

        p_axis0 = Prod(axis=0)
        grad_p_axis0 = theano.tensor.grad(p_axis0(x).sum(), x)
        grad_fn_axis0 = theano.function([x], grad_p_axis0, mode=self.mode)
        assert numpy.allclose(grad_fn_axis0(x_val2), [[0., 400., 0.],[63., 160., 0.], [0., 100., 0.], [0., 80., 0.]])

        tensor.verify_grad(p, [x_val1], rng=rng, mode=self.mode)
Developer: delallea, Project: Theano, Lines of code: 18, Source: test_elemwise.py


Example 17: verify_layers

def verify_layers(
        batch_size,
        layers,
        train_set_x,
        train_set_y
):
    index = 0
    range_start = index*batch_size
    range_end = (index+1)*batch_size

    sample = train_set_x[range_start:range_end]
    layer_0_activation = layers[0].output(sample).eval()
    layer_1_activation = layers[1].output(layer_0_activation)

    layer_1_cost = layers[1].cost(
        T.nnet.softmax(T.mean(
            layer_1_activation,
            axis=2
        )),
        train_set_y[range_start:range_end]
    )

    layer_0_cost = layers[1].cost(
        T.nnet.softmax(T.mean(
            layers[1].output(layers[0].output(sample)),
            axis=2
        )),
        train_set_y[range_start:range_end]
    )

    temp = verify_layer(layer_1_cost, layers[1].W)
    T.verify_grad(
        temp,
        [layers[1].W.get_value()],
        rng=np.random.RandomState()
    )

    temp = verify_layer(layer_0_cost, layers[0].W)
    T.verify_grad(
        temp,
        [layers[0].W.get_value()],
        rng=np.random.RandomState()
    )
Developer: daemonmaker, Project: biglittle, Lines of code: 43, Source: utils.py


Example 18: DisjointPseudorandomFractionalMaxPooling2DOp

        thunk.outputs = outputs
        thunk.lazy = False

        return thunk


if __name__ == "__main__":
    import theano
    import theano.tensor as T
    fX = theano.config.floatX
    inp = T.constant(np.random.randn(10, 10, 10, 10).astype(fX))
    foo = DisjointPseudorandomFractionalMaxPooling2DOp(1.414, 0.5)(inp)
    bar = foo.eval()
    print(np.array(bar))
    print(np.array(bar[0, 0, :2, :2]))
    print(np.array(inp.eval()[0, 0, :4, :4]))

    g = T.grad(foo.sum(), inp)
    choo = np.array(g.eval())
    # print choo
    print(np.array(bar[0, 0, :3, :3]))
    print(np.array(inp.eval()[0, 0, :4, :4]))
    print(choo[0, 0, :4, :4])

    def fun(x):
        return DisjointPseudorandomFractionalMaxPooling2DOp(1.414, 0.5)(x)

    T.verify_grad(fun,
                  [np.arange(25).reshape(1, 1, 5, 5).astype(fX)],
                  rng=np.random)
Developer: diogo149, Project: theano_fractional_max_pooling, Lines of code: 30, Source: fractional_max_pooling_op.py


Example 19: test_cudnn_softmax

    def test_cudnn_softmax(self):
        if not cuda.dnn.dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)

        def cmp(n, m, f, f_gpu):
            data = numpy.arange(n * m, dtype='float32').reshape(n, m)
            gdata = numpy.asarray(data)[:, :, None, None]
            out = f(data)
            gout = numpy.asarray(f_gpu(gdata))[:, :, 0, 0]
            assert numpy.allclose(out, gout), numpy.absolute(out - gout)

        x = T.matrix('x', 'float32')
        x_gpu = T.tensor4('x_gpu', 'float32')
        f_z = T.nnet.softmax
        f_gpu = theano.sandbox.cuda.dnn.GpuDnnSoftmax(
            'bc01',
            'accurate',
            'channel'
        )

        # Verify the grad operation
        dims = (2, 3, 4, 5)
        gdata = numpy.arange(
            numpy.prod(dims),
            dtype='float32'
        ).reshape(dims)
        T.verify_grad(f_gpu, [gdata], rng=numpy.random)

        def check_types(graph, graph_gpu):
            self._check_types(
                graph,
                graph_gpu,
                -1,
                type(f_z),
                theano.sandbox.cuda.dnn.GpuDnnSoftmax
            )

        def check_types_opt(graph, graph_gpu):
            assert isinstance(graph.maker.fgraph.toposort()[-1].op, type(f_z))
            assert len([n for n in graph_gpu.maker.fgraph.toposort()
                        if isinstance(
                            n.op,
                            theano.sandbox.cuda.dnn.GpuDnnSoftmax
                        )]) == 1

        # Verify that the CPU and GPU implementations return the same results
        # up to a tolerance.
        self._test_softmax(
            x,
            x_gpu,
            f_z,
            f_gpu,
            cmp,
            mode_with_gpu,
            check_types
        )

        mode_w_cudnn = mode_with_gpu.including("cudnn")
        self._test_softmax(
            x, x, f_z, f_z, self._cmp,
            mode_w_cudnn, check_types_opt
        )

        # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad optimization is
        # applied when cudnn is required
        y = T.fvector('y')
        f = theano.function(
            [y],
            T.grad(T.nnet.softmax(y).mean(), y),
            mode=mode_with_gpu
        )
        sorted_f = f.maker.fgraph.toposort()
        assert(len([i
                    for i in sorted_f
                    if isinstance(
                        i.op,
                        theano.sandbox.cuda.dnn.GpuDnnSoftmaxGrad
                    )]) == 1)
        assert(len([i
                    for i in sorted_f
                    if isinstance(
                        i.op,
                        theano.tensor.nnet.SoftmaxGrad
                    )]) == 0)

        # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad optimization is not
        # applied when cudnn is excluded or not available
        mode_wo_cudnn = mode_with_gpu.excluding("cudnn")
        y = T.vector('y')
        f = theano.function(
            [y],
            T.grad(T.nnet.softmax(y).mean(), y),
            mode=mode_wo_cudnn
        )
        sorted_f = f.maker.fgraph.toposort()
        assert(len([i
                    for i in sorted_f
                    if isinstance(
                        i.op,
                        theano.sandbox.cuda.dnn.GpuDnnSoftmaxGrad
#......... remainder of the code omitted .........
Developer: euphoris, Project: Theano, Lines of code: 101, Source: test_nnet.py


Example 20: test_extract_diag_grad

def test_extract_diag_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    x = rng.rand(5, 4)
    tensor.verify_grad(extract_diag, [x], rng=rng)
Developer: SinaHonari, Project: Theano, Lines of code: 4, Source: test_linalg.py



Note: The theano.tensor.verify_grad function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many programmers, and copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.

