Python mlp.MLP Class Code Examples

This article collects typical usage examples of the Python class pylearn2.models.mlp.MLP. If you have been wondering what exactly the MLP class does, how to use it, or where to find real examples of it in practice, the hand-picked class code examples below may help.



The following presents 20 code examples of the MLP class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
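
Most of the examples below follow the same basic pattern: construct an MLP, request a symbolic batch from its input space, compile the forward pass with Theano, and evaluate it on NumPy data. As a quick orientation, here is a minimal sketch of that pattern. It is not taken from any of the examples; the layer size and names are arbitrary, and it assumes pylearn2 and Theano are installed.

# Minimal sketch of the common MLP workflow; layer size and names are arbitrary.
import numpy as np
import theano
from pylearn2.models.mlp import MLP, Sigmoid

mlp = MLP(nvis=4,  # 4-dimensional input
          layers=[Sigmoid(dim=3, layer_name='h0', irange=0.1)])
X = mlp.get_input_space().make_theano_batch()   # symbolic input batch
f = theano.function([X], mlp.fprop(X))          # compile the forward pass
out = f(np.random.rand(5, 4).astype(theano.config.floatX))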

Example 1: build_mlp_fn

def build_mlp_fn(x0, y0, x1, y1, s0, s1, c, axes):
    """
    Creates a theano function to test the WindowLayer

    Parameters
    ----------
    x0: x coordinate of the left of the window
    y0: y coordinate of the top of the window
    x1: x coordinate of the right of the window
    y1: y coordinate of the bottom of the window
    s0: x shape of the images of the input space
    s1: y shape of the images of the input space
    c: number of channels of the input space
    axes: description of the axes of the input space

    Returns
    -------
    f: a theano function that applies the window layer
    with window (x0, y0, x1, y1).
    """
    mlp = MLP(layers=[WindowLayer('h0', window=(x0, y0, x1, y1))],
              input_space=Conv2DSpace(shape=(s0, s1),
                                      num_channels=c, axes=axes))
    X = mlp.get_input_space().make_batch_theano()
    f = theano.function([X], mlp.fprop(X))
    return f
Developer: 123fengye741, Project: pylearn2, Lines of code: 26, Source: test_windowlayer.py
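
The docstring above lists the window coordinates, image shape, channel count and axis ordering. A hypothetical call (all values made up, not part of the original test) would look like the sketch below, assuming the ('b', 0, 1, 'c') axis ordering used elsewhere in these tests, i.e. batches laid out as (batch, rows, cols, channels).

# Hypothetical usage of build_mlp_fn above; every value here is arbitrary.
import numpy as np
import theano

f = build_mlp_fn(x0=1, y0=1, x1=3, y1=3, s0=5, s1=5, c=2,
                 axes=('b', 0, 1, 'c'))
batch = np.zeros((4, 5, 5, 2), dtype=theano.config.floatX)  # 4 images, 5x5, 2 channels
windowed = f(batch)  # each image presumably cropped to the (1, 1)-(3, 3) window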


Example 2: test_masked_fprop

def test_masked_fprop():
    # Construct a dirt-simple linear network with identity weights.
    mlp = MLP(nvis=2, layers=[Linear(2, 'h0', irange=0),
                              Linear(2, 'h1', irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[1].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[1].set_biases(np.arange(3, 5, dtype=mlp.get_weights().dtype))

    # Verify that get_total_input_dimension works.
    np.testing.assert_equal(mlp.get_total_input_dimension(['h0', 'h1']), 4)
    inp = theano.tensor.matrix()

    # Accumulate the sum of output of all masked networks.
    l = []
    for mask in xrange(16):
        l.append(mlp.masked_fprop(inp, mask))
    outsum = reduce(lambda x, y: x + y, l)

    f = theano.function([inp], outsum, allow_input_downcast=True)
    np.testing.assert_equal(f([[5, 3]]), [[144., 144.]])
    np.testing.assert_equal(f([[2, 7]]), [[96., 208.]])

    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 22)
    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 2,
                             ['h3'])
    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 2,
                             None, 2., {'h3': 4})
Developer: BloodD, Project: pylearn2, Lines of code: 28, Source: test_mlp.py


Example 3: test_kl

def test_kl():
    """
    Test whether function kl() has properly processed the input.
    """
    init_mode = theano.config.compute_test_value
    theano.config.compute_test_value = 'raise'
    
    try:
        mlp = MLP(layers=[Sigmoid(dim=10, layer_name='Y', irange=0.1)],
                  nvis=10)
        X = mlp.get_input_space().make_theano_batch()
        Y = mlp.get_output_space().make_theano_batch()
        X.tag.test_value = np.random.random(
            get_debug_values(X)[0].shape).astype(theano.config.floatX)
        Y_hat = mlp.fprop(X)

        # This call should not raise any error:
        ave = kl(Y, Y_hat, 1)

        # The following calls should raise ValueError exceptions:
        Y.tag.test_value[2][3] = 1.1
        np.testing.assert_raises(ValueError, kl, Y, Y_hat, 1)
        Y.tag.test_value[2][3] = -0.1
        np.testing.assert_raises(ValueError, kl, Y, Y_hat, 1)
    
    finally:
        theano.config.compute_test_value = init_mode
Developer: Deathmonster, Project: pylearn2, Lines of code: 27, Source: test_nnet.py


Example 4: test_gradient_clipping

    def test_gradient_clipping(self):
        """
        Create a known gradient and check whether it is being clipped
        correctly
        """
        mlp = MLP(layers=[Linear(dim=1, irange=0, layer_name='linear')],
                  nvis=1)
        W, b = mlp.layers[0].get_params()
        W.set_value([[10]])

        X = mlp.get_input_space().make_theano_batch()
        y = mlp.get_output_space().make_theano_batch()

        cost = Default()
        gradients, _ = cost.get_gradients(mlp, (X, y))

        clipped_cost = GradientClipping(20, Default())
        clipped_gradients, _ = clipped_cost.get_gradients(mlp, (X, y))

        # The MLP defines f(x) = (x W)^2, with df/dW = 2 W x^2
        f = function([X, y], [gradients[W].sum(), clipped_gradients[W].sum()],
                     allow_input_downcast=True)

        # df/dW = df/db = 20 for W = 10, x = 1, so the norm is 20 * sqrt(2)
        # and the gradients should be clipped to 20 / sqrt(2)
        np.testing.assert_allclose(f([[1]], [[0]]), [20, 20 / np.sqrt(2)])
Developer: 123fengye741, Project: pylearn2, Lines of code: 26, Source: test_gradient_clipping.py
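
The closing comment relies on standard norm-based clipping: when the joint gradient norm exceeds the threshold, every component is rescaled by threshold / norm. A standalone NumPy sketch of that rule (not pylearn2 code) reproduces the 20 / sqrt(2) figure checked by the assertion.

# Plain-NumPy illustration of norm-based gradient clipping; not pylearn2 code.
import numpy as np

def clip_by_global_norm(grads, clip_norm):
    """Scale gradients so that their joint L2 norm is at most clip_norm."""
    norm = np.sqrt(sum((g ** 2).sum() for g in grads))
    scale = clip_norm / norm if norm > clip_norm else 1.0
    return [g * scale for g in grads]

# dW and db are both 20, so the joint norm is 20 * sqrt(2) ~= 28.3 and each
# component is scaled down to 20 / sqrt(2) ~= 14.14, matching the test.
print(clip_by_global_norm([np.array(20.0), np.array(20.0)], clip_norm=20.0))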


Example 5: test_conditional_returns_lr_scalers

def test_conditional_returns_lr_scalers():
    """
    Conditional.get_lr_scalers calls its MLP's get_lr_scalers method
    """
    mlp = MLP(layers=[Linear(layer_name="h", dim=5, irange=0.01, W_lr_scale=0.01)])
    conditional = DummyConditional(mlp=mlp, name="conditional")
    vae = DummyVAE()
    conditional.set_vae(vae)
    conditional.initialize_parameters(input_space=VectorSpace(dim=5), ndim=5)
    testing.assert_equal(conditional.get_lr_scalers(), mlp.get_lr_scalers())
Developer: JesseLivezey, Project: pylearn2, Lines of code: 10, Source: test_vae.py


Example 6: test_sigmoid_detection_cost

def test_sigmoid_detection_cost():
    # This is only a smoke test: verifies that it compiles and runs,
    # not any particular value.
    rng = np.random.RandomState(0)
    y = (rng.uniform(size=(4, 3)) > 0.5).astype('uint8')
    X = theano.shared(rng.uniform(size=(4, 2)))
    model = MLP(nvis=2, layers=[Sigmoid(monitor_style='detection', dim=3,
                layer_name='y', irange=0.8)])
    y_hat = model.fprop(X)
    model.cost(y, y_hat).eval()
Developer: BloodD, Project: pylearn2, Lines of code: 10, Source: test_mlp.py


Example 7: test_dropout_input_mask_value

def test_dropout_input_mask_value():
    # Construct a dirt-simple linear network with identity weights.
    mlp = MLP(nvis=2, layers=[IdentityLayer(2, 'h0', irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[0].dropout_input_mask_value = -np.inf
    inp = theano.tensor.matrix()
    f = theano.function([inp], mlp.masked_fprop(inp, 1, default_input_scale=1),
                        allow_input_downcast=True)
    np.testing.assert_equal(f([[4., 3.]]), [[4., -np.inf]])
Developer: BloodD, Project: pylearn2, Lines of code: 10, Source: test_mlp.py


Example 8: test_softmax_binary_targets

def test_softmax_binary_targets():
    """
    Constructs softmax layers with binary target and with vector targets
    to check that they give the same cost.
    """
    num_classes = 10
    batch_size = 20
    mlp_bin = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1, binary_target_dim=1)],
        nvis=100
    )
    mlp_vec = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1)],
        nvis=100
    )

    X = mlp_bin.get_input_space().make_theano_batch()
    y_bin = mlp_bin.get_target_space().make_theano_batch()
    y_vec = mlp_vec.get_target_space().make_theano_batch()

    y_hat_bin = mlp_bin.fprop(X)
    y_hat_vec = mlp_vec.fprop(X)
    cost_bin = theano.function([X, y_bin], mlp_bin.cost(y_bin, y_hat_bin),
                               allow_input_downcast=True)
    cost_vec = theano.function([X, y_vec], mlp_vec.cost(y_vec, y_hat_vec),
                               allow_input_downcast=True)

    X_data = np.random.random(size=(batch_size, 100))
    y_bin_data = np.random.randint(low=0, high=10, size=(batch_size, 1))
    y_vec_data = np.zeros((batch_size, num_classes))
    y_vec_data[np.arange(batch_size),y_bin_data.flatten()] = 1
    np.testing.assert_allclose(cost_bin(X_data, y_bin_data),
                               cost_vec(X_data, y_vec_data))
Developer: julius506, Project: pylearn2, Lines of code: 33, Source: test_mlp.py


Example 9: test_composite_layer

def test_composite_layer():
    """
    Test the routing functionality of the CompositeLayer
    """
    # Without routing
    composite_layer = CompositeLayer('composite_layer',
                                     [Linear(2, 'h0', irange=0),
                                      Linear(2, 'h1', irange=0),
                                      Linear(2, 'h2', irange=0)])
    mlp = MLP(nvis=2, layers=[composite_layer])
    for i in range(3):
        composite_layer.layers[i].set_weights(
            np.eye(2, dtype=theano.config.floatX)
        )
        composite_layer.layers[i].set_biases(
            np.zeros(2, dtype=theano.config.floatX)
        )
    X = theano.tensor.matrix()
    y = mlp.fprop(X)
    funs = [theano.function([X], y_elem) for y_elem in y]
    x_numeric = np.random.rand(2, 2).astype('float32')
    y_numeric = [f(x_numeric) for f in funs]
    assert np.all(x_numeric == y_numeric)

    # With routing
    for inputs_to_layers in [{0: [1], 1: [2], 2: [0]},
                             {0: [1], 1: [0, 2], 2: []},
                             {0: [], 1: []}]:
        composite_layer = CompositeLayer('composite_layer',
                                         [Linear(2, 'h0', irange=0),
                                          Linear(2, 'h1', irange=0),
                                          Linear(2, 'h2', irange=0)],
                                         inputs_to_layers)
        input_space = CompositeSpace([VectorSpace(dim=2),
                                      VectorSpace(dim=2),
                                      VectorSpace(dim=2)])
        mlp = MLP(input_space=input_space, layers=[composite_layer])
        for i in range(3):
            composite_layer.layers[i].set_weights(
                np.eye(2, dtype=theano.config.floatX)
            )
            composite_layer.layers[i].set_biases(
                np.zeros(2, dtype=theano.config.floatX)
            )
        X = [theano.tensor.matrix() for _ in range(3)]
        y = mlp.fprop(X)
        funs = [theano.function(X, y_elem, on_unused_input='ignore')
                for y_elem in y]
        x_numeric = [np.random.rand(2, 2).astype(theano.config.floatX)
                     for _ in range(3)]
        y_numeric = [f(*x_numeric) for f in funs]
        assert all([all([np.all(x_numeric[i] == y_numeric[j])
                         for j in inputs_to_layers[i]])
                    for i in inputs_to_layers])
Developer: Rt0220, Project: pylearn2, Lines of code: 54, Source: test_mlp.py


Example 10: test_conditional_modify_updates

def test_conditional_modify_updates():
    """
    Conditional.modify_updates calls its MLP's modify_updates method
    """
    mlp = MLP(layers=[Linear(layer_name="h", dim=5, irange=0.01, max_col_norm=0.01)])
    conditional = DummyConditional(mlp=mlp, name="conditional")
    vae = DummyVAE()
    conditional.set_vae(vae)
    conditional.initialize_parameters(input_space=VectorSpace(dim=5), ndim=5)
    updates = OrderedDict(zip(mlp.get_params(), mlp.get_params()))
    testing.assert_equal(conditional.modify_updates(updates), mlp.modify_updates(updates))
Developer: JesseLivezey, Project: pylearn2, Lines of code: 11, Source: test_vae.py


Example 11: test_identity_layer

def test_identity_layer():
    nvis = 10

    mlp = MLP(nvis=nvis, layers=[util.IdentityLayer(layer_name='ident')])

    X = T.matrix()
    f = theano.function([X], mlp.fprop(X))

    for _ in range(5):
        X = np.random.rand(10, nvis).astype(theano.config.floatX)
        yield _test_identity_layer, f, X
Developer: HyoungWooPark, Project: adversarial, Lines of code: 11, Source: test_util.py


Example 12: test_nested_mlp

def test_nested_mlp():
    """
    Constructs a nested MLP and tries to fprop through it
    """
    inner_mlp = MLP(layers=[Linear(10, 'h0', 0.1), Linear(10, 'h1', 0.1)],
                    layer_name='inner_mlp')
    outer_mlp = MLP(layers=[CompositeLayer(layer_name='composite',
                                           layers=[inner_mlp,
                                                   Linear(10, 'h2', 0.1)])],
                    nvis=10)
    X = outer_mlp.get_input_space().make_theano_batch()
    f = theano.function([X], outer_mlp.fprop(X))
    f(np.random.rand(5, 10).astype(theano.config.floatX))
Developer: julius506, Project: pylearn2, Lines of code: 13, Source: test_mlp.py


Example 13: check_unimplemented_case

def check_unimplemented_case(ConvNonlinearity):

    conv_model = MLP(
        input_space=Conv2DSpace(shape=[1, 1], axes=['b', 0, 1, 'c'],
                                num_channels=1),
        layers=[ConvElemwise(layer_name='conv', nonlinearity=ConvNonlinearity,
                             output_channels=1, kernel_shape=[1, 1],
                             pool_shape=[1, 1], pool_stride=[1, 1],
                             irange=1.0)],
        batch_size=1
    )

    X = conv_model.get_input_space().make_theano_batch()
    Y = conv_model.get_target_space().make_theano_batch()
    Y_hat = conv_model.fprop(X)

    # Pass the callable and its arguments separately so assert_raises can
    # intercept the expected NotImplementedError.
    np.testing.assert_raises(NotImplementedError, conv_model.cost, Y, Y_hat)
Developer: matrogers, Project: pylearn2, Lines of code: 15, Source: test_convelemwise_cost.py


Example 14: test_gradient

    def test_gradient(self):
        """
        Testing to see whether the gradient can be calculated.
        """
        rnn = MLP(input_space=SequenceSpace(VectorSpace(dim=1)),
                  layers=[Recurrent(dim=2, layer_name='recurrent',
                                    irange=0, nonlinearity=lambda x: x),
                          Linear(dim=1, layer_name='linear', irange=0)])

        X_data, X_mask = rnn.get_input_space().make_theano_batch()
        y_data, y_mask = rnn.get_output_space().make_theano_batch()

        default_cost = Default()
        cost = default_cost.expr(rnn, ((X_data, X_mask), (y_data, y_mask)))
        tensor.grad(cost, rnn.get_params(), disconnected_inputs='ignore')
Developer: capybaralet, Project: pylearn2, Lines of code: 15, Source: test_rnn.py


Example 15: test_min_zero

def test_min_zero():
    """
    This test guards against a bug where the size of the zero buffer used with
    the min_zero flag was specified to have the wrong size. The bug only
    manifested when compiled with optimizations off, because the optimizations
    discard information about the size of the zero buffer.
    """
    mlp = MLP(input_space=VectorSpace(1),
              layers=[Maxout(layer_name="test_layer", num_units=1,
                             num_pieces=2, irange=.05, min_zero=True)])
    X = T.matrix()
    output = mlp.fprop(X)
    # Compile in debug mode so we don't optimize out the size of the buffer
    # of zeros
    f = function([X], output, mode="DEBUG_MODE")
    f(np.zeros((1, 1)).astype(X.dtype))
Developer: JakeMick, Project: pylearn2, Lines of code: 16, Source: test_maxout.py


Example 16: test_sigmoid_layer_misclass_reporting

def test_sigmoid_layer_misclass_reporting():
    mlp = MLP(nvis=3, layers=[Sigmoid(layer_name='h0', dim=1, irange=0.005,
                                      monitor_style='classification')])
    target = theano.tensor.matrix(dtype=theano.config.floatX)
    batch = theano.tensor.matrix(dtype=theano.config.floatX)
    rval = mlp.layers[0].get_monitoring_channels_from_state(mlp.fprop(batch), target)

    f = theano.function([batch, target], [tensor.gt(mlp.fprop(batch), 0.5),
                                          rval['misclass']],
                        allow_input_downcast=True)
    rng = np.random.RandomState(0)

    for _ in range(10):  # repeat a few times for statistical strength
        targets = (rng.uniform(size=(30, 1)) > 0.5).astype('uint8')
        out, misclass = f(rng.normal(size=(30, 3)), targets)
        np.testing.assert_allclose((targets != out).mean(), misclass)
Developer: capybaralet, Project: current, Lines of code: 16, Source: test_mlp.py
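
The monitored quantity being checked is simply the disagreement rate between the thresholded sigmoid outputs and the binary targets. The assertion at the end of the test amounts to the following plain-NumPy computation (independent of pylearn2; the random values are arbitrary stand-ins).

# Plain-NumPy restatement of the quantity the test verifies.
import numpy as np

rng = np.random.RandomState(0)
targets = (rng.uniform(size=(30, 1)) > 0.5).astype('uint8')
probs = rng.uniform(size=(30, 1))             # stand-in for sigmoid outputs
predictions = (probs > 0.5).astype('uint8')   # threshold at 0.5
misclass = (targets != predictions).mean()    # fraction of mismatches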


Example 17: buildLayer

    def buildLayer(self):    
        # setup layer
        self.layers = []
        for param in self.p_layers:            
            if param[0].param_type==0:
                self.layers = self.layers + DBL_ConvLayers(param)
            elif param[0].param_type==1:
                self.layers = self.layers + DBL_FcLayers(param)
            elif param[0].param_type==2:
                self.layers = self.layers + DBL_CfLayers(param)        
        self.model = MLP(self.layers, input_space=self.ishape)

        # load available weight
        pre_dl_id = self.param_pkl[:self.param_pkl.rfind('_')+1]
        fns = glob.glob(pre_dl_id+'*.pkl')
        epoch_max = 0
        if len(fns)==0:
            # first time to do it, load matlab prior
            mat_init = 'init_p'+str(self.model_id)+'_'+str(self.train_id)+'.mat'
            if os.path.exists(mat_init):
                print "load initial mat weight: ", mat_init
                self.loadWeight(mat_init)
        else:
            for fn in fns:
                epoch_id = int(fn[fn.rfind('_')+1:fn.find('.pkl')])
                if (epoch_id>epoch_max and epoch_id<=self.num_epoch):
                    epoch_max = epoch_id
            if epoch_max>0:
                print "load weight at epoch: ", epoch_max
                self.loadWeight(pre_dl_id+str(epoch_max)+'.pkl')
                self.num_epoch -= epoch_max
        self.p_monitor['epoch'] = epoch_max
Developer: caomw, Project: Deep_wrapper, Lines of code: 32, Source: DBL_model.py


Example 18: test_conditional_encode_conditional_parameters

def test_conditional_encode_conditional_parameters():
    """
    Conditional.encode_conditional_parameters calls its MLP's fprop method
    """
    mlp = MLP(layers=[Linear(layer_name="h", dim=5, irange=0.01, max_col_norm=0.01)])
    conditional = DummyConditional(mlp=mlp, name="conditional")
    vae = DummyVAE()
    conditional.set_vae(vae)
    input_space = VectorSpace(dim=5)
    conditional.initialize_parameters(input_space=input_space, ndim=5)

    X = T.matrix("X")
    mlp_Y1, mlp_Y2 = mlp.fprop(X)
    cond_Y1, cond_Y2 = conditional.encode_conditional_params(X)
    f = theano.function([X], [mlp_Y1, mlp_Y2, cond_Y1, cond_Y2])
    rval = f(as_floatX(numpy.random.uniform(size=(10, 5))))
    numpy.testing.assert_allclose(rval[0], rval[2])
    numpy.testing.assert_allclose(rval[1], rval[3])
Developer: JesseLivezey, Project: pylearn2, Lines of code: 18, Source: test_vae.py


Example 19: test_batchwise_dropout

def test_batchwise_dropout():
    mlp = MLP(nvis=2, layers=[IdentityLayer(2, 'h0', irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[0].dropout_input_mask_value = 0
    inp = theano.tensor.matrix()
    f = theano.function([inp], mlp.dropout_fprop(inp, per_example=False),
                        allow_input_downcast=True)
    for _ in range(10):
        d = f([[3.0, 4.5]] * 3)
        np.testing.assert_equal(d[0], d[1])
        np.testing.assert_equal(d[0], d[2])

    f = theano.function([inp], mlp.dropout_fprop(inp, per_example=True),
                        allow_input_downcast=True)
    d = f([[3.0, 4.5]] * 3)
    print d
    np.testing.assert_(np.any(d[0] != d[1]) or np.any(d[0] != d[2]))
Developer: BloodD, Project: pylearn2, Lines of code: 18, Source: test_mlp.py
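
The test hinges on the semantics of the per_example flag: with per_example=False a single dropout mask is drawn and applied to the whole batch, so every row agrees, while per_example=True draws an independent mask per row. A plain-NumPy illustration of that distinction (not pylearn2 code; the keep probability of 0.5 is arbitrary):

# Batchwise vs. per-example dropout masks, illustrated with NumPy only.
import numpy as np

rng = np.random.RandomState(0)
batch = np.ones((3, 2))
batchwise_mask = rng.binomial(1, 0.5, size=(1, 2))    # one mask, broadcast to all rows
per_example_mask = rng.binomial(1, 0.5, size=(3, 2))  # an independent mask per row
same_rows = batch * batchwise_mask      # identical rows, as the first check expects
mixed_rows = batch * per_example_mask   # rows may differ, as the second check expects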


Example 20: test_show_weights

def test_show_weights():
    """
    Create a pickled model and show the weights
    """
    skip_if_no_matplotlib()
    with open('model.pkl', 'wb') as f:
        model = MLP(layers=[Linear(dim=1, layer_name='h0', irange=0.1)],
                    nvis=784)
        model.dataset_yaml_src = """
!obj:pylearn2.datasets.mnist.MNIST {
        which_set: 'train'
}
"""
        cPickle.dump(model, f, protocol=cPickle.HIGHEST_PROTOCOL)
    show_weights('model.pkl', rescale='individual',
                 border=True, out='garbage.png')
    os.remove('model.pkl')
    os.remove('garbage.png')
Developer: 123fengye741, Project: pylearn2, Lines of code: 18, Source: test_show_weights.py



Note: The pylearn2.models.mlp.MLP class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.

