• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python train.Train类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中pylearn2.train.Train的典型用法代码示例。如果您正苦于以下问题:Python Train类的具体用法?Python Train怎么用?Python Train使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



在下文中一共展示了Train类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: my_train

def my_train():
    """Train an MLPWithSource with two input sources on CIN_FEATURE2 using BGD."""
    train_set = CIN_FEATURE2(which_set='train')
    valid_set = CIN_FEATURE2(which_set='valid')  # loaded but not monitored here

    # Two parallel linear layers, grouped so each consumes one input source.
    composite = CompositeLayerWithSource(
        layer_name='c',
        layers=[Linear(layer_name='h1', dim=850, irange=0.05),
                Linear(layer_name='h2', dim=556, irange=0.05)])
    output = Linear(layer_name='o', dim=2, irange=0.05)

    model = MLPWithSource(
        batch_size=1140,
        layers=[composite, output],
        input_space=CompositeSpace(components=[VectorSpace(dim=850),
                                               VectorSpace(dim=556)]),
        input_source=['feature850', 'feature556'])

    algorithm = BGD(conjugate=1,
                    # batch_size=1140,
                    line_search_mode='exhaustive',
                    cost=Default(),
                    termination_criterion=EpochCounter(max_epochs=MAX_EPOCHS))

    train = Train(dataset=train_set, model=model, algorithm=algorithm)
    train.main_loop()
开发者ID:jackal092927,项目名称:pylearn2_med,代码行数:27,代码来源:mlp_composite_train.py


示例2: test_train_ae

def test_train_ae():
    ds = MNIST(which_set='train',one_hot=True,all_labelled=ALL_LABELLED,supervised=SUPERVISED)

    gsn = GSN.new(
        layer_sizes=[ds.X.shape[1], HIDDEN_SIZE,ds.X.shape[1]],
        activation_funcs=["sigmoid", "tanh", rescaled_softmax],
        pre_corruptors=[GaussianCorruptor(GAUSSIAN_NOISE)] * 3,
        post_corruptors=[SaltPepperCorruptor(SALT_PEPPER_NOISE), None,SmoothOneHotCorruptor(GAUSSIAN_NOISE)],
        layer_samplers=[BinomialSampler(), None, MultinomialSampler()],
        tied=False
    )

    _mbce = MeanBinaryCrossEntropy()
    reconstruction_cost = lambda a, b: _mbce.cost(a, b) / ds.X.shape[1]

    c = GSNCost([(0, 1.0, reconstruction_cost)], walkback=WALKBACK)

    alg = SGD(
        LEARNING_RATE,
        init_momentum=MOMENTUM,
        cost=c,
        termination_criterion=EpochCounter(MAX_EPOCHS),
        batches_per_iter=BATCHES_PER_EPOCH,
        batch_size=BATCH_SIZE,
        monitoring_dataset=ds,
        monitoring_batches=MONITORING_BATCHES
   )

    trainer = Train(ds, gsn, algorithm=alg, save_path="./results/gsn_ae_trained.pkl",
                    save_freq=5, extensions=[MonitorBasedLRAdjuster()])
    trainer.main_loop()
    print "done training"
开发者ID:GarfieldEr007,项目名称:imageClassification,代码行数:32,代码来源:gsn_wrapper.py


示例3: test_train_ae

def test_train_ae():
    GC = GaussianCorruptor

    gsn = GSN.new(
        layer_sizes=[ds.X.shape[1], 1000],
        activation_funcs=["sigmoid", "tanh"],
        pre_corruptors=[None, GC(1.0)],
        post_corruptors=[SaltPepperCorruptor(0.5), GC(1.0)],
        layer_samplers=[BinomialSampler(), None],
        tied=False
    )

    # average MBCE over example rather than sum it
    _mbce = MeanBinaryCrossEntropy()
    reconstruction_cost = lambda a, b: _mbce.cost(a, b) / ds.X.shape[1]

    c = GSNCost([(0, 1.0, reconstruction_cost)], walkback=WALKBACK)

    alg = SGD(
        LEARNING_RATE,
        init_momentum=MOMENTUM,
        cost=c,
        termination_criterion=EpochCounter(MAX_EPOCHS),
        batches_per_iter=BATCHES_PER_EPOCH,
        batch_size=BATCH_SIZE,
        monitoring_dataset=ds,
        monitoring_batches=10
   )

    trainer = Train(ds, gsn, algorithm=alg, save_path="gsn_ae_example.pkl",
                    save_freq=5)
    trainer.main_loop()
    print "done training"
开发者ID:EderSantana,项目名称:pylearn2,代码行数:33,代码来源:gsn_example.py


示例4: test_training_a_model

def test_training_a_model():
    """
    Tests whether a SparseDataset can be trained with a dummy model.
    """
    dim = 3
    n_examples = 10
    rng = np.random.RandomState([22, 4, 2014])

    # Wrap a random dense matrix as a scipy CSR sparse dataset.
    sparse = csr_matrix(rng.randn(n_examples, dim))
    dataset = SparseDataset(from_scipy_sparse_dataset=sparse)

    model = SoftmaxModel(dim)

    # Two epochs are enough to exercise the training loop.
    algorithm = SGD(1e-1, DummyCost(), batch_size=5,
                    termination_criterion=EpochCounter(2),
                    update_callbacks=None,
                    init_momentum=None,
                    set_batch_size=False)

    train = Train(dataset, model, algorithm,
                  save_path=None, save_freq=0, extensions=None)

    train.main_loop()
开发者ID:Deathmonster,项目名称:pylearn2,代码行数:33,代码来源:test_sparse_dataset.py


示例5: test_batch_size_specialization

def test_batch_size_specialization():
    """Regression test: batch size 1 for training plus a different batch size
    for monitoring must not crash.

    Catches a bug reported on the pylearn-dev list ("monitor assertion error:
    channel_X.type != X.type"): training data got specialized to a row matrix
    (theano tensor with a broadcastable first dim) and the monitor ended up
    mixing specialized and non-specialized versions of the same expression.
    """
    rng = np.random.RandomState([25, 9, 2012])  # created but unused; kept for parity
    dataset = DenseDesignMatrix(X=np.zeros((2, 1)))

    model = SoftmaxModel(1)
    cost = DummyCost()

    algorithm = SGD(1e-3, cost, batch_size=1,
                    monitoring_batches=1,
                    monitoring_dataset=dataset,
                    termination_criterion=EpochCounter(max_epochs=1),
                    update_callbacks=None,
                    set_batch_size=False)

    train = Train(dataset, model, algorithm,
                  save_path=None, save_freq=0, extensions=None)
    train.main_loop()
开发者ID:fancyspeed,项目名称:pylearn2,代码行数:31,代码来源:test_sgd.py


示例6: train

def train():
    LEARNING_RATE = 1e-4
    MOMENTUM = 0.25

    MAX_EPOCHS = 500
    BATCHES_PER_EPOCH = 100
    BATCH_SIZE = 1000

    dataset = FunnelDistribution()
    cost = FunnelGSNCost([(0, 1.0, MSR())], walkback=1)

    gc = GaussianCorruptor(0.75)
    dc = DropoutCorruptor(.5)
    gsn = GSN.new([10, 200, 10],
                  [None, "tanh", "tanh"], # activation
                  [None] * 3, # pre corruption
                  [None] * 3, # post corruption
                  [None] * 3, # layer samplers
                  tied=False)
    gsn._bias_switch = False

    alg = SGD(LEARNING_RATE, init_momentum=MOMENTUM, cost=cost,
              termination_criterion=EpochCounter(MAX_EPOCHS),
              batches_per_iter=BATCHES_PER_EPOCH, batch_size=BATCH_SIZE,
              monitoring_batches=100,
              monitoring_dataset=dataset)

    trainer = Train(dataset, gsn, algorithm=alg, save_path="funnel_gsn.pkl",
                    extensions=[MonitorBasedLRAdjuster()],
                    save_freq=50)

    trainer.main_loop()
    print "done training"
开发者ID:lightcatcher,项目名称:funnel_gsn,代码行数:33,代码来源:learner.py


示例7: test_sgd_sup

def test_sgd_sup():
    """Smoke test: run the SGD algorithm on a supervised cost.

    Does not test for correctness at all — only that the algorithm
    runs without dying.
    """
    dim = 3
    rng = np.random.RandomState([25, 9, 2012])

    def random_one_hot(n_rows):
        # One-hot targets with a random hot column per row.
        hot = rng.randint(0, dim, (n_rows,))
        out = np.zeros((n_rows, dim))
        for row in xrange(n_rows):
            out[row, hot[row]] = 1
        return out

    X = rng.randn(10, dim)
    dataset = DenseDesignMatrix(X=X, y=random_one_hot(10))

    # Including a monitoring dataset lets us test that the monitor
    # works with supervised data.
    X = rng.randn(15, dim)
    monitoring_dataset = DenseDesignMatrix(X=X, y=random_one_hot(15))

    model = SoftmaxModel(dim)

    cost = SupervisedDummyCost()

    # EpochCounter ensures the test actually stops at some point.
    algorithm = SGD(1e-3, cost,
                    batch_size=5,
                    monitoring_batches=3,
                    monitoring_dataset=monitoring_dataset,
                    termination_criterion=EpochCounter(5),
                    update_callbacks=None,
                    init_momentum=None,
                    set_batch_size=False)

    train = Train(dataset,
                  model,
                  algorithm,
                  save_path=None,
                  save_freq=0,
                  extensions=None)

    train.main_loop()
开发者ID:AlexArgus,项目名称:pylearn2,代码行数:60,代码来源:test_sgd.py


示例8: finish_one_layer

def finish_one_layer(X_img_train, X_txt_train, y_train, X_img_test, X_txt_test, y_test, h_units, epochs, lr=0.1, model_type='FullModal', alpha=0.8, layer_num='1', prefix='', suffix='', save_path=''):
    """Prepare + train + test one complete layer.

    Trains a single-layer multimodal autoencoder on paired image/text
    features (checkpointing every 10 epochs), propagates the design
    matrices through the trained layer, evaluates classification on the
    propagated features, and returns the propagated image/text train and
    test matrices so the next layer can stack on them.
    """
    # 1. Build the train/test datasets from the raw design matrices.
    dsit_train, dsit_test = make_dataset(X_img_train=X_img_train, X_txt_train=X_txt_train, y_train=y_train, 
                                                        X_img_test=X_img_test, X_txt_test=X_txt_test, y_test=y_test)

    # 2. Train the single-layer model, monitoring both train and test sets.
    monitoring_dataset = {'train': dsit_train, 'test': dsit_test}

    ae_model = MyMultimodalAutoEncoder(model_type=model_type, alpha=alpha, n_vis_img=X_img_train.shape[1], n_vis_txt=X_txt_train.shape[1], n_hid_img=h_units, n_hid_txt=h_units, dec_f_img=True, dec_f_txt=True)
    alg = SGD(learning_rate=lr, cost=None, batch_size=20, init_momentum=None, monitoring_dataset=monitoring_dataset, termination_criterion=EpochCounter(max_epochs=epochs))

    train = Train(dataset=dsit_train, model=ae_model, algorithm=alg, save_path='multi_ae_save_layer' + layer_num + '.pkl', save_freq=10)

    t0 = time.clock()
    train.main_loop()
    print 'training time for layer%s: %f' % (layer_num, time.clock() - t0)

    # 3. Propagate the design matrices through the trained model.
    X_img_propup_train, X_txt_propup_train, X_img_propup_test, X_txt_propup_test, X_propup_train, X_propup_test = propup_design_matrix(X_train=dsit_train.X, X_test=dsit_test.X, ae_model=ae_model)

    # 4. Evaluate the classification performance of the trained model.
    print '!!!evaluate model on dataset+++++++++++++++++++++++++++++++++++++++++++++++++++++++'
    model_evaluate(X_img_train=X_img_propup_train, X_txt_train=X_txt_propup_train, y_train=y_train, X_img_test= X_img_propup_test, X_txt_test=X_txt_propup_test, y_test=y_test, layer_num=layer_num, prefix=prefix, suffix=suffix, save_path=save_path)

    return X_img_propup_train, X_txt_propup_train, X_img_propup_test, X_txt_propup_test
开发者ID:zanghu,项目名称:MyDNNmodule,代码行数:26,代码来源:corre_ae_tools_cos.py


示例9: test_serialization_guard

def test_serialization_guard():
    """Train must refuse to serialize the dataset (expects RuntimeError)."""
    dim = 2
    n_examples = 11

    rng = np.random.RandomState([28, 9, 2012])
    dataset = DenseDesignMatrix(X=rng.randn(n_examples, dim))

    model = DummyModel(dim)
    # Attach the dataset to the model so serializing the model would
    # reach the dataset.
    model.dataset = dataset

    Monitor.get_monitor(model)

    train = Train(dataset, model, DummyAlgorithm(),
                  save_path='_tmp_unit_test.pkl',
                  save_freq=1, extensions=None)

    try:
        train.main_loop()
    except RuntimeError:
        return
    assert False  # train did not complain, this is a bug
开发者ID:BloodNg,项目名称:pylearn2,代码行数:28,代码来源:test_train.py


示例10: test_multiple_inputs

def test_multiple_inputs():
    """
    Create a VectorSpacesDataset with two inputs (features0 and features1)
    and train an MLP that consumes both inputs for one epoch.
    """
    # Route source 0 to h1 and source 1 to h0 via the routing dict.
    composite = CompositeLayer(
        'composite',
        [Linear(10, 'h0', 0.1), Linear(10, 'h1', 0.1)],
        {0: [1], 1: [0]})

    mlp = MLP(
        layers=[FlattenerLayer(composite), Softmax(5, 'softmax', 0.1)],
        input_space=CompositeSpace([VectorSpace(15), VectorSpace(20)]),
        input_source=('features0', 'features1'))

    def rand_block(cols):
        return np.random.rand(20, cols).astype(theano.config.floatX)

    dataset = VectorSpacesDataset(
        (rand_block(20), rand_block(15), rand_block(5)),
        (CompositeSpace([VectorSpace(20), VectorSpace(15), VectorSpace(5)]),
         ('features1', 'features0', 'targets')))

    train = Train(dataset, mlp, SGD(0.1, batch_size=5))
    train.algorithm.termination_criterion = EpochCounter(1)
    train.main_loop()
开发者ID:lamblin,项目名称:pylearn2,代码行数:35,代码来源:test_mlp.py


示例11: finish_one_layer

def finish_one_layer(X_train, y_train, X_test, y_test, img_units, txt_units, h_units, epochs, lr=0.1, model_type='FullModal', alpha=0.5, beta=0.5, layer_num='1', prefix='', suffix='', save_path=''):
    """
    Prepare + train + test one complete layer.

    For now the single modality is assumed to be an image whose columns
    are split evenly into an "image" half and a "text" half, so
    img_units + txt_units must equal the design-matrix width.
    Returns the propagated train/test design matrices for the next layer.
    """
    # 0. Parameter checks.
    print 'img_units=', img_units
    print 'txt_units=', txt_units
    print 'X_train.shape[1]=', X_train.shape[1]
    assert img_units + txt_units == X_train.shape[1]
    assert img_units + txt_units == X_test.shape[1]
    # 1. Build the train/test datasets.
    dsit_train, dsit_test = make_dataset_single_modal(X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test)

    # 2. Train the single-layer model, monitoring both train and test sets.
    monitoring_dataset = {'train': dsit_train, 'test': dsit_test}
    print 'in finish_one_layer, alpha=%f, beta=%f' % (alpha, beta)
    ae_model = AdjustableMultimodalAutoEncoder(model_type=model_type, alpha=alpha, beta=beta, n_vis_img=img_units, n_vis_txt=txt_units, n_hid_img=h_units, n_hid_txt=h_units, dec_f_img=True, dec_f_txt=True)
    # cost=None so that the cost returned by the model's own
    # get_default_cost() is used.
    alg = SGD(learning_rate=lr, cost=None, batch_size=20, init_momentum=None, monitoring_dataset=monitoring_dataset, termination_criterion=EpochCounter(max_epochs=epochs))

    train = Train(dataset=dsit_train, model=ae_model, algorithm=alg, save_path='multi_ae_save_layer' + layer_num + '.pkl', save_freq=10)

    t0 = time.clock()
    train.main_loop()
    print 'training time for layer%s: %f' % (layer_num, time.clock() - t0)

    # 3. Propagate the design matrices through the trained model.
    X_img_propup_train, X_txt_propup_train, X_img_propup_test, X_txt_propup_test, X_propup_train, X_propup_test = propup_design_matrix(X_train=dsit_train.X, X_test=dsit_test.X, ae_model=ae_model)

    # 4. Evaluate the classification performance of the trained model.
    print '!!!evaluate model on dataset+++++++++++++++++++++++++++++++++++++++++++++++++++++++'
    model_evaluate(X_img_train=X_img_propup_train, X_txt_train=X_txt_propup_train, y_train=y_train, X_img_test= X_img_propup_test, X_txt_test=X_txt_propup_test, y_test=y_test, layer_num=layer_num, prefix=prefix, suffix=suffix, save_path=save_path)

    return X_propup_train, X_propup_test
开发者ID:zanghu,项目名称:MyDNNmodule,代码行数:34,代码来源:corre_ae_tools_single.py


示例12: train_with_monitoring_datasets

    def train_with_monitoring_datasets(train_dataset,
                                       monitoring_datasets,
                                       model_force_batch_size,
                                       train_iteration_mode,
                                       monitor_iteration_mode):
        """Run a short (2-epoch) SGD loop with the given monitoring setup.

        NOTE(review): nested helper — `dim`, `learning_rate` and
        `batch_size` are closed over from the enclosing (unseen) scope.
        """

        model = SoftmaxModel(dim)
        if model_force_batch_size:
            # Pin the model to a fixed batch size when requested.
            model.force_batch_size = model_force_batch_size

        cost = DummyCost()

        algorithm = SGD(learning_rate, cost,
                        batch_size=batch_size,
                        train_iteration_mode=train_iteration_mode,
                        monitor_iteration_mode=monitor_iteration_mode,
                        monitoring_dataset=monitoring_datasets,
                        termination_criterion=EpochCounter(2))

        train = Train(train_dataset,
                      model,
                      algorithm,
                      save_path=None,
                      save_freq=0,
                      extensions=None)

        train.main_loop()
开发者ID:AlexArgus,项目名称:pylearn2,代码行数:27,代码来源:test_sgd.py


示例13: __init__

class RBMTraining:
	"""Helper that wires up and runs pylearn2 RBM training on simulation data."""

	def __init__(self, data_path="./datasets/", save_path="training.pkl", simulation_data = None, identifier = 0):
		# NOTE(review): `!= None` should be `is not None` (PEP 8 / E711).
		self.id = identifier
		self.data_path = data_path
		self.save_path = save_path
		if simulation_data != None:
			self.sim_data = simulation_data
			self.save_data_loaded()
		else:
			self.sim_data = SimulationData(data_path)
			self.load_data()

	def load_data(self):
		"""Load and preprocess simulation data, then split into train/test."""
		self.sim_data.load_data()
		self.sim_data.preprocessor()

		tmp = self.sim_data.split_train_test()
		self.datasets = {'train' : tmp[0], 'test' : tmp[1]}

		self.num_simulations = self.sim_data.num_simulations
		self.input_values = self.sim_data.input_values
		self.output_values = self.sim_data.output_values

	def set_structure(self, num_layers = 4, shape = 'linear'):
		"""Record visible/hidden unit counts and return them.

		NOTE(review): `num_layers` and `shape` are accepted but unused.
		"""
		self.vis = self.input_values
		self.hid = self.output_values
		return [self.vis, self.hid]

	def get_model(self):
		"""Build, store and return the RBM model."""
		self.model = RBM(nvis=self.vis, nhid=self.hid, irange=.05)
		return self.model

	def set_training_criteria(self, 
							learning_rate=0.05,
							batch_size=10, 
							max_epochs=10):
		"""Configure the default training algorithm.

		NOTE(review): `learning_rate` is accepted but never passed through.
		"""
		self.training_alg = DefaultTrainingAlgorithm(batch_size = batch_size, 
													monitoring_dataset = self.datasets, 
													termination_criterion = EpochCounter(max_epochs))

	def set_extensions(self, extensions=None):
		# NOTE(review): ignores the `extensions` argument and always sets None.
		self.extensions = None #[MonitorBasedSaveBest(channel_name='objective',
												#save_path = './training/training_monitor_best.pkl')]

	def set_attributes(self, attributes):
		"""Store arbitrary experiment attributes."""
		self.attributes = attributes

	def define_training_experiment(self, save_freq = 10):
		"""Assemble the pylearn2 Train object for this experiment."""
		self.experiment = Train(dataset=self.datasets['train'], 
								model=self.model, 
								algorithm=self.training_alg, 
								save_path=self.save_path , 
								save_freq=save_freq, 
								allow_overwrite=True, 
								extensions=self.extensions)

	def train_experiment(self):
		"""Run the training main loop."""
		self.experiment.main_loop()
开发者ID:albertomontesg,项目名称:deeplearn,代码行数:60,代码来源:RBMTraining.py


示例14: test_execution_order

def test_execution_order():
    """Ensure save is called directly after monitoring.

    A ParamMonitor extension compares parameter values observed in
    `on_monitor` against those observed in `on_save`.
    """
    model = MLP(nvis=3,
                layers=[Softmax(layer_name='y', n_classes=2, irange=0.)])

    dataset = DenseDesignMatrix(X=np.random.normal(size=(6, 3)),
                                y=np.random.normal(size=(6, 2)))

    algorithm = SGD(batch_size=2, learning_rate=0.1,
                    termination_criterion=EpochCounter(max_epochs=1))

    train = Train(dataset=dataset,
                  model=model,
                  algorithm=algorithm,
                  extensions=[ParamMonitor()],
                  save_freq=1,
                  save_path="save.pkl")

    # Replace the real save with one that only runs the extensions.
    train.save = MethodType(only_run_extensions, train)

    train.main_loop()
开发者ID:123fengye741,项目名称:pylearn2,代码行数:31,代码来源:test_train.py


示例15: test_empty_monitoring_datasets

def test_empty_monitoring_datasets():
    """
    Handling of an empty monitoring-datasets dictionary must not fail.
    """
    dim = 3
    rng = np.random.RandomState([25, 9, 2012])
    train_dataset = DenseDesignMatrix(X=rng.randn(10, dim))

    model = SoftmaxModel(dim)

    algorithm = SGD(1e-3, DummyCost(),
                    batch_size=5,
                    monitoring_dataset={},
                    termination_criterion=EpochCounter(2))

    train = Train(train_dataset,
                  model,
                  algorithm,
                  save_path=None,
                  save_freq=0,
                  extensions=None)

    train.main_loop()
开发者ID:allansp84,项目名称:pylearn2,代码行数:32,代码来源:test_sgd.py


示例16: MultiPIECV

def MultiPIECV():
  # Learning rate, nr pieces
  parms = [(0.1, 2), (0.1, 3), (0.01, 2), (0.01, 3)]

  accuracies = []

  for i in xrange(len(parms)):
    h0 = maxout.Maxout(layer_name='h0', num_units=1500, num_pieces=parms[i][1], W_lr_scale=1.0, irange=0.005, b_lr_scale=1.0)
    h1 = maxout.Maxout(layer_name='h1', num_units=1500, num_pieces=parms[i][1], W_lr_scale=1.0, irange=0.005, b_lr_scale=1.0)
    h2 = maxout.Maxout(layer_name='h2', num_units=1500, num_pieces=parms[i][1], W_lr_scale=1.0, irange=0.005, b_lr_scale=1.0)
    outlayer = mlp.Softmax(layer_name='y', n_classes=6, irange=0)

    layers = [h0, h1, h2, outlayer]

    model = mlp.MLP(layers, nvis=1200)

    trainIndices, validationIndices, testIndices = getMultiPIEindices()
    train = MultiPIE('train', indices=trainIndices)
    valid = MultiPIE('valid', indices=validationIndices)
    test = MultiPIE('test',   indices=testIndices)

    monitoring = dict(valid=valid)
    termination = MonitorBased(channel_name="valid_y_misclass", N=100)
    extensions = [best_params.MonitorBasedSaveBest(channel_name="valid_y_misclass",
                                                   save_path="/data/mcr10/train_best.pkl")]

    algorithm = sgd.SGD(parms[i][0], batch_size=100, cost=Dropout(),
                        monitoring_dataset=monitoring, termination_criterion=termination)

    save_path = "/data/mcr10/train_best.pkl"

    if not args.train and os.path.exists(save_path):
        model = serial.load(save_path)
    else:
      print 'Running training'
      train_job = Train(train, model, algorithm, extensions=extensions, save_path="/data/mcr10/trainpie.pkl", save_freq=1)
      train_job.main_loop()

    X = model.get_input_space().make_batch_theano()
    Y = model.fprop(X)

    y = T.argmax(Y, axis=1)

    f = function(inputs=[X], outputs=y, allow_input_downcast=True)
    yhat = f(test.X)

    print sum(yhat)
    print yhat.shape

    y = np.argmax(np.squeeze(test.get_targets()), axis=1)

    accuracy =  (y==yhat).sum() / y.size
    accuracies += [accuracy]

  # TODO: some confusion matrix?
  for i in xrange(len(parms)):
    print "for parameter" + str(i)
    print "the correct rate was " + str(accuracies[i])
开发者ID:valadhi,项目名称:AttachmentDBN,代码行数:58,代码来源:maxoutMain.py


示例17: fit

 # NOTE(review): unfinished stub — `dataset=` is a syntax error, the method
 # is missing a `self` parameter, 'dtataset' is a typo for 'dataset', and
 # `algorithm`/`extension` are undefined in this scope. TODO: complete
 # before use; left byte-identical here.
 def fit(X,y=None):#TODO
     dataset=
     self.set_train_params(train_params={
         'dtataset':dataset,
         'algorithm':algorithm,
         'extensions':extension})#which influenced by X,y
         #real data or symbol?
     train=Train(model=self.model,**self.train_params)
     train.main_loop()
开发者ID:hxsnow10,项目名称:text_classification,代码行数:9,代码来源:nets.py


示例18: run

 def run(self, start_config_id = None):
     """Worker main loop.

     Repeatedly selects the next experiment configuration from the
     database, builds the model, extensions, monitor and training
     algorithm it describes, runs pylearn2 Train on it, and records the
     end time. Loops forever — one configuration per iteration.

     start_config_id: optional config id to start from; honoured only on
     the first iteration, afterwards the next pending config is pulled.
     """
     self.db = DatabaseHandler()
     print 'running'
     while True:
         if start_config_id is None:
             (config_id, model_id, ext_id, train_id,
                 dataset_id, random_seed, batch_size) \
                  = self.select_next_config(self.experiment_id)
         else:
             (config_id, model_id, ext_id, train_id,
                 dataset_id, random_seed, batch_size) \
                  = self.select_config(start_config_id)
         # Only resume from the explicit id once.
         start_config_id = None

         (dataset_desc, input_space_id) = self.select_dataset(dataset_id)
         input_space = self.get_space(input_space_id)

         # build model
         model = self.get_model(model_id, 
                                random_seed, 
                                batch_size, 
                                input_space)

         # extensions
         extensions = self.get_extensions(ext_id)

         # prepare monitor
         self.prep_valtest_monitor(model, batch_size)

         # monitor based save best (only when a channel is configured)
         if self.mbsb_channel_name is not None:
             save_path = self.save_prefix+str(config_id)+"_best.pkl"
             extensions.append(MonitorBasedSaveBest(
                     channel_name = self.mbsb_channel_name,
                     save_path = save_path,
                     cost = False \
                 )
             )

         # HPS Logger: streams the monitored channels back to the DB
         extensions.append(
             HPSLog(self.log_channel_names, self.db, config_id)
         )

         # training algorithm
         algorithm = self.get_trainingAlgorithm(train_id, batch_size)

         print 'sgd complete'
         learner = Train(dataset=self.train_ddm,
                         model=model,
                         algorithm=algorithm,
                         extensions=extensions)
         print 'learning'
         learner.main_loop()

         self.set_end_time(config_id)
开发者ID:nicholas-leonard,项目名称:ift6266,代码行数:56,代码来源:hps.py


示例19: __call__

 def __call__(self):
     """Fit the CNMF model on self.X and return (cluster labels, results).

     Labels are the argmax over the rows of H; `results` holds the
     learned W and H factor matrices.
     """
     design_matrix = DenseDesignMatrix(X=self.X)
     self.cnmf.termination_criterion = self.termination_criterion
     self.cnmf.set_W(self.W)
     train = Train(design_matrix, self.cnmf)
     train.main_loop()
     self.cnmf.monitor = Monitor(self.cnmf)
     factor_h = self.cnmf.H.get_value()
     results = {"W": self.cnmf.W.get_value(), "H": factor_h}
     return numpy.argmax(factor_h, axis=1), results
开发者ID:ejake,项目名称:tensor-factorization,代码行数:10,代码来源:cnmf_experiment.py


示例20: test_sgd_topo

def test_sgd_topo():
    """Smoke test: run the SGD algorithm on data with topology.

    Does not test for correctness at all — only that the algorithm
    runs without dying.
    """
    rows = 3
    cols = 4
    channels = 2
    dim = rows * cols * channels

    rng = np.random.RandomState([25, 9, 2012])

    def one_hot_targets(n_rows):
        # One-hot targets with a random hot column per row.
        hot = rng.randint(0, dim, (n_rows,))
        out = np.zeros((n_rows, dim))
        for r in xrange(n_rows):
            out[r, hot[r]] = 1
        return out

    X = rng.randn(10, rows, cols, channels)
    dataset = DenseDesignMatrix(topo_view=X, y=one_hot_targets(10))

    # Including a monitoring dataset lets us test that the monitor
    # works with supervised data.
    X = rng.randn(15, rows, cols, channels)
    monitoring_dataset = DenseDesignMatrix(topo_view=X, y=one_hot_targets(15))

    model = TopoSoftmaxModel(rows, cols, channels)

    cost = CrossEntropy()

    # EpochCounter ensures the test actually stops at some point.
    algorithm = SGD(1e-3, cost, batch_size=5,
                    monitoring_batches=3,
                    monitoring_dataset=monitoring_dataset,
                    termination_criterion=EpochCounter(5),
                    update_callbacks=None,
                    init_momentum=None,
                    set_batch_size=False)

    train = Train(dataset, model, algorithm,
                  save_path=None, save_freq=0, extensions=None)

    train.main_loop()
开发者ID:mathewsbabu,项目名称:pylearn,代码行数:55,代码来源:test_sgd.py



注:本文中的pylearn2.train.Train类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python learning_rule.Momentum类代码示例发布时间:2022-05-25
下一篇:
Python skip.skip_if_no_data函数代码示例发布时间:2022-05-25
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap