• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Java RBM类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Java中org.deeplearning4j.nn.conf.layers.RBM的典型用法代码示例。如果您正苦于以下问题:Java RBM类的具体用法?Java RBM怎么用?Java RBM使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



RBM类属于org.deeplearning4j.nn.conf.layers包,在下文中一共展示了RBM类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。

示例1: getConfiguration

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
protected MultiLayerConfiguration getConfiguration()
{
    // DBN-style setup: one RBM hidden layer (rectified hidden units over
    // Gaussian visible units) followed by a softmax classification layer.
    final int nodes = parameters.getHiddeLayerNodes()[0];

    final RBM rbmLayer = new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
            .nIn(parameters.getInputSize())
            .nOut(nodes)
            .weightInit(WeightInit.XAVIER)
            .k(1)
            .activation("relu")
            .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
            .updater(Updater.ADAGRAD)
            .dropOut(0.5)
            .build();

    final OutputLayer softmaxLayer = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
            .nIn(nodes)
            .nOut(parameters.getOutputSize())
            .activation("softmax")
            .build();

    return new NeuralNetConfiguration.Builder()
            .seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .learningRate(parameters.getLearningRate())
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .l2(2e-4)
            .regularization(true)
            .momentum(0.9)
            .useDropConnect(true)
            .list(2)
            .layer(0, rbmLayer)
            .layer(1, softmaxLayer)
            .build();
}
 
开发者ID:amrabed,项目名称:DL4J,代码行数:17,代码来源:DeepBeliefNetworkModel.java


示例2: getConfiguration

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
private static MultiLayerConfiguration getConfiguration(DataFrame dataset) {

        // Deep-belief network: three stacked binary-binary RBMs shrinking the
        // flattened image (rows * columns) down to 200 units, topped by a
        // softmax output layer. Trained purely by layer-wise pretraining
        // (backprop is explicitly disabled).
        // NOTE(review): the 'dataset' parameter is never read in this method.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .constrainGradientToUnitNorm(true)
                .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
                .list(4)
                .layer(0, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                        .weightInit(WeightInit.XAVIER)
                        .nIn(rows * columns).nOut(600).build())
                .layer(1, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                        .weightInit(WeightInit.XAVIER)
                        .nIn(600).nOut(250).build())
                .layer(2, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                        .weightInit(WeightInit.XAVIER)
                        .nIn(250).nOut(200).build())
                // NOTE(review): nOut(AUTOMATIC) presumably lets the Spark ML
                // wrapper infer the class count from the data — confirm.
                .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
                        .weightInit(WeightInit.XAVIER)
                        .activation("softmax")
                        .nIn(200).nOut(AUTOMATIC).build())
                .pretrain(true).backprop(false)
                .build();

        return conf;
    }
 
开发者ID:javadba,项目名称:dl4j-spark-ml-examples,代码行数:26,代码来源:JavaLfwClassification.java


示例3: main

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
public static void main(String[] args) throws Exception {
    // MNIST deep auto-encoder: the encoder (layers 0-3) compresses the
    // 28x28 = 784-pixel image down to a 30-unit code; the decoder
    // (layers 4-7) reconstructs the 784 outputs. Pretrained layer-wise,
    // then fine-tuned with backprop against the reconstruction target.
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    // FIX: the original "iterations/5" evaluates to 0 when iterations < 5
    // (here iterations == 1), and ScoreIterationListener performs
    // "iteration % frequency", which throws ArithmeticException for 0.
    // Clamp to a minimum frequency of 1.
    int listenerFreq = Math.max(1, iterations / 5);

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize,numSamples,true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list(8)
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(2000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(1, new RBM.Builder().nIn(2000).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(2, new RBM.Builder().nIn(1000).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(3, new RBM.Builder().nIn(500).nOut(30).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) // encoding stops here
            .layer(4, new RBM.Builder().nIn(30).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) // decoding starts here
            .layer(5, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(6, new RBM.Builder().nIn(1000).nOut(2000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(7, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nIn(2000).nOut(numRows*numColumns).build())
            .pretrain(true).backprop(true)
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();

    model.setListeners(new ScoreIterationListener(listenerFreq));

    log.info("Train model....");
    // Auto-encoder training: the feature matrix serves as both the input
    // and the reconstruction target.
    while(iter.hasNext()) {
        DataSet next = iter.next();
        model.fit(new DataSet(next.getFeatureMatrix(),next.getFeatureMatrix()));
    }
}
 
开发者ID:PacktPublishing,项目名称:Deep-Learning-with-Hadoop,代码行数:41,代码来源:DeepAutoEncoder.java


示例4: getConfiguration

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
@Override
   protected MultiLayerConfiguration getConfiguration()
   {
// Builds a stacked-RBM configuration: every layer except the last is an
// RBM hidden layer; the final slot is an OutputLayer. Uses greedy
// layer-wise pretraining followed by backprop fine-tuning.
final int[] hiddenLayerNodes = parameters.getHiddeLayerNodes();
final int nLayers = hiddenLayerNodes.length;
final ListBuilder list = new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
	.iterations(parameters.getIterations()).optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
	.list(nLayers);
for (int i = 0; i < nLayers; i++)
{
    // Layer 0 is fed by the raw input; each later layer is fed by the
    // previous hidden layer's output size.
    int nIn;
    if (i == 0)
    {
	nIn = parameters.getInputSize();
    }
    else
    {
	nIn = hiddenLayerNodes[i - 1];
    }

    if (i < nLayers - 1)
    {
	final RBM hiddenLayer = new RBM.Builder().nIn(nIn).nOut(hiddenLayerNodes[i])
		.lossFunction(LossFunctions.LossFunction.RMSE_XENT).build();
	list.layer(i, hiddenLayer);
    }
    else
    {
	// Last slot is the output layer sized by the model's output size.
	// NOTE(review): hiddenLayerNodes[nLayers - 1] is never used — the
	// final entry of the array is silently ignored; confirm intended.
	final OutputLayer outputLayer = new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
		.nIn(nIn).nOut(parameters.getOutputSize()).build();
	list.layer(nLayers - 1, outputLayer);
    }
}
return list.pretrain(true).backprop(true).build();
   }
 
开发者ID:amrabed,项目名称:DL4J,代码行数:36,代码来源:DeepAutoEncoderModel.java


示例5: getModel

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
public static MultiLayerNetwork getModel(int numInputs) {
    // Deep-belief network over numInputs features: four stacked RBMs
    // (Gaussian-binary for the real-valued input, then binary-binary)
    // narrowing 2750 -> 2000 -> 1000 -> 200, followed by an output layer.
    // Pretrained layer-wise, then fine-tuned with backprop.
    MultiLayerConfiguration conf =  new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)
            .regularization(true)
            .dropOut(Config.DROPOUT)
            .updater(Config.UPDATER)
            .adamMeanDecay(0.5)
            .adamVarDecay(0.5)
            .weightInit(WeightInit.XAVIER)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .list()
            // Layer-level dropOut(0.75) overrides the global Config.DROPOUT
            // for this first layer only.
            .layer(0, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.GAUSSIAN)
                    .nIn(numInputs).nOut(2750).dropOut(0.75)
                    .activation(Activation.RELU).build())
            .layer(1, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .nIn(2750).nOut(2000)
                    .activation(Activation.RELU).build())
            .layer(2, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .nIn(2000).nOut(1000)
                    .activation(Activation.RELU).build())
            .layer(3, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .nIn(1000).nOut(200)
                    .activation(Activation.RELU).build())
            // Output layer overrides the global Adam decay rates (0.5/0.5)
            // with its own (0.6/0.7).
            .layer(4, new OutputLayer.Builder(Config.LOSS_FUNCTION)
                    .nIn(200).nOut(Config.NUM_OUTPUTS).updater(Config.UPDATER)
                    .adamMeanDecay(0.6).adamVarDecay(0.7)
                    .build())
            .pretrain(true).backprop(true)
            .build();
    return new MultiLayerNetwork(conf);
}
 
开发者ID:madeleine789,项目名称:dl4j-apr,代码行数:35,代码来源:DBN.java


示例6: getConfiguration

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
private static MultiLayerConfiguration getConfiguration() {

        // Two-layer network for iris classification (4 features, 3 classes):
        // one RBM hidden layer (rectified hidden / Gaussian visible units)
        // plus a softmax output layer. Neither pretrain nor backprop is set
        // here, so the builder defaults apply.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(11L) // Seed to lock in weight initialization for tuning
                .iterations(100) // # training iterations predict/classify & backprop
                .learningRate(1e-3f) // Optimization step size
                .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT) // Backprop method (calculate the gradients)
                .momentum(0.9)
                .constrainGradientToUnitNorm(true)
                .useDropConnect(true)
                .list(2) // # NN layers (does not count input layer)
                .layer(0, new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
                                .nIn(4) // # input nodes
                                .nOut(3) // # fully connected hidden layer nodes. Add list if multiple layers.
                                .weightInit(WeightInit.XAVIER)
                                .activation("relu")
                                .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                                .updater(Updater.ADAGRAD)
                                .k(1) // # contrastive divergence iterations
                                .dropOut(0.5)
                                .build()
                ) // NN layer type
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                .nIn(3) // # input nodes
                                .nOut(3) // # output nodes
                                .activation("softmax")
                                .weightInit(WeightInit.XAVIER)
                                .updater(Updater.ADAGRAD)
                                .dropOut(0.5)
                                .build()
                ) // NN layer type
                .build();

        return conf;
    }
 
开发者ID:javadba,项目名称:dl4j-spark-ml-examples,代码行数:36,代码来源:JavaIrisClassification.java


示例7: deepBeliefNetwork

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
private static MultiLayerNetwork deepBeliefNetwork(int seed,
		int iterations, int numRows, int numColumns, int outputNum) {
	// Deep-belief network for (numRows x numColumns) inputs and outputNum
	// classes: three stacked binary-binary RBMs (500 -> 250 -> 200) plus
	// a softmax output layer. Pretraining only — backprop is disabled.
	// Momentum starts at 0.5 and rises to 0.9 after iteration 3.
	// NOTE: the returned network has not had init() called on it.
	MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
			.seed(seed)
			.gradientNormalization(
					GradientNormalization.ClipElementWiseAbsoluteValue)
			.gradientNormalizationThreshold(1.0)
			.iterations(iterations)
			.momentum(0.5)
			.momentumAfter(Collections.singletonMap(3, 0.9))
			.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
			.list(4)
			.layer(0,
					new RBM.Builder().nIn(numRows * numColumns).nOut(500)
							.weightInit(WeightInit.XAVIER)
							.lossFunction(LossFunction.RMSE_XENT)
							.visibleUnit(RBM.VisibleUnit.BINARY)
							.hiddenUnit(RBM.HiddenUnit.BINARY).build())
			.layer(1,
					new RBM.Builder().nIn(500).nOut(250)
							.weightInit(WeightInit.XAVIER)
							.lossFunction(LossFunction.RMSE_XENT)
							.visibleUnit(RBM.VisibleUnit.BINARY)
							.hiddenUnit(RBM.HiddenUnit.BINARY).build())
			.layer(2,
					new RBM.Builder().nIn(250).nOut(200)
							.weightInit(WeightInit.XAVIER)
							.lossFunction(LossFunction.RMSE_XENT)
							.visibleUnit(RBM.VisibleUnit.BINARY)
							.hiddenUnit(RBM.HiddenUnit.BINARY).build())
			.layer(3,
					new OutputLayer.Builder(
							LossFunction.NEGATIVELOGLIKELIHOOD)
							.activation("softmax").nIn(200).nOut(outputNum)
							.build()).pretrain(true).backprop(false)
			.build();

	MultiLayerNetwork model = new MultiLayerNetwork(conf);

	return model;
}
 
开发者ID:PacktPublishing,项目名称:Machine-Learning-End-to-Endguide-for-Java-developers,代码行数:42,代码来源:NeuralNetworks.java


示例8: DeepAutoEncoderExample

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
public DeepAutoEncoderExample() {
    try {
        // Builds, trains, and serializes a 10-layer MNIST deep auto-encoder:
        // encoder 784 -> 1000 -> 500 -> 250 -> 100 -> 30, then a mirrored
        // decoder back up to 784 outputs.
        int seed = 123;
        int numberOfIterations = 1;
        iterator = new MnistDataSetIterator(1000, MnistDataFetcher.NUM_EXAMPLES, true);
        
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(numberOfIterations)
                .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
                .list()
                .layer(0, new RBM.Builder().nIn(numberOfRows * numberOfColumns)
                        .nOut(1000)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(1, new RBM.Builder().nIn(1000).nOut(500)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(2, new RBM.Builder().nIn(500).nOut(250)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(3, new RBM.Builder().nIn(250).nOut(100)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(4, new RBM.Builder().nIn(100).nOut(30)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) //encoding stops
                .layer(5, new RBM.Builder().nIn(30).nOut(100)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) //decoding starts
                .layer(6, new RBM.Builder().nIn(100).nOut(250)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(7, new RBM.Builder().nIn(250).nOut(500)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(8, new RBM.Builder().nIn(500).nOut(1000)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(9, new OutputLayer.Builder(
                                LossFunctions.LossFunction.RMSE_XENT).nIn(1000)
                        .nOut(numberOfRows * numberOfColumns).build())
                .pretrain(true).backprop(true)
                .build();

        model = new MultiLayerNetwork(conf);
        model.init();

        model.setListeners(Collections.singletonList(
                (IterationListener) new ScoreIterationListener()));

        // Auto-encoder training: the feature matrix serves as both the
        // input and the reconstruction target.
        while (iterator.hasNext()) {
            DataSet dataSet = iterator.next();
            model.fit(new DataSet(dataSet.getFeatureMatrix(),
                    dataSet.getFeatureMatrix()));
        }

        // Persist the trained model (including updater state) to disk.
        modelFile = new File("savedModel");
        ModelSerializer.writeModel(model, modelFile, true);
    } catch (IOException ex) {
        // NOTE(review): the exception is only printed; the object is left
        // partially constructed (model/modelFile may be null) — consider
        // rethrowing as an unchecked exception.
        ex.printStackTrace();
    }
}
 
开发者ID:PacktPublishing,项目名称:Machine-Learning-End-to-Endguide-for-Java-developers,代码行数:55,代码来源:DeepAutoEncoderExample.java


示例9: main

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
public static void main(String[] args) throws Exception {
    // MNIST deep auto-encoder: encoder (layers 0-4) compresses 784 pixels
    // to a 30-unit code; decoder (layers 5-9) reconstructs the 784 outputs.
    // Pretrained layer-wise, then fine-tuned with backprop.
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    // FIX: the original "iterations/5" evaluates to 0 when iterations < 5
    // (here iterations == 1), and ScoreIterationListener performs
    // "iteration % frequency", which throws ArithmeticException for 0.
    // Clamp to a minimum frequency of 1.
    int listenerFreq = Math.max(1, iterations / 5);

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize,numSamples,true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list(10)
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(1, new RBM.Builder().nIn(1000).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(2, new RBM.Builder().nIn(500).nOut(250).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(3, new RBM.Builder().nIn(250).nOut(100).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(4, new RBM.Builder().nIn(100).nOut(30).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) //encoding stops
            .layer(5, new RBM.Builder().nIn(30).nOut(100).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) //decoding starts
            .layer(6, new RBM.Builder().nIn(100).nOut(250).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(7, new RBM.Builder().nIn(250).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(8, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(9, new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT).nIn(1000).nOut(numRows*numColumns).build())
            .pretrain(true).backprop(true)
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();

    model.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    // Auto-encoder training: the feature matrix serves as both the input
    // and the reconstruction target.
    while(iter.hasNext()) {
        DataSet next = iter.next();
        model.fit(new DataSet(next.getFeatureMatrix(),next.getFeatureMatrix()));
    }
}
 
开发者ID:PacktPublishing,项目名称:Java-Data-Science-Cookbook,代码行数:43,代码来源:DeepAutoEncoderExample.java


示例10: main

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
public static void main(String[] args) throws IOException {
     
    // Trains a single standalone RBM layer (not a MultiLayerNetwork) on the
    // iris dataset, using rectified hidden units over Gaussian visible units.
    Nd4j.MAX_SLICES_TO_PRINT = -1;
    Nd4j.MAX_ELEMENTS_PER_SLICE = -1;
    Nd4j.ENFORCE_NUMERICAL_STABILITY = true;
    final int numRows = 4;
    final int numColumns = 1;
    int outputNum = 10;
    int numSamples = 150;
    int batchSize = 150;
    int iterations = 100;
    int seed = 123;
    int listenerFreq = iterations/2;

    log.info("Load data....");
    // batchSize == numSamples, so the whole dataset arrives in one batch.
    DataSetIterator iter = new IrisDataSetIterator(batchSize, numSamples);
    
    DataSet iris = iter.next();

    iris.normalizeZeroMeanZeroUnitVariance();

    log.info("Build model....");
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().regularization(true)
            .miniBatch(true)
           
            .layer(new RBM.Builder().l2(1e-1).l1(1e-3)
                    .nIn(numRows * numColumns)  
                    .nOut(outputNum) 
                    .activation("relu")  
                    .weightInit(WeightInit.RELU)  
                    .lossFunction(LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY).k(3)
                    .hiddenUnit(HiddenUnit.RECTIFIED).visibleUnit(VisibleUnit.GAUSSIAN)
                    .updater(Updater.ADAGRAD).gradientNormalization(GradientNormalization.ClipL2PerLayer)
                    .build())
            .seed(seed)  
            .iterations(iterations)
            .learningRate(1e-3)  
            .optimizationAlgo(OptimizationAlgorithm.LBFGS)
            .build();
    // Single-layer model built directly from the layer configuration.
    Layer model = LayerFactories.getFactory(conf.getLayer()).create(conf);
    model.setListeners(new ScoreIterationListener(listenerFreq));

    log.info("Evaluate weights....");
    INDArray w = model.getParam(DefaultParamInitializer.WEIGHT_KEY);
    log.info("Weights: " + w);
    log.info("Scaling the dataset");
    // NOTE(review): the data was already normalized to zero mean / unit
    // variance above; scaling again here changes the distribution — confirm
    // this double transformation is intended.
    iris.scale();
    log.info("Train model....");
    // Unsupervised fitting: only the feature matrix is used (no labels).
    for(int i = 0; i < 20; i++) {
        log.info("Epoch "+i+":");
        model.fit(iris.getFeatureMatrix());
    }

}
 
开发者ID:PacktPublishing,项目名称:Deep-Learning-with-Hadoop,代码行数:55,代码来源:RBM.java


示例11: main

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
public static void main(String[] args) throws Exception {
    // MNIST deep auto-encoder trained with KL-divergence reconstruction
    // loss: encoder (layers 0-4) compresses 784 pixels to a 30-unit code;
    // decoder (layers 5-9) reconstructs the 784 outputs.
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    // FIX: the original "iterations/5" evaluates to 0 when iterations < 5
    // (here iterations == 1), and ScoreIterationListener performs
    // "iteration % frequency", which throws ArithmeticException for 0.
    // Clamp to a minimum frequency of 1.
    int listenerFreq = Math.max(1, iterations / 5);

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize,numSamples,true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list()
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(1000).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(1, new RBM.Builder().nIn(1000).nOut(500).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(2, new RBM.Builder().nIn(500).nOut(250).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(3, new RBM.Builder().nIn(250).nOut(100).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(4, new RBM.Builder().nIn(100).nOut(30).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build()) // encoding stops here
            .layer(5, new RBM.Builder().nIn(30).nOut(100).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build()) // decoding starts here
            .layer(6, new RBM.Builder().nIn(100).nOut(250).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(7, new RBM.Builder().nIn(250).nOut(500).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(8, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(9, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nIn(1000).nOut(numRows*numColumns).build())
            .pretrain(true).backprop(true)
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();

    model.setListeners(new ScoreIterationListener(listenerFreq));

    log.info("Train model....");
    // Auto-encoder training: the feature matrix serves as both the input
    // and the reconstruction target.
    while(iter.hasNext()) {
        DataSet next = iter.next();
        model.fit(new DataSet(next.getFeatureMatrix(),next.getFeatureMatrix()));
    }
}
 
开发者ID:PacktPublishing,项目名称:Deep-Learning-with-Hadoop,代码行数:45,代码来源:DBN.java


示例12: testFromSvmLight

import org.deeplearning4j.nn.conf.layers.RBM; //导入依赖的package包/类
@Test
public void testFromSvmLight() throws Exception {
    // Load the iris SVMLight file and re-wrap each point with a dense
    // feature vector before caching the RDD.
    JavaRDD<LabeledPoint> data = MLUtils
            .loadLibSVMFile(sc.sc(),
                    new ClassPathResource("svmLight/iris_svmLight_0.txt").getTempFileFromArchive().getAbsolutePath())
            .toJavaRDD()
            .map(new Function<LabeledPoint, LabeledPoint>() {
                @Override
                public LabeledPoint call(LabeledPoint point) throws Exception {
                    return new LabeledPoint(point.label(), Vectors.dense(point.features().toArray()));
                }
            })
            .cache();

    // The full iris dataset in one batch, used later for evaluation.
    DataSet irisData = new IrisDataSetIterator(150, 150).next();

    // Two-layer network: one RBM hidden layer plus a softmax output layer.
    RBM rbmLayer = new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
            .nIn(4).nOut(100)
            .weightInit(WeightInit.XAVIER)
            .activation("relu")
            .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
            .build();
    org.deeplearning4j.nn.conf.layers.OutputLayer softmaxLayer =
            new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .nIn(100).nOut(3)
                    .activation("softmax")
                    .weightInit(WeightInit.XAVIER)
                    .build();
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(123)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .iterations(100).miniBatch(true)
            .maxNumLineSearchIterations(10)
            .list()
            .layer(0, rbmLayer)
            .layer(1, softmaxLayer)
            .backprop(false)
            .build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    System.out.println("Initializing network");

    // Distributed training with parameter averaging on the Spark context.
    SparkDl4jMultiLayer master = new SparkDl4jMultiLayer(sc, getBasicConf(),
            new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 5, 1, 0));

    MultiLayerNetwork trained = master.fitLabeledPoint(data);
    Evaluation evaluation = new Evaluation();
    evaluation.eval(irisData.getLabels(), trained.output(irisData.getFeatureMatrix()));
    System.out.println(evaluation.stats());
}
 
开发者ID:PacktPublishing,项目名称:Deep-Learning-with-Hadoop,代码行数:42,代码来源:TestSparkMultiLayerParameterAveraging.java



注:本文中的org.deeplearning4j.nn.conf.layers.RBM类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Java OFPortFeaturesSerializerVer11类代码示例发布时间:2022-05-22
下一篇:
Java DataTransferProtos类代码示例发布时间:2022-05-22
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap