Scala PipelineModel Class Code Examples


This article collects typical usage examples of the Scala class org.apache.spark.ml.PipelineModel. If you are wondering what the PipelineModel class does, how to use it, or want to see it in real code, the curated examples below should help.



Nine code examples of the PipelineModel class are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Scala code examples.
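
Before diving into the examples, here is a minimal, self-contained sketch of the PipelineModel lifecycle: fit a Pipeline to obtain a PipelineModel, persist it, load it back, and transform new data. This is an orientation sketch, assuming Spark 2.x with spark-mllib on the classpath; the path and column names are illustrative.

import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.{HashingTF, Tokenizer}
import org.apache.spark.sql.SparkSession

object PipelineModelBasics {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder.appName("pipeline-model-basics").getOrCreate()
    import spark.implicits._

    val training = Seq((0L, "spark is great", 1.0), (1L, "boring text", 0.0))
      .toDF("id", "text", "label")

    // A Pipeline is an Estimator: fit() produces a PipelineModel (a Transformer).
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("features")
    val lr = new LogisticRegression().setMaxIter(10)
    val pipeline = new Pipeline().setStages(Array(tokenizer, hashingTF, lr))

    val model: PipelineModel = pipeline.fit(training)

    // PipelineModel is MLWritable: persist it, then load it back unchanged.
    model.write.overwrite().save("/tmp/demo.model")
    val restored = PipelineModel.load("/tmp/demo.model")

    restored.transform(Seq((2L, "spark rocks")).toDF("id", "text")).show()
    spark.stop()
  }
}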

Example 1: TrainModel

// Package declaration and dependency imports
package songs

import org.apache.spark.ml.PipelineModel
import org.apache.spark.ml.regression.LinearRegressionModel
import org.apache.spark.mllib.evaluation.RegressionMetrics
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
import org.slf4j.LoggerFactory

object TrainModel {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName(Config.appName)
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    val logger = LoggerFactory.getLogger(getClass.getName)

    logger.info("Loading datasets from parquet format")
    val data = SongML.loadModelData(sqlContext = sqlContext)

    logger.info("Showing summary stats for training data")
    val summary = data.training.describe(SongML.allColumns:_*)
    summary.show(1000)

    logger.info("Training Linear Regression Model")
    val startTime = System.nanoTime()

    val pipeline = SongML.trainingPipeline.fit(data.training)

    val elapsedTime = (System.nanoTime() - startTime) / 1e9
    logger.info(s"Training time: $elapsedTime seconds")

    logger.info("Calculating Regression Metrics")
    val bestModel = pipeline.bestModel.asInstanceOf[PipelineModel]
    // Note: these predictions are computed over the training set, so the name reflects that.
    val trainingPredictions: RDD[(Double,Double)] = bestModel.transform(data.training)
      .select(SongML.predictionColumn, SongML.labelColumn)
      .map(r => (r.getAs[Double](SongML.predictionColumn), r.getAs[Double](SongML.labelColumn)))

    val rm = new RegressionMetrics(trainingPredictions)

    val model = bestModel.stages(SongML.lrStages.indices.last).asInstanceOf[LinearRegressionModel]

    logger.info(s"Saving model to ${Config.modelOut}")
    model.write.overwrite().save(Config.modelOut)

    logger.info(SongML.printStats(model,rm,"Training"))

    logger.info("Exiting")
    sc.stop()
  }
} 
Author: jasonmar, Project: millionsongs, Lines: 54, Source: TrainModel.scala
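
In Example 1, pipeline.bestModel implies that SongML.trainingPipeline is a model-selection estimator (TrainValidationSplit or CrossValidator) rather than a plain Pipeline. The real SongML object is not shown, so the following is a hypothetical sketch of how it could be defined; the feature columns and grid values are assumptions.

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.ml.tuning.{ParamGridBuilder, TrainValidationSplit}

object TrainingPipelineSketch {
  // Hypothetical feature columns; the actual SongML column names are not shown.
  val assembler = new VectorAssembler()
    .setInputCols(Array("tempo", "loudness", "duration"))
    .setOutputCol("features")
  val lr = new LinearRegression()

  val paramGrid = new ParamGridBuilder()
    .addGrid(lr.regParam, Array(0.01, 0.1))
    .build()

  // fit() returns a TrainValidationSplitModel whose bestModel is the
  // PipelineModel that Example 1 casts and inspects.
  val trainingPipeline = new TrainValidationSplit()
    .setEstimator(new Pipeline().setStages(Array(assembler, lr)))
    .setEvaluator(new RegressionEvaluator())
    .setEstimatorParamMaps(paramGrid)
    .setTrainRatio(0.8)
}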


Example 2: TwitterFireRepl

// Package declaration and dependency imports
package com.aluxian.tweeather.scripts

import org.apache.spark.Logging
import org.apache.spark.ml.PipelineModel
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.sql.functions._

import scala.io.Source


object TwitterFireRepl extends Script with Logging {

  override def main(args: Array[String]) {
    super.main(args)
    import sqlc.implicits._

    println("Loading fire model...")
    sc // dummy call to init the context
    val model = PipelineModel.load("/tw/fire/models/fire.model")
    println("Done. Write the input as <temperature>,<pressure>,<humidity> and press <enter>")

    for (input <- Source.stdin.getLines) {
      val t = udf { (input: String) =>
        val values = input.split(",").map(_.toDouble)
        Vectors.dense(values)
      }

      val data = sc
        .parallelize(Seq(input), 1)
        .toDF("kb_input")
        .withColumn("raw_input", t(col("kb_input")))

      model
        .transform(data)
        .show(truncate = false)
    }
  }

} 
Author: cnajeefa, Project: Tourism-Sentiment-Analysis, Lines: 40, Source: TwitterFireRepl.scala
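
A small design note on Example 2: the udf is rebuilt on every loop iteration even though it carries no per-line state. A sketch of the same loop with the UDF hoisted out, reusing Example 2's context (sc, sqlc.implicits._, model, Source):

// Define the parsing UDF once; it is pure and needs no per-iteration state.
val toVector = udf { (input: String) =>
  Vectors.dense(input.split(",").map(_.toDouble))
}

for (input <- Source.stdin.getLines) {
  val data = sc
    .parallelize(Seq(input), 1)
    .toDF("kb_input")
    .withColumn("raw_input", toVector(col("kb_input")))

  model.transform(data).show(truncate = false)
}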


Example 3: TwitterEmoRepl

// Package declaration and dependency imports
package com.aluxian.tweeather.scripts

import org.apache.spark.Logging
import org.apache.spark.ml.PipelineModel

import scala.io.Source


object TwitterEmoRepl extends Script with Logging {

  override def main(args: Array[String]) {
    super.main(args)
    import sqlc.implicits._

    println("Loading emo model...")
    sc // dummy call to init the context
    val model = PipelineModel.load("/tw/sentiment/models/emo.model")
    println("Done. Write the sentence you want analysed and press <enter>")

    for (input <- Source.stdin.getLines) {
      val data = sc
        .parallelize(Seq(input), 1)
        .toDF("raw_text")

      model
        .transform(data)
        .show(truncate = false)
    }
  }

} 
Author: cnajeefa, Project: Tourism-Sentiment-Analysis, Lines: 32, Source: TwitterEmoRepl.scala


Example 4: Sentiment140Repl

// Package declaration and dependency imports
package com.aluxian.tweeather.scripts

import org.apache.spark.Logging
import org.apache.spark.ml.PipelineModel

import scala.io.Source


object Sentiment140Repl extends Script with Logging {

  override def main(args: Array[String]) {
    super.main(args)
    import sqlc.implicits._

    println("Loading 140 model...")
    sc // dummy call to init the context
    val model = PipelineModel.load("tw/sentiment/models/140.model")
    println("Done. Write the sentence you want analysed and press <enter>")

    for (input <- Source.stdin.getLines) {
      val data = sc
        .parallelize(Seq(input), 1)
        .toDF("raw_text")

      model.transform(data)
        .select("probability", "prediction")
        .foreach(println)
    }
  }

} 
Author: cnajeefa, Project: Tourism-Sentiment-Analysis, Lines: 32, Source: Sentiment140Repl.scala
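
One caveat about Example 4: foreach(println) runs on the executors, so in anything but local mode the printed rows may never reach the driver's console. A sketch of the usual REPL-friendly alternative, which collects the rows to the driver first (cheap here, since each batch holds a single input line):

model.transform(data)
  .select("probability", "prediction")
  .collect()
  .foreach(println)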


Example 5: BaseTransformerConverter

// Package declaration and dependency imports
package org.apache.spark.ml.mleap.converter.runtime

import com.truecar.mleap.runtime.transformer
import org.apache.spark.ml.PipelineModel
import org.apache.spark.ml.classification.RandomForestClassificationModel
import org.apache.spark.ml.feature.{IndexToString, StandardScalerModel, StringIndexerModel, VectorAssembler}
import org.apache.spark.ml.mleap.classification.SVMModel
import org.apache.spark.ml.mleap.converter.runtime.classification.{RandomForestClassificationModelToMleap, SupportVectorMachineModelToMleap}
import org.apache.spark.ml.mleap.converter.runtime.feature.{IndexToStringToMleap, StandardScalerModelToMleap, StringIndexerModelToMleap, VectorAssemblerModelToMleap}
import org.apache.spark.ml.mleap.converter.runtime.regression.{LinearRegressionModelToMleap, RandomForestRegressionModelToMleap}
import org.apache.spark.ml.regression.{LinearRegressionModel, RandomForestRegressionModel}


trait BaseTransformerConverter extends SparkTransformerConverter {
  // regression
  implicit val mleapLinearRegressionModelToMleap: TransformerToMleap[LinearRegressionModel, transformer.LinearRegressionModel] =
    addConverter(LinearRegressionModelToMleap)
  implicit val mleapRandomForestRegressionModelToMleap: TransformerToMleap[RandomForestRegressionModel, transformer.RandomForestRegressionModel] =
    addConverter(RandomForestRegressionModelToMleap)

  // classification
  implicit val mleapRandomForestClassificationModelToMleap: TransformerToMleap[RandomForestClassificationModel, transformer.RandomForestClassificationModel] =
    addConverter(RandomForestClassificationModelToMleap)
  implicit val mleapSupportVectorMachineModelToMleap: TransformerToMleap[SVMModel, transformer.SupportVectorMachineModel] =
    addConverter(SupportVectorMachineModelToMleap)

  //feature
  implicit val mleapIndexToStringToMleap: TransformerToMleap[IndexToString, transformer.ReverseStringIndexerModel] =
    addConverter(IndexToStringToMleap)
  implicit val mleapStandardScalerModelToMleap: TransformerToMleap[StandardScalerModel, transformer.StandardScalerModel] =
    addConverter(StandardScalerModelToMleap)
  implicit val mleapStringIndexerModelToMleap: TransformerToMleap[StringIndexerModel, transformer.StringIndexerModel] =
    addConverter(StringIndexerModelToMleap)
  implicit val mleapVectorAssemblerToMleap: TransformerToMleap[VectorAssembler, transformer.VectorAssemblerModel] =
    addConverter(VectorAssemblerModelToMleap)

  // other
  implicit val mleapPipelineModelToMleap: TransformerToMleap[PipelineModel, transformer.PipelineModel] =
    addConverter(PipelineModelToMleap(this))
}
object BaseTransformerConverter extends BaseTransformerConverter 
Author: TrueCar, Project: mleap, Lines: 42, Source: BaseTransformerConverter.scala
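
Example 5 follows a typeclass-registry pattern: each implicit val both registers a converter via addConverter and exposes it for implicit resolution. The toy below illustrates the same pattern in isolation; every name in it is illustrative (none of it is MLeap API), and it assumes Scala 2.12+ for the SAM-style lambdas.

import scala.collection.mutable
import scala.reflect.ClassTag

trait ToJson[A] { def toJson(a: A): String }

trait JsonConverterRegistry {
  private val converters = mutable.Map.empty[Class[_], ToJson[_]]

  // Register a converter keyed by runtime class and return it, so the call
  // site can simultaneously bind it as an implicit, as Example 5 does.
  def addConverter[A](c: ToJson[A])(implicit ct: ClassTag[A]): ToJson[A] = {
    converters(ct.runtimeClass) = c
    c
  }

  // Dynamic dispatch through the registry, for values typed only at runtime.
  def convert[A](a: A): String =
    converters(a.getClass).asInstanceOf[ToJson[A]].toJson(a)
}

final case class User(name: String)

object Converters extends JsonConverterRegistry {
  implicit val stringToJson: ToJson[String] = addConverter((s: String) => "\"" + s + "\"")
  implicit val userToJson: ToJson[User] = addConverter((u: User) => s"""{"name":"${u.name}"}""")
}

Returning the converter from addConverter is what lets a single implicit val serve both purposes: Converters.convert(User("ada")) finds the converter by runtime class, while implicitly[ToJson[User]] resolves the same instance statically.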


Example 6: PipelineClassifier

// Package declaration and dependency imports
import org.apache.spark.ml.PipelineModel
import org.apache.spark.mllib.linalg.DenseVector
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame

class PipelineClassifier(val pipeline: PipelineModel) extends UnifiedClassifier with Serializable {
  override def predict[T](data: DataFrame): RDD[(T, Double)] = {
    val singletonDF = ModelHelpers.addMetadata(data)
    val predictions = pipeline.transform(singletonDF)
    predictions.map(row => {
      val firstClass = row.getAs[DenseVector](DataFrameColumns.RAW_PREDICTION)(1)
      val zeroClass = row.getAs[DenseVector](DataFrameColumns.RAW_PREDICTION)(0)
      val prob = firstClass.toDouble / (firstClass.toDouble + zeroClass.toDouble)
      (row.getAs[T](DataFrameColumns.KEY), prob)
    })
  }
} 
Author: Spikhalskiy, Project: hackaton, Lines: 18, Source: PipelineClassifier.scala
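
Example 6 normalizes rawPrediction by hand, which for tree ensembles turns raw vote counts into a probability estimate. Spark's classifiers also emit a normalized "probability" column directly, so an equivalent sketch (same Spark 1.x imports as the example; the "key" and "probability" column names are assumptions) is:

import org.apache.spark.ml.PipelineModel
import org.apache.spark.mllib.linalg.DenseVector
import org.apache.spark.sql.DataFrame

// Read the classifier's normalized "probability" column instead of
// normalizing rawPrediction manually.
def predictProbabilities(pipeline: PipelineModel, data: DataFrame) =
  pipeline.transform(data).map { row =>
    (row.getAs[String]("key"), row.getAs[DenseVector]("probability")(1))
  }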


Example 7: PredictNewsClassDemo

// Package declaration and dependency imports
package applications.mining

import algorithms.evaluation.MultiClassEvaluation
import config.paramconf.ClassParams
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.PipelineModel
import org.apache.spark.sql.{Row, SparkSession}


object PredictNewsClassDemo extends Serializable {
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)

    val spark = SparkSession
      .builder
      .master("local[2]")
      .appName("predict news multi class demo")
      .getOrCreate()

    // Hard-coded demo arguments; renamed so they don't shadow main's `args` parameter.
    val demoArgs = Array("ckooc-ml/data/classnews/predict", "lr")
    val filePath = demoArgs(0)
    val modelType = demoArgs(1)

    var modelPath = ""
    val params = new ClassParams

    modelType match {
      case "lr" => modelPath = params.LRModelPath
      case "dt" => modelPath = params.DTModelPath
      case _ =>
        println("Unsupported model type")
        System.exit(1)
    }

    import spark.implicits._
    val data = spark.sparkContext.textFile(filePath).flatMap { line =>
      val tokens: Array[String] = line.split("\u00ef")
      if (tokens.length > 3) Some((tokens(0), tokens(1), tokens(2), tokens(3))) else None
    }.toDF("label", "title", "time", "content")
    data.persist()

    // Load the trained model and run predictions
    val model = PipelineModel.load(modelPath)
    val predictions = model.transform(data)

    //=== Evaluation
    val resultRDD = predictions.select("prediction", "indexedLabel").rdd.map { case Row(prediction: Double, label: Double) => (prediction, label) }
    val (precision, recall, f1) = MultiClassEvaluation.multiClassEvaluate(resultRDD)
    println("\n\n========= Evaluation ==========")
    println(s"\nPrecision: $precision")
    println(s"Recall: $recall")
    println(s"F1: $f1")

    //    predictions.select("label", "predictedLabel", "content").show(100, truncate = false)
    data.unpersist()

    spark.stop()
  }
} 
Author: yhao2014, Project: CkoocNLP, Lines: 60, Source: PredictNewsClassDemo.scala
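
The MultiClassEvaluation helper in Example 7 is project-specific and not shown. For reference, Spark ships a built-in multiclass evaluator; a sketch of the same computation with it, reusing the resultRDD built above:

import org.apache.spark.mllib.evaluation.MulticlassMetrics

// resultRDD holds the (prediction, label) pairs built in the example.
val metrics = new MulticlassMetrics(resultRDD)
println(s"Weighted precision: ${metrics.weightedPrecision}")
println(s"Weighted recall: ${metrics.weightedRecall}")
println(s"Weighted F1: ${metrics.weightedFMeasure}")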


Example 8: save

// Package declaration and dependency imports
package ml

import model.AppReader
import org.apache.spark.ml.PipelineModel
import scalaz.\/

trait PipelineRepository {
  private val baseDir = "/Users/kirill/Documents/Projects/sentiment-service/src/main/resources"

  def save(pipeline: PipelineModel, fileName: String): AppReader[Unit] = AppReader[Unit] {
    spark =>
      \/.fromTryCatchNonFatal {
        pipeline.write.overwrite().save(s"$baseDir/$fileName")
      }
  }

  def load(fileName: String): AppReader[PipelineModel] = AppReader[PipelineModel] {
    spark =>
      \/.fromTryCatchNonFatal {
        PipelineModel.load(s"$baseDir/$fileName")
      }
  }
} 
Author: stormy-ua, Project: sentiment-service, Lines: 24, Source: PipelineRepository.scala


Example 9: estimate

// Package declaration and dependency imports
package ml

import model.{ NegativeSentiment, PositiveSentiment, Sentiment, AppReader }
import org.apache.spark.ml.{ PipelineModel }
import scalaz.\/

trait SentimentEstimator {
  def estimate(pipeline: PipelineModel, text: String): AppReader[Sentiment] = AppReader[Sentiment] {
    spark =>
      \/.fromTryCatchNonFatal {
        val df = spark.createDataFrame(Seq((0, text))).toDF("id", "text")
        val isPositive = pipeline.transform(df)
          .select("prediction")
          .first().getDouble(0)

        if (isPositive == 1.0) PositiveSentiment else NegativeSentiment
      }
  }

} 
Author: stormy-ua, Project: sentiment-service, Lines: 21, Source: SentimentEstimator.scala
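
AppReader in Examples 8 and 9 is project-specific and its definition is not shown. A purely hypothetical reconstruction, assuming scalaz 7's Reader over the SparkSession; the names mirror the examples but are assumptions, not the project's actual code:

import org.apache.spark.sql.SparkSession
import scalaz.{ Reader, \/ }

object AppTypes {
  // A computation that reads the SparkSession and yields either an error or a result.
  type AppReader[A] = Reader[SparkSession, Throwable \/ A]

  // Mirrors the AppReader[A] { spark => ... } call shape used in the examples.
  def AppReader[A](f: SparkSession => Throwable \/ A): AppReader[A] = Reader(f)
}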



Note: the org.apache.spark.ml.PipelineModel class examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use must follow each project's license. Do not repost without permission.

