```scala
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName(this.getClass.getName)
      .getOrCreate()

    // Create a DataFrame by calling createDataFrame on a Seq of tuples
    val training: DataFrame = spark.createDataFrame(Seq(
      (0L, "a b c d e spark", 1.0),
      (1L, "b d", 0.0),
      (2L, "spark f g h", 1.0),
      (3L, "hadoop mapreduce", 0.0)
    )).toDF("id", "text", "label")

    training.show()
    // +---+----------------+-----+
    // | id|            text|label|
    // +---+----------------+-----+
    // |  0| a b c d e spark|  1.0|
    // |  1|             b d|  0.0|
    // |  2|     spark f g h|  1.0|
    // |  3|hadoop mapreduce|  0.0|
    // +---+----------------+-----+
  }
}
```
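
As a side note (not part of the original example), the same DataFrame can also be built by importing `spark.implicits._` and calling `toDF` directly on a local `Seq`, which is a common idiom in Spark applications. Below is a minimal sketch under that assumption; the object name `DemoImplicits` is hypothetical.

```scala
import org.apache.spark.sql.{DataFrame, SparkSession}

object DemoImplicits {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName(this.getClass.getName)
      .getOrCreate()

    // Bring in the implicit conversions that add toDF to local Scala collections
    import spark.implicits._

    // Same data as above, converted directly from a Seq of tuples
    val training: DataFrame = Seq(
      (0L, "a b c d e spark", 1.0),
      (1L, "b d", 0.0),
      (2L, "spark f g h", 1.0),
      (3L, "hadoop mapreduce", 0.0)
    ).toDF("id", "text", "label")

    training.show()

    spark.stop()
  }
}
```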