Spark DataFrame API操作

 

API代码示例如下

object DataFrameCase {

  def main(args: Array[String]): Unit = {
    // Build a local SparkSession.
    // Fix: the original called `getorCreate()`, which does not exist on
    // SparkSession.Builder — the correct method is `getOrCreate()`.
    val sparksession = SparkSession.builder()
      .appName("DataFrameCase")
      .master("local[2]")
      .getOrCreate()

    // NOTE(review): Windows-style absolute path; adjust for your environment.
    val path = "G:\\student.data"

    // Read the raw pipe-delimited text file as an RDD[String].
    val sparkrdd = sparksession.sparkContext.textFile(path)

    // Implicit conversions (enables RDD -> DataFrame via .toDF()).
    import sparksession.implicits._

    // Parse each "id|name|phone|email" line into a Student and convert to a DataFrame.
    val studentDF = sparkrdd
      .map(_.split("\\|"))
      .map(line => Student(line(0).toInt, line(1), line(2), line(3)))
      .toDF()

    // A second DataFrame with the same content, used below to demonstrate a join.
    val studentDF2 = sparkrdd
      .map(_.split("\\|"))
      .map(line => Student(line(0).toInt, line(1), line(2), line(3)))
      .toDF()

    studentDF.show()

    studentDF.take(10)

    // Rows whose name is empty or is the literal string "NULL".
    studentDF.filter("name='' OR name = 'NULL'").show()

    // Show the first 30 rows without truncating column values.
    studentDF.show(30, false)

    // where is an alias of filter.
    studentDF.where("name='' OR name = 'NULL'").show()

    // Names starting with 't' (Spark SQL SUBSTRING treats position 0 like 1).
    studentDF.filter("SUBSTRING(name,0,1)='t'").show()

    studentDF.sort("name").show()

    // sort accepts Column expressions as well as plain column names.
    studentDF.sort(studentDF("name").desc, studentDF("id")).show()

    // Rename a column in the projection.
    studentDF.select(studentDF.col("name").as("studentname")).show()

    // Inner join on the id column.
    studentDF.join(studentDF2, studentDF.col("id") === studentDF2.col("id")).show()

    // Release Spark resources (missing in the original — the session was never stopped).
    sparksession.stop()
  }

  // One record per line of the input file: id|name|phone|email.
  case class Student(id: Int, name: String, phone: String, email: String)
}

操作文件内容如下:

1|burke|18722323|sdfsdf.com
2|turke|18722323|sdfsdf.com
3|turke|18722323|sdfsdf.com
4|curke|18722323|sdfsdf.com
5|burke|18722323|sdfsdf.com
6|burke|18722323|sdfsdf.com
7|burke|18722323|sdfsdf.com
8|murke|18722323|sdfsdf.com
9|burke|18722323|sdfsdf.com
10||1872563|sdfsdf.com
11||2232323|dsfsdf.com
12|NULL|937489|dsfsdew.com

 

相关文章

1.SparkStreaming是什么?SparkStreaming是SparkCore的扩展A...
本篇内容介绍了“Spark通讯录相似度计算怎么实现”的有关知识...
本篇文章给大家分享的是有关如何进行Spark数据分析,小编觉得...
本篇内容主要讲解“Spark Shuffle和Hadoop Shuffle有哪些区别...
这篇文章主要介绍“TSDB的数据怎么利用Hadoop/spark集群做数...
本篇内容介绍了“Hadoop与Spark性能原理是什么”的有关知识,...