Scala in Practice

Published 2020-04-02 by 风铃

Practical Tips

List

List(1, 9, 2, 4, 5) span (_ < 3)       // (List(1), List(9, 2, 4, 5))  stops at the first element that fails the predicate

List(1, 9, 2, 4, 5) partition (_ < 3)  // (List(1, 2), List(9, 4, 5))  scans every element

List(1, 9, 2, 4, 5) splitAt 2          // (List(1, 9), List(2, 4, 5))  splits at the given index

List(1, 9, 2, 4, 5) groupBy (5 < _)    // Map(false -> List(1, 2, 4, 5), true -> List(9))  groups into a Map keyed by the Boolean result
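To make the difference between span and partition concrete, here is a minimal sketch with a counting predicate (the counter is purely illustrative): span stops calling the predicate at the first failure, while partition applies it to every element.

var calls = 0
val p = (x: Int) => { calls += 1; x < 3 }

List(1, 9, 2, 4, 5) span p       // (List(1), List(9, 2, 4, 5))
calls                            // 2: evaluated for 1 and for 9, then stopped

calls = 0
List(1, 9, 2, 4, 5) partition p  // (List(1, 2), List(9, 4, 5))
calls                            // 5: evaluated for every element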

Iterator

grouped
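Before the Spark use case, a minimal sketch of what grouped does on a plain iterator: grouped(n) turns an Iterator[A] into an iterator of batches of size n, with the last batch possibly shorter.

val batches = (1 to 7).iterator.grouped(3).map(_.toList).toList
// List(List(1, 2, 3), List(4, 5, 6), List(7))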

import com.alibaba.fastjson.JSON                 // assumed: fastjson provides the JSON.toJSONString(obj, prettyFormat) used below
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.BigquerySparkSession._

val conf = new SparkConf()
val builder = SparkSession.builder().config(conf).enableHiveSupport()
val spark = builder.getOrCreateBigquerySparkSession()
spark.sql("use db")                              // sql() takes one statement at a time
val df = spark.sql("select * from table")

val dataset = df.rdd.mapPartitions(iter => {
  // Within each partition, take the rows in batches of 100 and serialize each batch in one pass
  iter.grouped(100)
    .flatMap(rows => rows.map(row => JSON.toJSONString(row, false)))
})

// The elements are already JSON strings, so just drop nulls and blank lines
val filteredEmptyLine = dataset
  .filter(_ != null)
  .filter(_.trim.nonEmpty)
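
The usual reason to batch inside a partition like this is to amortize per-call overhead against an external system: one bulk write per 100 records instead of one write per record. A minimal sketch of that pattern (sendBatch is a hypothetical bulk-write function, not a Spark API):

// Hypothetical client: one bulk call per batch of up to 100 JSON strings
def sendBatch(batch: Seq[String]): Unit = { /* e.g. a single bulk HTTP or DB call */ }

filteredEmptyLine.foreachPartition { iter =>
  iter.grouped(100).foreach(batch => sendBatch(batch))
}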