groupBy(function)
The function returns a key, and each element of the input RDD is grouped by that key.
import org.apache.spark.{SparkConf, SparkContext}

def main(args: Array[String]): Unit = {
  // default parallelism of 12
  val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("test").set("spark.default.parallelism", "12"))
  val rdd1 = sc.makeRDD(1 to 10, 2)
  rdd1.groupBy(x => {
    if (x % 2 == 0) "even" else "odd"
  }).collect.foreach(println(_))
}
16/12/20 16:39:07 INFO DAGScheduler: Job 0 finished: collect at ShellTest.scala:25, took 2.225605 s
(even,CompactBuffer(2, 4, 6, 8, 10))
(odd,CompactBuffer(1, 3, 5, 7, 9))
16/12/20 16:39:07 INFO SparkContext: Invoking stop() from shutdown hook
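groupBy also has overloads that take a target partition count or a Partitioner. A minimal sketch, reusing sc and rdd1 from the example above, that groups into an explicit number of partitions:

// same even/odd grouping, but into 3 output partitions instead of the default
val grouped = rdd1.groupBy(x => if (x % 2 == 0) "even" else "odd", 3)
println(grouped.getNumPartitions)    // 3
grouped.collect.foreach(println(_))  // same (even, ...) / (odd, ...) groups as above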
def main(args: Array[String]): Unit = {
  // default parallelism of 12
  val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("test").set("spark.default.parallelism", "12"))
  val a = sc.parallelize(List("dog", "tiger", "lion", "cat", "spider", "eagle"), 2)
  val b = a.keyBy(_.length) // attach a key to each value; the key is the length of the string
  b.groupByKey.collect.foreach(println(_))
}
16/12/20 16:42:25 INFO DAGScheduler: Job 0 finished: collect at ShellTest.scala:26, took 2.853266 s
(3,CompactBuffer(dog, cat))
(4,CompactBuffer(lion))
(5,CompactBuffer(tiger, eagle))
(6,CompactBuffer(spider))
16/12/20 16:42:25 INFO SparkContext: Invoking stop() from shutdown hook
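keyBy(f) is just shorthand for pairing each element with a computed key; a hedged sketch of the equivalent map, reusing the RDD a from the example above:

// keyBy(_.length) behaves like mapping each word w to the pair (w.length, w)
val b2 = a.map(w => (w.length, w))
b2.groupByKey.collect.foreach(println(_))   // same (length, words) groups as above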
def groupByKey(): RDD[(K, Iterable[V])]
def groupByKey(numPartitions: Int): RDD[(K, Iterable[V])]
def groupByKey(partitioner: Partitioner): RDD[(K, Iterable[V])]
This function merges all the V values for each K in an RDD[K,V] into a single collection Iterable[V].
The numPartitions parameter specifies the number of partitions;
the partitioner parameter specifies the partitioning function.
def main(args: Array[String]): Unit = {
  // default parallelism of 12
  val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("test").set("spark.default.parallelism", "12"))
  val rdd1 = sc.makeRDD(Array((1, "A"), (1, "B"), (2, "A"), (2, "D"), (3, "E"), (1, "A")))
  rdd1.groupByKey(2).collect.foreach(println(_))
}
16/12/20 16:18:35 INFO DAGScheduler: Job 0 finished: collect at ShellTest.scala:23, took 1.716898 s
(2,CompactBuffer(A, D))
(1,CompactBuffer(A, B, A))
(3,CompactBuffer(E))
16/12/20 16:18:35 INFO SparkContext: Invoking stop() from shutdown hook
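The Partitioner overload is not exercised above; a minimal sketch using Spark's built-in HashPartitioner, reusing rdd1 from the example, produces the same groups while controlling how keys are assigned to partitions:

import org.apache.spark.HashPartitioner

// hash the keys into 2 partitions, then group the values within each key
rdd1.groupByKey(new HashPartitioner(2)).collect.foreach(println(_))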
def reduceByKey(func: (V, V) => V): RDD[(K, V)]
def reduceByKey(func: (V, V) => V, numPartitions: Int): RDD[(K, V)]
def reduceByKey(partitioner: Partitioner, func: (V, V) => V): RDD[(K, V)]
This function combines the V values for each K in an RDD[K,V] using the given function.
The numPartitions parameter specifies the number of partitions;
the partitioner parameter specifies the partitioning function.
def main(args: Array[String]): Unit = {
  // default parallelism of 12
  val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("test").set("spark.default.parallelism", "12"))
  val rdd1 = sc.makeRDD(Array((1, "A"), (1, "B"), (2, "A"), (2, "D"), (3, "E"), (1, "A")))
  rdd1.reduceByKey(_ + _).collect.foreach(println(_))
}
16/12/20 16:21:11 INFO DAGScheduler: Job 0 finished: collect at ShellTest.scala:23, took 1.476519 s
(1,ABA)
(2,AD)
(3,E)
16/12/20 16:21:11 INFO SparkContext: Invoking stop() from shutdown hook
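Because the values above are Strings, _ + _ concatenates them; with numeric values the same call sums per key. A small word-count style sketch, reusing sc from above (the words RDD is made up for illustration):

// count occurrences per word: map to (word, 1), then sum the 1s for each key
val words = sc.parallelize(Seq("dog", "cat", "dog", "dog", "cat"))
words.map((_, 1)).reduceByKey(_ + _).collect.foreach(println(_))
// expected pairs, in some order: (dog,3) and (cat,2)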
def reduceByKeyLocally(func: (V, V) => V): Map[K, V]
This function combines the V values for each K in an RDD[K,V] using the given function, but returns the results in a Map[K,V] instead of an RDD[K,V].
def main(args: Array[String]): Unit = {
  // default parallelism of 12
  val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("test").set("spark.default.parallelism", "12"))
  val rdd1 = sc.makeRDD(Array((1, "A"), (1, "B"), (2, "A"), (2, "D"), (3, "E"), (1, "A")))
  rdd1.reduceByKeyLocally(_ + _).foreach(println(_))
}
(1,ABA)
(2,AD)
(3,E)
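Since reduceByKeyLocally is an action that returns an ordinary scala.collection.Map on the driver rather than an RDD, the result can be read directly without another Spark job. A minimal sketch continuing the example above:

val localMap = rdd1.reduceByKeyLocally(_ + _)   // Map(1 -> "ABA", 2 -> "AD", 3 -> "E")
println(localMap.getOrElse(1, ""))              // ABA
println(localMap.keys.toList.sorted)            // List(1, 2, 3)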