GroupByKey
package sparkcore

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo07GroupByKey {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("GroupByKey").setMaster("local")
    val sc = new SparkContext(conf)

    val linesRDD: RDD[String] = sc.textFile("data/words.txt")

    // flatMap splits the data: one line becomes multiple words
    val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))
    // wordsRDD.foreach(println)

    // map each word into key-value format: (word, 1)
    val kvRDD: RDD[(String, Int)] = wordsRDD.map(word => (word, 1))

    /**
     * groupByKey: groups by the key, collecting the values into an iterator
     * groupBy: groups by a column you specify
     *
     * Both cause a shuffle
     */
    val groupByKeyRDD: RDD[(String, Iterable[Int])] = kvRDD.groupByKey()
    val countRDD: RDD[(String, Int)] = groupByKeyRDD.map {
      case (word: String, values: Iterable[Int]) => (word, values.sum)
    }
    countRDD.foreach(println)

    val groupByRDD: RDD[(String, Iterable[(String, Int)])] = kvRDD.groupBy(kv => kv._1)
    groupByRDD.foreach(println)
  }
}
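As the comment above notes, both groupByKey and groupBy trigger a shuffle. For a word count like this, Spark's reduceByKey is usually the better choice, since it combines values on each partition before the shuffle instead of sending every (word, 1) pair across the network. Below is a minimal sketch of that alternative, reusing the kvRDD from the code above; reducedRDD is just an illustrative name, not part of the original example.

    // Alternative to the groupByKey + map step above:
    // reduceByKey merges values map-side first, so less data is shuffled.
    val reducedRDD: RDD[(String, Int)] = kvRDD.reduceByKey(_ + _)
    reducedRDD.foreach(println)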
That concludes this introduction to GroupByKey. We hope this article is helpful, and thank you for your continued support.