The core idea behind Spark is the RDD: keep all the data involved in a computation in distributed memory. Iterative computation typically makes repeated passes over the same dataset, so keeping that data in memory greatly reduces IO overhead. This is the heart of Spark's design: in-memory computing.
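A minimal sketch of that idea (the HDFS path reuses the one from later in this article, and the ten-pass loop is an illustrative assumption): caching an RDD keeps it in memory, so each iteration reuses the cached partitions instead of re-reading from disk.

// Sketch: cache an RDD so repeated passes reuse the in-memory data.
// The loop body is illustrative, not from the original text.
val data = sc.textFile("hdfs://uplooking01:8020/sparktest/").cache()
for (i <- 1 to 10) {
  // Each pass after the first reads cached partitions, not HDFS
  println(s"pass $i: ${data.count()} lines")
}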
Because Spark is implemented in Scala, the two integrate tightly: Spark can embed the Scala interpreter directly, which lets you manipulate distributed datasets in Scala as easily as you would local collection objects.
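A small illustration of that point (assuming an existing SparkContext named sc): the same map/filter style used on a local Scala List applies unchanged to an RDD.

// Local Scala collection
val localSquares = List(1, 2, 3, 4).map(n => n * n).filter(_ > 4)
// The same operations on a distributed dataset
val rddSquares = sc.parallelize(List(1, 2, 3, 4)).map(n => n * n).filter(_ > 4)
println(rddSquares.collect().toList)  // List(9, 16), same as localSquares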
Creating an RDD by parallelizing a collection (used for testing)
val list = List("java c++ java", "java java java c++")
val rdd = sc.parallelize(list)
Creating an RDD by loading data from HDFS (used in production)
val rdd = sc.textFile("hdfs://uplooking01:8020/sparktest/")
The Maven pom.xml for the project:

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.uplooking.bigdata</groupId>
    <artifactId>2018-11-08-spark</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <scala.version>2.11.8</scala.version>
        <spark.version>2.2.0</spark.version>
        <hadoop.version>2.7.5</hadoop.version>
    </properties>

    <dependencies>
        <!-- Scala dependency -->
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <!-- Spark dependencies -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!-- Pin the hadoop-client API version -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <!-- Compile Scala -->
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.2</version>
                <executions>
                    <execution>
                        <id>scala-compile-first</id>
                        <phase>process-resources</phase>
                        <goals>
                            <goal>add-source</goal>
                            <goal>compile</goal>
                        </goals>
                    </execution>
                    <execution>
                        <id>scala-test-compile</id>
                        <phase>process-test-resources</phase>
                        <goals>
                            <goal>testCompile</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <!-- Compile Java -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <executions>
                    <execution>
                        <phase>compile</phase>
                        <goals>
                            <goal>compile</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <!-- Jar packaging plugin -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.4.3</version>
                <configuration>
                    <createDependencyReducedPom>false</createDependencyReducedPom>
                </configuration>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
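With this pom.xml in place, the jar submitted below can be built with Maven; the shade plugin runs during the package phase and produces the jar alongside its original (unshaded) counterpart:

mvn clean package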
The driver code submitted to the cluster (note there is no setMaster call; the master is passed on the spark-submit command line below):

val conf = new SparkConf()
conf.setAppName("Ops1")
val sc = new SparkContext(conf)
val rdd1: RDD[String] = sc.parallelize(List("java c++ java", "java java c++"))
val ret = rdd1.collect().toBuffer
println(ret)
spark-submit --master spark://uplooking01:7077 --class com.uplooking.bigdata.spark01.Ops1 original-spark-1.0-SNAPSHOT.jar
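Beyond --master and --class, spark-submit also accepts resource options; a hedged variant of the same command (the memory and core values here are arbitrary examples, not from the original):

spark-submit --master spark://uplooking01:7077 \
  --class com.uplooking.bigdata.spark01.Ops1 \
  --executor-memory 1g \
  --total-executor-cores 2 \
  original-spark-1.0-SNAPSHOT.jar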
The same word count run locally (the master is set to local[4] in code, so it uses four local threads):

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

object Ops1 {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setAppName("Ops1")
    conf.setMaster("local[4]")
    val sc = new SparkContext(conf)
    // The minimum number of partitions is usually not specified
    val rdd1 = sc.textFile("hdfs://uplooking01:8020/sparktest/")
    val rdd2: RDD[String] = rdd1.flatMap(line => line.split(" "))
    val rdd3: RDD[(String, Int)] = rdd2.map(word => (word, 1))
    val rdd4: RDD[(String, Int)] = rdd3.reduceByKey(_ + _)
    val ret: mutable.Buffer[(String, Int)] = rdd4.collect().toBuffer
    println(ret)
    println(rdd1.partitions.length)
  }
}
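collect() pulls every result back to the driver, which only suits small outputs like this demo. As a sketch (the output path is an assumption), a production job would typically write the results back to HDFS instead:

// Write the word counts to HDFS rather than collecting to the driver.
// The output path is illustrative; it must not already exist.
rdd4.saveAsTextFile("hdfs://uplooking01:8020/sparktest-output/")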
Specifying the number of partitions when parallelizing (in practice the number of partitions is usually specified)
Manually specifying the number of partitions
val rdd = sc.parallelize(List("java c++ java", "java java c++"), 2)
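To confirm the setting took effect, partitions.length should report the requested count:

println(rdd.partitions.length)  // 2, matching the requested number of partitions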
Specifying the number of partitions with textFile
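textFile takes an optional second argument, minPartitions; a minimal sketch, reusing the HDFS path from above. Note that it is a minimum: the actual partition count also depends on how the input splits (e.g. HDFS blocks).

// Ask for at least 3 partitions when loading from HDFS
val rdd = sc.textFile("hdfs://uplooking01:8020/sparktest/", 3)
println(rdd.partitions.length)  // >= 3, depending on the input splits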