Spark SQL supports two different methods for converting existing RDDs into Datasets.
The previous article walked through the first method, conversion via reflection-based mapping, with an example; this article demonstrates the second method.
The second method uses a programmatic interface that lets you construct a schema and then apply it to an existing RDD. While this approach is more verbose, it lets you build a Dataset even when the columns and their types are not known until runtime. The flow has three steps: create an RDD of Rows from the original RDD, define the schema as a StructType that matches the Row structure, and apply the schema with createDataFrame.
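To illustrate the "not known until runtime" case, here is a minimal sketch that builds a schema from a column-name string only available at runtime. The variable fieldNames and the all-string column typing are assumptions for illustration, not part of the example below; the imports are the same as in the full example.

    // Minimal sketch: construct a schema whose column names are only known at runtime.
    // "fieldNames" is a hypothetical runtime input (e.g. from a config file or a header line);
    // every column is typed as a string here purely for illustration.
    String fieldNames = "id,age,name";
    List<StructField> fields = new ArrayList<StructField>();
    for (String fieldName : fieldNames.split(",")) {
        fields.add(DataTypes.createStructField(fieldName, DataTypes.StringType, true));
    }
    StructType runtimeSchema = DataTypes.createStructType(fields);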
Data preparation: studentData.txt
1001,20,zhangsan
1002,17,lisi
1003,24,wangwu
1004,16,zhaogang
Code example:
package com.unicom.ljs.spark220.study;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.*;

import java.util.ArrayList;
import java.util.List;

/**
 * @author: Created By lujisen
 * @company ChinaUnicom Software JiNan
 * @date: 2020-01-21 13:42
 * @version: v1.0
 * @description: com.unicom.ljs.spark220.study
 */
public class RDD2DataFrameProgramatically {
    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf().setMaster("local[*]").setAppName("RDD2DataFrameProgramatically");
        JavaSparkContext sc = new JavaSparkContext(sparkConf);
        SQLContext sqlContext = new SQLContext(sc);

        // Step 1: read the raw file and convert each line into a Row (id, age, name)
        JavaRDD<String> lineRDD = sc.textFile("C:\\Users\\Administrator\\Desktop\\studentData.txt");
        JavaRDD<Row> rowJavaRDD = lineRDD.map(new Function<String, Row>() {
            @Override
            public Row call(String line) throws Exception {
                String[] splitLine = line.split(",");
                return RowFactory.create(Integer.valueOf(splitLine[0]),
                        Integer.valueOf(splitLine[1]),
                        splitLine[2]);
            }
        });

        // Step 2: build the schema programmatically as a StructType
        List<StructField> structFields = new ArrayList<StructField>();
        structFields.add(DataTypes.createStructField("id", DataTypes.IntegerType, true));
        structFields.add(DataTypes.createStructField("age", DataTypes.IntegerType, true));
        structFields.add(DataTypes.createStructField("name", DataTypes.StringType, true));
        StructType structType = DataTypes.createStructType(structFields);

        // Step 3: apply the schema to the RDD of Rows
        Dataset<Row> dataFrame = sqlContext.createDataFrame(rowJavaRDD, structType);

        // Register a temporary table and query it with SQL
        dataFrame.registerTempTable("studentInfo");
        Dataset<Row> resultDataSet = sqlContext.sql("select * from studentInfo where age > 17");

        List<Row> collect = resultDataSet.javaRDD().collect();
        for (Row row : collect) {
            System.out.println(row);
        }
        sc.close();
    }
}
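With the sample data above, the age > 17 filter keeps only zhangsan and wangwu, so the program should print something like:

    [1001,20,zhangsan]
    [1003,24,wangwu]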
Key pom.xml dependencies:
<properties>
    <spark.version>2.2.0</spark.version>
    <scala.version>2.11.8</scala.version>
</properties>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>${spark.version}</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>${spark.version}</version>
</dependency>
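One final note: SQLContext and registerTempTable still work on Spark 2.2 but have been superseded since Spark 2.0, where SparkSession became the preferred entry point and createOrReplaceTempView replaced the deprecated registerTempTable. A minimal sketch of the same flow in that style, reusing rowJavaRDD and structType exactly as built in the example above:

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SparkSession;

    // Minimal sketch (Spark 2.x style): SparkSession replaces SQLContext as the entry point
    SparkSession spark = SparkSession.builder()
            .master("local[*]")
            .appName("RDD2DataFrameProgramatically")
            .getOrCreate();

    // rowJavaRDD and structType are constructed as in the full example above
    Dataset<Row> df = spark.createDataFrame(rowJavaRDD, structType);
    df.createOrReplaceTempView("studentInfo");
    Dataset<Row> result = spark.sql("select * from studentInfo where age > 17");
    result.show();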