While we can perform many operations through the KuduContext shown above, we can also invoke the read/write APIs directly against the default data source itself.
To set up a read, we need to specify options for the Kudu table: the name of the table we want to read, and the list of Kudu masters for the cluster serving that table.
import org.apache.kudu.spark.kudu._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

/**
  * Created by angel;
  */
object DataFrame_read {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("AcctfileProcess")
      // set the master URL and Spark parameters
      .setMaster("local")
      .set("spark.worker.timeout", "500")
      .set("spark.cores.max", "10")
      .set("spark.rpc.askTimeout", "600s")
      .set("spark.network.timeout", "600s")
      .set("spark.task.maxFailures", "1")
      .set("spark.speculation", "false")
      .set("spark.driver.allowMultipleContexts", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sparkContext = SparkContext.getOrCreate(sparkConf)
    val sqlContext = SparkSession.builder().config(sparkConf).getOrCreate().sqlContext
    //TODO 1: define the table name and the Kudu master list
    val kuduTableName = "spark_kudu_tbl"
    val kuduMasters = "hadoop01:7051,hadoop02:7051,hadoop03:7051"
    // create a KuduContext (its constructor takes the master addresses, not the table name)
    val kuduContext = new KuduContext(kuduMasters, sqlContext.sparkContext)
    //TODO 2: configure the Kudu options
    val kuduOptions: Map[String, String] = Map(
      "kudu.table" -> kuduTableName,
      "kudu.master" -> kuduMasters)
    //TODO 3: perform the read
    val customerReadDF = sqlContext.read.options(kuduOptions).kudu
    val filterData = customerReadDF.select("name", "age", "city").filter("age<30")
    //TODO 4: print the result
    filterData.show()
  }
}
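The same data source also covers the write path. Below is a minimal sketch of appending a DataFrame to the same spark_kudu_tbl table with the same kuduOptions map; the sample rows are hypothetical, and it assumes the .kudu writer shorthand provided by the kudu-spark implicits used in the read example (newer kudu-spark releases write via .format("kudu").save() instead).

import org.apache.kudu.spark.kudu._
import org.apache.spark.sql.SparkSession

object DataFrame_write {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("KuduWriteSketch")
      .master("local")
      .getOrCreate()
    import spark.implicits._

    // same table and master list as in the read example above
    val kuduTableName = "spark_kudu_tbl"
    val kuduMasters = "hadoop01:7051,hadoop02:7051,hadoop03:7051"
    val kuduOptions: Map[String, String] = Map(
      "kudu.table" -> kuduTableName,
      "kudu.master" -> kuduMasters)

    // hypothetical sample rows matching the (name, age, city) columns read above
    val customersDF = Seq(
      ("alice", 25, "beijing"),
      ("bob", 31, "shanghai")
    ).toDF("name", "age", "city")

    // Kudu's data source only accepts SaveMode.Append; the .kudu shorthand
    // comes from the same kudu-spark implicits as the reader
    customersDF.write.options(kuduOptions).mode("append").kudu
  }
}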