spark--Environment Setup--6. Spark 1.3.0 Cluster Setup

1. Install Spark

$ cd /usr/local

$ tar -zxvf spark-1.3.0-bin-hadoop2.4.tgz

$ mv spark-1.3.0-bin-hadoop2.4 spark

$ vi ~/.bashrc

export SPARK_HOME=/usr/local/spark/
export PATH=$PATH:$SPARK_HOME/bin
export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib

$ source ~/.bashrc
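
# Quick check that the new variables are in effect (expected output assumes the paths above)
$ echo $SPARK_HOME
/usr/local/spark/

$ which spark-shell
/usr/local/spark/bin/spark-shell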

$ cd spark/conf/

$ mv spark-env.sh.template spark-env.sh

$ vi spark-env.sh

export JAVA_HOME=/usr/java/latest/
export SCALA_HOME=/usr/local/scala/
# IP address of the Spark cluster's master node
export SPARK_MASTER_IP=192.168.2.100
# Maximum amount of memory a worker node may allocate to executors
export SPARK_WORKER_MEMORY=1g
# Configuration directory of the Hadoop cluster
export HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop
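
# Optional spark-env.sh settings that are often tuned as well (values below are examples only):
# cores each worker may hand out to executors (defaults to all available cores)
export SPARK_WORKER_CORES=2
# port the master listens on for spark:// URLs (7077 is the default)
export SPARK_MASTER_PORT=7077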

$ mv slaves.template slaves

$ vi slaves

# Replace the default localhost entry with the worker hostnames:
spark2
spark3
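
# start-all.sh launches a Worker over SSH on every host listed in slaves,
# so the master must be able to reach spark2 and spark3 without a password.
# A quick check (assuming SSH keys are already distributed):
$ ssh spark2 hostname
$ ssh spark3 hostname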

$ cd /usr/local

$ scp -r spark root@spark2:/usr/local/

$ scp -r spark root@spark3:/usr/local/

$ scp ~/.bashrc root@spark2:~/.bashrc

$ scp ~/.bashrc root@spark3:~/.bashrc

# Run on spark2 and spark3 respectively

$ source ~/.bashrc
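
# Optional sanity check from the master: confirm the copies landed on both workers
$ ssh spark2 ls /usr/local/spark
$ ssh spark3 ls /usr/local/spark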

2. Start Spark

$ cd spark/sbin/

$ ./start-all.sh

$ jps
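
# Expected processes (illustrative, PIDs omitted):
#   spark1:          Master
#   spark2 / spark3: Worker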

# Open http://spark1:8080 in a browser

$ cd ../../

$ spark-shell

scala> exit
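
# Launched with no arguments as above, spark-shell runs in local mode; to connect it
# to the cluster just started, pass the master URL (the standalone master listens on port 7077 by default):
$ spark-shell --master spark://spark1:7077

# When finished, the whole cluster can be stopped from the master:
$ $SPARK_HOME/sbin/stop-all.sh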
