JAVA_HOME=/home/hadoop/jdk1.6.0_38
PATH=$JAVA_HOME/bin:$PATH
CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/jre/lib/dt.jar:$JAVA_HOME/jre/lib/tools.jar
export JAVA_HOME PATH CLASSPATH
source /etc/profile
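If the profile loaded correctly, the JDK should now be resolvable from any shell (a quick sanity check; the exact output depends on the JDK actually installed):

java -version     # should report the 1.6.0_38 JDK configured above
echo $JAVA_HOME   # should print /home/hadoop/jdk1.6.0_38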
tar -zxvf scala-2.10.4.tgz
export SCALA_HOME=/home/hadoop/scala-2.10.4/scala-2.10.4
export PATH=${SCALA_HOME}/bin:$PATH
source /etc/profile
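With SCALA_HOME exported and the profile reloaded, the Scala installation can be verified from the shell:

scala -version    # should report Scala 2.10.4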
ssh-keygen -t rsa
scp id_rsa.pub root@172.20.14.144:/home    /* pssh can be used to distribute this to all nodes */
cat /home/id_rsa.pub >> /root/.ssh/authorized_keys    /* pssh can be used to run this on all nodes */
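If the public key was appended correctly on each node, logging in from the master should no longer prompt for a password (172.20.14.144 is the example node used above):

ssh root@172.20.14.144    # should open a remote shell without asking for a password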
wget http://www.trieuvan.com/apache/hadoop/common/hadoop-2.2.0/hadoop-2.2.0.tar.gz
tar -vxzf hadoop-2.2.0.tar.gz -C /usr/local
cd /usr/local
mv hadoop-2.2.0 hadoop
chown -R hduser:hadoop hadoop
vi /etc/profile
export JAVA_HOME=/usr/lib/jvm/jdk/
export HADOOP_INSTALL=/usr/local/hadoop
export PATH=$PATH:$HADOOP_INSTALL/bin
export PATH=$PATH:$HADOOP_INSTALL/sbin
export HADOOP_MAPRED_HOME=$HADOOP_INSTALL
export HADOOP_COMMON_HOME=$HADOOP_INSTALL
export HADOOP_HDFS_HOME=$HADOOP_INSTALL
export YARN_HOME=$HADOOP_INSTALL
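After reloading the profile, the Hadoop commands should resolve through the new PATH entries (a quick check before moving on to the XML configuration):

source /etc/profile
hadoop version    # should print Hadoop 2.2.0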
export JAVA_HOME=/usr/lib/jvm/jdk/
<configuration>
    <!-- The default HDFS path (NameNode URI) -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://Master:9000</value>
    </property>
    <!-- Buffer size: io.file.buffer.size defaults to 4 KB -->
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
    <!-- Temporary directory path -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/home/tmp</value>
        <description>A base for other temporary directories</description>
    </property>
    <property>
        <name>hadoop.proxyuser.hduser.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.hduser.groups</name>
        <value>*</value>
    </property>
</configuration>
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <!-- ResourceManager address -->
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>Master:8032</value>
    </property>
    <!-- Scheduler port -->
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>Master:8030</value>
    </property>
    <!-- Resource-tracker port -->
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>Master:8031</value>
    </property>
    <!-- ResourceManager admin port -->
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>Master:8033</value>
    </property>
    <!-- ResourceManager web UI port, used to monitor job resource scheduling -->
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>Master:8088</value>
    </property>
</configuration>
<configuration>
    <!-- Hadoop ships three implementations of the MapReduce runtime framework; the
         mapreduce.framework.name property in mapred-site.xml selects "classic", "yarn", or "local" -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!-- MapReduce JobHistory Server address -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>Master:10020</value>
    </property>
    <!-- MapReduce JobHistory Server web UI address -->
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>Master:19888</value>
    </property>
</configuration>
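Note that the JobHistory server configured above is not launched by start-all.sh; if it is wanted, it can be started separately once the cluster is up (a sketch, assuming the Hadoop 2.2.0 layout under /usr/local/hadoop):

/usr/local/hadoop/sbin/mr-jobhistory-daemon.sh start historyserver    # web UI then served at Master:19888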
mkdir -p /hdfs/namenode
mkdir -p /hdfs/datanode
<configuration>
    <!-- Secondary NameNode host name and port -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>Master:9001</value>
    </property>
    <!-- NameNode metadata storage directory -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/hdfs/namenode</value>
    </property>
    <!-- DataNode data storage directory -->
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/hdfs/datanode</value>
    </property>
    <!-- Replication factor -->
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <!-- Set dfs.webhdfs.enabled to true; otherwise WebHDFS operations that list file or
         directory status (LISTSTATUS, GETFILESTATUS, etc.) cannot be used, because that
         information is held by the NameNode -->
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
</configuration>
Master    /* Master is the host name of the master node */
/* Slave1–Slave5 are the host names of the slave nodes */
Slave1
Slave2
Slave3
Slave4
Slave5
./pscp -h hosts.txt -r /hadoop /    /* pscp (from the pssh suite) copies the Hadoop directory to every node in hosts.txt */
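Before the first start, HDFS normally has to be formatted on the master node (a sketch, assuming the /usr/local/hadoop install directory used above):

cd /usr/local/hadoop
./bin/hdfs namenode -format    # initializes the metadata directory set in dfs.namenode.name.dir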
./sbin/start-all.sh
jps
DataNode
ResourceManager
Jps
NodeManager
NameNode
SecondaryNameNode
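Besides jps, the cluster state can be cross-checked from the command line and the web UIs (ports follow the configuration above; 50070 is the default NameNode HTTP port in Hadoop 2.2):

hdfs dfsadmin -report    # should list the live DataNodes and their capacity
# NameNode web UI:        http://Master:50070
# ResourceManager web UI: http://Master:8088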
export SCALA_HOME=/path/to/scala-2.10.4
export SPARK_WORKER_MEMORY=7g
export SPARK_MASTER_IP=172.16.0.140
export MASTER=spark://172.16.0.140:7077
Slave1
Slave2
Slave3
Slave4
Slave5
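With spark-env.sh and the slaves file in place, the standalone Spark cluster can be started from the Spark installation directory and checked with jps (a sketch; spark-shell picks up the MASTER variable exported in spark-env.sh):

./sbin/start-all.sh    # run from the Spark installation directory, not the Hadoop one
jps                    # the master should now show a Master process, each slave a Worker
./bin/spark-shell      # optional: connects to spark://172.16.0.140:7077 via the MASTER variable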