## 解壓 [yyl@node1 program]$ tar -zxf hadoop-2.5.2.tar.gz ## 建立文件夾 [yyl@node1 program]$ mkdir hadoop-2.5.2/name [yyl@node1 program]$ mkdir hadoop-2.5.2/data [yyl@node1 program]$ mkdir hadoop-2.5.2/tmp ## 配置hadoop-env.sh [yyl@node1 program]$ cd hadoop-2.5.2/etc/hadoop/ [yyl@node1 hadoop]$ vim hadoop-env.sh export JAVA_HOME=/usr/lib/java/jdk1.7.0_80 ## 配置yarn-env.sh [yyl@node1 hadoop]$ vim yarn-env.sh export JAVA_HOME=/usr/lib/java/jdk1.7.0_80 ## 配置slaves [yyl@node1 hadoop]$ vim slaves node3.zhch node4.zhch node5.zhch ## 配置core-site.xml [yyl@node1 program]$ cd hadoop-2.5.2/etc/hadoop/ [yyl@node1 hadoop]$ vim core-site.xml <configuration> <property> <name>fs.defaultFS</name> <value>hdfs://node1.zhch:9000</value> </property> <property> <name>io.file.buffer.size</name> <value>131072</value> </property> <property> <name>hadoop.tmp.dir</name> <value>file:/home/yyl/program/hadoop-2.5.2/tmp</value> </property> <property> <name>hadoop.proxyuser.hduser.hosts</name> <value>*</value> </property> <property> <name>hadoop.proxyuser.hduser.groups</name> <value>*</value> </property> </configuration> ## 配置hdfs-site.xml [yyl@node1 hadoop]$ vim hdfs-site.xml <configuration> <property> <name>dfs.namenode.name.dir</name> <value>file:/home/yyl/program/hadoop-2.5.2/name</value> </property> <property> <name>dfs.datanode.data.dir</name> <value>file:/home/yyl/program/hadoop-2.5.2/data</value> </property> <property> <name>dfs.replication</name> <value>1</value> </property> <property> <name>dfs.webhdfs.enabled</name> <value>true</value> </property> <property> <name>dfs.permissions</name> <value>false</value> </property> <property> <name>dfs.nameservices</name> <value>ns1,ns2</value> </property> <property> <name>dfs.namenode.rpc-address.ns1</name> <value>node1.zhch:9000</value> </property> <property> <name>dfs.namenode.http-address.ns1</name> <value>node1.zhch:50070</value> </property> <property> <name>dfs.namenode.rpc-address.ns2</name> <value>node2.zhch:9000</value> </property> <property> 
<name>dfs.namenode.http-address.ns2</name> <value>node2.zhch:50070</value> </property> </configuration> ##配置 mapred-site.xml [yyl@node1 hadoop]$ cp mapred-site.xml.template mapred-site.xml [yyl@node1 hadoop]$ vim mapred-site.xml <configuration> <property> <name>mapreduce.framework.name</name> <value>yarn</value> </property> <property> <name>mapreduce.jobhistory.address</name> <value>node1.zhch:10020</value> </property> <property> <name>mapreduce.jobhistory.webapp.address</name> <value>node1.zhch:19888</value> </property> </configuration> ##配置 yarn-site.xml [yyl@node1 hadoop]$ vim yarn-site.xml <configuration> <property> <name>yarn.nodemanager.aux-services</name> <value>mapreduce_shuffle</value> </property> <property> <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name> <value>org.apache.hadoop.mapred.ShuffleHandler</value> </property> <property> <name>yarn.resourcemanager.address</name> <value>node1.zhch:8032</value> </property> <property> <name>yarn.resourcemanager.scheduler.address</name> <value>node1.zhch:8030</value> </property> <property> <name>yarn.resourcemanager.resource-tracker.address</name> <value>node1.zhch:8031</value> </property> <property> <name>yarn.resourcemanager.admin.address</name> <value>node1.zhch:8033</value> </property> <property> <name>yarn.resourcemanager.webapp.address</name> <value>node1.zhch:8088</value> </property> </configuration> ## 分發到各個節點 [yyl@node1 hadoop]$ cd /home/yyl/program/ [yyl@node1 program]$ scp -rp hadoop-2.5.2 yyl@node2.zhch:/home/yyl/program/ [yyl@node1 program]$ scp -rp hadoop-2.5.2 yyl@node3.zhch:/home/yyl/program/ [yyl@node1 program]$ scp -rp hadoop-2.5.2 yyl@node4.zhch:/home/yyl/program/ [yyl@node1 program]$ scp -rp hadoop-2.5.2 yyl@node5.zhch:/home/yyl/program/ ## 在各個節點上設置hadoop環境變量 [yyl@node1 ~]$ vim .bash_profile export HADOOP_PREFIX=/home/yyl/program/hadoop-2.5.2 export HADOOP_COMMON_HOME=$HADOOP_PREFIX export HADOOP_HDFS_HOME=$HADOOP_PREFIX export HADOOP_MAPRED_HOME=$HADOOP_PREFIX export HADOOP_YARN_HOME=$HADOOP_PREFIX export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin
2、格式化並啓動NameNode
## 在namenode1上執行格式化 [yyl@node1 ~]$ hdfs namenode -format -clusterId c1 ## 在namenode2上執行格式化 [yyl@node2 ~]$ hdfs namenode -format -clusterId c1 ## 在namenode1啓動namenode [yyl@node1 ~]$ hadoop-daemon.sh start namenode starting namenode, logging to /home/yyl/program/hadoop-2.5.2/logs/hadoop-yyl-namenode-node1.zhch.out [yyl@node1 ~]$ jps 1177 NameNode 1240 Jps ## 在namenode2啓動namenode [yyl@node2 ~]$ hadoop-daemon.sh start namenode starting namenode, logging to /home/yyl/program/hadoop-2.5.2/logs/hadoop-yyl-namenode-node2.zhch.out [yyl@node2 ~]$ jps 1508 Jps 1445 NameNode
3、HDFS聯邦檢查
http://node1.zhch:50070/
[yyl@node1 ~]$ hadoop-daemons.sh start datanode node4.zhch: starting datanode, logging to /home/yyl/program/hadoop-2.5.2/logs/hadoop-yyl-datanode-node4.zhch.out node5.zhch: starting datanode, logging to /home/yyl/program/hadoop-2.5.2/logs/hadoop-yyl-datanode-node5.zhch.out node3.zhch: starting datanode, logging to /home/yyl/program/hadoop-2.5.2/logs/hadoop-yyl-datanode-node3.zhch.out [yyl@node1 ~]$ start-yarn.sh starting yarn daemons starting resourcemanager, logging to /home/yyl/program/hadoop-2.5.2/logs/yarn-yyl-resourcemanager-node1.zhch.out node5.zhch: starting nodemanager, logging to /home/yyl/program/hadoop-2.5.2/logs/yarn-yyl-nodemanager-node5.zhch.out node3.zhch: starting nodemanager, logging to /home/yyl/program/hadoop-2.5.2/logs/yarn-yyl-nodemanager-node3.zhch.out node4.zhch: starting nodemanager, logging to /home/yyl/program/hadoop-2.5.2/logs/yarn-yyl-nodemanager-node4.zhch.out [yyl@node1 ~]$ jps 1402 Jps 1177 NameNode 1333 ResourceManager [yyl@node2 ~]$ jps 1445 NameNode 1539 Jps [yyl@node3 ~]$ jps 1214 NodeManager 1166 DataNode 1256 Jps
下次啓動不須要重複上面的步驟，能夠直接使用下面的命令啓動集羣: sh $HADOOP_HOME/sbin/start-dfs.sh sh $HADOOP_HOME/sbin/start-yarn.sh