export JAVA_HOME=/usr/local/jdk1.8.0_73
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=$PATH:${JAVA_HOME}/bin
Don't forget to run source /etc/profile afterwards!
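To confirm the JDK is picked up after sourcing, a quick check:

java -version
echo $JAVA_HOME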
b). Create the hadoop user
# Create the hadoop user
useradd -m hadoop
# Set the hadoop user's password
passwd hadoop
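Optionally, the hadoop user can be given sudo rights; a sketch, assuming an Ubuntu-style system where the sudo group exists:

# Optional: grant sudo via the sudo group (Ubuntu-style systems)
usermod -aG sudo hadoop
# Switch to the hadoop user
su - hadoop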
export HADOOP_HOME=/home/hadoop/hadoop-2.7.2
export PATH=$PATH:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin
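After appending these lines to /etc/profile, a quick sanity check that the hadoop command resolves:

source /etc/profile
hadoop version   # should report Hadoop 2.7.2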
b). Go into the hadoop_home/etc/hadoop/ directory and edit the following files
1. Edit the hadoop-env.sh file and configure JAVA_HOME:
export JAVA_HOME=/usr/local/jdk1.8.0_73
2. Edit the core-site.xml file and add the following:
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop-master:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>file:/home/hadoop/hadoop-2.7.2/tmp</value>
  </property>
</configuration>
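The hadoop.tmp.dir path is not always created automatically; creating it up front (as the hadoop user) avoids surprises at startup. A minimal sketch, assuming the path configured above:

mkdir -p /home/hadoop/hadoop-2.7.2/tmp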
3. Edit the hdfs-site.xml file and add the following:
<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/home/hadoop/hadoop-2.7.2/dfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/home/hadoop/hadoop-2.7.2/dfs/data</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>hadoop-master:9001</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>
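Likewise, the NameNode and DataNode directories referenced above can be created ahead of time; a sketch assuming the same layout:

mkdir -p /home/hadoop/hadoop-2.7.2/dfs/name /home/hadoop/hadoop-2.7.2/dfs/data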
4. Edit the mapred-site.xml file and add the following:
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop-master:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop-master:19888</value>
  </property>
</configuration>
5. Edit the yarn-site.xml file and add the following:
<configuration>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>hadoop-master:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>hadoop-master:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>hadoop-master:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>hadoop-master:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>hadoop-master:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop-master</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>1536</value>
  </property>
  <!-- Node manager configuration -->
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>1536</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>8</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
</configuration>
6. Edit the slaves file and add all slave nodes to it, e.g.:
# The node names here must match the hostname mappings configured in /etc/hosts
hadoop-slave1
hadoop-slave2
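One way to write the file in a single step (a sketch; in Hadoop 2.7.2 the file lives under ${HADOOP_HOME}/etc/hadoop):

cat > ${HADOOP_HOME}/etc/hadoop/slaves <<'EOF'
hadoop-slave1
hadoop-slave2
EOF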
Set the master's hostname to hadoop-master (in /etc/hostname).
Note: every slave node's hostname must be changed accordingly, e.g. hadoop-slave1, hadoop-slave2, ...
Format HDFS on the master node: hadoop namenode -format
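In Hadoop 2.x, hadoop namenode -format still works but prints a deprecation warning; the equivalent current form is:

hdfs namenode -format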
Edit the hosts file (this must be done on every node, master and slaves alike): add entries for all master and slave nodes and comment out the original localhost entries, as follows:
#127.0.0.1 localhost
#127.0.1.1 ubuntu
# Master node
192.168.100.180 hadoop-master
# Slave node 1
192.168.100.181 hadoop-slave1
# Slave node 2
192.168.100.182 hadoop-slave2
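Once the hosts file is in place on every node, name resolution can be verified from the master (hostnames as mapped above):

ping -c 1 hadoop-slave1
ping -c 1 hadoop-slave2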
Configure passwordless SSH login. This is not covered in detail here; see: http://jingyan.baidu.com/article/f3ad7d0fe7bd0d09c2345b75.html. A minimal sketch follows.
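For reference, the usual key-based setup, run as the hadoop user on the master (assumes ssh-copy-id is available; passwordless login to the master itself is also needed):

# Generate a key pair with an empty passphrase
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
# Copy the public key to every node, the master included
ssh-copy-id hadoop@hadoop-master
ssh-copy-id hadoop@hadoop-slave1
ssh-copy-id hadoop@hadoop-slave2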
Clone the configured server system to the slave nodes, or repeat steps 1~5 on each slave node.
Note: 1. After cloning a system, remember to update /etc/hostname; /etc/hosts must stay consistent across the master and slave nodes.
2. After cloning, each slave node's IP address must be changed; assigning static IPs to both master and slave nodes is recommended.
With the above configuration in place, the Hadoop environment setup is essentially complete; Hadoop can be started with start-all.sh.
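Note that start-all.sh is deprecated in Hadoop 2.x; starting HDFS and YARN separately and then verifying the daemons with jps is the more common pattern:

start-dfs.sh
start-yarn.sh
# On the master, jps should list NameNode, SecondaryNameNode and ResourceManager;
# on each slave, DataNode and NodeManager.
jps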
a). Access Hadoop's web management UI at http://<master-IP>:8088
b). Access the HDFS web management UI at http://<master-IP>:50070
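A headless way to confirm both UIs respond (an HTTP 200 is expected), using the ports configured above:

curl -s -o /dev/null -w '%{http_code}\n' http://hadoop-master:8088
curl -s -o /dev/null -w '%{http_code}\n' http://hadoop-master:50070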