1. Environment preparation

CentOS 7.2
| hostname  | ip              | role                                                                             |
| --------- | --------------- | -------------------------------------------------------------------------------- |
| hadoop001 | 192.168.252.164 | hdfs: namenode, datanode, secondarynamenode; yarn: resourcemanager, nodemanager |
| hadoop002 | 192.168.252.165 | hdfs: datanode; yarn: nodemanager                                                |
| hadoop003 | 192.168.252.166 | hdfs: datanode; yarn: nodemanager                                                |
jdk-7u55-linux-x64.tar.gz
hadoop-2.6.4.tar.gz
systemctl stop firewalld
systemctl disable firewalld
vi /etc/selinux/config
SELINUX=disabled
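SELINUX=disabled takes effect only after a reboot; to switch SELinux to permissive mode immediately in the current session:
setenforce 0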
vi /etc/sysconfig/network-scripts/ifcfg-eno16777736
TYPE=Ethernet
BOOTPROTO=static
NAME=eno16777736
DEVICE=eno16777736
ONBOOT=yes
IPADDR=192.168.252.164
NETMASK=255.255.255.0
GATEWAY=192.168.252.1
systemctl restart network
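To confirm the static address is up (a quick check, not part of the original steps):
ip addr show eno16777736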
CentOS 7 no longer reads HOSTNAME= from /etc/sysconfig/network; set the hostname with hostnamectl instead (this writes /etc/hostname):
hostnamectl set-hostname hadoop001
vi /etc/hosts
192.168.252.164 hadoop001
192.168.252.165 hadoop002
192.168.252.166 hadoop003
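Once /etc/hosts is in place on every node, name resolution can be spot-checked with ping:
ping -c 1 hadoop002
ping -c 1 hadoop003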
Generate the key pair (id_rsa and id_rsa.pub are created under ~/.ssh):
ssh-keygen -t rsa
Copy the public key (inside ~/.ssh):
cp id_rsa.pub authorized_keys
After every node has done this, merge the authorized_keys files from all nodes and overwrite each node's authorized_keys with the merged file.
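A minimal sketch of that merge, assuming password login to each node as root still works: ssh-copy-id appends the local public key to the remote ~/.ssh/authorized_keys, so running the loop below on every node leaves the same merged file everywhere.
# run on each of the three nodes
for host in hadoop001 hadoop002 hadoop003; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$host
done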
Unpack the JDK so it lands at /usr/jdk1.7.0_55, matching JAVA_HOME below:
tar zxvf jdk-7u55-linux-x64.tar.gz -C /usr
vi ~/.bashrc
export JAVA_HOME=/usr/jdk1.7.0_55
export HADOOP_HOME=/opt/hadoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
source ~/.bashrc
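Verify the JDK is picked up from the new PATH (it should report version 1.7.0_55):
java -version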
2. Setting up node one (hadoop001)
tar zxvf hadoop-2.6.4.tar.gz -C /opt
mv /opt/hadoop-2.6.4 /opt/hadoop
vi /etc/profile
export JAVA_HOME=/usr/jdk1.7.0_55
export HADOOP_HOME=/opt/hadoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
source /etc/profile
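Likewise, verify that the hadoop command resolves (it should print Hadoop 2.6.4):
hadoop version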
core-site.xml (the configuration files below are all under /opt/hadoop/etc/hadoop):
<property>
    <name>fs.default.name</name>
    <value>hdfs://hadoop001:9000</value>
</property>
hdfs-site.xml
<property>
    <name>dfs.name.dir</name>
    <value>/usr/local/data/namenode</value>
</property>
<property>
    <name>dfs.data.dir</name>
    <value>/usr/local/data/datanode</value>
</property>
<property>
    <name>dfs.tmp.dir</name>
    <value>/usr/local/data/tmp</value>
</property>
<property>
    <name>dfs.replication</name>
    <value>3</value>
</property>
mapred-site.xml (Hadoop 2.6.4 ships only mapred-site.xml.template; copy it to mapred-site.xml first):
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
yarn-site.xml
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop001</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
slaves
hadoop001
hadoop002
hadoop003
scp -r /opt/hadoop 192.168.252.165:/opt
scp -r /opt/hadoop 192.168.252.166:/opt
scp /etc/profile 192.168.252.165:/etc
scp /etc/profile 192.168.252.166:/etc
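The copied /etc/profile is not loaded automatically; log in to hadoop002 and hadoop003 and run:
source /etc/profile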
Create the data directory (on all three nodes):
mkdir -p /usr/local/data
Format the NameNode (run once, on hadoop001 only):
hdfs namenode -format
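If the format succeeded, the NameNode metadata directory configured above should now exist (a quick sanity check):
ls /usr/local/data/namenode/current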
start-dfs.sh
Check with the jps command or the HDFS web UI on port 50070; each node should show:
hadoop001: NameNode, DataNode, SecondaryNameNode
hadoop002: DataNode
hadoop003: DataNode
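A short HDFS smoke test; the path /tmp-test is an arbitrary example:
hdfs dfs -mkdir /tmp-test
hdfs dfs -put /etc/hosts /tmp-test
hdfs dfs -ls /tmp-test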
start-yarn.sh
Check with jps or the YARN web UI on port 8088:
hadoop001: ResourceManager, NodeManager
hadoop002: NodeManager
hadoop003: NodeManager
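As a final end-to-end check, the examples jar that ships with Hadoop 2.6.4 can run a small MapReduce job (the jar path assumes HADOOP_HOME=/opt/hadoop):
hadoop jar /opt/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.4.jar pi 2 10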