vim /etc/sysconfig/network
vim /etc/udev/rules.d/70-persistent-net.rules
vim /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=static
NAME="eth0"
IPADDR=192.168.1.101
PREFIX=24
GATEWAY=192.168.1.2
DNS1=192.168.1.2
vim /etc/hosts
192.168.1.101 hadoop101
192.168.1.102 hadoop102
192.168.1.103 hadoop103
192.168.1.104 hadoop104
192.168.1.105 hadoop105
192.168.1.106 hadoop106
192.168.1.107 hadoop107
192.168.1.108 hadoop108
service iptables stop
chkconfig iptables off
useradd test
passwd test
sudo vim /etc/sudoers
[test@hadoop102 opt]$ sudo mkdir /opt/software
[test@hadoop102 opt]$ sudo mkdir /opt/module
[test@hadoop102 opt]$ sudo chown test:test /opt/software /opt/module
cd ~
mkdir bin/
cd bin/
vim xsync
#!/bin/bash
# xsync: distribute a file or directory to hadoop103..hadoop104 via rsync,
# recreating it at the same absolute path on every target host.
# Usage: xsync <path>

# 1. Require at least one argument; exit with an error otherwise.
#    (Original `if(pcount=$#);then` was an assignment in a subshell, never a test.)
pcount=$#
if (( pcount == 0 )); then
  echo "no args" >&2
  exit 1
fi

# 2. Extract the file name (last path component) of the argument.
p1=$1
fname=$(basename "$p1")
echo "fname=$fname"

# 3. Resolve the parent directory to an absolute physical path
#    (-P resolves symlinks so every host gets the same real path).
pdir=$(cd -P "$(dirname "$p1")" && pwd)
echo "pdir=$pdir"

# 4. Current user name, reused for the remote login.
user=$(whoami)

# 5. Sync to each target host (103 and 104; 102 is the source machine).
for (( host = 103; host < 105; host++ )); do
  echo "-------------- hadoop${host} ------------------"
  rsync -av "$pdir/$fname" "$user@hadoop${host}:$pdir"
done
chmod +x xsync
sudo cp xsync /bin
sudo xsync /bin/xsync
(1)查詢是否安裝Java軟件
[test@hadoop102 ~]$ rpm -qa | grep java
(2)若是安裝的版本低於1.7,卸載該JDK
[test@hadoop102 ~]$ sudo rpm -e 軟件包
[test@hadoop102 ~]$sudo rpm -qa | grep java | xargs sudo rpm -e --nodeps
(3)查看JDK安裝路徑
[test@hadoop102 ~]$which java
[test@hadoop102 opt]$ tar -zxvf jdk-8u144-linux-x64.tar.gz -C /opt/module/
[test@hadoop102 opt]$sudo vim /etc/profile.d/env.sh
#JAVA_HOME
export JAVA_HOME=/opt/module/jdk1.8.0_144
export PATH=$PATH:$JAVA_HOME/bin
[test@hadoop102 opt]$source /etc/profile.d/env.sh
java -version
[test@hadoop102 opt]$tar -zxvf hadoop-2.7.2.tar.gz -C /opt/module/
[test@hadoop102 opt]$sudo vim /etc/profile.d/env.sh
#HADOOP_HOME
export HADOOP_HOME=/opt/module/hadoop-2.7.2
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
[test@hadoop102 opt]$source /etc/profile.d/env.sh
[test@hadoop102 opt]$hadoop version
[test@hadoop102 .ssh]$ssh-keygen -t rsa 三次回車
[test@hadoop102 .ssh]$ssh-copy-id hadoop102
[test@hadoop102 .ssh]$ssh-copy-id hadoop103
[test@hadoop102 .ssh]$ssh-copy-id hadoop104
測試 [test@hadoop102 .ssh]$ssh hadoop103
[test@hadoop102 .ssh]$exit
xsync /home/test/.ssh
cd /opt/module/hadoop-2.7.2/etc/hadoop
vim hadoop-env.sh
export JAVA_HOME=/opt/module/jdk1.8.0_144
vim yarn-env.sh
export JAVA_HOME=/opt/module/jdk1.8.0_144
vim mapred-env.sh
export JAVA_HOME=/opt/module/jdk1.8.0_144
vim slaves(不能有任何空格)
hadoop102
hadoop103
hadoop104
<!-- 指定HDFS中NameNode的地址 -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop102:9000</value>
</property>
<!-- 指定Hadoop運行時產生文件的存儲目錄 -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop-2.7.2/data/tmp</value>
</property>
<!-- 數據的副本數量 -->
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<!-- 指定Hadoop輔助名稱節點主機配置 -->
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop104:50090</value>
</property>
<!-- Site specific YARN configuration properties -->
<!-- Reducer獲取數據的方式 -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- 指定YARN的ResourceManager的地址 -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop103</value>
</property>
<!-- 日誌彙集功能使能 -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<!-- 日誌保留時間設置7天 -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
配置:
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- 歷史服務器端地址 -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>hadoop104:10020</value>
</property>
<!-- 歷史服務器web端地址 -->
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>hadoop104:19888</value>
</property>
啓動:
啓動歷史服務器:mr-jobhistory-daemon.sh start historyserver
xsync /opt/module/hadoop-2.7.2/etc
[test@hadoop102 hadoop-2.7.2]$bin/hdfs namenode -format
[test@hadoop102 hadoop-2.7.2]$sbin/start-dfs.sh
[test@hadoop103 hadoop-2.7.2]$sbin/start-yarn.sh
rm -fr data logs
<property>
<name>io.compression.codecs</name>
<value>
org.apache.hadoop.io.compress.GzipCodec,
org.apache.hadoop.io.compress.DefaultCodec,
org.apache.hadoop.io.compress.BZip2Codec,
org.apache.hadoop.io.compress.SnappyCodec,
com.hadoop.compression.lzo.LzoCodec,
com.hadoop.compression.lzo.LzopCodec
</value>
</property>
<property>
<name>io.compression.codec.lzo.class</name>
<value>com.hadoop.compression.lzo.LzoCodec</value>
</property>
(1)建立並格式化新分區
fdisk /dev/sda
m #進入幫助引導模式
n #新增分區
p #指定新分區爲基本分區
一路回車 #但要記住分區號
w #保存並執行剛纔的分區操做
reboot #重啓
==============================
fdisk -l
==============================
mkfs.xfs /dev/sdax,x爲分區號
(2)建立盤符並掛載盤符
mkdir /newdisk
臨時掛載 mount /dev/sdax /newdisk
永久掛載 vim /etc/fstab
/dev/sdax /newdisk xfs defaults 0 0
(3)賦予權限
chown -R test:test /newdisk
vim /opt/module/hadoop-2.7.2/etc/hadoop/hdfs-site.xml
<property>
<name>dfs.datanode.data.dir</name>
<value>${hadoop.tmp.dir}/dfs/data, /newdisk</value>
</property>