yum install -y ntp           # install the NTP service
ntpdate us.pool.ntp.org      # synchronize the system clock
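To keep the clock in sync across reboots, you can also enable ntpd at boot (a small sketch using the standard CentOS 6 service tools):

chkconfig ntpd on     # start ntpd automatically at boot
service ntpd start    # start it immediately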
vi /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
IPADDR=192.168.8.101
NETMASK=255.255.255.0
GATEWAY=192.168.8.2
HWADDR=00:0C:29:56:63:A1
TYPE=Ethernet
UUID=ecb7f947-8a93-488c-a118-ffb011421cac
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=none
service network restart
ifconfig eth0
eth0      Link encap:Ethernet  HWaddr 00:0C:29:6C:20:2B
          inet addr:192.168.8.101  Bcast:192.168.8.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fe6c:202b/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:777 errors:0 dropped:0 overruns:0 frame:0
          TX packets:316 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:70611 (68.9 KiB)  TX bytes:49955 (48.7 KiB)
rm -f /etc/udev/rules.d/70-persistent-net.rules
vim /etc/sysconfig/network-scripts/ifcfg-eth0
Step 3: reboot the server
reboot
vi /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=hadoop01
NETWORKING_IPV6=no
vi /etc/hosts
127.0.0.1     localhost
192.168.8.101 hadoop01
192.168.8.102 hadoop02
192.168.8.103 hadoop03
192.168.8.104 hadoop04
192.168.8.105 hadoop05
192.168.8.106 hadoop06
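A quick way to confirm that every entry resolves and is reachable (a sketch; assumes all six nodes are already online):

for h in hadoop01 hadoop02 hadoop03 hadoop04 hadoop05 hadoop06; do
    ping -c 1 "$h" > /dev/null && echo "$h ok" || echo "$h unreachable"
done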
Disable IPv6
1. Check whether IPv6 is enabled
a) Via the network interface attributes
Command: ifconfig
Note: a line containing "inet6 addr: ..." means IPv6 is enabled.
b) Via the loaded kernel modules
Command: lsmod | grep ipv6
2. Disabling IPv6
Append the following to the end of /etc/modprobe.d/dist.conf:
alias net-pf-10 off
alias ipv6 off
You can edit the file with vi, or append the lines with a single command:
cat <<EOF >> /etc/modprobe.d/dist.conf
alias net-pf-10 off
alias ipv6 off
EOF
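Once the server has been rebooted (below), re-running the checks from step 1 should come back empty:

lsmod | grep ipv6              # no output means the module is not loaded
ifconfig | grep "inet6 addr"   # no output means no interface has an IPv6 address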
Disable the firewall
service iptables stop     # stop the firewall now
chkconfig iptables off    # keep it disabled after reboot
After these changes, reboot the server:
reboot
vi /etc/profile
export JAVA_HOME=/soft/jdk1.7.0_80/
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH
source /etc/profile
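Verify that the JDK is now on the PATH (assuming the JDK was unpacked to /soft/jdk1.7.0_80 as configured above):

echo $JAVA_HOME    # /soft/jdk1.7.0_80/
java -version      # should report java version "1.7.0_80"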
cd /root/.ssh
ssh-keygen -t rsa    # press Enter four times to accept the defaults
id_rsa id_rsa.pub
ssh-copy-id -i hadoop01
ssh-copy-id -i hadoop02
ssh-copy-id -i hadoop03
ssh-copy-id -i hadoop04
ssh-copy-id -i hadoop05
ssh-copy-id -i hadoop06
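The six copies can equally be written as one loop (a sketch assuming the default public key path ~/.ssh/id_rsa.pub):

for h in hadoop01 hadoop02 hadoop03 hadoop04 hadoop05 hadoop06; do
    ssh-copy-id -i ~/.ssh/id_rsa.pub "$h"
done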
[root@hadoop01 .ssh]# ssh hadoop05
Last login: Tue Nov 10 17:43:41 2015 from 192.168.8.1
[root@hadoop05 ~]#
ssh-keygen -t rsa    # press Enter four times to accept the defaults
ssh-copy-id -i hadoop01
tar -zxvf zookeeper-3.4.6.tar.gz -C /soft
mv zoo_sample.cfg zoo.cfg
vi zoo.cfg
[root@hadoop04 conf]# vi zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/soft/zookeeper-3.4.6/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=192.168.8.104:2888:3888
server.2=192.168.8.105:2888:3888
server.3=192.168.8.106:2888:3888
vi /soft/zookeeper-3.4.6/data/myid
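Each node's myid must match its server.N line in zoo.cfg; from the addresses above, hadoop04 is server.1, hadoop05 is server.2, and hadoop06 is server.3, so the files can also be written directly:

mkdir -p /soft/zookeeper-3.4.6/data         # the dataDir from zoo.cfg
echo 1 > /soft/zookeeper-3.4.6/data/myid    # on hadoop04
echo 2 > /soft/zookeeper-3.4.6/data/myid    # on hadoop05
echo 3 > /soft/zookeeper-3.4.6/data/myid    # on hadoop06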
bin/zkServer.sh start     # start the server
bin/zkServer.sh status    # check its role
[root@hadoop04 zookeeper-3.4.6]# bin/zkServer.sh status
JMX enabled by default
Using config: /soft/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: leader

[root@hadoop05 zookeeper-3.4.6]# bin/zkServer.sh status
JMX enabled by default
Using config: /soft/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: follower

[root@hadoop06 zookeeper-3.4.6]# bin/zkServer.sh status
JMX enabled by default
Using config: /soft/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: follower
bin/zkServer.sh stop
[root@hadoop04 zookeeper-3.4.6]# bin/zkServer.sh status
JMX enabled by default
Using config: /soft/zookeeper-3.4.6/bin/../conf/zoo.cfg
Error contacting service. It is probably not running.
[root@hadoop04 zookeeper-3.4.6]#

[root@hadoop05 zookeeper-3.4.6]# bin/zkServer.sh status
JMX enabled by default
Using config: /soft/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: follower

[root@hadoop06 zookeeper-3.4.6]# bin/zkServer.sh status
JMX enabled by default
Using config: /soft/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: leader
[root@hadoop01 hadoop-2.7.1]# ls
bin  etc  include  journal  lib  libexec  LICENSE.txt  logs  NOTICE.txt  README.txt  sbin  share
export JAVA_HOME=/soft/jdk1.7.0_80/
export HADOOP_HOME=/soft/hadoop-2.7.1
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin
source /etc/profile
[root@hadoop01 hadoop-2.7.1]# which hadoop
/soft/hadoop-2.7.1/bin/hadoop
vim hadoop-env.sh
export JAVA_HOME=/soft/jdk1.7.0_80/
<configuration>
    <!-- Set the HDFS nameservice to ns1 -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://ns1</value>
    </property>
    <!-- Hadoop temporary directory -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/soft/hadoop-2.7.1/tmp</value>
    </property>
    <!-- ZooKeeper quorum used for HA coordination -->
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>hadoop04:2181,hadoop05:2181,hadoop06:2181</value>
    </property>
</configuration>
<configuration>
    <!-- Set the HDFS nameservice to ns1; must match core-site.xml -->
    <property>
        <name>dfs.nameservices</name>
        <value>ns1</value>
    </property>
    <!-- ns1 has two NameNodes: nn1 and nn2 -->
    <property>
        <name>dfs.ha.namenodes.ns1</name>
        <value>nn1,nn2</value>
    </property>
    <!-- RPC address of nn1 -->
    <property>
        <name>dfs.namenode.rpc-address.ns1.nn1</name>
        <value>hadoop01:9000</value>
    </property>
    <!-- HTTP address of nn1 -->
    <property>
        <name>dfs.namenode.http-address.ns1.nn1</name>
        <value>hadoop01:50070</value>
    </property>
    <!-- RPC address of nn2 -->
    <property>
        <name>dfs.namenode.rpc-address.ns1.nn2</name>
        <value>hadoop02:9000</value>
    </property>
    <!-- HTTP address of nn2 -->
    <property>
        <name>dfs.namenode.http-address.ns1.nn2</name>
        <value>hadoop02:50070</value>
    </property>
    <!-- Where the NameNode metadata (shared edits) is stored on the JournalNodes -->
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://hadoop04:8485;hadoop05:8485;hadoop06:8485/ns1</value>
    </property>
    <!-- Where each JournalNode keeps its data on local disk -->
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/soft/hadoop-2.7.1/journal</value>
    </property>
    <!-- Enable automatic failover on NameNode failure -->
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <!-- Proxy provider clients use to find the active NameNode -->
    <property>
        <name>dfs.client.failover.proxy.provider.ns1</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <!-- Fencing method -->
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>sshfence</value>
    </property>
    <!-- sshfence needs passwordless SSH; point it at the private key -->
    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/root/.ssh/id_rsa</value>
    </property>
</configuration>
vi slaves
hadoop04
hadoop05
hadoop06
mv mapred-site.xml.template mapred-site.xml
<configuration>
    <!-- Run MapReduce on the YARN framework -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
<configuration>
    <!-- ResourceManager address -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop03</value>
    </property>
    <!-- Auxiliary service the NodeManager loads at startup: the MapReduce shuffle server -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
scp -r hadoop-2.7.1 hadoop02:/soft/
scp -r hadoop-2.7.1 hadoop03:/soft/
scp -r hadoop-2.7.1 hadoop04:/soft/
scp -r hadoop-2.7.1 hadoop05:/soft/
scp -r hadoop-2.7.1 hadoop06:/soft/
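The same copy written as a loop (a sketch; run from /soft on hadoop01):

cd /soft
for h in hadoop02 hadoop03 hadoop04 hadoop05 hadoop06; do
    scp -r hadoop-2.7.1 "$h":/soft/
done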
[root@hadoop04 zookeeper-3.4.6]# bin/zkServer.sh start
JMX enabled by default
Using config: /soft/zookeeper-3.4.6/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@hadoop01 hadoop-2.7.1]# sbin/hadoop-daemons.sh start journalnode
[root@hadoop04 zookeeper-3.4.6]# jps
1532 JournalNode
1796 Jps
1470 QuorumPeerMain
hadoop namenode -format    # run on hadoop01 (nn1)
scp -r tmp/ hadoop02:/soft/hadoop-2.7.1/    # copy the formatted metadata to the standby NameNode (nn2)
hdfs zkfc -formatZK
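hdfs zkfc -formatZK creates the HA znode in ZooKeeper; you can confirm it from any ZooKeeper node with the bundled CLI:

bin/zkCli.sh -server hadoop04:2181    # from the ZooKeeper install directory
ls /hadoop-ha                         # inside the zkCli shell; should show [ns1]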
sbin/start-dfs.sh
sbin/start-yarn.sh    # run on hadoop03, where the ResourceManager is configured
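Once both scripts finish, the active/standby state of the two NameNodes can be checked with hdfs haadmin, or in a browser at http://hadoop01:50070 and http://hadoop02:50070:

hdfs haadmin -getServiceState nn1    # e.g. active
hdfs haadmin -getServiceState nn2    # e.g. standby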
192.168.8.101 hadoop01
192.168.8.102 hadoop02
192.168.8.103 hadoop03
192.168.8.104 hadoop04
192.168.8.105 hadoop05
192.168.8.106 hadoop06
[root@hadoop01 hadoop-2.7.1]# jps
1614 NameNode
2500 Jps
1929 DFSZKFailoverController
[root@hadoop01 hadoop-2.7.1]# kill -9 1614
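Killing the active NameNode should trigger automatic failover through the ZKFC; a quick check that nn2 on hadoop02 has taken over, plus a way to bring the killed NameNode back (a sketch, assuming the HA setup above):

hdfs haadmin -getServiceState nn2       # should now report: active
sbin/hadoop-daemon.sh start namenode    # on hadoop01; it rejoins as standby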