Complete Hadoop Setup Walkthrough


Edit the network configuration file:

cd /etc/sysconfig/network-scripts  # enter the network configuration directory

dir ifcfg*                         # find the NIC configuration file

ifcfg-eno16777736  ifcfg-lo

vi ifcfg-eno16777736

Configuration file contents:

TYPE=Ethernet

BOOTPROTO=static                              # change to static (for NAT)

NAME=eno16777736

UUID=4cc9c89b-cf9e-4847-b9ea-ac713baf4cc8

DEVICE=eno16777736

ONBOOT=yes               # bring this NIC up at boot

IPADDR=192.168.163.155   # fixed IP address

NETMASK=255.255.255.0       # subnet mask

GATEWAY=192.168.163.2       # must match the gateway NAT auto-configured, otherwise you cannot connect

DNS1=192.168.163.2          # same as the gateway

 

To save and exit vi: press Esc, then Shift+: and type wq

 

Stop the firewall:                  systemctl stop firewalld

Check whether the firewall is off:  systemctl status firewalld

 

Restart the network:                service network restart

ip addr                     # check the IP address
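
As an alternative to editing the file by hand, a reasonably current CentOS 7 can set the same static address with nmcli (a sketch; it assumes the NetworkManager connection is named after the device, eno16777736, as on this host):

nmcli con mod eno16777736 ipv4.method manual ipv4.addresses 192.168.163.155/24 ipv4.gateway 192.168.163.2 ipv4.dns 192.168.163.2
nmcli con up eno16777736      # re-activate the connection so the new address applies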

 

 

 

 

 

Connecting to 192.168.163.155:22...

Connection established.

To escape to local shell, press 'Ctrl+Alt+]'.

 

Last login: Wed Feb 28 23:07:06 2018

 [root@localhost ~]# service network restart

Restarting network (via systemctl):                        [  肯定  ]

[root@localhost ~]# vi /etc/hosts

[root@hadoop01 ~]# cat /etc/hosts

192.168.163.155 hadoop01

192.168.163.156 hadoop02

192.168.163.157 hadoop03

 

[root@hadoop01 ~]#

[root@localhost ~]# hostname hadoop01

[root@localhost ~]# hostname

hadoop01

[root@localhost ~]# service network restart

Restarting network (via systemctl):                        [  肯定  ]
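
Note: the `hostname hadoop01` command above only changes the hostname until the next reboot. On CentOS 7 the persistent equivalent is hostnamectl (a sketch):

hostnamectl set-hostname hadoop01    # writes /etc/hostname, survives reboots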

[root@localhost ~]# ssh-keygen        # press Enter three times at the prompts

Generating public/private rsa key pair.

Enter file in which to save the key (/root/.ssh/id_rsa):

Created directory '/root/.ssh'.

Enter passphrase (empty for no passphrase):

Enter same passphrase again:

Your identification has been saved in /root/.ssh/id_rsa.

Your public key has been saved in /root/.ssh/id_rsa.pub.

The key fingerprint is:

0d:2f:6f:c7:f9:02:64:88:d4:3a:b8:e9:b0:fb:8c:b4 root@hadoop01

The key's randomart image is:

+--[ RSA 2048]----+

|       .         |

|      . .        |

|     o o..       |

|    . + .+o      |

|     o .Soo      |

|  . o    o.. .   |

|  .+      o.+    |

| ..+.    . ...   |

|  Eoo        ..  |

 

[root@hadoop01 ~]# . go        # 'go' is a small helper script in ~ that cd's to /usr/local/src

[root@hadoop01 src]# pwd

/usr/local/src

[root@hadoop01 src]# mkdir java

[root@hadoop01 src]#cd java

 

Upload the tar package and extract it:

tar -xvf jdk-7u51-linux-x64.tar.gz #extract the archive

 

[root@hadoop01 java]# ll

總用量 134968

drwxr-xr-x. 8   10  143      4096 12月 19 2013 jdk1.7.0_51

-rw-r--r--. 1 root root 138199690 3月  26 2014 jdk-7u51-linux-x64.tar.gz

 

[root@hadoop01 java]# vi /etc/profile

#set java environment

JAVA_HOME=/usr/local/src/java/jdk1.7.0_51

JAVA_BIN=/usr/local/src/java/jdk1.7.0_51/bin

PATH=$JAVA_HOME/bin:$PATH

CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export JAVA_HOME JAVA_BIN PATH CLASSPATH

 

[root@hadoop01 java]# source /etc/profile

[root@hadoop01 java]# java -version

java version "1.7.0_51"

Java(TM) SE Runtime Environment (build 1.7.0_51-b13)

Java HotSpot(TM) 64-Bit Server VM (build 24.51-b03, mixed mode)

[root@hadoop01 java]# systemctl stop firewalld.service

[root@hadoop01 java]# systemctl disable firewalld.service

Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

Removed symlink /etc/systemd/system/basic.target.wants/firewalld.service.

 

[root@hadoop01 java]# cd ..

[root@hadoop01 src]# ll

總用量 0

drwxr-xr-x. 2 root root  6 4月   7 2017 docker

drwxr-xr-x. 3 root root 56 3月   1 00:51 java

[root@hadoop01 src]# mkdir zk

[root@hadoop01 src]# cd zk

[root@hadoop01 zk]# ll

總用量 0

Upload the tar package

[root@hadoop01 zk]# ll

總用量 21740

-rw-r--r--. 1 root root 22261552 7月   6 2016 zookeeper-3.4.8.tar.gz

[root@hadoop01 zk]# tar -xvf zookeeper-3.4.8.tar.gz

zookeeper-3.4.8/zookeeper-3.4.8.jar.sha1

zookeeper-3.4.8/NOTICE.txt

zookeeper-3.4.8/build.xml

[root@hadoop01 zk]# ll

總用量 21744

drwxr-xr-x. 10 1000 1000     4096 2月   6 2016 zookeeper-3.4.8

-rw-r--r--.  1 root root 22261552 7月   6 2016 zookeeper-3.4.8.tar.gz

[root@hadoop01 zk]#

[root@hadoop01 zk]# cd zookeeper-3.4.8

[root@hadoop01 zookeeper-3.4.8]# ll

總用量 1572

drwxr-xr-x.  2 1000 1000    4096 2月   6 2016 bin

-rw-rw-r--.  1 1000 1000   83235 2月   6 2016 build.xml

-rw-rw-r--.  1 1000 1000   88625 2月   6 2016 CHANGES.txt

drwxr-xr-x.  2 1000 1000      74 2月   6 2016 conf

drwxr-xr-x. 10 1000 1000    4096 2月   6 2016 contrib

drwxr-xr-x.  2 1000 1000    4096 2月   6 2016 dist-maven

drwxr-xr-x.  6 1000 1000    4096 2月   6 2016 docs

-rw-rw-r--.  1 1000 1000    1953 2月   6 2016 ivysettings.xml

-rw-rw-r--.  1 1000 1000    3498 2月   6 2016 ivy.xml

drwxr-xr-x.  4 1000 1000    4096 2月   6 2016 lib

-rw-rw-r--.  1 1000 1000   11938 2月   6 2016 LICENSE.txt

-rw-rw-r--.  1 1000 1000     171 2月   6 2016 NOTICE.txt

-rw-rw-r--.  1 1000 1000    1770 2月   6 2016 README_packaging.txt

-rw-rw-r--.  1 1000 1000    1585 2月   6 2016 README.txt

drwxr-xr-x.  5 1000 1000      44 2月   6 2016 recipes

drwxr-xr-x.  8 1000 1000    4096 2月   6 2016 src

-rw-rw-r--.  1 1000 1000 1360961 2月   6 2016 zookeeper-3.4.8.jar

-rw-rw-r--.  1 1000 1000     819 2月   6 2016 zookeeper-3.4.8.jar.asc

-rw-rw-r--.  1 1000 1000      33 2月   6 2016 zookeeper-3.4.8.jar.md5

-rw-rw-r--.  1 1000 1000      41 2月   6 2016 zookeeper-3.4.8.jar.sha1

[root@hadoop01 zookeeper-3.4.8]#

 

[root@hadoop01 zookeeper-3.4.8]# mkdir log data

[root@hadoop01 zookeeper-3.4.8]# ll

總用量 1572

drwxr-xr-x.  2 1000 1000    4096 2月   6 2016 bin

-rw-rw-r--.  1 1000 1000   83235 2月   6 2016 build.xml

-rw-rw-r--.  1 1000 1000   88625 2月   6 2016 CHANGES.txt

drwxr-xr-x.  2 1000 1000      74 2月   6 2016 conf

drwxr-xr-x. 10 1000 1000    4096 2月   6 2016 contrib

drwxr-xr-x.  2 root root       6 3月   1 01:53 data

drwxr-xr-x.  2 1000 1000    4096 2月   6 2016 dist-maven

drwxr-xr-x.  6 1000 1000    4096 2月   6 2016 docs

-rw-rw-r--.  1 1000 1000    1953 2月   6 2016 ivysettings.xml

-rw-rw-r--.  1 1000 1000    3498 2月   6 2016 ivy.xml

drwxr-xr-x.  4 1000 1000    4096 2月   6 2016 lib

-rw-rw-r--.  1 1000 1000   11938 2月   6 2016 LICENSE.txt

drwxr-xr-x.  2 root root       6 3月   1 01:53 log

-rw-rw-r--.  1 1000 1000     171 2月   6 2016 NOTICE.txt

-rw-rw-r--.  1 1000 1000    1770 2月   6 2016 README_packaging.txt

-rw-rw-r--.  1 1000 1000    1585 2月   6 2016 README.txt

drwxr-xr-x.  5 1000 1000      44 2月   6 2016 recipes

drwxr-xr-x.  8 1000 1000    4096 2月   6 2016 src

-rw-rw-r--.  1 1000 1000 1360961 2月   6 2016 zookeeper-3.4.8.jar

-rw-rw-r--.  1 1000 1000     819 2月   6 2016 zookeeper-3.4.8.jar.asc

-rw-rw-r--.  1 1000 1000      33 2月   6 2016 zookeeper-3.4.8.jar.md5

-rw-rw-r--.  1 1000 1000      41 2月   6 2016 zookeeper-3.4.8.jar.sha1

[root@hadoop01 zookeeper-3.4.8]#

[root@hadoop01 zookeeper-3.4.8]# cd data

[root@hadoop01 data]# vi myid        # write a single line containing 1 (hadoop01's server id, matching server.1 in zoo.cfg)

 

 

[root@hadoop01 data]# cd ../

[root@hadoop01 zookeeper-3.4.8]# cd conf

[root@hadoop01 conf]# ll

總用量 12

-rw-rw-r--. 1 1000 1000  535 2月   6 2016 configuration.xsl

-rw-rw-r--. 1 1000 1000 2161 2月   6 2016 log4j.properties

-rw-rw-r--. 1 1000 1000  922 2月   6 2016 zoo_sample.cfg

[root@hadoop01 conf]#

[root@hadoop01 conf]# cp zoo_sample.cfg zoo.cfg

[root@hadoop01 conf]# ll

總用量 16

-rw-rw-r--. 1 1000 1000  535 2月   6 2016 configuration.xsl

-rw-rw-r--. 1 1000 1000 2161 2月   6 2016 log4j.properties

-rw-r--r--. 1 root root  922 3月   1 01:59 zoo.cfg

-rw-rw-r--. 1 1000 1000  922 2月   6 2016 zoo_sample.cfg

[root@hadoop01 conf]#

[root@hadoop01 conf]# vi zoo.cfg

 

 

Open a new connection to the same virtual machine to copy the directory path:

 

Connecting to 192.168.163.155:22...

Connection established.

To escape to local shell, press 'Ctrl+Alt+]'.

 

Last login: Thu Mar  1 00:01:10 2018 from hadoop01

[root@hadoop01 ~]# . go

[root@hadoop01 src]# pwd

/usr/local/src

[root@hadoop01 src]# cd zk

[root@hadoop01 zk]# cd zookeeper-3.4.8

[root@hadoop01 zookeeper-3.4.8]# pwd

/usr/local/src/zk/zookeeper-3.4.8

[root@hadoop01 zookeeper-3.4.8]#

 

 

tickTime=2000                      # tickTime: heartbeat interval, in ms

clientPort=2181                    # client access port

dataDir=/usr/local/src/zk/zookeeper-3.4.8/data       # data (snapshot) directory

dataLogDir=/usr/local/src/zk/zookeeper-3.4.8/log #added: transaction log directory

server.1=hadoop01:2888:3888     # a cluster needs at least 3 nodes; hostnames are fine

server.2=hadoop02:2888:3888     # 2888: port followers use to connect to the leader

server.3=hadoop03:2888:3888     # 3888: leader-election port
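
Each server.N line must agree with the myid file on that host. Equivalent to the vi edits done in this walkthrough, the three files could be written in one line each (a sketch; run each on its own node):

echo 1 > /usr/local/src/zk/zookeeper-3.4.8/data/myid    # on hadoop01
echo 2 > /usr/local/src/zk/zookeeper-3.4.8/data/myid    # on hadoop02 (after cloning)
echo 3 > /usr/local/src/zk/zookeeper-3.4.8/data/myid    # on hadoop03 (after cloning)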

 

 

 

 

Save and exit, then clone virtual machine hadoop01 as hadoop02 and hadoop03.


Modify the IPs of hadoop02 and hadoop03

Edit the configuration file:

cd /etc/sysconfig/network-scripts  # enter the network configuration directory

dir ifcfg*                         # find the NIC configuration file

ifcfg-eno16777736  ifcfg-lo

vi ifcfg-eno16777736

Configuration file contents:

TYPE=Ethernet

BOOTPROTO=static                              # change to static (for NAT)

NAME=eno16777736

UUID=4cc9c89b-cf9e-4847-b9ea-ac713baf4cc8

DEVICE=eno16777736

ONBOOT=yes               # bring this NIC up at boot

IPADDR=192.168.163.156      # fixed IP address

NETMASK=255.255.255.0       # subnet mask

GATEWAY=192.168.163.2       # must match the gateway NAT auto-configured, otherwise you cannot connect

DNS1=192.168.163.2          # same as the gateway

To save and exit vi: press Esc, then Shift+: and type wq

Stop the firewall:                  systemctl stop firewalld

Check whether the firewall is off:  systemctl status firewalld

Restart the network:                service network restart

ip addr                     # check the IP address

 

 

 

 

 

Modify the other machine (192.168.163.157) the same way.

 

 

 

 

1. Passwordless SSH login on this machine

Run the following in the VM's terminal:

ssh-keygen                                           # just press Enter three times

 

 

This generates the public/private key pair.
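
The same thing can be done non-interactively (a sketch; -N "" sets an empty passphrase):

ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa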


Following the procedure above, save the username and password in the SSH client and connect to all of the hosts.

 

Copy the public key to the other nodes:

ssh-copy-id -i .ssh/id_rsa.pub root@192.168.163.156  # copy the key over

ssh 192.168.163.156                              # log in directly, no password

 

 

[root@hadoop01 ~]# cd .ssh/

[root@hadoop01 .ssh]# pwd

/root/.ssh

[root@hadoop01 .ssh]# ls

authorized_keys  id_rsa  id_rsa.pub  known_hosts

[root@hadoop01 .ssh]# cd ../

[root@hadoop01 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop02

[root@hadoop01 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop02

The authenticity of host 'hadoop02 (192.168.163.156)' can't be established.

ECDSA key fingerprint is 2c:7a:dc:43:9d:f1:16:d2:19:9c:66:f5:c0:ff:10:06.

Are you sure you want to continue connecting (yes/no)? yes

/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed

 

/usr/bin/ssh-copy-id: WARNING: All keys were skipped because they already exist on the remote system.

 

[root@hadoop01 ~]#

[root@hadoop01 ~]# ssh hadoop02

Last login: Thu Mar  1 04:50:20 2018 from 192.168.163.1

[root@hadoop02 ~]#

[root@hadoop02 ~]# ssh hadoop01

Last login: Thu Mar  1 04:50:20 2018 from 192.168.163.1

[root@hadoop01 ~]# pwd

/root

[root@hadoop01 /]# cd root

[root@hadoop01 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop03

[root@hadoop01 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop03

The authenticity of host 'hadoop03 (192.168.163.157)' can't be established.

ECDSA key fingerprint is 2c:7a:dc:43:9d:f1:16:d2:19:9c:66:f5:c0:ff:10:06.

Are you sure you want to continue connecting (yes/no)? yes

/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed

 

/usr/bin/ssh-copy-id: WARNING: All keys were skipped because they already exist on the remote system.

[root@hadoop01 ~]# ssh hadoop03

Last login: Thu Mar  1 04:50:21 2018 from 192.168.163.1

[root@hadoop03 ~]#

 

 

Second node: configure myid

Connecting to 192.168.163.155:22...

Connection established.


[root@hadoop02 ~]# . go

[root@hadoop02 src]# ll

總用量 0

drwxr-xr-x. 2 root root  6 4月   7 2017 docker

drwxr-xr-x. 3 root root 56 3月   1 00:51 java

drwxr-xr-x. 3 root root 57 3月   1 01:51 zk

[root@hadoop02 src]# cd zk

[root@hadoop02 zk]# ll

總用量 21744

drwxr-xr-x. 12 1000 1000     4096 3月   1 01:53 zookeeper-3.4.8

-rw-r--r--.  1 root root 22261552 7月   6 2016 zookeeper-3.4.8.tar.gz

[root@hadoop02 zk]# cd zookeeper-3.4.8

[root@hadoop02 zookeeper-3.4.8]# ll

總用量 1572

drwxr-xr-x.  2 1000 1000    4096 2月   6 2016 bin

-rw-rw-r--.  1 1000 1000   83235 2月   6 2016 build.xml

-rw-rw-r--.  1 1000 1000   88625 2月   6 2016 CHANGES.txt

drwxr-xr-x.  2 1000 1000      88 3月   1 02:13 conf

drwxr-xr-x. 10 1000 1000    4096 2月   6 2016 contrib

drwxr-xr-x.  2 root root      17 3月   1 01:57 data

drwxr-xr-x.  2 1000 1000    4096 2月   6 2016 dist-maven

drwxr-xr-x.  6 1000 1000    4096 2月   6 2016 docs

-rw-rw-r--.  1 1000 1000    1953 2月   6 2016 ivysettings.xml

-rw-rw-r--.  1 1000 1000    3498 2月   6 2016 ivy.xml

drwxr-xr-x.  4 1000 1000    4096 2月   6 2016 lib

-rw-rw-r--.  1 1000 1000   11938 2月   6 2016 LICENSE.txt

drwxr-xr-x.  2 root root       6 3月   1 01:53 log

-rw-rw-r--.  1 1000 1000     171 2月   6 2016 NOTICE.txt

-rw-rw-r--.  1 1000 1000    1770 2月   6 2016 README_packaging.txt

-rw-rw-r--.  1 1000 1000    1585 2月   6 2016 README.txt

drwxr-xr-x.  5 1000 1000      44 2月   6 2016 recipes

drwxr-xr-x.  8 1000 1000    4096 2月   6 2016 src

-rw-rw-r--.  1 1000 1000 1360961 2月   6 2016 zookeeper-3.4.8.jar

-rw-rw-r--.  1 1000 1000     819 2月   6 2016 zookeeper-3.4.8.jar.asc

-rw-rw-r--.  1 1000 1000      33 2月   6 2016 zookeeper-3.4.8.jar.md5

-rw-rw-r--.  1 1000 1000      41 2月   6 2016 zookeeper-3.4.8.jar.sha1

[root@hadoop02 zookeeper-3.4.8]# cd data

[root@hadoop02 data]# cat myid

1

[root@hadoop02 data]# vi myid

[root@hadoop02 data]# cat myid

2

 

Third node: configure myid

 

Connecting to 192.168.163.157:22...

Connection established.

To escape to local shell, press 'Ctrl+Alt+]'.

 

Last login: Thu Mar  1 03:10:29 2018 from 192.168.163.1

[root@hadoop03 ~]# .go

-bash: .go: 未找到命令

[root@hadoop03 ~]# cd /usr/local/src/zk/

[root@hadoop03 zk]# cd zookeeper-3.4.8/data/

[root@hadoop03 data]# cat myid

1

[root@hadoop03 data]# vi myid

[root@hadoop03 data]# cat myid

3

[root@hadoop03 data]#

 

 

 

 

Start the service:

[root@hadoop03 ~]# . go

[root@hadoop03 src]# cd zk/zookeeper-3.4.8

[root@hadoop03 zookeeper-3.4.8]# cd bin

[root@hadoop03 bin]#

 

1.1 Start, stop, restart

./zkServer.sh start

sh bin/zkServer.sh start           # start the ZK service

sh bin/zkServer.sh stop            # stop the ZK service

sh bin/zkServer.sh restart         # restart the ZK service

1.2 Check the service

[root@localhost conf]# jps

5863 Jps

2416 QuorumPeerMain      # QuorumPeerMain is the ZooKeeper process; startup is normal

1.3 Check the cluster status

./zkServer.sh status

sh bin/zkServer.sh status              # view ZK status

Result: the cluster has exactly one leader; all other nodes are followers.

[root@localhost bin]# ./zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/src/zk/zookeeper-3.4.8/bin/../conf/zoo.cfg

Mode: leader

[root@localhost bin]# ./zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/src/zk/zookeeper-3.4.8/bin/../conf/zoo.cfg

Mode: follower
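
With passwordless SSH already in place, the status of all three nodes can also be checked from a single shell (a sketch using the paths and hostnames from this setup):

for h in hadoop01 hadoop02 hadoop03; do
    ssh $h /usr/local/src/zk/zookeeper-3.4.8/bin/zkServer.sh status    # prints Mode: leader/follower per node
done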

 

 

Start ZooKeeper on each of the three virtual machines.

 

Connecting to 192.168.163.155:22...

Connection established.

To escape to local shell, press 'Ctrl+Alt+]'.

 

Last login: Thu Mar  1 16:27:54 2018 from 192.168.163.1

[root@hadoop01 ~]# . go

[root@hadoop01 src]# cd zk/zookeeper-3.4.8/bin

[root@hadoop01 bin]# ./zkServer.sh start

ZooKeeper JMX enabled by default

Using config: /usr/local/src/zk/zookeeper-3.4.8/bin/../conf/zoo.cfg

Starting zookeeper ... STARTED

[root@hadoop01 bin]#

[root@hadoop01 bin]# jps

2462 QuorumPeerMain

2516 Jps

[root@hadoop01 bin]#

 

Connecting to 192.168.163.156:22...

[root@hadoop02 ~]# . go

[root@hadoop02 src]# cd zk/zookeeper-3.4.8/bin

[root@hadoop02 bin]# ./zkServer.sh start

ZooKeeper JMX enabled by default

Using config: /usr/local/src/zk/zookeeper-3.4.8/bin/../conf/zoo.cfg

Starting zookeeper ... STARTED

[root@hadoop02 bin]#

[root@hadoop02 bin]# jps

2330 QuorumPeerMain

2394 Jps

[root@hadoop02 bin]#

 

Connecting to 192.168.163.157:22...

Connection established.

To escape to local shell, press 'Ctrl+Alt+]'.

 

Last login: Thu Mar  1 16:27:54 2018 from 192.168.163.1

[root@hadoop03 ~]# . go

[root@hadoop03 src]# cd zk/zookeeper-3.4.8/bin

[root@hadoop03 bin]# ./zkServer.sh start

ZooKeeper JMX enabled by default

Using config: /usr/local/src/zk/zookeeper-3.4.8/bin/../conf/zoo.cfg

Starting zookeeper ... STARTED

[root@hadoop03 bin]#

[root@hadoop03 bin]# jps

2320 QuorumPeerMain

2379 Jps

[root@hadoop03 bin]#

 

After all three are started, check their status (whether they have formed a quorum):

[root@hadoop02 bin]# ./zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/src/zk/zookeeper-3.4.8/bin/../conf/zoo.cfg

Mode: leader

[root@hadoop02 bin]#

 

 

[root@hadoop03 bin]# ./zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/src/zk/zookeeper-3.4.8/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop03 bin]#

 

 

[root@hadoop01 bin]# ./zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/src/zk/zookeeper-3.4.8/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop01 bin]#

 

1.4 Client access

[root@localhost bin]# ./zkCli.sh -server hadoop01:2181

 

 

 

[root@hadoop01 bin]# ./zkCli.sh -server hadoop01:2181

Connecting to hadoop01:2181

2018-03-01 16:45:54,476 [myid:] - INFO  [main:Environment@100] - Client environment:zookeeper.version=3.4.8--1, built on 02/06/2016 03:18 GMT

2018-03-01 16:45:54,479 [myid:] - INFO  [main:Environment@100] - Client environment:host.name=hadoop01

2018-03-01 16:45:54,479 [myid:] - INFO  [main:Environment@100] - Client environment:java.version=1.7.0_51

2018-03-01 16:45:54,483 [myid:] - INFO  [main:Environment@100] - Client environment:java.vendor=Oracle Corporation

2018-03-01 16:45:54,483 [myid:] - INFO  [main:Environment@100] - Client environment:java.home=/usr/local/src/java/jdk1.7.0_51/jre

2018-03-01 16:45:54,483 [myid:] - INFO  [main:Environment@100] - Client environment:java.class.path=/usr/local/src/zk/zookeeper-3.4.8/bin/../build/classes:/usr/local/src/zk/zookeeper-3.4.8/bin/../build/lib/*.jar:/usr/local/src/zk/zookeeper-3.4.8/bin/../lib/slf4j-log4j12-1.6.1.jar:/usr/local/src/zk/zookeeper-3.4.8/bin/../lib/slf4j-api-1.6.1.jar:/usr/local/src/zk/zookeeper-3.4.8/bin/../lib/netty-3.7.0.Final.jar:/usr/local/src/zk/zookeeper-3.4.8/bin/../lib/log4j-1.2.16.jar:/usr/local/src/zk/zookeeper-3.4.8/bin/../lib/jline-0.9.94.jar:/usr/local/src/zk/zookeeper-3.4.8/bin/../zookeeper-3.4.8.jar:/usr/local/src/zk/zookeeper-3.4.8/bin/../src/java/lib/*.jar:/usr/local/src/zk/zookeeper-3.4.8/bin/../conf:.:/usr/local/src/java/jdk1.7.0_51/lib/dt.jar:/usr/local/src/java/jdk1.7.0_51/lib/tools.jar

2018-03-01 16:45:54,483 [myid:] - INFO  [main:Environment@100] - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib

2018-03-01 16:45:54,484 [myid:] - INFO  [main:Environment@100] - Client environment:java.io.tmpdir=/tmp

2018-03-01 16:45:54,484 [myid:] - INFO  [main:Environment@100] - Client environment:java.compiler=<NA>

2018-03-01 16:45:54,484 [myid:] - INFO  [main:Environment@100] - Client environment:os.name=Linux

2018-03-01 16:45:54,484 [myid:] - INFO  [main:Environment@100] - Client environment:os.arch=amd64

2018-03-01 16:45:54,484 [myid:] - INFO  [main:Environment@100] - Client environment:os.version=3.10.0-327.el7.x86_64

2018-03-01 16:45:54,485 [myid:] - INFO  [main:Environment@100] - Client environment:user.name=root

2018-03-01 16:45:54,485 [myid:] - INFO  [main:Environment@100] - Client environment:user.home=/root

2018-03-01 16:45:54,485 [myid:] - INFO  [main:Environment@100] - Client environment:user.dir=/usr/local/src/zk/zookeeper-3.4.8/bin

2018-03-01 16:45:54,486 [myid:] - INFO  [main:ZooKeeper@438] - Initiating client connection, connectString=hadoop01:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@540664ca

Welcome to ZooKeeper!

2018-03-01 16:45:54,531 [myid:] - INFO  [main-SendThread(hadoop01:2181):ClientCnxn$SendThread@1032] - Opening socket connection to server hadoop01/192.168.163.155:2181. Will not attempt to authenticate using SASL (unknown error)

2018-03-01 16:45:54,568 [myid:] - INFO  [main-SendThread(hadoop01:2181):ClientCnxn$SendThread@876] - Socket connection established to hadoop01/192.168.163.155:2181, initiating session

JLine support is enabled

[zk: hadoop01:2181(CONNECTING) 0] 2018-03-01 16:45:54,701 [myid:] - INFO  [main-SendThread(hadoop01:2181):ClientCnxn$SendThread@1299] - Session establishment complete on server hadoop01/192.168.163.155:2181, sessionid = 0x161e0b83e780000, negotiated timeout = 30000

 

WATCHER::

 

WatchedEvent state:SyncConnected type:None path:null

 

 

 

[root@hadoop01 zk]# cd /usr/local/src/

[root@hadoop01 src]#

[root@hadoop01 src]# mkdir hadoop

[root@hadoop01 src]#

 

Upload the tar package and extract it (on hadoop01):

[root@hadoop01 src]# ls

docker  hadoop  java  zk

[root@hadoop01 src]# cd hadoop/

[root@hadoop01 hadoop]# ll

總用量 205672

-rw-r--r--. 1 root root 210606807 9月  16 2015 hadoop-2.7.1.tar.gz

[root@hadoop01 hadoop]# tar -xvf hadoop-2.7.1.tar.gz

hadoop-2.7.1/libexec/mapred-config.sh

hadoop-2.7.1/libexec/httpfs-config.sh

hadoop-2.7.1/libexec/hadoop-config.sh

hadoop-2.7.1/libexec/mapred-config.cmd

hadoop-2.7.1/libexec/kms-config.sh

hadoop-2.7.1/libexec/hdfs-config.cmd

hadoop-2.7.1/libexec/yarn-config.sh

hadoop-2.7.1/libexec/hdfs-config.sh

hadoop-2.7.1/README.txt

hadoop-2.7.1/NOTICE.txt

hadoop-2.7.1/lib/

hadoop-2.7.1/lib/native/

hadoop-2.7.1/lib/native/libhadoop.a

hadoop-2.7.1/lib/native/libhadoop.so

hadoop-2.7.1/lib/native/libhadooppipes.a

hadoop-2.7.1/lib/native/libhdfs.so.0.0.0

hadoop-2.7.1/lib/native/libhadooputils.a

hadoop-2.7.1/lib/native/libhdfs.a

hadoop-2.7.1/lib/native/libhdfs.so

hadoop-2.7.1/lib/native/libhadoop.so.1.0.0

hadoop-2.7.1/LICENSE.txt

[root@hadoop01 hadoop]# ll

總用量 205676

drwxr-xr-x. 9 10021 10021      4096 6月  29 2015 hadoop-2.7.1

-rw-r--r--. 1 root  root  210606807 9月  16 2015 hadoop-2.7.1.tar.gz

[root@hadoop01 hadoop]#

 

 

 

[root@hadoop01 hadoop]# cd hadoop-2.7.1

[root@hadoop01 hadoop-2.7.1]# ll

總用量 36

drwxr-xr-x. 2 10021 10021  4096 6月  29 2015 bin

drwxr-xr-x. 3 10021 10021    19 6月  29 2015 etc

drwxr-xr-x. 2 10021 10021   101 6月  29 2015 include

drwxr-xr-x. 3 10021 10021    19 6月  29 2015 lib

drwxr-xr-x. 2 10021 10021  4096 6月  29 2015 libexec

-rw-r--r--. 1 10021 10021 15429 6月  29 2015 LICENSE.txt

-rw-r--r--. 1 10021 10021   101 6月  29 2015 NOTICE.txt

-rw-r--r--. 1 10021 10021  1366 6月  29 2015 README.txt

drwxr-xr-x. 2 10021 10021  4096 6月  29 2015 sbin

drwxr-xr-x. 4 10021 10021    29 6月  29 2015 share

[root@hadoop01 hadoop-2.7.1]#

 

[root@hadoop01 hadoop-2.7.1]# cd bin

[root@hadoop01 bin]# ll

總用量 448

-rwxr-xr-x. 1 10021 10021 160127 6月  29 2015 container-executor

-rwxr-xr-x. 1 10021 10021   6488 6月  29 2015 hadoop

-rwxr-xr-x. 1 10021 10021   8786 6月  29 2015 hadoop.cmd

-rwxr-xr-x. 1 10021 10021  12223 6月  29 2015 hdfs

-rwxr-xr-x. 1 10021 10021   7327 6月  29 2015 hdfs.cmd

-rwxr-xr-x. 1 10021 10021   5953 6月  29 2015 mapred

-rwxr-xr-x. 1 10021 10021   6310 6月  29 2015 mapred.cmd

-rwxr-xr-x. 1 10021 10021   1776 6月  29 2015 rcc

-rwxr-xr-x. 1 10021 10021 204075 6月  29 2015 test-container-executor

-rwxr-xr-x. 1 10021 10021  13308 6月  29 2015 yarn

-rwxr-xr-x. 1 10021 10021  11386 6月  29 2015 yarn.cmd

[root@hadoop01 bin]# cd ../

[root@hadoop01 hadoop-2.7.1]# cd sbin

[root@hadoop01 sbin]# ll

總用量 120

-rwxr-xr-x. 1 10021 10021 2752 6月  29 2015 distribute-exclude.sh

-rwxr-xr-x. 1 10021 10021 6452 6月  29 2015 hadoop-daemon.sh

-rwxr-xr-x. 1 10021 10021 1360 6月  29 2015 hadoop-daemons.sh

-rwxr-xr-x. 1 10021 10021 1640 6月  29 2015 hdfs-config.cmd

-rwxr-xr-x. 1 10021 10021 1427 6月  29 2015 hdfs-config.sh

-rwxr-xr-x. 1 10021 10021 2291 6月  29 2015 httpfs.sh

-rwxr-xr-x. 1 10021 10021 3128 6月  29 2015 kms.sh

-rwxr-xr-x. 1 10021 10021 4080 6月  29 2015 mr-jobhistory-daemon.sh

-rwxr-xr-x. 1 10021 10021 1648 6月  29 2015 refresh-namenodes.sh

-rwxr-xr-x. 1 10021 10021 2145 6月  29 2015 slaves.sh

-rwxr-xr-x. 1 10021 10021 1779 6月  29 2015 start-all.cmd

-rwxr-xr-x. 1 10021 10021 1471 6月  29 2015 start-all.sh

-rwxr-xr-x. 1 10021 10021 1128 6月  29 2015 start-balancer.sh

-rwxr-xr-x. 1 10021 10021 1401 6月  29 2015 start-dfs.cmd

-rwxr-xr-x. 1 10021 10021 3734 6月  29 2015 start-dfs.sh

-rwxr-xr-x. 1 10021 10021 1357 6月  29 2015 start-secure-dns.sh

-rwxr-xr-x. 1 10021 10021 1571 6月  29 2015 start-yarn.cmd

-rwxr-xr-x. 1 10021 10021 1347 6月  29 2015 start-yarn.sh

-rwxr-xr-x. 1 10021 10021 1770 6月  29 2015 stop-all.cmd

-rwxr-xr-x. 1 10021 10021 1462 6月  29 2015 stop-all.sh

-rwxr-xr-x. 1 10021 10021 1179 6月  29 2015 stop-balancer.sh

-rwxr-xr-x. 1 10021 10021 1455 6月  29 2015 stop-dfs.cmd

-rwxr-xr-x. 1 10021 10021 3206 6月  29 2015 stop-dfs.sh

-rwxr-xr-x. 1 10021 10021 1340 6月  29 2015 stop-secure-dns.sh

-rwxr-xr-x. 1 10021 10021 1642 6月  29 2015 stop-yarn.cmd

-rwxr-xr-x. 1 10021 10021 1340 6月  29 2015 stop-yarn.sh

-rwxr-xr-x. 1 10021 10021 4295 6月  29 2015 yarn-daemon.sh

-rwxr-xr-x. 1 10021 10021 1353 6月  29 2015 yarn-daemons.sh

[root@hadoop01 sbin]#

 

 

 

 

 

[root@hadoop01 sbin]# cd ../

[root@hadoop01 hadoop-2.7.1]# ll

總用量 36

drwxr-xr-x. 2 10021 10021  4096 6月  29 2015 bin

drwxr-xr-x. 3 10021 10021    19 6月  29 2015 etc

drwxr-xr-x. 2 10021 10021   101 6月  29 2015 include

drwxr-xr-x. 3 10021 10021    19 6月  29 2015 lib

drwxr-xr-x. 2 10021 10021  4096 6月  29 2015 libexec

-rw-r--r--. 1 10021 10021 15429 6月  29 2015 LICENSE.txt

-rw-r--r--. 1 10021 10021   101 6月  29 2015 NOTICE.txt

-rw-r--r--. 1 10021 10021  1366 6月  29 2015 README.txt

drwxr-xr-x. 2 10021 10021  4096 6月  29 2015 sbin

drwxr-xr-x. 4 10021 10021    29 6月  29 2015 share

[root@hadoop01 hadoop-2.7.1]#

 

root@hadoop01 hadoop-2.7.1]# cd etc

[root@hadoop01 etc]# ll

總用量 4

drwxr-xr-x. 2 10021 10021 4096 6月  29 2015 hadoop

[root@hadoop01 etc]# cd hadoop/

 [root@hadoop01 hadoop]# pwd

/usr/local/src/hadoop/hadoop-2.7.1/etc/hadoop

[root@hadoop01 hadoop]#

1) File locations

 

 

[root@hadoop01 hadoop]# ll

總用量 152

-rw-r--r--. 1 10021 10021  4436 6月  29 2015 capacity-scheduler.xml

-rw-r--r--. 1 10021 10021  1335 6月  29 2015 configuration.xsl

-rw-r--r--. 1 10021 10021   318 6月  29 2015 container-executor.cfg

-rw-r--r--. 1 10021 10021   774 6月  29 2015 core-site.xml

-rw-r--r--. 1 10021 10021  3670 6月  29 2015 hadoop-env.cmd

-rw-r--r--. 1 10021 10021  4224 6月  29 2015 hadoop-env.sh

-rw-r--r--. 1 10021 10021  2598 6月  29 2015 hadoop-metrics2.properties

-rw-r--r--. 1 10021 10021  2490 6月  29 2015 hadoop-metrics.properties

-rw-r--r--. 1 10021 10021  9683 6月  29 2015 hadoop-policy.xml

-rw-r--r--. 1 10021 10021   775 6月  29 2015 hdfs-site.xml

-rw-r--r--. 1 10021 10021  1449 6月  29 2015 httpfs-env.sh

-rw-r--r--. 1 10021 10021  1657 6月  29 2015 httpfs-log4j.properties

-rw-r--r--. 1 10021 10021    21 6月  29 2015 httpfs-signature.secret

-rw-r--r--. 1 10021 10021   620 6月  29 2015 httpfs-site.xml

-rw-r--r--. 1 10021 10021  3518 6月  29 2015 kms-acls.xml

-rw-r--r--. 1 10021 10021  1527 6月  29 2015 kms-env.sh

-rw-r--r--. 1 10021 10021  1631 6月  29 2015 kms-log4j.properties

-rw-r--r--. 1 10021 10021  5511 6月  29 2015 kms-site.xml

-rw-r--r--. 1 10021 10021 11237 6月  29 2015 log4j.properties

-rw-r--r--. 1 10021 10021   951 6月  29 2015 mapred-env.cmd

-rw-r--r--. 1 10021 10021  1383 6月  29 2015 mapred-env.sh

-rw-r--r--. 1 10021 10021  4113 6月  29 2015 mapred-queues.xml.template

-rw-r--r--. 1 10021 10021   758 6月  29 2015 mapred-site.xml.template

-rw-r--r--. 1 10021 10021    10 6月  29 2015 slaves

-rw-r--r--. 1 10021 10021  2316 6月  29 2015 ssl-client.xml.example

-rw-r--r--. 1 10021 10021  2268 6月  29 2015 ssl-server.xml.example

-rw-r--r--. 1 10021 10021  2250 6月  29 2015 yarn-env.cmd

-rw-r--r--. 1 10021 10021  4567 6月  29 2015 yarn-env.sh

-rw-r--r--. 1 10021 10021   690 6月  29 2015 yarn-site.xml

[root@hadoop01 hadoop]#

2) What each configuration item in these files does, and how to modify it

 

 

1.4.2 Edit hadoop-env.sh (modify two entries; set both to absolute paths)

vi hadoop-env.sh

vi etc/hadoop/hadoop-env.sh

#JDK install directory. Even though JAVA_HOME is configured system-wide, it is sometimes not picked up correctly, so set it here explicitly

export JAVA_HOME=/usr/local/src/java/jdk1.7.0_51/   

#Hadoop configuration directory; can be left unset if you do not run Hadoop from elsewhere

export HADOOP_CONF_DIR=/usr/local/src/hadoop/hadoop-2.7.1/etc/hadoop

 

 

 

source /etc/profile

 

1)  hadoop-env.sh

[root@hadoop01 hadoop]# vi hadoop-env.sh

 

# Licensed to the Apache Software Foundation (ASF) under one

# or more contributor license agreements.  See the NOTICE file

# distributed with this work for additional information

# regarding copyright ownership.  The ASF licenses this file

# to you under the Apache License, Version 2.0 (the

# "License"); you may not use this file except in compliance

# with the License.  You may obtain a copy of the License at

#

#     http://www.apache.org/licenses/LICENSE-2.0

#

# Unless required by applicable law or agreed to in writing, software

# distributed under the License is distributed on an "AS IS" BASIS,

# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

# See the License for the specific language governing permissions and

# limitations under the License.

 

# Set Hadoop-specific environment variables here.

 

# The only required environment variable is JAVA_HOME.  All others are

# optional.  When running a distributed configuration it is best to

# set JAVA_HOME in this file, so that it is correctly defined on

# remote nodes.

 

# The java implementation to use.

export JAVA_HOME=/usr/local/src/java/jdk1.7.0_51/

 

"hadoop-env.sh" 98L, 4224C

 

# The jsvc implementation to use. Jsvc is required to run secure datanodes

# that bind to privileged ports to provide authentication of data transfer

# protocol.  Jsvc is not required if SASL is configured for authentication of

# data transfer protocol using non-privileged ports.

#export JSVC_HOME=${JSVC_HOME}

 

export HADOOP_CONF_DIR=/usr/local/src/hadoop/hadoop-2.7.1/etc/hadoop

 

Modify the two entries, both as absolute paths: JAVA_HOME and HADOOP_CONF_DIR.

Save and exit.

2) core-site.xml

In the configuration node, add three properties: the HDFS path, the temp directory, and the ZK cluster:

<?xml version="1.0" encoding="UTF-8"?>

<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!--

  Licensed under the Apache License, Version 2.0 (the "License");

  you may not use this file except in compliance with the License.

  You may obtain a copy of the License at

 

    http://www.apache.org/licenses/LICENSE-2.0

 

  Unless required by applicable law or agreed to in writing, software

  distributed under the License is distributed on an "AS IS" BASIS,

  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

  See the License for the specific language governing permissions and

  limitations under the License. See accompanying LICENSE file.

-->

 

<!-- Put site-specific property overrides in this file. -->

 

<configuration>

    <property>

        <name>fs.defaultFS</name>

        <value>hdfs://hadoop01:9000</value>

    </property>

    <property>

        <name>hadoop.tmp.dir</name>

        <value>/usr/local/src/hadoop/hadoop-2.7.1/tmp</value>

    </property>

    <property>

        <name>ha.zookeeper.quorum</name>

        <value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value>

    </property>

</configuration>

Create the tmp directory referenced by hadoop.tmp.dir:

[root@hadoop01 hadoop]# cd ../

[root@hadoop01 etc]# cd ../

[root@hadoop01 hadoop-2.7.1]# mkdir tmp

[root@hadoop01 hadoop-2.7.1]#

[root@hadoop01 hadoop-2.7.1]# ll

總用量 36

drwxr-xr-x. 2 10021 10021  4096 6月  29 2015 bin

drwxr-xr-x. 3 10021 10021    19 6月  29 2015 etc

drwxr-xr-x. 2 10021 10021   101 6月  29 2015 include

drwxr-xr-x. 3 10021 10021    19 6月  29 2015 lib

drwxr-xr-x. 2 10021 10021  4096 6月  29 2015 libexec

-rw-r--r--. 1 10021 10021 15429 6月  29 2015 LICENSE.txt

-rw-r--r--. 1 10021 10021   101 6月  29 2015 NOTICE.txt

-rw-r--r--. 1 10021 10021  1366 6月  29 2015 README.txt

drwxr-xr-x. 2 10021 10021  4096 6月  29 2015 sbin

drwxr-xr-x. 4 10021 10021    29 6月  29 2015 share

drwxr-xr-x. 2 root  root      6 3月   1 17:41 tmp

[root@hadoop01 hadoop-2.7.1]#
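
fs.defaultFS makes hdfs://hadoop01:9000 the default file system, so later commands such as hdfs dfs -ls / implicitly talk to the NameNode on hadoop01:9000. The explicit form would be, for example:

hdfs dfs -ls hdfs://hadoop01:9000/    # same as "hdfs dfs -ls /" with this core-site.xml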

3) hdfs-site.xml

Configure which server acts as the NameNode and how many replicas of the data the DataNodes keep:

 

 

<?xml version="1.0" encoding="UTF-8"?>

<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!--

  Licensed under the Apache License, Version 2.0 (the "License");

  you may not use this file except in compliance with the License.

  You may obtain a copy of the License at

 

    http://www.apache.org/licenses/LICENSE-2.0

 

  Unless required by applicable law or agreed to in writing, software

  distributed under the License is distributed on an "AS IS" BASIS,

  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

  See the License for the specific language governing permissions and

  limitations under the License. See accompanying LICENSE file.

-->

 

<!-- Put site-specific property overrides in this file. -->

 

<configuration>

    <property>

        <name>dfs.replication</name>

        <value>1</value>

    </property>

</configuration>
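
dfs.replication=1 means each block is stored on a single DataNode. Once a file has been uploaded (see the test near the end), its actual replication factor can be read back with hdfs dfs -stat (a sketch):

hdfs dfs -stat "%r" /user/core-site.xml    # prints 1 under this configuration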

4) mapred-site.xml

[root@hadoop01 hadoop-2.7.1]# cd etc/hadoop/

[root@hadoop01 hadoop]# cp mapred-site.xml.template mapred-site.xml

 

<?xml version="1.0"?>

<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!--

  Licensed under the Apache License, Version 2.0 (the "License");

  you may not use this file except in compliance with the License.

  You may obtain a copy of the License at

 

    http://www.apache.org/licenses/LICENSE-2.0

 

  Unless required by applicable law or agreed to in writing, software

  distributed under the License is distributed on an "AS IS" BASIS,

  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

  See the License for the specific language governing permissions and

  limitations under the License. See accompanying LICENSE file.

-->

 

<!-- Put site-specific property overrides in this file. -->

 

<configuration>

<property>

    <name>mapreduce.framework.name</name>

    <value>yarn</value>

  </property>

</configuration>

 

This configures YARN as the cluster management framework.

Hive:

select * from tb_user: does not use reduce

select count(*) from tb_user: uses reduce; if YARN is misconfigured the query appears to hang!!!!!
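
With mapreduce.framework.name set to yarn, MapReduce jobs are submitted through YARN. Once HDFS and YARN are running (start-all.sh, below), the example jar that ships with Hadoop 2.7.1 gives a quick end-to-end check (a sketch):

cd /usr/local/src/hadoop/hadoop-2.7.1
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar pi 2 10    # estimate pi with 2 maps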

 

5) yarn-site.xml

Configure the YARN service and the shuffle handler:

<?xml version="1.0"?>

<!--

  Licensed under the Apache License, Version 2.0 (the "License");

  you may not use this file except in compliance with the License.

  You may obtain a copy of the License at

 

    http://www.apache.org/licenses/LICENSE-2.0

 

  Unless required by applicable law or agreed to in writing, software

  distributed under the License is distributed on an "AS IS" BASIS,

  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

  See the License for the specific language governing permissions and

  limitations under the License. See accompanying LICENSE file.

-->

<configuration>

 

<!-- Site specific YARN configuration properties -->

  <property>

    <name>yarn.resourcemanager.hostname</name>

    <value>hadoop01</value>

  </property>

  <property>

    <name>yarn.nodemanager.aux-services</name>

    <value>mapreduce_shuffle</value>

  </property>

</configuration>

6) slaves

Configure the Hadoop cluster's worker nodes (see the note after the file content below):

hadoop01
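
Only hadoop01 is listed, so only hadoop01 runs a DataNode in this walkthrough; to run DataNodes on all three machines, the file would list hadoop01, hadoop02 and hadoop03, one hostname per line.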

Before starting Hadoop's HDFS, review the edited files:

[root@hadoop01 hadoop]# vi hadoop-env.sh

[root@hadoop01 hadoop]# vi core-site.xml

[root@hadoop01 hadoop]# vi hdfs-site.xml

[root@hadoop01 hadoop]# vi mapred-site.xml

[root@hadoop01 hadoop]# vi yarn-site.xml

1.4.3 Configure /etc/profile

Configure the Hadoop environment variables:

 #set hadoop env

HADOOP_HOME=/usr/local/src/hadoop/hadoop-2.7.1/

export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

 

 

 

 

 

[root@hadoop01 hadoop]# vi /etc/profile

[root@hadoop01 hadoop]# cat /etc/profile

# /etc/profile

# System wide environment and startup programs, for login setup

# Functions and aliases go in /etc/bashrc

unset i

unset -f pathmunge

#set java environment

JAVA_HOME=/usr/local/src/java/jdk1.7.0_51

JAVA_BIN=/usr/local/src/java/jdk1.7.0_51/bin

PATH=$JAVA_HOME/bin:$PATH

CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export JAVA_HOME JAVA_BIN PATH CLASSPATH

#set hadoop env

HADOOP_HOME=/usr/local/src/hadoop/hadoop-2.7.1/

export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

[root@hadoop01 hadoop]# source /etc/profile

 

[root@hadoop01 hadoop]# echo $HADOOP_HOME

/usr/local/src/hadoop/hadoop-2.7.1/

 

Connect to the second virtual machine

 

 

Create the hadoop directory, upload the tar package, and extract it

[root@hadoop02 src]# cd hadoop/

[root@hadoop02 hadoop]# ll

總用量 262080

-rw-r--r--. 1 root root 169574400 3月   1 19:01 hadoop-2.7.1.tar.gz

[root@hadoop02 hadoop]# tar -xvf hadoop-2.7.1.tar.gz

 

 

Connect to the third virtual machine (same as above)

 

 

[root@hadoop03 bin]# cd /usr/local/src

[root@hadoop03 src]# mkdir hadoop

[root@hadoop03 src]# ll

總用量 0

drwxr-xr-x. 2 root root  6 4月   7 2017 docker

drwxr-xr-x. 2 root root  6 3月   1 18:59 hadoop

drwxr-xr-x. 3 root root 56 3月   1 00:51 java

drwxr-xr-x. 3 root root 57 3月   1 01:51 zk

[root@hadoop03 src]#

Create the hadoop directory, upload the tar package, and extract it

 

[root@hadoop03 src]# cd hadoop/

[root@hadoop03 hadoop]# ll

總用量 262080

-rw-r--r--. 1 root root 169574400 3月   1 19:01 hadoop-2.7.1.tar.gz

[root@hadoop03 hadoop]# tar -xvf hadoop-2.7.1.tar.gz

 

1.5 Other nodes (copy the configuration to them)

[root@hadoop01 hadoop]# pwd

/usr/local/src/hadoop/hadoop-2.7.1/etc/hadoop

[root@hadoop01 hadoop]#

 

Copy the path:

[root@hadoop01 ~]# cd /usr/local/src/hadoop/hadoop-2.7.1/etc/hadoop

[root@hadoop01 hadoop]#

 [root@hadoop01 hadoop]# cd ../

scp -r hadoop/ root@hadoop02:/usr/local/src/hadoop/hadoop-2.7.1/etc/

 

[root@hadoop01 etc]# scp -r hadoop/ root@hadoop02:/usr/local/src/hadoop/hadoop-2.7.1/etc/

mapred-env.cmd                                         100%  951     0.9KB/s   00:00   

container-executor.cfg                                 100%  318     0.3KB/s   00:00   

capacity-scheduler.xml                                 100% 4436     4.3KB/s   00:00   

ssl-server.xml.example                                 100% 2268     2.2KB/s   00:00   

yarn-site.xml                                          100%  901    

hadoop-metrics2.properties                             100% 2598     2.5KB/s   00:00   

httpfs-log4j.properties                                100% 1657     1.6KB/s   00:00   

hadoop-env.sh                                          100% 4256     4.2KB/s   00:00   

mapred-site.xml                                        100%  848     0.8KB/s   00:00   

slaves                                                 100%    9     0.0KB/s   00:00   

core-site.xml                                          100% 1149     1.1KB/s   00:00   

[root@hadoop01 etc]#

[root@hadoop01 etc]# scp -r hadoop/ root@hadoop03:/usr/local/src/hadoop/hadoop-2.7.1/etc/

mapred-env.cmd                                         100%  951     0.9KB/s   00:00   

container-executor.cfg                                 100%  318     0.3KB/s   00:00   

capacity-scheduler.xml                                 100% 4436     4.3KB/s  

mapred-site.xml                                        100%  848     0.8KB/s   00:00   

slaves                                                 100%    9     0.0KB/s   00:00   

core-site.xml                                          100% 1149     1.1KB/s   00:00   

[root@hadoop01 etc]#

 

source /etc/profile

[root@hadoop01 etc]# source /etc/profile

[root@hadoop01 etc]#  scp -r /etc/profile root@hadoop02:/etc/profile

profile                                                100% 2103     2.1KB/s   00:00   

[root@hadoop01 etc]# scp -r /etc/profile root@hadoop03:/etc/profile

profile                                                100% 2103     2.1KB/s   00:00   

 

 

Check the other two machines to verify the files were copied successfully.

 

[root@hadoop02 ~]# cd /usr/local/src/hadoop/hadoop-2.7.1/etc/hadoop

 [root@hadoop02 hadoop]# vi mapred-site.xml

 

[root@hadoop03 ~]# cd /usr/local/src/hadoop/hadoop-2.7.1/etc/hadoop

[root@hadoop03 hadoop]# ll

總用量 156

-rw-r--r--. 1 10021 10021  4436 6月  29 2015 capacity-scheduler.xml

-rw-r--r--. 1 10021 10021  1335 6月  29 2015 configuration.xsl

-rw-r--r--. 1 10021 10021   318 6月  29 2015 container-executor.cfg

-rw-r--r--. 1 10021 10021   774 6月  29 2015 core-site.xml

drwxr-xr-x. 2 root  root   4096 3月   1 20:48 hadoop

-rw-r--r--. 1 10021 10021  3670 6月  29 2015 hadoop-env.cmd

-rw-r--r--. 1 10021 10021  4224 6月  29 2015 hadoop-env.sh

-rw-r--r--. 1 10021 10021  2598 6月  29 2015 hadoop-metrics2.properties

-rw-r--r--. 1 10021 10021  2490 6月  29 2015 hadoop-metrics.properties

[root@hadoop03 hadoop]# vi core-site.xml

 

 

[root@hadoop02 hadoop]# cat /etc/prifile

cat: /etc/prifile: 沒有那個文件或目錄

[root@hadoop02 hadoop]# cat /etc/profile

# /etc/profile

unset i

unset -f pathmunge

#set java environment

JAVA_HOME=/usr/local/src/java/jdk1.7.0_51

JAVA_BIN=/usr/local/src/java/jdk1.7.0_51/bin

PATH=$JAVA_HOME/bin:$PATH

CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export JAVA_HOME JAVA_BIN PATH CLASSPATH

#set hadoop env

HADOOP_HOME=/usr/local/src/hadoop/hadoop-2.7.1/

export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

 

[root@hadoop02 hadoop]#

 

[root@hadoop03 hadoop]#  cat /etc/profile

#set hadoop env

HADOOP_HOME=/usr/local/src/hadoop/hadoop-2.7.1/

export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

 

After the copy succeeds, the file must be made effective:

source /etc/profile

[root@hadoop02 hadoop]# source /etc/profile

[root@hadoop03 hadoop]# source /etc/profile

 

2 Start Hadoop's HDFS

2.1 Format the file system

bin/hdfs namenode -format

When you see "successfully formatted" in the output, the format succeeded; the metadata lands under dfs/name inside hadoop.tmp.dir (here /usr/local/src/hadoop/hadoop-2.7.1/tmp; the /tmp/hadoop-root default applies only when hadoop.tmp.dir is unset).

bin/hdfs namenode -format

./hdfs namenode -format

[root@hadoop01 bin]# ./hdfs namenode -format    # use ./ when launching a script from the current directory
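
A quick way to verify that the format actually wrote metadata (a sketch, assuming the hadoop.tmp.dir configured above):

ls /usr/local/src/hadoop/hadoop-2.7.1/tmp/dfs/name/current/    # should contain VERSION and an fsimage file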

 

 

[root@hadoop01 etc]# cd ../

[root@hadoop01 hadoop-2.7.1]# cd bin

[root@hadoop01 bin]# pwd

/usr/local/src/hadoop/hadoop-2.7.1/bin

[root@hadoop01 bin]# ll

總用量 448

-rwxr-xr-x. 1 10021 10021 160127 6月  29 2015 container-executor

-rwxr-xr-x. 1 10021 10021   6488 6月  29 2015 hadoop

-rwxr-xr-x. 1 10021 10021   8786 6月  29 2015 hadoop.cmd

-rwxr-xr-x. 1 10021 10021  12223 6月  29 2015 hdfs

-rwxr-xr-x. 1 10021 10021   7327 6月  29 2015 hdfs.cmd

-rwxr-xr-x. 1 10021 10021   5953 6月  29 2015 mapred

-rwxr-xr-x. 1 10021 10021   6310 6月  29 2015 mapred.cmd

-rwxr-xr-x. 1 10021 10021   1776 6月  29 2015 rcc

-rwxr-xr-x. 1 10021 10021 204075 6月  29 2015 test-container-executor

-rwxr-xr-x. 1 10021 10021  13308 6月  29 2015 yarn

-rwxr-xr-x. 1 10021 10021  11386 6月  29 2015 yarn.cmd

2.2 Start the HDFS service

sbin/start-dfs.sh     # start the HDFS service

[root@hadoop01 sbin]# ./start-dfs.sh

Starting namenodes on [hadoop01]

hadoop01: namenode running as process 4822. Stop it first.

hadoop01: datanode running as process 4944. Stop it first.

Starting secondary namenodes [0.0.0.0]

0.0.0.0: secondarynamenode running as process 5102. Stop it first.

[root@hadoop01 sbin]#

 

Check the processes with jps; NameNode, SecondaryNameNode and DataNode must all be present.

If any are missing, the XML configuration is probably wrong. Reinstall!!!

sbin/start-dfs.sh        # stop the service with stop-dfs.sh

./start-dfs.sh

You can also start Hadoop with sbin/start-all.sh, which includes HDFS and starts two additional services, NodeManager and ResourceManager. jps should then show six entries, which means startup succeeded.

 [root@hadoop01 bin]# cd ../

[root@hadoop01 hadoop-2.7.1]# cd sbin

[root@hadoop01 sbin]# ./start-dfs.sh

Starting namenodes on [hadoop01]

hadoop01: starting namenode, logging to /usr/local/src/hadoop/hadoop-2.7.1/logs/hadoop-root-namenode-hadoop01.out

hadoop01: starting datanode, logging to /usr/local/src/hadoop/hadoop-2.7.1/logs/hadoop-root-datanode-hadoop01.out

Starting secondary namenodes [0.0.0.0]

The authenticity of host '0.0.0.0 (0.0.0.0)' can't be established.

ECDSA key fingerprint is 2c:7a:dc:43:9d:f1:16:d2:19:9c:66:f5:c0:ff:10:06.

Are you sure you want to continue connecting (yes/no)? yes

Starting namenodes on [hadoop01]

[root@hadoop01 sbin]# ./start-dfs.sh

Starting namenodes on [hadoop01]

hadoop01: starting namenode, logging to /usr/local/src/hadoop/hadoop-2.7.1/logs/hadoop-root-namenode-hadoop01.out

hadoop01: datanode running as process 3121. Stop it first.

Starting secondary namenodes [0.0.0.0]

0.0.0.0: secondarynamenode running as process 3261. Stop it first.

[root@hadoop01 sbin]#kill -9 3121

[root@hadoop01 sbin]#kill -9 3261

 

 

 

Check that the services are healthy.

You can also access the web UI directly in a browser: http://192.168.163.155:50070/

Check the processes with jps; NameNode, SecondaryNameNode and DataNode must be present:

[root@hadoop01 sbin]# jps

4944 DataNode

2462 QuorumPeerMain

5102 SecondaryNameNode

5211 Jps

4822 NameNode

[root@hadoop01 sbin]#

 

 

Test:

[root@hadoop01 bin]# hdfs dfs -ls /

[root@hadoop01 bin]# hdfs dfs -mkdir /user

[root@hadoop01 bin]# hdfs dfs -ls /

Found 1 items

drwxr-xr-x   - root supergroup          0 2018-03-01 22:24 /user

[root@hadoop01 bin]# dir

container-executor  hdfs      mapred.cmd             yarn

hadoop               hdfs.cmd  rcc              yarn.cmd

hadoop.cmd        mapred    test-container-executor

[root@hadoop01 bin]#

 

Duplicate the hadoop01 connection, then copy the path:

Connecting to 192.168.163.155:22...

Connection established.

To escape to local shell, press 'Ctrl+Alt+]'.

 

Last login: Thu Mar  1 20:38:32 2018 from 192.168.163.1

[root@hadoop01 ~]# . go

[root@hadoop01 src]# cd hadoop/hadoop-2.7.1/etc/hadoop/

[root@hadoop01 hadoop]# pwd

/usr/local/src/hadoop/hadoop-2.7.1/etc/hadoop

[root@hadoop01 hadoop]#

[root@hadoop01 hadoop]# cd ~

[root@hadoop01 ~]# ll

總用量 16740

drwxr-xr-x. 2 root root        6 4月   7 2017 docker

-rw-r--r--. 1 root root    14540 3月  21 2017 epel-release-6-8.noarch.rpm

-rw-r--r--. 1 root root       18 5月  13 2017 go

-rw-r--r--. 1 root root 17120775 4月   6 2017 kong-0.10.1.el6.noarch.rpm

[root@hadoop01 ~]# ll

總用量 16744

-rw-r--r--. 1 root root     1150 3月   1 14:06 core-site.xml     # <-- core-site.xml uploaded here

drwxr-xr-x. 2 root root        6 4月   7 2017 docker

-rw-r--r--. 1 root root    14540 3月  21 2017 epel-release-6-8.noarch.rpm

-rw-r--r--. 1 root root       18 5月  13 2017 go

-rw-r--r--. 1 root root 17120775 4月   6 2017 kong-0.10.1.el6.noarch.rpm

 

Back in the original session, put the file into HDFS:

[root@hadoop01 bin]# ./hdfs dfs -put /root/core-site.xml /user

[root@hadoop01 bin]# hdfs dfs -ls /

Found 1 items

drwxr-xr-x   - root supergroup          0 2018-03-01 22:34 /user

[root@hadoop01 bin]# hdfs dfs -ls /user

Found 1 items

-rw-r--r--   1 root supergroup       1150 2018-03-01 22:34 /user/core-site.xml

[root@hadoop01 bin]#
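
To confirm the upload round-trips, the file can be read straight back out of HDFS (a sketch):

hdfs dfs -cat /user/core-site.xml    # prints the XML that was just uploaded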

 

Summary:

1) Hadoop: massive data storage (HDFS), data processing (MapReduce), offline data analysis (MapReduce)

2) Distributed cluster management with YARN (scales to thousands of machines)

3) The Hadoop ecosystem

All the tools build on HDFS. MapReduce does distributed offline computation. Hive is a data warehouse (it wraps MapReduce in HQL, an SQL-like language that is more convenient than raw MR). Pig transforms and processes data. Mahout is machine learning on Hadoop.

Sqoop does ETL data cleansing between HDFS and MySQL (converting between big-data structures and traditional relational databases).

Flume is a log-collection framework. ZooKeeper handles cluster coordination and configuration.

Setting up the Hadoop environment:

1) ZK cluster

2) Hadoop configuration (a pile of XML files; they cannot be written casually, each follows a fixed schema)

3) NameNode (manages the Hadoop cluster), SecondaryNameNode (backup of the NameNode; if the NameNode goes down, the SecondaryNameNode takes over), DataNode (data/compute nodes)

Notes:

1) For the ZK cluster, modify myid on each node

2) ./zkServer.sh start-foreground shows the logs on the console; start all 3 nodes first

3) ./zkServer.sh stop

4) If the NameNode format configuration is incorrect, formatting fails and the NameNode will not start
