Hostname | Alias | IP | Role |
---|---|---|---|
9321a27a2b91 | hadoop1 | 172.17.0.10 | NN1 ZK RM |
7c3a3c9cd595 | hadoop2 | 172.17.0.9 | NN2 ZK RM JOBHIS |
f89eaf2a2548 | hadoop3 | 172.17.0.8 | DN ZK ND |
28620eee1426 | hadoop4 | 172.17.0.7 | DN QJM1 ND |
ae1f06bd04c8 | hadoop5 | 172.17.0.6 | DN QJM2 ND |
11c433a003b6 | hadoop6 | 172.17.0.5 | DN QJM3 ND |
User | Group | Purpose |
---|---|---|
hdfs | hadoop | manages HDFS |
yarn | hadoop | manages YARN |
zookeeper | hadoop | manages ZooKeeper |
hive | hadoop | manages Hive |
hbase | hadoop | manages HBase |
groupadd hadoop
useradd -g hadoop hdfs
useradd -g hadoop yarn
useradd -g hadoop zookeeper
useradd -g hadoop hive
useradd -g hadoop hbase
# passwd normally reads from the terminal, so set the passwords non-interactively:
echo "hdfs:hdfs" | chpasswd
echo "yarn:yarn" | chpasswd
echo "zookeeper:zookeeper" | chpasswd
echo "hive:hive" | chpasswd
echo "hbase:hbase" | chpasswd
echo "users added"
echo "127.0.0.1 localhost localhost">/etc/hosts
echo "172.17.0.6 9321a27a2b91 hadoop1">>/etc/hosts
echo "172.17.0.7 7c3a3c9cd595 hadoop2">>/etc/hosts
echo "172.17.0.8 f89eaf2a2548 hadoop3">>/etc/hosts
echo "172.17.0.9 28620eee1426 hadoop4">>/etc/hosts
echo "172.17.0.10 ae1f06bd04c8 hadoop5">>/etc/hosts
echo "172.17.0.11 11c433a003b6 hadoop6">>/etc/hosts
su hdfs
ssh-keygen -t rsa   # generate a key pair first if ~/.ssh/id_rsa does not exist yet
ssh-copy-id -i ~/.ssh/id_rsa.pub 172.17.0.5
ssh-copy-id -i ~/.ssh/id_rsa.pub 172.17.0.6
ssh-copy-id -i ~/.ssh/id_rsa.pub 172.17.0.7
ssh-copy-id -i ~/.ssh/id_rsa.pub 172.17.0.8
ssh-copy-id -i ~/.ssh/id_rsa.pub 172.17.0.9
ssh-copy-id -i ~/.ssh/id_rsa.pub 172.17.0.10
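To confirm passwordless login works everywhere, a quick loop helps (a minimal sketch, assuming the hadoop1..hadoop6 aliases written to /etc/hosts above):

```bash
# BatchMode makes ssh fail instead of prompting when key auth is not set up
for h in hadoop1 hadoop2 hadoop3 hadoop4 hadoop5 hadoop6; do
  ssh -o BatchMode=yes "$h" hostname || echo "passwordless ssh to $h failed"
done
```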
[hdfs@9321a27a2b91 root]$ ulimit -a
core file size          (blocks, -c) unlimited
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 95612
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 65536
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 1024
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
Raise the open-file limit for the service users in /etc/security/limits.conf:

hdfs hard nofile 65536
hdfs soft nofile 65536
yarn hard nofile 65536
yarn soft nofile 65536
......
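After editing the limits, log in again as the service user and confirm the new descriptor limit took effect (a quick check; limits.conf is applied by PAM at login):

```bash
# Should print 65536 once the nofile entries are in place
su - hdfs -c 'ulimit -n'
```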
service iptables stop
setenforce 0
[root@9321a27a2b91 ~]# mkdir /usr/local/java
[root@9321a27a2b91 ~]# cp jdk-8u121-linux-x64.tar.gz /usr/local/java/
[root@9321a27a2b91 ~]# chown -R hdfs:hadoop /usr/local/java/
[root@9321a27a2b91 ~]# su hdfs
[hdfs@9321a27a2b91 root]$ cd /usr/local/java
[hdfs@9321a27a2b91 java]$ tar -zxvf jdk-8u121-linux-x64.tar.gz
mkdir /usr/local/java
chown hdfs:hadoop /usr/local/java
su hdfs
scp -r hdfs@hadoop1:/usr/local/java/jdk1.8.0_121 /usr/local/java
[root@9321a27a2b91 ~]# mkdir /opt/hadoop
[root@9321a27a2b91 ~]# chown hdfs:hadoop hadoop-2.7.3.tar.gz
[root@9321a27a2b91 ~]# chown hdfs:hadoop /opt/hadoop
[root@9321a27a2b91 ~]# cp hadoop-2.7.3.tar.gz /opt/hadoop
[root@9321a27a2b91 ~]# su hdfs
[hdfs@9321a27a2b91 root]$ cd /opt/hadoop/
[hdfs@9321a27a2b91 hadoop]$ tar -zxvf hadoop-2.7.3.tar.gz
yum -y install ntp

In /etc/ntp.conf on hadoop1:

# hosts in this subnet may synchronize with us
restrict 172.17.0.0 mask 255.255.0.0 nomodify
# preferred time server
server 172.17.0.10 prefer
# log file location
logfile /var/log/ntp.log
[root@9321a27a2b91 hadoop]# service ntpd start
Starting ntpd: [ OK ]
[root@9321a27a2b91 hadoop]# service ntpd status
ntpd dead but pid file exists

The log (/var/log/ntp.log, configured above) shows why:

3 Apr 11:20:08 ntpd[732]: ntp_io: estimated max descriptors: 65536, initial socket boundary: 16
3 Apr 11:20:08 ntpd[732]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
3 Apr 11:20:08 ntpd[732]: Listen and drop on 1 v6wildcard :: UDP 123
3 Apr 11:20:08 ntpd[732]: Listen normally on 2 lo 127.0.0.1 UDP 123
3 Apr 11:20:08 ntpd[732]: Listen normally on 3 eth0 172.17.0.10 UDP 123
3 Apr 11:20:08 ntpd[732]: Listen normally on 4 lo ::1 UDP 123
3 Apr 11:20:08 ntpd[732]: Listen normally on 5 eth0 fe80::42:acff:fe11:a UDP 123
3 Apr 11:20:08 ntpd[732]: Listening on routing socket on fd #22 for interface updates
3 Apr 11:20:08 ntpd[732]: 0.0.0.0 c016 06 restart
3 Apr 11:20:08 ntpd[732]: ntp_adjtime() failed: Operation not permitted
3 Apr 11:20:08 ntpd[732]: 0.0.0.0 c012 02 freq_set kernel 0.000 PPM
3 Apr 11:20:08 ntpd[732]: 0.0.0.0 c011 01 freq_not_set
3 Apr 11:20:08 ntpd[732]: cap_set_proc() failed to drop root privileges: Operation not permitted
ntpd cannot drop root privileges inside the container (the cap_set_proc failure above), so remove the `-u ntp:ntp` option from /etc/sysconfig/ntpd, which by default contains:

OPTIONS="-u ntp:ntp -p /var/run/ntpd.pid -g"

Rewrite the file with the option commented out:

echo "# Drop root to id 'ntp:ntp' by default." > /etc/sysconfig/ntpd
echo '#OPTIONS="-u ntp:ntp -p /var/run/ntpd.pid -g"' >> /etc/sysconfig/ntpd
[root@9321a27a2b91 hadoop]# service ntpd start
Starting ntpd: [ OK ]
[root@9321a27a2b91 hadoop]# service ntpd status
ntpd (pid 796) is running...
On each of the other nodes, point /etc/ntp.conf at hadoop1:

server 172.17.0.10 prefer
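Then start ntpd on those nodes as well and confirm hadoop1 is visible as a time source (`ntpq -p` lists the configured peers):

```bash
service ntpd start
# 172.17.0.10 should appear in the peer list
ntpq -p
```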
su hdfs
Add to ~/.bash_profile:

export JAVA_HOME=/usr/local/java/jdk1.8.0_121
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/jre/lib:$CLASSPATH
export HADOOP_HOME=/opt/hadoop/hadoop-2.7.3
export HADOOP_PREFIX=/opt/hadoop/hadoop-2.7.3
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export HADOOP_YARN_HOME=$HADOOP_HOME
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$JAVA_HOME/jre/lib/amd64/server
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
su hdfs
scp -r hdfs@hadoop1:/home/hdfs/.bash_profile ~
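Reload the profile and sanity-check the toolchain (both commands should resolve once the JDK is unpacked and, for `hadoop version`, once the Hadoop tarball is in place on that node):

```bash
source ~/.bash_profile
java -version
hadoop version
```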
In $HADOOP_HOME/etc/hadoop/hadoop-env.sh:

export JAVA_HOME=/usr/local/java/jdk1.8.0_121
# Maximum heap size for the Hadoop daemons (namenode/datanode/secondarynamenode, etc.); defaults to 1000 MB
#export HADOOP_HEAPSIZE=
# Initial heap size of the namenode; defaults to the value above, allocate as needed
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
# Extra JVM startup options; empty by default
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
# Memory can also be configured per component:
#export HADOOP_NAMENODE_OPTS=
#export HADOOP_DATANODE_OPTS=
#export HADOOP_SECONDARYNAMENODE_OPTS=
# Hadoop log directory; defaults to $HADOOP_HOME/logs
export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
In $HADOOP_HOME/etc/hadoop/yarn-env.sh:

export JAVA_HOME=/usr/local/java/jdk1.8.0_121
JAVA_HEAP_MAX=-Xmx1000m
# YARN_HEAPSIZE=1000  # heap size for the YARN daemons
#export YARN_RESOURCEMANAGER_HEAPSIZE=1000  # set the ResourceManager heap size separately
#export YARN_TIMELINESERVER_HEAPSIZE=1000   # set the TimelineServer (application history server) heap size separately
#export YARN_RESOURCEMANAGER_OPTS=          # extra JVM options for the ResourceManager
#export YARN_NODEMANAGER_HEAPSIZE=1000      # set the NodeManager heap size separately
#export YARN_NODEMANAGER_OPTS=              # extra JVM options for the NodeManager
$HADOOP_CONF_DIR/core-site.xml:

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop1:9000</value>
    <description>Default filesystem: HDFS NameNode address and port</description>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/hadoop/hadoop-2.7.3/tmp</value>
    <description>Defaults to /tmp/hadoop-${user.name}; changed to a persistent directory</description>
  </property>
</configuration>
mkdir ${HADOOP_HOME}/tmp
$HADOOP_CONF_DIR/hdfs-site.xml:

<configuration>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/opt/hadoop/hadoop-2.7.3/namenodedir</value>
  </property>
  <property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
    <description>Block size, 128 MB</description>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/opt/hadoop/hadoop-2.7.3/datadir</value>
    <description>DataNode data directory</description>
  </property>
</configuration>
mkdir ${HADOOP_HOME}/datadir
mkdir ${HADOOP_HOME}/namenodedir

Parameter | Value | Notes |
---|---|---|
mapreduce.framework.name | yarn | Execution framework set to Hadoop YARN. |
mapreduce.map.memory.mb | 1536 | Larger resource limit for maps (per-map memory ceiling). |
mapreduce.map.java.opts | -Xmx1024M | Larger heap-size for child jvms of maps. |
mapreduce.reduce.memory.mb | 3072 | Larger resource limit for reduces (per-reduce memory ceiling). |
mapreduce.reduce.java.opts | -Xmx2560M | Larger heap-size for child jvms of reduces. |
mapreduce.task.io.sort.mb | 512 | Higher memory-limit while sorting data for efficiency (sort buffer size). |
mapreduce.task.io.sort.factor | 100 | More streams merged at once while sorting files (merge factor). |
mapreduce.reduce.shuffle.parallelcopies | 50 | Higher number of parallel copies run by reduces to fetch outputs from very large number of maps. |
Parameter | Value | Notes |
---|---|---|
mapreduce.jobhistory.address | MapReduce JobHistory Server host:port | Default port is 10020. |
mapreduce.jobhistory.webapp.address | MapReduce JobHistory Server Web UI host:port | Default port is 19888. |
mapreduce.jobhistory.intermediate-done-dir | /mr-history/tmp | Directory where history files are written by MapReduce jobs. |
mapreduce.jobhistory.done-dir | /mr-history/done | Directory where history files are managed by the MR JobHistory Server. |
$HADOOP_CONF_DIR/mapred-site.xml:

<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    <description>Use YARN to manage MR</description>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop2</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop2</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.intermediate-done-dir</name>
    <value>/opt/hadoop/hadoop-2.7.3/mrHtmp</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.done-dir</name>
    <value>/opt/hadoop/hadoop-2.7.3/mrhHdone</value>
  </property>
</configuration>
Parameter | Value | Notes |
---|---|---|
yarn.resourcemanager.address | ResourceManager host:port for clients to submit jobs. | host:port If set, overrides the hostname set in yarn.resourcemanager.hostname. |
yarn.resourcemanager.scheduler.address | ResourceManager host:port for ApplicationMasters to talk to Scheduler to obtain resources. | host:port If set, overrides the hostname set in yarn.resourcemanager.hostname. |
yarn.resourcemanager.resource-tracker.address | ResourceManager host:port for NodeManagers. | host:port If set, overrides the hostname set in yarn.resourcemanager.hostname (the port NodeManagers report to the RM on). |
yarn.resourcemanager.admin.address | ResourceManager host:port for administrative commands. | host:port If set, overrides the hostname set in yarn.resourcemanager.hostname. |
yarn.resourcemanager.webapp.address | ResourceManager web-ui host:port. | host:port If set, overrides the hostname set in yarn.resourcemanager.hostname. |
yarn.resourcemanager.hostname | ResourceManager host. | host Single hostname that can be set in place of setting all yarn.resourcemanager*address resources. Results in default ports for ResourceManager components. |
yarn.resourcemanager.scheduler.class | ResourceManager Scheduler class. | CapacityScheduler (recommended), FairScheduler (also recommended), or FifoScheduler |
yarn.scheduler.minimum-allocation-mb | Minimum limit of memory to allocate to each container request at the Resource Manager. | In MBs (minimum memory per container) |
yarn.scheduler.maximum-allocation-mb | Maximum limit of memory to allocate to each container request at the Resource Manager. | In MBs (maximum memory per container) |
yarn.resourcemanager.nodes.include-path / yarn.resourcemanager.nodes.exclude-path | List of permitted/excluded NodeManagers. | If necessary, use these files to control the list of allowable NodeManagers (which nodes the RM may manage). |
yarn.nodemanager.resource.memory-mb | Resource i.e. available physical memory, in MB, for given NodeManager | Defines total available resources on the NodeManager to be made available to running containers (maximum memory YARN may use on the NodeManager) |
yarn.nodemanager.vmem-pmem-ratio | Maximum ratio by which virtual memory usage of tasks may exceed physical memory | The virtual memory usage of each task may exceed its physical memory limit by this ratio. The total amount of virtual memory used by tasks on the NodeManager may exceed its physical memory usage by this ratio. Tasks exceeding the ratio are killed. |
yarn.nodemanager.local-dirs | Comma-separated list of paths on the local filesystem where intermediate data is written. | Multiple paths help spread disk i/o (NodeManager local directories). |
yarn.nodemanager.log-dirs | Comma-separated list of paths on the local filesystem where logs are written. | Multiple paths help spread disk i/o (NodeManager log directories). |
yarn.nodemanager.log.retain-seconds | 10800 | Default time (in seconds) to retain log files on the NodeManager. Only applicable if log-aggregation is disabled. |
yarn.nodemanager.remote-app-log-dir | /logs | HDFS directory where the application logs are moved on application completion. Need to set appropriate permissions. Only applicable if log-aggregation is enabled. |
yarn.nodemanager.remote-app-log-dir-suffix | logs | Suffix appended to the remote log dir. Logs will be aggregated to ${yarn.nodemanager.remote-app-log-dir}/${user}/${thisParam}. Only applicable if log-aggregation is enabled. |
yarn.nodemanager.aux-services | mapreduce_shuffle | Shuffle service that needs to be set for Map Reduce applications. |
yarn.acl.enable | true / false | Enable ACLs? Defaults to false. |
yarn.admin.acl | Admin ACL | ACL to set admins on the cluster. ACLs are of the form comma-separated-users space comma-separated-groups (e.g. root,yarn). Defaults to the special value of *, which means anyone. The special value of just a space means no one has access. |
yarn.log-aggregation-enable | false | Configuration to enable or disable log aggregation (collects application logs in one place). |
$HADOOP_CONF_DIR/yarn-site.xml:

<configuration>
  <!-- Site specific YARN configuration properties -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop1</value>
    <description>ResourceManager node</description>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
    <description>aux service for the NodeManagers</description>
  </property>
</configuration>
vi $HADOOP_HOME/etc/hadoop/slaves
hadoop3
hadoop4
hadoop5
hadoop6
mkdir /opt/hadoop
chown hdfs:hadoop /opt/hadoop
su hdfs
scp -r hdfs@hadoop1:/opt/hadoop/hadoop-2.7.3 /opt/hadoop
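Config changes made later on hadoop1 have to reach the other nodes too; a small push loop saves repetition (a sketch, assuming the same install path and user on every node):

```bash
# Push the XML configs from hadoop1 to the rest of the cluster
for h in hadoop2 hadoop3 hadoop4 hadoop5 hadoop6; do
  scp $HADOOP_HOME/etc/hadoop/*.xml hdfs@$h:$HADOOP_HOME/etc/hadoop/
done
```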
$HADOOP_HOME/bin/hdfs namenode -format
[hdfs@9321a27a2b91 hadoop-2.7.3]$ start-dfs.sh
Starting namenodes on [hadoop1]
hadoop1: starting namenode, logging to /opt/hadoop/hadoop-2.7.3/logs/hadoop-hdfs-namenode-9321a27a2b91.out
hadoop3: starting datanode, logging to /opt/hadoop/hadoop-2.7.3/logs/hadoop-hdfs-datanode-f89eaf2a2548.out
hadoop4: starting datanode, logging to /opt/hadoop/hadoop-2.7.3/logs/hadoop-hdfs-datanode-28620eee1426.out
hadoop5: starting datanode, logging to /opt/hadoop/hadoop-2.7.3/logs/hadoop-hdfs-datanode-ae1f06bd04c8.out
hadoop6: starting datanode, logging to /opt/hadoop/hadoop-2.7.3/logs/hadoop-hdfs-datanode-11c433a003b6.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /opt/hadoop/hadoop-2.7.3/logs/hadoop-hdfs-secondarynamenode-9321a27a2b91.out
[hdfs@9321a27a2b91 hadoop]$ jps
11105 Jps
10981 SecondaryNameNode
10777 NameNode
[hdfs@9321a27a2b91 hadoop-2.7.3]$ hdfs dfs -put NOTICE.txt /
[hdfs@9321a27a2b91 hadoop-2.7.3]$ hdfs dfs -ls /
Found 1 items
-rw-r--r-- 3 hdfs supergroup 14978 2017-04-03 19:15 /NOTICE.txt
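`hdfs dfsadmin -report` summarizes the registered DataNodes and their capacity, confirming that all four slaves joined:

```bash
# Live datanodes should list hadoop3..hadoop6
hdfs dfsadmin -report
```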
[root@9321a27a2b91 hdfs]# curl hadoop1:50070
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
................
[hdfs@9321a27a2b91 hadoop]$ start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /opt/hadoop/hadoop-2.7.3/logs/yarn-hdfs-resourcemanager-9321a27a2b91.out
hadoop5: starting nodemanager, logging to /opt/hadoop/hadoop-2.7.3/logs/yarn-hdfs-nodemanager-ae1f06bd04c8.out
hadoop6: starting nodemanager, logging to /opt/hadoop/hadoop-2.7.3/logs/yarn-hdfs-nodemanager-11c433a003b6.out
hadoop3: starting nodemanager, logging to /opt/hadoop/hadoop-2.7.3/logs/yarn-hdfs-nodemanager-f89eaf2a2548.out
hadoop4: starting nodemanager, logging to /opt/hadoop/hadoop-2.7.3/logs/yarn-hdfs-nodemanager-28620eee1426.out
[hdfs@9321a27a2b91 hadoop]$ jps
11105 Jps
10981 SecondaryNameNode
10777 NameNode
10383 ResourceManager
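The NodeManagers can be checked from the ResourceManager side as well:

```bash
# Should show four RUNNING nodes once the NodeManagers register
yarn node -list
```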
[hdfs@9321a27a2b91 hadoop-2.7.3]$ bin/hdfs dfs -mkdir /user
[hdfs@9321a27a2b91 hadoop-2.7.3]$ bin/hdfs dfs -mkdir /user/hdfs
[hdfs@9321a27a2b91 hadoop-2.7.3]$ bin/hdfs dfs -put etc/hadoop input
...............
17/04/12 12:38:24 INFO mapreduce.JobSubmitter: number of splits:30
17/04/12 12:38:24 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1491968887469_0003
17/04/12 12:38:24 INFO mapreduce.JobSubmitter: Cleaning up the staging area /tmp/hadoop-yarn/staging/hdfs/.staging/job_1491968887469_0003
java.lang.IllegalArgumentException: Does not contain a valid host:port authority: hadoop2
at org.apache.hadoop.net.NetUtils.createSocketAddr(NetUtils.java:213)
........................
[hdfs@9321a27a2b91 hadoop]$ start-dfs.sh
Starting namenodes on [9321a27a2b91]
The authenticity of host '9321a27a2b91 (172.17.0.10)' can't be established.
RSA key fingerprint is 60:0c:61:73:2c:49:ef:e3:f7:61:c9:27:93:5a:1d:c7.
Are you sure you want to continue connecting (yes/no)?
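The start scripts block on these first-connection prompts. One workaround is to pre-accept host keys for the hdfs user (disabling strict host-key checking trades away MITM protection, which is usually tolerable inside a private Docker subnet):

```bash
# Accept new host keys automatically for the hdfs user
cat >> ~/.ssh/config <<'EOF'
Host *
    StrictHostKeyChecking no
EOF
chmod 600 ~/.ssh/config
```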
[hdfs@11c433a003b6 hadoop-2.7.3]$ $HADOOP_HOME/sbin/hadoop-daemons.sh start datanode
The authenticity of host '28620eee1426 (172.17.0.7)' can't be established.
RSA key fingerprint is 60:0c:61:73:2c:49:ef:e3:f7:61:c9:27:93:5a:1d:c7.
Are you sure you want to continue connecting (yes/no)? The authenticity of host '11c433a003b6 (172.17.0.5)' can't be established.
RSA key fingerprint is 60:0c:61:73:2c:49:ef:e3:f7:61:c9:27:93:5a:1d:c7.
Are you sure you want to continue connecting (yes/no)? The authenticity of host 'ae1f06bd04c8 (172.17.0.6)' can't be established.
RSA key fingerprint is 60:0c:61:73:2c:49:ef:e3:f7:61:c9:27:93:5a:1d:c7.
Are you sure you want to continue connecting (yes/no)? f89eaf2a2548: datanode running as process 5764. Stop it first.
[hdfs@11c433a003b6 hadoop-2.7.3]$ $HADOOP_HOME/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
cat: /opt/hadoop/hadoop-2.7.3/etc/hadoop/slaves: No such file or directory
usage="Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..."
# if no args specified, show usage
if[ $# -le 1 ]; then
echo $usage
exit 1
fi
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
exec "$bin/slaves.sh"--config $HADOOP_CONF_DIR cd "$HADOOP_PREFIX" \; "$bin/hadoop-daemon.sh"--config $HADOOP_CONF_DIR "$@"
[hdfs@9321a27a2b91 hadoop]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
starting namenode, logging to /opt/hadoop/hadoop-2.7.3/logs/hadoop-hdfs-namenode-9321a27a2b91.out
[hdfs@f89eaf2a2548 hadoop-2.7.3]$ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager
starting resourcemanager, logging to /opt/hadoop/hadoop-2.7.3/logs/yarn-hdfs-resourcemanager-f89eaf2a2548.out
[hdfs@9321a27a2b91 hadoop-2.7.3]$ bin/hdfs dfs -put etc/hadoop input
...............
17/04/12 12:38:24 INFO mapreduce.JobSubmitter: number of splits:30
17/04/12 12:38:24 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1491968887469_0003
17/04/12 12:38:24 INFO mapreduce.JobSubmitter: Cleaning up the staging area /tmp/hadoop-yarn/staging/hdfs/.staging/job_1491968887469_0003
java.lang.IllegalArgumentException: Does not contain a valid host:port authority: hadoop2
at org.apache.hadoop.net.NetUtils.createSocketAddr(NetUtils.java:213)
........................
The exception comes from mapreduce.jobhistory.address: it expects host:port, but only a hostname was configured:

<property>
  <name>mapreduce.jobhistory.address</name>
  <value>7c3a3c9cd595</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>7c3a3c9cd595</value>
</property>

Appending the default ports (10020 and 19888, per the table above) fixes it:

<property>
  <name>mapreduce.jobhistory.address</name>
  <value>7c3a3c9cd595:10020</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>7c3a3c9cd595:19888</value>
</property>
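With the ports in place, the JobHistory server can be started on its node with the daemon script that ships with Hadoop 2.7:

```bash
# Run on the jobhistory node (hadoop2 / 7c3a3c9cd595)
$HADOOP_HOME/sbin/mr-jobhistory-daemon.sh --config $HADOOP_CONF_DIR start historyserver
```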
2017-04-03 19:13:12,328 WARN org.apache.hadoop.hdfs.server.datanode.DataNode: IOException in offerService
java.io.EOFException: End of File Exception between local host is: "ae1f06bd04c8/172.17.0.6"; destination host is: "hadoop1":9000; : java.io.EOFException; For more details see: http://wiki.apache.org/hadoop/EOFException
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)