mkdir module
vi /etc/profile
JAVA_HOME=/usr/local/jdk1.8.0_151
HADOOP_HOME=/opt/module/hadoop-2.10.0
CLASSPATH=.:$JAVA_HOME/lib/tools.jar
PATH=$JAVA_HOME/bin:$PATH:$HADOOP_HOME/bin
export JAVA_HOME HADOOP_HOME CLASSPATH PATH
配置完畢,刷新環境變量:
source /etc/profile
這就安裝完畢了,簡單吧。。。
echo 'hadoop mapreduce hivehbase spark stormsqoop hadoop hivespark' > data/wc.input
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.10.0.jar wordcount ../data/wc.input output
進入hadoop配置目錄:
cd /opt/module/hadoop-2.10.0/etc/hadoop
export JAVA_HOME=/usr/local/jdk1.8.0_151
<configuration> <!-- 指定HDFS中namenode的路徑 --> <property> <name>fs.defaultFS</name> <value>hdfs://localhost:9000</value> </property> <!-- 指定HDFS運行時產生的文件的存儲目錄 --> <property> <name>hadoop.tmp.dir</name> <value>/opt/module/hadoop-2.10.0/data/tmp</value> </property> </configuration>
<property> <name>dfs.replication</name> <value>1</value> </property>
bin/hdfs namenode -format
sbin/hadoop-daemon.sh start namenode
sbin/hadoop-daemon.sh start datanode
bin/hdfs dfs -mkdir -p /usr/mmc
bin/hdfs dfs -put /opt/module/data/wc.input /usr/mmc
bin/hdfs dfs -rm -r /usr/mmc
網頁上查看效果:
export JAVA_HOME=/usr/local/jdk1.8.0_151
<configuration> <!-- Site specific YARN configuration properties --> <property> <name>yarn.nodemanager.aux-services</name> <value>mapreduce_shuffle</value> </property> <property> <name>yarn.resourcemanager.hostname</name> <value>hadoop101</value> </property> </configuration>
hadoop101那裏要配置爲你虛擬機的hostname
export JAVA_HOME=/usr/local/jdk1.8.0_151
mv mapred-site.xml.template mapred-site.xml
<configuration> <property> <name>mapreduce.framework.name</name> <value>yarn</value> </property> </configuration>
sbin/yarn-daemon.sh start resourcemanager
sbin/yarn-daemon.sh start nodemanager
hdfs dfs -mkdir -p /usr/mmc/input
hdfs dfs -put ../data/wc.input /usr/mmc/input
注意:運行以前用jps查看下,確認NameNode、NodeManager、DataNode、ResourceManager這些進程是否都已啓動
hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.10.0.jar wordcount /usr/mmc/input /usr/mmc/output
http://192.168.1.21:8088/cluster
此時能夠看到執行的進度了,可是那個History連接仍是點不動,須要啓動歷史服務器
<configuration> <property> <name>mapreduce.framework.name</name> <value>yarn</value> </property> <property> <name>mapreduce.jobhistory.address</name> <value>eshop01:10020</value> </property> <property> <name>mapreduce.jobhistory.webapp.address</name> <value>eshop01:19888</value> </property> </configuration>
sbin/mr-jobhistory-daemon.sh start historyserver
注意:開啓日誌彙集須要重啓Nodemanager,resourcemanager,historymanager
<!--開啓日誌彙集功能 --> <property> <name>yarn.log-aggregation-enable</name> <value>true</value> </property> <!-- 日誌保留時間 --> <property> <name>yarn.log-aggregation.retain-seconds</name> <value>604800</value> </property>
啓動NodeManager、ResourceManager、JobHistoryServer
運行實例程序
hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.10.0.jar wordcount /usr/mmc/input /usr/mmc/output