//vi /etc/sysconfig/network-scripts/ifcfg-eth0
BOOTPROTO=static
IPADDR=192.168.0.205
GATEWAY=192.168.0.1
DNS1=192.168.0.1
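The new address only takes effect once the network service is restarted; a minimal check, assuming the classic network-scripts service and the eth0 interface from above:

//apply the new interface settings
service network restart
//confirm the static address is bound
ip addr show eth0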
//vi /etc/sysconfig/network
HOSTNAME=Master.Hadoop
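The file above only applies after a reboot; to switch the running hostname immediately (a small convenience, not in the original steps):

//set the hostname for the current session
hostname Master.Hadoop
//verify
hostname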
//Add the user
sudo useradd -m hadoop -s /bin/bash
//Set its password
sudo passwd hadoop
//Grant root privileges: vi /etc/sudoers and add the line below
hadoop ALL=(ALL) ALL
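A quick way to confirm the sudo grant works (editing /etc/sudoers through visudo is the safer route, since it syntax-checks the file before saving):

//switch to the new user and test sudo
su - hadoop
//should print "root"
sudo whoami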
vi /etc/hosts
192.168.0.205 Master.Hadoop
192.168.0.206 Slave1.Hadoop
192.168.0.207 Slave2.Hadoop
192.168.0.208 Slave3.Hadoop
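With these entries in place on every node, name resolution can be sanity-checked from the master using the hostnames defined above:

//each name should answer from its 192.168.0.x address
ping -c 3 Slave1.Hadoop
ping -c 3 Slave2.Hadoop
ping -c 3 Slave3.Hadoop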
//Create the java directory
mkdir -p /usr/java/
//Copy the rpm package into it
cp jdk-8u112-linux-x64.rpm /usr/java/jdk-8u112-linux-x64.rpm
//Change into the java directory
cd /usr/java
//Install the JDK
rpm -ivh jdk-8u112-linux-x64.rpm
//Append the following to the end of /etc/profile:
vi /etc/profile
export JAVA_HOME=/usr/java/jdk1.8.0_112
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
//Reload the environment variables (or reboot the machine)
source /etc/profile
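Verifying the JDK before moving on saves a failed Hadoop start later:

//both should report 1.8.0_112
java -version
echo $JAVA_HOME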
//SHA-256
sha256sum hadoop-2.7.3.tar.gz
//MD5
md5sum hadoop-2.7.3.tar.gz
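The printed digests are only useful compared against the values published on the Apache download page; one way to automate the comparison (the hadoop-2.7.3.tar.gz.sha256 filename is an assumption, match it to whatever checksum file the mirror actually serves):

//the checksum file must contain lines of the form "<digest>  hadoop-2.7.3.tar.gz"
sha256sum -c hadoop-2.7.3.tar.gz.sha256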
4. Installation procedure
//Create directories
//Under /home/hadoop, create the data directories: tmp, hdfs, hdfs/data, hdfs/name
//Configure core-site.xml under /home/hadoop/hadoop-2.7.3/etc/hadoop
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://192.168.0.205:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/home/hadoop/tmp</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
</configuration>
//Configure hdfs-site.xml under /home/hadoop/hadoop-2.7.3/etc/hadoop
<configuration>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/home/hadoop/hdfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/home/hadoop/hdfs/data</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>192.168.0.205:9001</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
</configuration>
//Configure mapred-site.xml under /home/hadoop/hadoop-2.7.3/etc/hadoop
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>192.168.0.205:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>192.168.0.205:19888</value>
    </property>
</configuration>
//Configure yarn-site.xml under /home/hadoop/hadoop-2.7.3/etc/hadoop
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>192.168.0.205:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>192.168.0.205:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>192.168.0.205:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>192.168.0.205:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>192.168.0.205:8088</value>
    </property>
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>768</value>
    </property>
</configuration>
//Set JAVA_HOME in hadoop-env.sh and yarn-env.sh under /home/hadoop/hadoop-2.7.3/etc/hadoop; the daemons will not start without it
export JAVA_HOME=/usr/java/jdk1.8.0_112
//Start Hadoop on the master; the slave nodes start automatically. Change into /home/hadoop/hadoop-2.7.3, then:
(1) Format the NameNode: bin/hdfs namenode -format
(2) Start everything with sbin/start-all.sh, or separately with sbin/start-dfs.sh and sbin/start-yarn.sh
(3) To stop, run sbin/stop-all.sh
(4) Run jps to see the running daemons
//Web access: open the required ports first, or simply disable the firewall
(1) Run systemctl stop firewalld.service
(2) Browse to http://192.168.0.205:8088/
(3) Browse to http://192.168.0.205:50070/
//Installation complete. This is only the start of a big data deployment; the real work that follows is writing programs against Hadoop's interfaces, suited to your own needs, to put HDFS and MapReduce to work.
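For sbin/start-all.sh on the master to bring up the slave daemons, the workers must be listed in the slaves file (and the configured hadoop-2.7.3 directory copied to each slave, e.g. with scp); a minimal sketch using the hostnames from /etc/hosts above:

//vi /home/hadoop/hadoop-2.7.3/etc/hadoop/slaves
Slave1.Hadoop
Slave2.Hadoop
Slave3.Hadoop

Once the cluster is up, the bundled example job makes a quick smoke test (jar name assumed to match the 2.7.3 install):

//estimate pi with 10 maps of 100 samples each; success exercises HDFS and YARN end to end
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar pi 10 100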
//Generate the passwordless-login key pair
ssh-keygen -t rsa -P ''
//Change into the master's ssh directory
cd ~/.ssh/
//Create the authorized-keys file from the master's own public key
cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys
//Copy slave1's public key over to the master
scp ~/.ssh/id_rsa.pub hadoop@Master.Hadoop:~/.ssh/id_rsa.pub.slave1
//Append slave1's key on the master
cat ~/.ssh/id_rsa.pub.slave1 >> ~/.ssh/authorized_keys
//Handle the other slaves the same way
//Copy the complete authorized_keys file out to every node (master, slave1, slave2, ...)
scp ~/.ssh/authorized_keys hadoop@Slave1.Hadoop:~/.ssh/authorized_keys
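sshd refuses keys whose files are too permissive, so lock the permissions down on every node and then confirm that no password prompt appears:

//required by sshd's StrictModes default
chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys
//should log in without asking for a password
ssh hadoop@Slave1.Hadoop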