hadoop集羣單點配置



===================    ===============================
----------------hadoop集羣搭建   -----------------------
======================       ===========================
192.168.75.7          255.255.255.0             192.168.75.2
00:50:56:27:0C:F1
----------------------虛擬機基礎配置 -------------------
1.編輯硬件設備,設共享目錄
2-添加hosts頭

--------------------hosts頭-----------------------
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
----------------------------------------------------

------------------------------System eth0 網絡配置 ------root---------
----------------------- VM tool 安裝 -----------------root-----------
1-虛擬機/安裝VMware tool、雙擊打開VMwareTool tar包/解壓到文件系統tmp
./tmp/vmware-tools-distrib/vmware-install.pl
reboot

1.網絡橋接 設置物理地址
vi /etc/sysconfig/network
2.面板刪鏈接配置,只剩System eth0
rm -rf /etc/udev/rules.d/70-persistent-net.rules 
cp /mnt/hgfs/setup/hosts /etc/hosts
reboot
vi /etc/udev/rules.d/70-persistent-net.rules
vi /etc/sysconfig/network-scripts/ifcfg-eth0    物理地址大寫


-------------------------ifcfg-eth0文件---------------------
DEVICE="eth0"
BOOTPROTO=none
IPV6INIT="yes"
NM_CONTROLLED="yes"
ONBOOT="yes"
TYPE="Ethernet"
IPADDR=192.168.1.120
PREFIX=24
GATEWAY=192.168.1.1
DNS1=192.168.1.1
DEFROUTE=yes
IPV4_FAILURE_FATAL=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
NAME="System eth0"
HWADDR=00:50:56:2A:C2:8D
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes





--------------------------------------------------------
service iptables stop
chkconfig iptables off
service network restart


-------------------- jdk 安裝 -----------------root-------
 cp /mnt/hgfs/setup/jdk-8u211-linux-x64.rpm /opt/
rpm -ivh /mnt/hgfs/setup/jdk-8u211-linux-x64.rpm 
which java
ll /usr/java/jdk1.8.0_211/bin/java        Java路徑:/usr/java/jdk1.8.0_211
vi /etc/profile

----------------------profile 文件---------------
export JAVA_HOME=/usr/java/jdk1.8.0_211
export JRE_HOME=$JAVA_HOME/jre
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
-----------------------------------------------
***********************卸載JDK***********************************
rpm -qa|grep jdk
---看到:jdk-1.6.0_22-fcs
rpm -e --nodeps jdk-1.6.0_22-fcs   
***************************************************************
source /etc/profile
java -version


===============  Hadoop 安裝 ------先root後分配權限給Hadoop   =========
cp /mnt/hgfs/setup/hadoop-2.7.6.tar.gz /opt/
tar -zxvf /opt/hadoop-2.7.6.tar.gz -C /opt/
vi /etc/profile

----------------------profile 文件---------------
export HADOOP_DEV_HOME=/opt/hadoop-2.7.6
export PATH=$PATH:$HADOOP_DEV_HOME/bin
export PATH=$PATH:$HADOOP_DEV_HOME/sbin
export HADOOP_MAPARED_HOME=${HADOOP_DEV_HOME}
export HADOOP_COMMON_HOME=${HADOOP_DEV_HOME}
export HADOOP_HDFS_HOME=${HADOOP_DEV_HOME}
export YARN_HOME=${HADOOP_DEV_HOME}
export HADOOP_CONF_DIR=${HADOOP_DEV_HOME}/etc/hadoop
-----------------------------------------------
source /etc/profile


vi /opt/hadoop-2.7.6/etc/hadoop/hadoop-env.sh 
----------------------hadoop-env.sh  文件---------------
export JAVA_HOME=/usr/java/jdk1.8.0_211
------------------------------------------------------------







vi /opt/hadoop-2.7.6/etc/hadoop/core-site.xml 
-------- core-site.xml  文件-------要在/opt/hadoop-2.7.6/新建目錄   /data/tmp---
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://cMater:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/hadoop-2.7.6/data/tmp</value>
</property>
</configuration>     
-----------------------------------------------------------------------------


vi /opt/hadoop-2.7.6/etc/hadoop/hdfs-site.xml 
------ hdfs-site.xml  文件----要在/opt/hadoop-2.7.6/新建目錄   /data/cMater  /data/namenode  /data/checkpoint -------
<configuration>
<property>
<name>dfs.datanode.data.dir</name>
<value>/opt/hadoop-2.7.6/data/cMater</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/opt/hadoop-2.7.6/data/namenode</value>
</property>
<property>
<name>dfs.namenode.checkpoint.dir</name>
<value>/opt/hadoop-2.7.6/data/checkpoint</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
</configuration>
------------------------------------------------------------
 mkdir -p /opt/hadoop-2.7.6/data/tmp

 mkdir -p /opt/hadoop-2.7.6/data/cMater
 mkdir -p /opt/hadoop-2.7.6/data/namenode
 mkdir -p /opt/hadoop-2.7.6/data/checkpoint







vi /opt/hadoop-2.7.6/etc/hadoop/yarn-site.xml 
---------------------- yarn-site.xml  文件---------------
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>cMater</value>
</property>
</configuration>
------------------------------------------------------------






cp /opt/hadoop-2.7.6/etc/hadoop/mapred-site.xml.template /opt/hadoop-2.7.6/etc/hadoop/mapred-site.xml
vi /opt/hadoop-2.7.6/etc/hadoop/mapred-site.xml
---------------------- mapred-site.xml 文件---------------
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobtracker.staging.root.dir</name>
<value>/user</value>
</property>
</configuration>
------------------------------------------------------------

========================用戶  權限======================
useradd hadoop
passwd hadoop
chmod u+w /etc/sudoers
vi /etc/sudoers

------------sudoers 文件配置---root    ALL=(ALL)       ALL下添加一行----
hadoop  ALL=(ALL)       ALL
----------------------------------------------------------------------
chmod u-w /etc/sudoers
chown -R hadoop:hadoop /opt/hadoop-2.7.6







============啓動 Hadoop ========切換Hadoop用戶=============
su hadoop
******格式化 HDFS****************
hdfs namenode -format

*******單點重複格式化處理********
rm -rf 進行清空 
/data/tmp
/data/cMater  
/data/namenode  
/data/checkpoint 
內的緩存

***********單點啓動失敗*********
檢查core-site.xml、hdfs-site.xml 、yarn-site.xml、mapred-site.xml文件格式

source /etc/hosts    失敗
--------------------單節點啓動--------------------------------namenode----
//hadoop-daemon.sh start secondarynamenode
hadoop-daemon.sh start namenode
hadoop-daemon.sh start datanode
yarn-daemon.sh start resourcemanager
yarn-daemon.sh start nodemanager
mr-jobhistory-daemon.sh start historyserver
yarn-daemon.sh start proxyserver



//hadoop-daemon.sh stop secondarynamenode
hadoop-daemon.sh stop namenode
hadoop-daemon.sh stop datanode
yarn-daemon.sh stop resourcemanager
yarn-daemon.sh stop nodemanager
mr-jobhistory-daemon.sh stop historyserver
yarn-daemon.sh stop proxyserver

----------------
./opt/hadoop-2.7.6/sbin/hadoop-daemon.sh start namenode


-----------datanode----Hadoop找不到類時檢查nodemanager是否拼寫正確--------
hadoop-daemon.sh start datanode
yarn-daemon.sh start nodemanager

hadoop-daemon.sh stop datanode
yarn-daemon.sh stop nodemanager

-------------------端口解析-----------------
HDFS         50070         http服務的端口
yarn           8088           http服務的端口
proxyserver   WebAppProxyServer   
history         JobHistoryServer   
===========================================






--------------------HDFS 權限問題---------------------
useradd 20160216048       # root操作
passwd 20160216048          # root操作
sudo usermod -a -G hadoop 20160216048  # root操作     將20160216048添加到hadoop組
hadoop fs -put /opt/hadoop-2.7.6.tar.gz /root/   # hadoop操作,root無法上傳
hadoop fs -chown -R 20160216048:hadoop /root/  # hadoop操作
hadoop dfs -chmod -R 755 /abc   # hadoop操作
hadoop dfsadmin -safemode leave  # 解除hadoop的安全模式
hadoop dfs -chmod -R 777 /abc  # hadoop操作,修改權限,讓組外用戶進行操作
__________________________權限比重_______________________
drwxr-xr-x       -rw-r--r--
 d:文件夾         r:4          w:2         x:1


注意:hadoop fs -rm -r /tmp/ 與hadoop fs -rm -r /tmp沒有區別!
==========================    ==========================
-------------------  HDFS 使用     ----------------------
=====================       ==========================

------------------殺掉當前正在執行的hadoop任務---------------
列出當前hadoop正在執行的jobs:    hadoop job -list
殺掉job: job_201212111628_11166:
[hadoop@192.168.10.11 bin]$ ./hadoop job -kill job_201212111628_11166

-----------------------Linux自定義命令----------------------
vi /root/.bashrc
alias 你自定義的命令=系統原來的命令
如:alias hf='hadoop fs'

-----------------------------HDFS文件回傳----------------------
hadoop fs -get /user/root/ds_out /mnt/hgfs/setup/data/

*****************************Linux補充 免密碼登陸***************
方法一:主從主機間要相互做一次
ssh-keygen -t rsa
ssh-copy-id -i ~/.ssh/id_rsa.pub cSlave01
方法二:所有會話:
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
ssh-copy-id node11/node12/node13

------------------------------禁用IPV6-----------------------
## 禁用IPv6
echo " " >> /etc/modprobe.d/dist.conf
echo "alias net-pf-10 off" >> /etc/modprobe.d/dist.conf
echo "alias ipv6 off" >> /etc/modprobe.d/dist.conf
## 查看是否追加成功
tail /etc/modprobe.d/dist.conf

---------------------設置文件打開數目和用戶最大進程數---------------
## 設置用戶最大進程數
vim /etc/security/limits.conf
## 結尾添加如下內容
* soft nofile 32768
* hard nofile 1048576
* soft nproc 65536
* hard nproc unlimited
* soft memlock unlimited
* hard memlock unlimited

---------------------------時區設置-------------------------
統一時區為東八區,我們國家的時區為東八區(+0800):
# 查看當前時區
date -R; cat /etc/sysconfig/clock
不是北京時間,要設置一下時區,方法如下,執行命令:
# 設置東八區時區為當前時區
rm -rf /etc/localtime
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
# 手動同步下網絡時間
ntpdate -u cn.pool.ntp.org
vim /etc/sysconfig/clock
ZONE="Asia/Shanghai"
再次查看
date -R; cat /etc/sysconfig/clock

----------------------------Linux操作系統語言設置------------------
-------------轉英文---------------
# 查看操作系統語言
echo $LANG
# 修改操作系統語言
vim /etc/sysconfig/i18n
LANG="en_US.UTF-8"
----------------轉中文------------
# 查看操作系統語言
echo $LANG
# 修改操作系統語言
vim /etc/sysconfig/i18n
LANG="zh_CN.UTF-8"
相關文章
相關標籤/搜索