A Clear Guide to Installing Hadoop/HBase/Thrift

// The following records and summarizes my hands-on installation of Hadoop/HBase/Thrift on CentOS 6.4
 

1. Prepare the installation packages:
    http://ohse.de/uwe/software/lrzsz.html  (lrzsz, for rz/sz file transfers over the terminal)
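
    The remaining tarballs can be fetched in advance. A minimal sketch, assuming the Apache archive still hosts these exact versions (the JDK comes from Oracle, and libevent/Python/boost from their own project sites):
    #wget http://archive.apache.org/dist/hadoop/core/hadoop-1.0.4/hadoop-1.0.4.tar.gz
    #wget http://archive.apache.org/dist/hbase/hbase-0.94.9/hbase-0.94.9.tar.gz
    #wget http://archive.apache.org/dist/thrift/0.9.0/thrift-0.9.0.tar.gz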


2. Install and configure Hadoop
     http://book.51cto.com/art/201110/298590.htm
     1> Install the JDK:
        #tar -xvf targz/jdk-7u25-linux-x64.tar.gz
        #mkdir /usr/local/jvm/java-7-sun -p
        #mv jdk1.7.0_25/* /usr/local/jvm/java-7-sun/
        #vi ~/.bashrc
            export JAVA_HOME=/usr/local/jvm/java-7-sun
            export JRE_HOME=${JAVA_HOME}/jre
            export CLASSPATH=${JAVA_HOME}/lib:${JRE_HOME}/lib:${CLASSPATH}
            export PATH=${JAVA_HOME}/bin:${JRE_HOME}/bin:${PYTHONHOME}/bin:${THRIFT_HOME}/bin:${PATH}
        #. ~/.bashrc
        #java -version
            java version "1.7.0_25"
            Java(TM) SE Runtime Environment (build 1.7.0_25-b15)
            Java HotSpot(TM) 64-Bit Server VM (build 23.25-b01, mixed mode)
        // This output indicates a successful installation

     2> Configure passwordless SSH login
        # // Edit the hosts file
        #vi /etc/hosts
            192.168.0.20 master
            192.168.0.21 slave1
            192.168.0.22 slave2

        #// Find the following lines and remove the leading comment character "#"
        #vi /etc/ssh/sshd_config
            RSAAuthentication yes
            PubkeyAuthentication yes
            AuthorizedKeysFile      .ssh/authorized_keys
        #// If the SSH port is not 22, change the ssh invocation in the script ("myport" stands for your actual port)
        #vi /usr/bin/ssh-copy-id
            { eval "$GET_ID" ; } | ssh -p myport $1 "umask 077; test -d ~/.ssh || mkdir ~/.ssh ; cat >> ~/.ssh/authorized_keys"
        #// Next, generate the key pair and push it out for passwordless login
            ssh-keygen -t rsa -P ''
            ssh-copy-id -i ~/.ssh/id_rsa.pub root@master
            ssh-copy-id -i ~/.ssh/id_rsa.pub root@slave2
            ssh-copy-id -i ~/.ssh/id_rsa.pub root@slave1
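
        A quick sanity check (a sketch; "myport" is your SSH port): each command should print the remote hostname without asking for a password.
            for h in master slave1 slave2; do ssh -p myport root@$h hostname; done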

     3> Install Hadoop
        #// Unpack hadoop-1.0.4.tar.gz
        #tar -xvf hadoop-1.0.4.tar.gz
        #mv hadoop-1.0.4/ /usr/local/hadoop/
        #// Raise the open-file and process limits
        #vi /etc/security/limits.conf
            *    -       nofile    65535
            *    soft    nproc     32000
            *    hard    nproc     32000
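
        #// After logging back in, verify the new limits took effect (a quick check):
        #ulimit -n
            65535
        #ulimit -u
            32000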
        #// Set the hostname, otherwise formatting the NameNode fails with an error like:
        #//SHUTDOWN_MSG: Shutting down NameNode at java.net.UnknownHostException: hadoop1: hadoop1
        #vi /etc/sysconfig/network
            NETWORKING=yes
            HOSTNAME=master
        #// Update the user environment variables
        #vi ~/.bashrc
            export HADOOP_HOME=/usr/local/hadoop/hadoop-1.0.4
            # Add every jar under $HADOOP_HOME
            for i in $HADOOP_HOME/*.jar
            do
            CLASSPATH=$CLASSPATH:$i
            done

            # Add every jar under $HADOOP_HOME/lib
            for i in $HADOOP_HOME/lib/*.jar
            do
            CLASSPATH=$CLASSPATH:$i
            done

        #// Edit the hadoop-env.sh configuration file
        #cd /usr/local/hadoop/hadoop-1.0.4/
        #vi conf/hadoop-env.sh
            export JAVA_HOME=/usr/local/jvm/java-7-sun/
        #mkdir /usr/local/hadoop/hadoop-1.0.4/pids
            export HADOOP_PID_DIR=/usr/local/hadoop/hadoop-1.0.4/pids
            # Suppress the warning that the $HADOOP_HOME variable is deprecated
            export HADOOP_HOME_WARN_SUPPRESS=1
            # Match the non-default SSH port configured for passwordless login above
            # http://blog.chenlb.com/2008/12/change-hadoop-ssh-port.html
            export HADOOP_SSH_OPTS="-p myport"
        #// Edit the core-site.xml configuration file
        #vi conf/core-site.xml
            <configuration>
                <property>
                    <name>fs.default.name</name>
                    <value>hdfs://master:9000</value>
                </property>

                <!-- Recommended: without this, all data is lost after a reboot (hadoop.tmp.dir defaults to /tmp) -->
                <property>
                    <name>hadoop.tmp.dir</name>
                    <value>/usr/local/hadoop/hadoop-1.0.4/tmp</value>
                </property>
            </configuration>
        #vi conf/hdfs-site.xml
            <property>
                <name>dfs.name.dir</name>
                <value>/usr/local/hadoop/hadoop-1.0.4/name</value>
                <description>
                    Some guides say to create the name directory in advance, others say not to (recommended: do not create it)
                </description>
            </property>

            <property>
                <name>dfs.data.dir</name>
                <value>/u01/hadoopdata,/u02/hadoopdata,/u03/hadoopdata,/u04/hadoopdata</value>
                <description>
                    Where the DataNode stores data blocks. If several directories are given (comma-separated), blocks are spread across them.
                </description>
            </property>

            <property>
                <name>dfs.replication</name>
                <value>3</value>
            </property>

            <property>
                <name>dfs.datanode.max.xcievers</name>
                <value>4096</value>
                <description>
                    Upper limit on files a DataNode can have open at once. The default of 256 is too small.
                </description>
            </property>
        #vi conf/mapred-site.xml
            <property> 
                <name>mapred.job.tracker</name>
                <value>hdfs://master:9001</value>
            </property>
        #// Format the NameNode
        #./bin/hadoop namenode -format
        #// In a distributed setup, scp everything in the current directory to each DataNode machine
        #scp -r * root@slave1:/usr/local/hadoop/hadoop-1.0.4

        Note: the configuration on every slave should match the master, i.e. scp the configuration files and the whole hadoop directory above to all slaves, e.g. with the loop below.
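
        A minimal sketch of that sync, assuming the same paths and "myport" SSH port on every slave:
        #for h in slave1 slave2; do scp -P myport -r /usr/local/hadoop/hadoop-1.0.4 root@$h:/usr/local/hadoop/ ; done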

     4> Run
        #cd /usr/local/hadoop/hadoop-1.0.4/bin
        #./start-all.sh

     5> Verify via the web pages
        http://192.168.0.20:50030 (MapReduce web UI)
        http://192.168.0.20:50070 (HDFS web UI)
        http://192.168.0.20:50060 (TaskTracker status)

     6> Processes after startup (checked with jps below):
        * NameNode
        * SecondaryNameNode
        * DataNode
        * JobTracker
        * TaskTracker
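
        To confirm, run jps (shipped with the JDK installed above) on each node; the master should show NameNode, SecondaryNameNode and JobTracker, and each slave DataNode and TaskTracker:
        #jps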

3. Install and configure HBase
     1> Install HBase
        #// Unpack hbase-0.94.9.tar.gz
        #tar -xvf hbase-0.94.9.tar.gz
        #mv hbase-0.94.9/ /usr/local/hadoop/
        #vi ~/.bashrc
            export HBASE_HOME=/usr/local/hadoop/hbase-0.94.9
        #cd /usr/local/hadoop/hbase-0.94.9
        #vi conf/hbase-env.sh
            export JAVA_HOME=/usr/local/jvm/java-7-sun/
        #// HBASE_CLASSPATH below lets HBase find the Hadoop configuration
            export HBASE_CLASSPATH=/usr/local/hadoop/hadoop-1.0.4/conf
            export HBASE_PID_DIR=/usr/local/hadoop/hbase-0.94.9/pids
            export HBASE_MANAGES_ZK=true
        # Match the non-default SSH port configured for passwordless login above
        # http://blog.chenlb.com/2008/12/change-hadoop-ssh-port.html
            export HBASE_SSH_OPTS="-p myport"
        #vi conf/hbase-site.xml
            <property>
                <name>hbase.rootdir</name>
                <value>hdfs://master:9000/hbase</value>
                <description>
                    HBase's directory on HDFS; the hostname is that of the HDFS NameNode
                </description>
            </property>
            <property>
                <name>hbase.cluster.distributed</name>
                <value>true</value>
                <description>
                    Set to true for a fully distributed HBase cluster; pseudo-distributed mode also requires true
                </description>
            </property>
            <property>
                <name>hbase.zookeeper.property.dataDir</name>
                <value>/usr/local/hadoop/hbase-0.94.9/zookeeper</value>
                <description>
                    Directory where ZooKeeper stores its data
                </description>
            </property>
            <property>
                <name>hbase.zookeeper.quorum</name>
                <value>slave1,slave2</value>
                <description>
                    ZooKeeper quorum hosts; an odd number of hosts is recommended
                </description>
            </property>
            <property>
                <name>hbase.client.write.buffer</name>
                <value>209715200</value>
                <description>
                    Default size of the HTable client's write buffer
                </description>
            </property>
            <property>
                <name>hbase.client.keyvalue.maxsize</name>
                <value>209715200</value>
                <description>
                    Maximum size of a single KeyValue instance
                </description>
            </property>
        #vi conf/regionservers
            slave1

     2> Run
        #cd /usr/local/hadoop/hbase-0.94.9/bin
        #./start-hbase.sh

     3> Verify via the web page
        http://192.168.0.20:60010/master-status

     4> Processes after startup (smoke test below):
        * HQuorumPeer
        * HMaster
        * HRegionServer
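
        A quick smoke test via the HBase shell; 'status' should report the live region servers:
        #echo "status" | /usr/local/hadoop/hbase-0.94.9/bin/hbase shell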

     5> Pseudo-distributed configuration (the above is a fully distributed setup; for pseudo-distributed mode change only the following, everything else stays the same)
        #vi hbase-site.xml
            <property>
                <name>hbase.zookeeper.quorum</name>
                <value>localhost</value>
                <description>
                    ZooKeeper quorum hosts; an odd number of hosts is recommended
                </description>
            </property>
        #vi conf/regionservers
            master

        Without the configuration above, all kinds of strange problems appear, e.g. logs like:
            FATAL org.apache.hadoop.hbase.master.HMaster: Unhandled exception
            org.apache.hadoop.hbase.master.HMaster: Unhandled exception. Starting shutdown
 
4. Configuration files touched above
     System: /etc/hosts, /etc/ssh/sshd_config, /etc/security/limits.conf, /etc/sysconfig/network, ~/.bashrc
     Hadoop: conf/hadoop-env.sh, conf/core-site.xml, conf/hdfs-site.xml, conf/mapred-site.xml
     HBase:  conf/hbase-env.sh, conf/hbase-site.xml, conf/regionservers

5. Install Thrift and its dependencies
    #tar -xvf libevent-2.0.20-stable.tar.gz
    #cd libevent-2.0.20-stable
    #./configure --prefix=/usr/local/libevent
    #make
    #make install

    #tar -xvf Python-2.7.5.tar.bz2
    #cd Python-2.7.5
    #./configure --prefix=/usr/local/python
    #make
    #make install

    #tar -xvf boost_1_52_0.tar.gz
    #cd boost_1_52_0
    #./bootstrap.sh --prefix=/usr/local/boost --with-python=/usr/local/python
    #./b2
    #./b2 install

    ============================================
    Boost build problem:
    wrap_python.hpp:50:23: error: pyconfig.h: No such file or directory
    This is likely caused by a previous ./bootstrap.sh run without arguments; delete the directory and redo the configuration with the arguments, as sketched below.
    ============================================
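
    A sketch of that clean rebuild, assuming the tarball is still in the current directory:
    #rm -rf boost_1_52_0
    #tar -xvf boost_1_52_0.tar.gz
    #cd boost_1_52_0
    #./bootstrap.sh --prefix=/usr/local/boost --with-python=/usr/local/python
    #./b2
    #./b2 install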

    #tar -xvf thrift-0.9.0.tar.gz
    #cd thrift-0.9.0
    #export CPPFLAGS="-DHAVE_NETDB_H=1 -fpermissive"
    #./configure --with-boost=/usr/local/boost/ --with-qt4=false --with-java=/usr/local/jvm/java-7-sun/ --with-libevent=/usr/local/libevent/ --prefix=/usr/local/thrift
    #make
    #make install
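
    To verify the build, and to set the THRIFT_HOME referenced by the PATH in ~/.bashrc earlier:
    #export THRIFT_HOME=/usr/local/thrift
    #${THRIFT_HOME}/bin/thrift -version
        Thrift version 0.9.0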

6. Miscellaneous
     1> Start Hadoop's Thrift service (for HDFS access)
        a> First, edit start_thrift_server.sh
            #vi /usr/local/hadoop/hadoop-1.0.4/src/contrib/thriftfs/scripts/start_thrift_server.sh
             Change 1:
                Replace
                    for f in $TOP/build/*.jar ; do
                with:
                    for f in $TOP/*.jar ; do
             Change 2:
                Replace
                    for f in $TOP/build/contrib/thriftfs/*.jar ; do
                with:
                    for f in $TOP/src/contrib/thriftfs/*.jar ; do
        b> Start the Thrift server process (#thriftport# below stands for the port you choose)
            #/usr/local/hadoop/hadoop-1.0.4/src/contrib/thriftfs/scripts/start_thrift_server.sh #thriftport#
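
            To confirm it is listening (a quick check; #thriftport# is the port passed above):
            #netstat -ntlp | grep #thriftport#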
     2> Start HBase's Thrift service
        a> First make sure HDFS reports "Safe mode is OFF"
            #/usr/local/hadoop/hadoop-1.0.4/bin/hadoop dfsadmin -safemode leave
        b> Start the Thrift server process
            #/home/hadoop/hbase/bin/hbase thrift start
            or
            #./hbase-daemon.sh start thrift
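
            To confirm, the HBase Thrift server listens on port 9090 by default:
            #netstat -ntlp | grep 9090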
 
     This guide is also shared via Evernote; the configuration files above can be downloaded from the EverNote edition, 《清晰安裝Hadoop/Hbase/Thrift--EverNote版》.
    Note: once Hadoop/HBase/Thrift are configured and installed, you can develop HBase or Hadoop clients in a variety of programming languages. The development steps will follow in a separate post, 《CPP/PHP訪問hbase的代碼開發》 (developing C++/PHP access to HBase).