Compiling Hadoop 2.5.0 on 64-bit CentOS 7 and Installing a Distributed Cluster

1. System Environment

CentOS 7.0 x64

192.168.1.7 master
192.168.1.8 slave1
192.168.1.9 slave2
192.168.1.10 slave3

2. Pre-installation Preparation

2.1 Disable the firewall

# systemctl status firewalld.service  --check the firewall status
# systemctl stop firewalld.service    --stop the firewall
# systemctl disable firewalld.service --disable the firewall permanently

2.2 Check whether SSH is installed; install it if it is missing

# systemctl status sshd.service  --check the sshd status
# yum install openssh-server openssh-clients

2.3 Install vim

# yum -y install vim

2.4 Configure a static IP address

# vim /etc/sysconfig/network-scripts/ifcfg-eno16777736

BOOTPROTO="static"
ONBOOT="yes"
IPADDR0="192.168.1.7"
PREFIX0="24"
GATEWAY0="192.168.1.1"
DNS1="61.147.37.1"
DNS2="101.226.4.6"
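
After editing the ifcfg file, the new address only takes effect once the network service is restarted; assuming the legacy network service is in use (the usual case when managing ifcfg files by hand), that is:

# systemctl restart network.service  --apply the new static IP configuration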

2.5 Set the hostname

# vim /etc/sysconfig/network

HOSTNAME=master

# vim /etc/hosts

192.168.1.7   master
192.168.1.8   slave1
192.168.1.9   slave2
192.168.1.10  slave3
# hostnamectl set-hostname master    (the old way of changing the hostname via /etc/sysconfig/network no longer works on CentOS 7)
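
To confirm the change took effect, hostnamectl can be queried (do the same on each slave with its own name):

# hostnamectl status   --the "Static hostname" line should now show master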

2.6 Create the hadoop user

# useradd hadoop --create a user named hadoop
# passwd hadoop  --set a password for the hadoop user

2.7 Configure passwordless SSH login

-----------The following steps are performed on master

# su hadoop --switch to the hadoop user
$ cd ~      --go to the user's home directory
$ ssh-keygen -t rsa -P '' --generate a key pair: /home/hadoop/.ssh/id_rsa and /home/hadoop/.ssh/id_rsa.pub
$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys        --append id_rsa.pub to the authorized keys
$ chmod 600 ~/.ssh/authorized_keys        --tighten the permissions
$ su  --switch back to root
# vim /etc/ssh/sshd_config   --edit the sshd configuration
 RSAAuthentication yes #enable RSA authentication
 PubkeyAuthentication yes #enable public/private key authentication
 AuthorizedKeysFile .ssh/authorized_keys #path of the authorized keys file
# su hadoop --switch back to the hadoop user
$ scp ~/.ssh/id_rsa.pub hadoop@192.168.1.8:~/                --copy the public key to every slave machine

----------The following steps are performed on slave1 (repeat on the other slaves)

 # su hadoop --switch to the hadoop user
 $ mkdir ~/.ssh
 $ chmod 700 ~/.ssh
 $ cat ~/id_rsa.pub >> ~/.ssh/authorized_keys                --append it to the authorized_keys file
 $ chmod 600 ~/.ssh/authorized_keys                          --tighten the permissions
 $ su --switch back to root
 # vim /etc/ssh/sshd_config   --edit the sshd configuration
  RSAAuthentication yes #enable RSA authentication
  PubkeyAuthentication yes #enable public/private key authentication
  AuthorizedKeysFile .ssh/authorized_keys #path of the authorized keys file
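
After sshd_config has been changed, sshd has to be restarted on each node, and it is worth verifying the passwordless login from master before continuing; a minimal check, using the hostnames configured above:

# systemctl restart sshd.service   --pick up the new sshd settings
# su hadoop
$ ssh slave1                       --should log in without asking for a password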

3. Install the Required Software

3.1 Install the JDK

# rpm -ivh jdk-7u67-linux-x64.rpm

Preparing...                ########################################### [100%]
   1:jdk                    ########################################### [100%]
Unpacking JAR files...
        rt.jar...
        jsse.jar...
        charsets.jar...
        tools.jar...
        localedata.jar...

# vim /etc/profile
 export JAVA_HOME=/usr/java/jdk1.7.0_67 
 export PATH=$PATH:$JAVA_HOME/bin
# source /etc/profile --apply the changes
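
A quick sanity check that the JDK is installed and on the PATH:

# java -version   --should report java version "1.7.0_67"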

3.2 Install the other required packages

# yum install maven svn ncurses-devel gcc* lzo-devel zlib-devel autoconf automake libtool cmake openssl-devel

3.3 Install Ant

# tar zxvf apache-ant-1.9.4-bin.tar.gz -C /usr/local   --extract under /usr/local so ANT_HOME below matches
# vim /etc/profile
 export ANT_HOME=/usr/local/apache-ant-1.9.4
 export PATH=$PATH:$ANT_HOME/bin
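
After re-sourcing /etc/profile, the Ant installation can be verified with:

# source /etc/profile
# ant -version   --should print the Apache Ant 1.9.4 banner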

3.4 Install FindBugs

# tar zxvf findbugs-3.0.0.tar.gz -C /usr/local   --extract under /usr/local so FINDBUGS_HOME below matches
# vim /etc/profile
 export FINDBUGS_HOME=/usr/local/findbugs-3.0.0
 export PATH=$PATH:$FINDBUGS_HOME/bin

3.5 Install protobuf

# tar zxvf protobuf-2.5.0.tar.gz   --it must be version 2.5.0, otherwise the Hadoop build fails
# cd protobuf-2.5.0
# ./configure --prefix=/usr/local
# make && make install
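
Hadoop's build checks the protoc version explicitly, so it is worth confirming which protoc is now on the PATH:

# protoc --version   --should print "libprotoc 2.5.0"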

4. Build the Hadoop Source

# tar zxvf hadoop-2.5.0-src.tar.gz
# cd hadoop-2.5.0-src
# mvn package -Pdist,native,docs -DskipTests -Dtar
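
If the build aborts early complaining about missing tools, the environment variables from /etc/profile are usually not loaded in the current shell; a quick check of the toolchain before retrying:

# source /etc/profile
# mvn -version
# ant -version
# protoc --version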

4.1 Configure the Maven central repository (switch to the oschina mirror for faster downloads)

# vim /usr/share/maven/conf/settings.xml

<mirrors>
    <mirror>
        <id>nexus-osc</id>
        <mirrorOf>*</mirrorOf>
        <name>Nexus osc</name>
        <url>http://maven.oschina.net/content/groups/public/</url>
    </mirror>
</mirrors>

<profiles>
    <profile>
    <id>jdk17</id>
    <activation>
        <activeByDefault>true</activeByDefault>
        <jdk>1.7</jdk>
    </activation>
    <properties>
        <maven.compiler.source>1.7</maven.compiler.source>
        <maven.compiler.target>1.7</maven.compiler.target>
        <maven.compiler.compilerVersion>1.7</maven.compiler.compilerVersion>
    </properties>    
        <repositories>
           <repository>
                <id>nexus</id>
                <name>local private nexus</name>
                <url>http://maven.oschina.net/content/groups/public/</url>
                <releases>
                    <enabled>true</enabled>
                </releases>
                <snapshots>
                    <enabled>false</enabled>
                </snapshots>
            </repository>
         </repositories>
        <pluginRepositories>
            <pluginRepository>
                <id>nexus</id>
                <name>local private nexus</name>
                <url>http://maven.oschina.net/content/groups/public/</url>
                <releases>
                    <enabled>true</enabled>
                </releases>
                <snapshots>
                    <enabled>false</enabled>
                </snapshots>
            </pluginRepository>
         </pluginRepositories>
    </profile>
</profiles>

4.2 After the build finishes, the distribution is in /usr/hadoop-2.5.0-src/hadoop-dist/target/hadoop-2.5.0

# ./bin/hadoop version
Hadoop 2.5.0
Subversion Unknown -r Unknown
Compiled by root on 2014-09-12T00:47Z
Compiled with protoc 2.5.0
From source with checksum 423dcd5a752eddd8e45ead6fd5ff9a24
This command was run using /usr/hadoop-2.5.0-src/hadoop-dist/target/hadoop-2.5.0/share/hadoop/common/hadoop-common-2.5.0.jar

# file lib//native/*
lib//native/libhadoop.a:        current ar archive
lib//native/libhadooppipes.a:   current ar archive
lib//native/libhadoop.so:       symbolic link to `libhadoop.so.1.0.0'
lib//native/libhadoop.so.1.0.0: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, BuildID[sha1]=0x972b31264a1ce87a12cfbcc331c8355e32d0e774, not stripped
lib//native/libhadooputils.a:   current ar archive
lib//native/libhdfs.a:          current ar archive
lib//native/libhdfs.so:         symbolic link to `libhdfs.so.0.0.0'
lib//native/libhdfs.so.0.0.0:   ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, BuildID[sha1]=0x200ccf97f44d838239db3347ad5ade435b472cfa, not stripped

5. Configure Hadoop

5.1 Basic setup

# cp -r /usr/hadoop-2.5.0-src/hadoop-dist/target/hadoop-2.5.0 /opt/hadoop-2.5.0
# chown -R hadoop:hadoop /opt/hadoop-2.5.0
# vi /etc/profile
 export HADOOP_HOME=/opt/hadoop-2.5.0
 export PATH=$PATH:$HADOOP_HOME/bin
# su hadoop
$ cd /opt/hadoop-2.5.0
$ mkdir -p dfs/name
$ mkdir -p dfs/data
$ mkdir -p tmp
$ cd etc/hadoop

5.2 Configure the list of slave nodes

$ vim slaves
slave1
slave2
slave3

5.3 Edit hadoop-env.sh and yarn-env.sh

$ vim hadoop-env.sh   (make the same change in yarn-env.sh)
export JAVA_HOME=/usr/java/jdk1.7.0_67

5.4 Edit core-site.xml

<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/opt/hadoop-2.5.0/tmp</value>
</property>
<property>
<name>hadoop.proxyuser.hadoop.hosts</name>
<value></value>
</property>
<property>
<name>hadoop.proxyuser.hadoop.groups</name>
<value></value>
</property>
</configuration>

5.5 Edit hdfs-site.xml

<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>/opt/hadoop-2.5.0/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/opt/hadoop-2.5.0/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>master:9001</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
</configuration>

5.6 Edit mapred-site.xml

# cp mapred-site.xml.template mapred-site.xml

<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
</configuration>
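
The two jobhistory addresses above only take effect once the history server is running; it is not started by start-dfs.sh or start-yarn.sh, so it is normally launched separately from the Hadoop directory:

$ ./sbin/mr-jobhistory-daemon.sh start historyserver   --serves the job history web UI on master:19888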

5.7 Edit yarn-site.xml

<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>master:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>master:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>master:8088</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>768</value>
</property>
</configuration>
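
Since this is a distributed install, every slave needs the same JDK, the same /etc/profile entries, and an identical copy of the configured Hadoop directory before the NameNode is formatted. A minimal sketch, assuming the same /opt path on every node (if /opt is not writable by the hadoop user, copy as root and chown afterwards):

$ scp -r /opt/hadoop-2.5.0 hadoop@slave1:/opt/
$ scp -r /opt/hadoop-2.5.0 hadoop@slave2:/opt/
$ scp -r /opt/hadoop-2.5.0 hadoop@slave3:/opt/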

5.8 Format the NameNode

$ ./bin/hdfs namenode -format

5.9 Start HDFS and YARN

$ ./sbin/start-dfs.sh
$ ./sbin/start-yarn.sh
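
If everything came up correctly, jps should show the expected daemons on each node (exact output will vary):

$ jps    --on master: NameNode, SecondaryNameNode, ResourceManager
$ jps    --on each slave: DataNode, NodeManager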

5.10 Check that everything started

http://192.168.1.7:8088   --YARN ResourceManager web UI
http://192.168.1.7:50070  --HDFS NameNode web UI


Reference: http://www.it165.net/admin/html/201403/2453.html

Reference: Hadoop 2.2.0 64-bit source compilation and distributed installation (PDF)
