system:
centos7.5
hostname:
hadoop1
soft:
hadoop-2.9.2
apache-hive-2.3.4-bin
jdk-8u201-linux-x64
mysql5.7 (installation omitted)
Set a static IP address (omitted).
Add the hostname-to-IP mapping:
[root@hadoop1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.3.76 hadoop1
Disable the firewall:
[root@hadoop1 ~]# systemctl stop firewalld
[root@hadoop1 ~]# systemctl disable firewalld
Disable SELinux:
[root@hadoop1 ~]# egrep -v "^#|^$" /etc/selinux/config
SELINUX=disabled
Other kernel and resource limit settings:
[root@hadoop1 ~]# sysctl -w vm.max_map_count=262144
vm.max_map_count = 262144
[root@hadoop1 ~]# egrep -v "^#|^$" /etc/security/limits.conf * soft nofile 65536 * hard nofile 131072 * soft nproc 65536 * hard nproc 65536
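Note that sysctl -w only changes the value until the next reboot. As an optional extra step (a sketch, assuming the standard CentOS 7 file locations), the setting can be persisted and the new limits checked after logging in again:
[root@hadoop1 ~]# echo "vm.max_map_count=262144" >> /etc/sysctl.conf   # persist across reboots
[root@hadoop1 ~]# sysctl -p                                            # reload and confirm the value
[root@hadoop1 ~]# ulimit -n                                            # shows the nofile soft limit for the current shell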
Install Java:
[root@hadoop1 opt]# ls
jdk-8u201-linux-x64.rpm
[root@hadoop1 opt]# rpm -ih jdk-8u201-linux-x64.rpm
warning: jdk-8u201-linux-x64.rpm: Header V3 RSA/SHA256 Signature, key ID ec551f03: NOKEY
################################# [100%]
Updating / installing...
################################# [100%]
Unpacking JAR files...
tools.jar...
plugin.jar...
javaws.jar...
deploy.jar...
rt.jar...
jsse.jar...
charsets.jar...
localedata.jar...
[root@hadoop1 opt]# java -version
java version "1.8.0_201"
Java(TM) SE Runtime Environment (build 1.8.0_201-b09)
Java HotSpot(TM) 64-Bit Server VM (build 25.201-b09, mixed mode)
Create the hadoop account (password: 987654321):
[root@hadoop1 opt]# useradd hadoop
[root@hadoop1 opt]# passwd hadoop
Configure sudo privileges (omitted).
Reboot the system:
[root@hadoop1 ~]# reboot
The following steps are performed as the hadoop user:
Set up passwordless SSH login:
[hadoop@hadoop1 ~]$ ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
6e:d0:62:99:3c:02:10:cf:00:5a:71:3c:6f:82:67:94 hadoop@hadoop1
(randomart image omitted)
[hadoop@hadoop1 ~]$ ssh-copy-id -i .ssh/id_rsa.pub hadoop@hadoop1
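An optional quick check that the key was accepted; this should print the hostname without asking for a password:
[hadoop@hadoop1 ~]$ ssh hadoop1 hostname
hadoop1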
Hadoop setup:
[root@hadoop1 hadoop]# su - hadoop
Last login: Fri Apr 19 20:56:04 CST 2019 on pts/0
[hadoop@hadoop1 ~]$ ls
hadoop-2.9.2.tar.gz
[hadoop@hadoop1 ~]$ tar -zxf hadoop-2.9.2.tar.gz
[hadoop@hadoop1 ~]$ ls
hadoop-2.9.2 hadoop-2.9.2.tar.gz
Configure hadoop-env.sh
[hadoop@hadoop1 ~]$ vim hadoop-2.9.2/etc/hadoop/hadoop-env.sh
#export JAVA_HOME=${JAVA_HOME}
export JAVA_HOME=/usr/java/jdk1.8.0_201-amd64
Configure core-site.xml
vim hadoop-2.9.2/etc/hadoop/core-site.xml
<configuration>
<!-- File system URI used by Hadoop: the address of the HDFS NameNode -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.3.76:9000</value>
</property>
<!-- Directory where Hadoop stores temporary files generated at runtime -->
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/tmp</value>
</property>
</configuration>
[hadoop@hadoop1 ~]$ mkdir tmp
Configure hdfs-site.xml
[hadoop@hadoop1 ~]$ vim hadoop-2.9.2/etc/hadoop/hdfs-site.xml
<property>
<name>dfs.namenode.name.dir</name>
<value>/home/hadoop/dfs/namenode</value>
<final>true</final>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/home/hadoop/dfs/datanode</value>
<final>true</final>
</property>
<property>
<name>dfs.http.address</name>
<value>192.168.3.76:50070</value>
<description>The address and the base port where the dfs namenode web ui will listen on. If the port is 0 then the server will start on a free port</description>
</property>
<!-- Number of HDFS replicas -->
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
Create the directories:
[hadoop@hadoop1 ~]$ mkdir dfs/datanode -p
[hadoop@hadoop1 ~]$ mkdir dfs/namenode -p
Configure mapred-site.xml
[hadoop@hadoop1 ~]$ cp hadoop-2.9.2/etc/hadoop/mapred-site.xml.template hadoop-2.9.2/etc/hadoop/mapred-site.xml
[hadoop@hadoop1 ~]$ vim hadoop-2.9.2/etc/hadoop/mapred-site.xml
<!-- Run MapReduce on YARN -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapred.job.tracker</name>
<value>hdfs://192.168.3.76:9001</value>
</property>
<property>
<name>mapred.system.dir</name>
<value>file:/home/hadoop/mapred/system</value>
<final>true</final>
</property>
<property>
<name>mapred.local.dir</name>
<value>file:/home/hadoop/mapred/local</value>
<final>true</final>
</property>
Create the directories:
[hadoop@hadoop1 ~]$ mkdir mapred/local -p
[hadoop@hadoop1 ~]$ mkdir mapred/system -p
Configure yarn-site.xml
[hadoop@hadoop1 ~]$ vim hadoop-2.9.2/etc/hadoop/yarn-site.xml
<!-- Address of the YARN ResourceManager -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>192.168.3.76</value>
</property>
<!-- How reducers fetch data -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
Format HDFS:
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs namenode -format
If "successfully formatted" appears (the second line below), the format succeeded:
19/04/19 21:24:20 INFO namenode.FSImage: Allocated new BlockPoolId: BP-1608387477-192.168.3.76-1555680260442
19/04/19 21:24:20 INFO common.Storage: Storage directory /home/hadoop/tmp/dfs/name has been successfully formatted.
19/04/19 21:24:20 INFO namenode.FSImageFormatProtobuf: Saving image file /home/hadoop/tmp/dfs/name/current/fsimage.ckpt_0000000000000000000 using no compression
19/04/19 21:24:20 INFO namenode.FSImageFormatProtobuf: Image file /home/hadoop/tmp/dfs/name/current/fsimage.ckpt_0000000000000000000 of size 325 bytes saved in 0 seconds .
19/04/19 21:24:20 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
19/04/19 21:24:20 INFO namenode.NameNode: SHUTDOWN_MSG:
/****
SHUTDOWN_MSG: Shutting down NameNode at hadoop1/192.168.3.76
****/
Start and test HDFS:
[hadoop@hadoop1 ~]$ hadoop-2.9.2/sbin/start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [hadoop1]
hadoop1: starting namenode, logging to /home/hadoop/hadoop-2.9.2/logs/hadoop-hadoop-namenode-hadoop1.out
localhost: starting datanode, logging to /home/hadoop/hadoop-2.9.2/logs/hadoop-hadoop-datanode-hadoop1.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /home/hadoop/hadoop-2.9.2/logs/hadoop-hadoop-secondarynamenode-hadoop1.out
starting yarn daemons
starting resourcemanager, logging to /home/hadoop/hadoop-2.9.2/logs/yarn-hadoop-resourcemanager-hadoop1.out
localhost: starting nodemanager, logging to /home/hadoop/hadoop-2.9.2/logs/yarn-hadoop-nodemanager-hadoop1.out
Check the running processes:
[hadoop@hadoop1 ~]$ jps
4705 SecondaryNameNode
4865 ResourceManager
4386 NameNode
5157 NodeManager
5318 Jps
4488 DataNode
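As a further optional check (not part of the original steps), the HDFS report and the NameNode web UI, on port 50070 as set by dfs.http.address above, can be queried:
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfsadmin -report                                 # live datanodes and capacity
[hadoop@hadoop1 ~]$ curl -s -o /dev/null -w "%{http_code}\n" http://192.168.3.76:50070/   # expect 200 if the web UI is up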
Example test:
[hadoop@hadoop1 ~]$ ll
total 357872
drwxrwxr-x 4 hadoop hadoop 36 Apr 19 22:03 dfs
drwxr-xr-x 10 hadoop hadoop 150 Apr 19 21:27 hadoop-2.9.2
-rw-r--r-- 1 hadoop hadoop 366447449 Apr 19 20:56 hadoop-2.9.2.tar.gz
drwxrwxr-x 4 hadoop hadoop 31 Apr 19 21:52 mapred
-rw-r--r-- 1 hadoop hadoop 11323 Apr 19 22:11 qqqq.xlsx
drwxrwxr-x 4 hadoop hadoop 35 Apr 19 22:06 tmp
File paths here must start with '/'; HDFS only understands absolute paths, since there is no 'cd'-style command to change directories.
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -mkdir /input
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -put qqqq.xlsx /input
You can also list what is in the newly created input directory:
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -ls /
Found 1 items
drwxr-xr-x - hadoop supergroup 0 2019-04-19 22:14 /input
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -ls /input
Found 1 items
-rw-r--r-- 1 hadoop supergroup 11323 2019-04-19 22:14 /input/qqqq.xlsx
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hadoop jar hadoop-2.9.2/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.9.2.jar grep /input /output 'dfs[a-z.]+'
19/04/19 22:23:05 INFO mapreduce.Job: Job job_1555682785585_0001 completed successfully
19/04/19 22:23:23 INFO mapreduce.Job: Job job_1555682785585_0002 completed successfully
Result:
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -ls /
Found 4 items
drwxr-xr-x - hadoop supergroup 0 2019-04-19 22:14 /input
drwxr-xr-x - hadoop supergroup 0 2019-04-19 22:23 /output
drwx------ - hadoop supergroup 0 2019-04-19 22:22 /tmp
drwxr-xr-x - hadoop supergroup 0 2019-04-19 22:22 /user
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -ls /output
Found 2 items
-rw-r--r-- 1 hadoop supergroup 0 2019-04-19 22:23 /output/_SUCCESS
-rw-r--r-- 1 hadoop supergroup 0 2019-04-19 22:23 /output/part-r-00000
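The actual matches can be printed or copied locally if you want to inspect them (optional; the local file name below is arbitrary):
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -cat /output/part-r-00000                      # print the grep results
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -get /output/part-r-00000 ./grep-result.txt    # or fetch them to the local filesystem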
Second test:
[hadoop@hadoop1 ~]$ ll
total 357876
drwxrwxr-x 4 hadoop hadoop 36 Apr 19 22:03 dfs
drwxr-xr-x 10 hadoop hadoop 150 Apr 19 21:27 hadoop-2.9.2
-rw-r--r-- 1 hadoop hadoop 366447449 Apr 19 20:56 hadoop-2.9.2.tar.gz
drwxrwxr-x 4 hadoop hadoop 31 Apr 19 21:52 mapred
-rw-r--r-- 1 hadoop hadoop 11323 Apr 19 22:11 qqqq.xlsx
drwxrwxr-x 4 hadoop hadoop 35 Apr 19 22:06 tmp
-rw-rw-r-- 1 hadoop hadoop 213 Apr 19 22:30 www.text
[hadoop@hadoop1 ~]$ cat www.text
http://blog.csdn.net/u012342408/article/details/50520696
http://blog.csdn.net/hitwengqi/article/details/8008203
http://blog.csdn.net/zl007700/article/details/50533675
http://www.javashuo.com/article/p-ayvzfrlr-hk.html
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -put www.text /input
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -ls /input
Found 2 items
-rw-r--r-- 1 hadoop supergroup 11323 2019-04-19 22:14 /input/qqqq.xlsx
-rw-r--r-- 1 hadoop supergroup 213 2019-04-19 22:31 /input/www.text
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hadoop jar hadoop-2.9.2/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.9.2.jar grep /input/www.text /www 'dfs[a-z.]+'
19/04/19 22:33:33 INFO mapreduce.Job: Job job_1555682785585_0004 completed successfully
19/04/19 22:33:51 INFO mapreduce.Job: Job job_1555682785585_0005 completed successfully
Result:
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -ls /www
Found 2 items
-rw-r--r-- 1 hadoop supergroup 0 2019-04-19 22:33 /www/_SUCCESS
-rw-r--r-- 1 hadoop supergroup 0 2019-04-19 22:33 /www/part-r-00000
Hive installation. Check the downloaded packages:
[root@hadoop1 hive]# su - hadoop
Last login: Sat Apr 20 15:46:10 CST 2019 on pts/1
[hadoop@hadoop1 ~]$ ll
total 589016
-rw-r--r-- 1 hadoop hadoop 232234292 Apr 20 15:53 apache-hive-2.3.4-bin.tar.gz
drwxrwxr-x 4 hadoop hadoop 36 Apr 19 22:03 dfs
drwxr-xr-x 10 hadoop hadoop 150 Apr 19 21:27 hadoop-2.9.2
-rw-r--r-- 1 hadoop hadoop 366447449 Apr 19 20:56 hadoop-2.9.2.tar.gz
drwxrwxr-x 4 hadoop hadoop 31 Apr 19 21:52 mapred
-rw-r--r-- 1 hadoop hadoop 4452049 Apr 20 15:53 mysql-connector-java-5.1.47.tar.gz
-rw-r--r-- 1 hadoop hadoop 11323 Apr 19 22:11 qqqq.xlsx
drwxrwxr-x 4 hadoop hadoop 35 Apr 19 22:06 tmp
-rw-rw-r-- 1 hadoop hadoop 213 Apr 19 22:30 www.text
[hadoop@hadoop1 ~]$ tar -zxf apache-hive-2.3.4-bin.tar.gz
[hadoop@hadoop1 ~]$ tar -zxf mysql-connector-java-5.1.47.tar.gz
Set the Hive environment variables:
[hadoop@hadoop1 ~]$ egrep -v "^#|^$" .bashrc
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
export HIVE_HOME=/home/hadoop/apache-hive-2.3.4-bin
export PATH=$PATH:$HIVE_HOME/bin
[hadoop@hadoop1 ~]$ source .bashrc
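A quick optional sanity check that the PATH change took effect:
[hadoop@hadoop1 ~]$ which hive
/home/hadoop/apache-hive-2.3.4-bin/bin/hive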
Create the configuration files:
[hadoop@hadoop1 conf]$ pwd
/home/hadoop/apache-hive-2.3.4-bin/conf
[hadoop@hadoop1 conf]$ cp hive-env.sh.template hive-env.sh
[hadoop@hadoop1 conf]$ cp hive-default.xml.template hive-site.xml
[hadoop@hadoop1 conf]$ cp hive-log4j2.properties.template hive-log4j2.properties
[hadoop@hadoop1 conf]$ cp hive-exec-log4j2.properties.template hive-exec-log4j2.properties
Configure hive-site.xml, modifying the following entries:
<property>
<name>hive.exec.scratchdir</name>
<value>/home/hadoop/tmp/hive-${user.name}</value>
<description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, with ${hive.scratch.dir.permission}.</description>
</property>
<property>
<name>hive.exec.local.scratchdir</name>
<value>/home/hadoop/tmp/${user.name}</value>
<description>Local scratch space for Hive jobs</description>
</property>
<property>
<name>hive.downloaded.resources.dir</name>
<value>/home/hadoop/tmp/hive/resources</value>
<description>Temporary local directory for added resources in the remote file system.</description>
</property>
<property>
<name>hive.server2.logging.operation.log.location</name>
<value>/home/hadoop/tmp/${user.name}/operation_logs</value>
<description>Top level directory where operation logs are stored if logging functionality is enabled</description>
</property>
<property>
<name>hive.querylog.location</name>
<value>/home/hadoop/tmp/${user.name}</value>
<description>Location of Hive run time structured log file</description>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://192.168.3.76:3306/hive_metadata?createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8&amp;useSSL=false</value>
<description>
JDBC connect string for a JDBC metastore.
To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.
For example, jdbc:postgresql://myhost/db?ssl=true for postgres database.
</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
<description>Username to use against metastore database</description>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>Wd#GDrf142D</value>
<description>password to use against metastore database</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>datanucleus.schema.autoCreateAll</name>
<value>true</value>
<description>Auto creates necessary schema on a startup if one doesn't exist. Set this to false, after creating it once.To enable auto create also set hive.metastore.schema.verification=false. Auto creation is not recommended for production use cases, run schematool command instead.</description>
</property>
<property>
<name>hive.metastore.schema.verification</name>
<value>false</value>
<description>
Enforce metastore schema version consistency.
True: Verify that version information stored in is compatible with one from Hive jars. Also disable automatic
schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures
proper metastore schema migration. (Default)
False: Warn if the version information stored in metastore doesn't match with one from in Hive jars.
</description>
</property>
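Before running schematool later on, it can be worth confirming that MySQL is reachable with the connection settings above (host 192.168.3.76, user root). A minimal check from the shell:
[hadoop@hadoop1 ~]$ mysql -h 192.168.3.76 -u root -p -e "SELECT VERSION();"   # prompts for the password set in javax.jdo.option.ConnectionPassword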
Configure hive-env.sh:
# HADOOP_HOME=${bin}/../../hadoop
HADOOP_HOME=/home/hadoop/hadoop-2.9.2
# Hive Configuration Directory can be controlled by:
# export HIVE_CONF_DIR=
export HIVE_CONF_DIR=/home/hadoop/apache-hive-2.3.4-bin/conf
Load the MySQL JDBC driver:
[hadoop@hadoop1 ~]$ ll
total 231144
drwxrwxr-x 10 hive hive 4096 Apr 20 14:21 apache-hive-2.3.4-bin
-rw-r--r-- 1 hive hive 232234292 Apr 20 14:16 apache-hive-2.3.4-bin.tar.gz
-rw-r--r-- 1 hive hive 4452049 Apr 20 15:23 mysql-connector-java-5.1.47.tar.gz
drwxrwxr-x 3 hive hive 17 Apr 20 15:07 tmp
[hive@hadoop1 ~]$ tar -zxf mysql-connector-java-5.1.47.tar.gz
[hadoop@hadoop1 ~]$ cp mysql-connector-java-5.1.47/mysql-connector-java-5.1.47.jar apache-hive-2.3.4-bin/lib/
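A quick optional check that the driver jar is now in Hive's lib directory:
[hadoop@hadoop1 ~]$ ls apache-hive-2.3.4-bin/lib/ | grep mysql-connector
mysql-connector-java-5.1.47.jar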
Create HDFS directories for Hive
在 Hive 中建立表以前須要使用如下 HDFS 命令建立 /tmp 和 /user/hive/warehouse (hive-site.xml 配置文件中屬性項 hive.metastore.warehouse.dir 的默認值) 目錄並給它們賦寫權限
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -mkdir tmp
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -mkdir -p /user/hive/warehouse
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -chmod g+w /user/hive/warehouse
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -chmod g+w tmp/
Create a MySQL user hive with password hive (for use when you prefer not to use the MySQL root account):
$ mysql -u root -p    # the root password was set to 123456
mysql> CREATE USER 'hive'@'localhost' IDENTIFIED BY "hive";
mysql> grant all privileges on *.* to 'hive'@'localhost' identified by 'hive';
Allow remote access for the root account:
mysql> use mysql;
mysql> update user set host = '%' where user = 'root';
mysql> flush privileges;
mysql> select host, user from user;
+-----------+---------------+
| host | user |
+-----------+---------------+
| % | root |
| localhost | mysql.session |
| localhost | mysql.sys |
+-----------+---------------+
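Optionally, confirm that the new hive account can log in (assuming the grant above succeeded):
$ mysql -u hive -phive -e "SHOW DATABASES;"   # should list at least information_schema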
Run Hive
HDFS must be running before the hive command is executed; it can be started with start-dfs.sh.
Starting with Hive 2.1, the schematool command must be run first to initialize the metastore schema.
[hadoop@hadoop1 ~]$ apache-hive-2.3.4-bin/bin/schematool -dbType mysql -initSchema
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/home/hadoop/apache-hive-2.3.4-bin/lib/log4j-slf4j-impl-2.6.2.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/home/hadoop/hadoop-2.9.2/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
Metastore connection URL: jdbc:mysql://192.168.3.76:3306/hive_metadata?&createDatabaseIfNotExist=true&characterEncoding=UTF-8&useSSL=false
Metastore Connection Driver : com.mysql.jdbc.Driver
Metastore connection User: root
Starting metastore schema initialization to 2.3.0
Initialization script hive-schema-2.3.0.mysql.sql
Initialization script completed
schemaTool completed
Start Hive and test:
[hadoop@hadoop1 ~]$ apache-hive-2.3.4-bin/bin/hive
which: no hbase in (/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/hadoop/.local/bin:/home/hadoop/bin:/home/hadoop/apache-hive-2.3.4-bin/bin)
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/home/hadoop/apache-hive-2.3.4-bin/lib/log4j-slf4j-impl-2.6.2.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/home/hadoop/hadoop-2.9.2/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
Logging initialized using configuration in file:/home/hadoop/apache-hive-2.3.4-bin/conf/hive-log4j2.properties Async: true
Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
hive> show tables;
OK
Time taken: 3.236 seconds
hive> show databases;
OK
default
Time taken: 0.055 seconds, Fetched: 1 row(s)
hive>
Simple HiveQL test:
Create a table:
hive> CREATE TABLE IF NOT EXISTS test (id INT,name STRING)ROW FORMAT DELIMITED FIELDS TERMINATED BY " " LINES TERMINATED BY "\n";
OK
Time taken: 0.524 seconds
hive> insert into test values(1,'張三');
WARNING: Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
Query ID = hadoop_20190420163725_0be10015-72ae-4642-b2c4-311aaeaacaa8
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1555738609578_0001, Tracking URL = http://hadoop1:8088/proxy/application_1555738609578_0001/
Kill Command = /home/hadoop/hadoop-2.9.2/bin/hadoop job -kill job_1555738609578_0001
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2019-04-20 16:37:38,182 Stage-1 map = 0%, reduce = 0%
2019-04-20 16:37:43,443 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.28 sec
MapReduce Total cumulative CPU time: 2 seconds 280 msec
Ended Job = job_1555738609578_0001
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to directory hdfs://192.168.3.76:9000/user/hive/warehouse/test/.hive-staging_hive_2019-04-20_16-37-25_672_7073846121967206245-1/-ext-10000
Loading data to table default.test
MapReduce Jobs Launched:
Stage-Stage-1: Map: 1   Cumulative CPU: 2.28 sec   HDFS Read: 4249 HDFS Write: 77 SUCCESS
Total MapReduce CPU Time Spent: 2 seconds 280 msec
OK
Time taken: 19.356 seconds
hive> select * from test;
OK
1 張三
Time taken: 0.352 seconds, Fetched: 1 row(s)
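As a final optional check from the shell, the inserted row is stored as a plain text file under the warehouse directory created earlier:
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -ls /user/hive/warehouse/test     # table directory created by Hive
[hadoop@hadoop1 ~]$ hadoop-2.9.2/bin/hdfs dfs -cat /user/hive/warehouse/test/*  # should print: 1 張三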