# Create the person info table: person(name STRING, age INT)
hive> create table person(name STRING, age INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\' STORED AS TEXTFILE;
OK
Time taken: 0.541 seconds
# Create the ticket price table: ticket(age INT, price FLOAT)
hive> create table ticket(age INT, price FLOAT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\' STORED AS TEXTFILE;
OK
Time taken: 0.154 seconds
# Create the local data files
-rw-rw-r-- 1 hadoop hadoop 40 Feb 6 13:28 person.txt
-rw-rw-r-- 1 hadoop hadoop 45 Feb 6 13:28 ticket.txt
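# For reference, the two tab-separated files look roughly like this (contents reconstructed from the query results further down):
[hadoop@localhost hfxdoc]$ cat person.txt
huang     26
lili      25
dongdong  13
wangxiao  5
[hadoop@localhost hfxdoc]$ cat ticket.txt
1   10
2   10
5   10
13  20
14  20
25  30
26  30
31  40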
# Load the local data files into the Hive warehouse
hive> LOAD DATA LOCAL INPATH '/home/hadoop/hfxdoc/person.txt' OVERWRITE INTO TABLE person;
Copying data from file:/home/hadoop/hfxdoc/person.txt
Copying file: file:/home/hadoop/hfxdoc/person.txt
Loading data to table default.person
Deleted hdfs://10.15.107.155:8000/user/hive/warehouse/person
OK
Time taken: 0.419 seconds
hive> LOAD DATA LOCAL INPATH '/home/hadoop/hfxdoc/ticket.txt' OVERWRITE INTO TABLE ticket;
Copying data from file:/home/hadoop/hfxdoc/ticket.txt
Copying file: file:/home/hadoop/hfxdoc/ticket.txt
Loading data to table default.ticket
Deleted hdfs://10.15.107.155:8000/user/hive/warehouse/ticket
OK
Time taken: 0.25 seconds
# The LOAD command moves the data files into the configured warehouse path: /user/hive/warehouse
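# You can verify this from inside the hive shell (the warehouse path shown is the default; yours may differ):
hive> dfs -ls /user/hive/warehouse/person;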
hive> show tables;
hive> describe person;
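# Expected output, for reference (same format as the describe shown later for the complex table):
OK
name    string
age     int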
hive> select * from person;
OK
huang 26
lili 25
dongdong 13
wangxiao 5
Time taken: 0.092 seconds
hive>
# Note: a plain select * is not compiled into a MapReduce job, so it returns quickly.
# A slightly more complex join query
hive> select * from person join ticket on person.age = ticket.age;
MapReduce Total cumulative CPU time: 5 seconds 510 msec
Ended Job = job_201301211420_0011
MapReduce Jobs Launched:
Job 0: Map: 2 Reduce: 1 Cumulative CPU: 5.51 sec HDFS Read: 519 HDFS Write: 71 SUCCESS
Total MapReduce CPU Time Spent: 5 seconds 510 msec
OK
wangxiao 5 5 10.0
dongdong 13 13 20.0
lili 25 25 30.0
huang 26 26 30.0
Time taken: 32.465 seconds
# Here the query was compiled into a MapReduce job and executed on Hadoop.
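# To check whether a given query will compile to MapReduce, EXPLAIN prints the execution plan (illustrative, not part of the original session):
hive> explain select * from person join ticket on person.age = ticket.age;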
# Using an external table
# First, put the local files into an HDFS path
[hadoop@localhost hfxdoc]$ hadoop fs -mkdir /tmp/ticket
[hadoop@localhost hfxdoc]$ hadoop fs -put person.txt /tmp/ticket
[hadoop@localhost hfxdoc]$ hadoop fs -put ticket.txt /tmp/ticket
[hadoop@localhost hfxdoc]$ hadoop fs -ls /tmp/ticket
Found 2 items
-rw-r--r-- 1 hadoop supergroup 40 2013-02-06 13:45 /tmp/ticket/person.txt
-rw-r--r-- 1 hadoop supergroup 45 2013-02-06 13:45 /tmp/ticket/ticket.txt
hive> create external table person_ext(name STRING, age INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\' STORED AS TEXTFILE LOCATION '/tmp/ticket';
# LOCATION can only point to a directory, and this directory currently holds the files of two different tables. Is it OK to create just one table over it?
# No! Every file under a table's LOCATION should be a data file belonging to that table.
# If files from other tables are present, the create itself won't fail, because Hive by default implicitly converts the strings in a text file into any other data type. For example, if another file has three columns
# per row, the third column simply won't be parsed into the person table; and if a row has only one column, the second column is padded with NULL. So we should adjust the HDFS file layout.
hive> select * from person_ext;
OK
huang 26
lili 25
dongdong 13
wangxiao 5
1 10
2 10
5 10
13 20
14 20
25 30
26 30
31 40
Time taken: 0.088 seconds
hive> drop table person_ext;
#Drop外表的操做不會刪除元信息覺得的數據,因此hdfs上仍是存在數據文件web
# A table with a complex type: columns are separated by '\t', array elements by ','
# The data file contents are as follows:
1 huangfengxiao beijing,shanghai,tianjin,hangzhou
2 linan changchu,chengdu,wuhan
hive> create table complex(name string,work_locations array<string>)
> ROW FORMAT DELIMITED
> FIELDS TERMINATED BY '\t'
> COLLECTION ITEMS TERMINATED BY ',';
hive> describe complex;
OK
name string
work_locations array<string>
hive> LOAD DATA LOCAL INPATH '/home/hadoop/hfxdoc/complex.txt' OVERWRITE INTO TABLE complex;
hive> select * from complex;
OK
huangfengxiao ["beijing","shanghai","tianjin","hangzhou"]
linan ["changchu","chengdu","wuhan"]
Time taken: 0.125 seconds
hive> select name, work_locations[0] from complex;
MapReduce Total cumulative CPU time: 790 msec
Ended Job = job_201301211420_0012
MapReduce Jobs Launched:
Job 0: Map: 1 Cumulative CPU: 0.79 sec HDFS Read: 296 HDFS Write: 37 SUCCESS
Total MapReduce CPU Time Spent: 790 msec
OK
huangfengxiao beijing
linan changchu
Time taken: 20.703 seconds
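# To flatten the array into one row per element, LATERAL VIEW with explode() can be used (illustrative, not from the original session):
hive> select name, loc from complex LATERAL VIEW explode(work_locations) w AS loc;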
# How do we partition?
Table class(teacher string, student string, age int)
Mis li huangfengxiao 20
Mis li lijie 21
Mis li dongdong 21
Mis li liqiang 21
Mis li hemeng 21
Mr xu dingding 19
Mr xu wangqiang 19
Mr xu lidong 19
Mr xu hexing 19
If we partition this class roster by teacher:
create table classmem(student string, age int) partitioned by(teacher string);
Partition files:
classmem_Misli.txt
huangfengxiao 20
lijie 21
dongdong 21
liqiang 21
hemeng 21
classmem_MrXu.txt
dingding 19
wangqiang 19
lidong 19
hexing 19
LOAD DATA LOCAL INPATH '/home/hadoop/hfxdoc/classmem_Misli.txt' INTO TABLE classmem partition (teacher = 'Mis.li');
LOAD DATA LOCAL INPATH '/home/hadoop/hfxdoc/classmem_MrXu.txt' INTO TABLE classmem partition (teacher = 'Mr.Xu');
# The partition column is appended as the last column by default.
hive> select * from classmem where teacher = 'Mr.Xu';
OK
dingding 19 NULL Mr.Xu
wangqiang 19 NULL Mr.Xu
lidong 19 NULL Mr.Xu
hexing 19 NULL Mr.Xu
Time taken: 0.196 seconds
# Filtering on the partition column reads directly from that partition, which is fast; if the where condition is not on a partition column, the SQL is compiled into a MapReduce job and the latency is much higher.
# So we build partitions on the fields most commonly used in filter queries.
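# To confirm the layout, list the table's partitions (illustrative; the values follow the partition specs used above):
hive> show partitions classmem;
OK
teacher=Mis.li
teacher=Mr.Xu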
# What about buckets? More efficient, and samplable! Mainly used for sampling large data sets.
Bucketing slices a table (or partition): you choose the column to slice on and the number of buckets, and each row is assigned to a bucket by the hash of that column value modulo the bucket count.
For example, suppose the data file bucketmem.txt contains:
id name age
1 huang 11
2 li 11
3 xu 12
4 zhong 14
5 hu 15
6 liqiang 17
7 zhonghua 19
If we want to slice this table into 3 buckets on the id field, then after hashing id the 3 buckets contain:
Bucket 0 (hash(id) % 3 == 0):
3 xu 12
6 liqiang 17
Bucket 1 (hash(id) % 3 == 1):
1 huang 11
4 zhong 14
7 zhonghua 19
Bucket 2 (hash(id) % 3 == 2):
2 li 11
5 hu 15
The create table statement for this is:
create table bucketmem (id int, name string, age int) CLUSTERED BY (id) SORTED BY (id ASC) INTO 3 BUCKETS
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
LOAD DATA LOCAL INPATH '/home/hadoop/hfxdoc/bucketmem.txt' INTO TABLE bucketmem;
select * from bucketmem tablesample(bucket 1 out of 3 on id);
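# One caveat the session above glosses over: LOAD DATA only moves the file into place and does not actually split rows into bucket files. On older Hive releases the usual pattern is to load into a plain staging table and then INSERT ... SELECT with bucketing enforced (the staging table name here is illustrative):
hive> create table bucketmem_stage (id int, name string, age int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
hive> LOAD DATA LOCAL INPATH '/home/hadoop/hfxdoc/bucketmem.txt' INTO TABLE bucketmem_stage;
hive> set hive.enforce.bucketing = true;
hive> insert overwrite table bucketmem select id, name, age from bucketmem_stage;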
#其餘操做參考,更完整的請參考官網: https://cwiki.apache.org/confluence/display/Hive/Home
1) Create a table with the same structure as an existing table (LIKE):
Copies only the table structure, not the contents.
create table test_like_table like test_bucket;
2) Rename a table (RENAME TO):
ALTER TABLE table_name RENAME TO new_table_name;
3) Add partitions (ADD PARTITION):
ALTER TABLE table_name ADD partition_spec [LOCATION 'location1'] partition_spec [LOCATION 'location2'] ...;
4) Change a column's name, data type, position, or comment:
ALTER TABLE table_name CHANGE [COLUMN] col_old_name col_new_name column_type [COMMENT col_comment] [FIRST|AFTER column_name];
5) Add/replace columns (ADD/REPLACE COLUMNS):
ALTER TABLE table_name ADD|REPLACE COLUMNS (col_name data_type [COMMENT col_comment], ...);
ADD COLUMNS appends the new columns after the existing columns but before the partition columns.
6) Full CREATE TABLE syntax:
CREATE [EXTERNAL] TABLE [IF NOT EXISTS] table_name [(col_name data_type [COMMENT col_comment], ...)] [COMMENT table_comment] [PARTITIONED BY (col_name data_type [COMMENT col_comment], ...)] [CLUSTERED BY (col_name, col_name, ...) [SORTED BY (col_name [ASC|DESC], ...)] INTO num_buckets BUCKETS] [ROW FORMAT row_format] [STORED AS file_format] [LOCATION hdfs_path]
7) Browse HDFS from inside the hive shell:
hive> dfs -ls /user;
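# Putting several of those clauses together, a complete statement might look like this (a sketch; the table, columns, and location are made up):
hive> CREATE EXTERNAL TABLE IF NOT EXISTS logs(
    >   uid STRING COMMENT 'user id',
    >   ts BIGINT COMMENT 'event timestamp')
    > COMMENT 'raw event logs'
    > PARTITIONED BY (dt STRING)
    > CLUSTERED BY (uid) SORTED BY (uid ASC) INTO 4 BUCKETS
    > ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
    > STORED AS TEXTFILE
    > LOCATION '/tmp/logs';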