Importing Data Directly from HDFS into HBase in the Hadoop Mapper Stage

The source data format is as follows:

20130512	1	-1	-1	13802	1	2013-05-12 07:26:22	
20130512	1	-1	-1	13802	1	2013-05-12 11:18:24
The expected behavior is that the data is read from HDFS and written into HBase directly from the mapper, with no reduce stage.
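
Note that TableOutputFormat does not create the target table, so the table named in the job below (TestMap2Hdfs, with the column family cf) has to exist in HBase before the job runs. A minimal sketch using the HBaseAdmin client API of the same era as the job code (the class name here is our own; table and family names simply mirror the job configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CreateTestMap2HdfsTable {
	public static void main(String[] args) throws Exception {
		Configuration conf = HBaseConfiguration.create();
		HBaseAdmin admin = new HBaseAdmin(conf);
		// Table name and column family must match what the MapReduce job writes to.
		if (!admin.tableExists("TestMap2Hdfs")) {
			HTableDescriptor desc = new HTableDescriptor("TestMap2Hdfs");
			desc.addFamily(new HColumnDescriptor("cf"));
			admin.createTable(desc);
		}
		admin.close();
	}
}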

The code is as follows:

package WebsiteAnalysis;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Map2Hdfs {
	public static final String NAME = "ImportFromFile";

	public enum Counters {
		LINES
	}

	static class ImportMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Writable> {
		private byte[] family = null;
		private byte[] qualifier = null;

		@Override
		protected void setup(Context context) throws IOException, InterruptedException {
			// "conf.column" carries either "family" or "family:qualifier" from the job configuration.
			String column = context.getConfiguration().get("conf.column");
			byte[][] colkey = KeyValue.parseColumn(Bytes.toBytes(column));
			family = colkey[0];
			if (colkey.length > 1) {
				qualifier = colkey[1];
			}
		}

		@Override
		public void map(LongWritable offset, Text line, Context context) throws IOException {
			try {
				// Split the tab-separated record; the last field is the timestamp.
				String[] lineArr = line.toString().split("\t");
				// Use the byte offset of the line within the input file as the row key.
				Put put = new Put(Bytes.toBytes(offset + ""));
				put.add(family, Bytes.toBytes("time"), Bytes.toBytes(lineArr[lineArr.length - 1]));
				// With zero reduce tasks, the Put emitted here goes straight to TableOutputFormat.
				context.write(new ImmutableBytesWritable(Bytes.toBytes(offset + "")), put);
				context.getCounter(Counters.LINES).increment(1);
			} catch (Exception e) {
				e.printStackTrace();
			}
		}
	}

	public static void main(String[] args) throws Exception {
		Configuration conf = HBaseConfiguration.create();
		String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
		conf.set("conf.column", "cf");
		String inputPath = "/dsap/middata/lj/ooxx/pv";
		Job job = new Job(conf, "TestMap2Hdfs");

		job.setJarByClass(Map2Hdfs.class);
		job.setMapperClass(ImportMapper.class);
		// Output goes straight to HBase: TableOutputFormat writes each emitted Put into the
		// table named by TableOutputFormat.OUTPUT_TABLE, and with zero reduce tasks the
		// mapper output is the final output of the job.
		job.setOutputFormatClass(TableOutputFormat.class);
		job.getConfiguration().set(TableOutputFormat.OUTPUT_TABLE, "TestMap2Hdfs");
		job.setOutputKeyClass(ImmutableBytesWritable.class);
		job.setOutputValueClass(Writable.class);
		job.setNumReduceTasks(0);
		FileInputFormat.addInputPath(job, new Path(inputPath + "/" + otherArgs[0]));
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
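
After the job finishes, the imported rows can be checked with a plain scan. A small sketch with the HTable/Scan client API of the same era (the class name is our own; only the cf:time column is written by the mapper above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanTestMap2Hdfs {
	public static void main(String[] args) throws Exception {
		Configuration conf = HBaseConfiguration.create();
		HTable table = new HTable(conf, "TestMap2Hdfs");
		Scan scan = new Scan();
		scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("time"));
		ResultScanner scanner = table.getScanner(scan);
		for (Result result : scanner) {
			// The row key is the byte offset of the original line; the value is the timestamp field.
			String row = Bytes.toString(result.getRow());
			String time = Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("time")));
			System.out.println(row + " -> " + time);
		}
		scanner.close();
		table.close();
	}
}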


REF:


http://stackoverflow.com/questions/11061854/hadoop-writing-to-hbase-directly-from-the-mapper

http://blog.sina.com.cn/s/blog_62a9902f0101904h.html  writing to HBase by creating a new table

HBase-HDFS MapReduce read/write notes

http://blog.pureisle.net/archives/1938.html  a summary of the different cases of HBase/HDFS MapReduce reads and writes

http://blog.csdn.net/kirayuan/article/details/7001278  sample code for copying an HBase table
