I've recently been working on importing offline data into HBase, which involves reading gz-compressed files from HDFS. I'm writing the approach down here for future reference. The code is as follows:
```java
package org.dba.util;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.CompressionInputStream;

public class ReadHdfs {

    public static void ReadFile(String fileName) throws IOException {
        Configuration conf = new Configuration();
        Path file = new Path(fileName);
        FileSystem fs = FileSystem.get(conf);
        FSDataInputStream hdfsInStream = fs.open(file);

        // Resolve the codec from the file name (.gz -> GzipCodec);
        // returns null for an uncompressed file.
        CompressionCodecFactory factory = new CompressionCodecFactory(conf);
        CompressionCodec codec = factory.getCodec(file);

        BufferedReader reader = null;
        try {
            if (codec == null) {
                // Plain file: read the raw HDFS stream directly.
                reader = new BufferedReader(new InputStreamReader(hdfsInStream));
            } else {
                // Compressed file: wrap the stream so it is decompressed on the fly.
                CompressionInputStream comInStream = codec.createInputStream(hdfsInStream);
                reader = new BufferedReader(new InputStreamReader(comInStream));
            }
            // Print up to the first 100 characters of the first line as a sanity check.
            String line = reader.readLine();
            if (line != null) {
                System.out.println(line.substring(0, Math.min(100, line.length())));
            }
        } finally {
            if (reader != null) {
                reader.close();
            }
        }
    }

    public static void main(String[] args) throws IOException {
        ReadFile(args[0]);
    }
}
```
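Note that CompressionCodecFactory.getCodec resolves the codec purely from the file name suffix, so the input must keep its .gz extension for the compressed branch to trigger.

For an offline import the input is usually a directory of part files rather than a single path. Here's a minimal sketch of how the same FileSystem API could enumerate a directory and push each file through ReadFile; the class name ReadHdfsDir is my own, not from the original code:

```java
package org.dba.util;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical driver: read every file under an HDFS directory.
// ReadHdfs.ReadFile handles compressed and plain files alike.
public class ReadHdfsDir {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        for (FileStatus status : fs.listStatus(new Path(args[0]))) {
            if (status.isFile()) {
                ReadHdfs.ReadFile(status.getPath().toString());
            }
        }
    }
}
```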