Patent Statistics from Hadoop in Action

Data

Characteristics of patent data

  • The citation graph formed by patent citation data is much like a web-link graph or a social-network graph
  • Patents are issued in chronological order, so the data behaves much like a time series
  • Each patent is tied to a person and a location, so it can also be viewed as personal or geographic data

First, obtain the patent data: http://data.nber.org/patents/

This article uses cite75-99.txt, which covers citations of U.S. patents from 1975 to 1999 and contains more than 16 million records. The first few lines look like this:

The first column is the citing patent number; the second column is the patent number cited by the first.

"CITING","CITED"
3858241,956203

Requirement 1

  1. Read the patent citation data and, for each patent, find all the patents that cite it, merging them into one record.
  2. Sort the results in reverse order.

Approach

  • For 1, simply swap the key and value in the Mapper's output
  • For 2, a simple approach is to set the number of reducers to 1, which yields a single, globally sorted output file (see the one-line sketch after this list)
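The run() method below does not set the number of reducers explicitly. If a single, globally sorted output file is wanted, one extra line (a minimal sketch, not part of the original code) can be added to the job configuration in run():

        job.setNumReduceTasks(1);  // one reducer => a single output file, sorted globally by key
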
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.util.Iterator;


// Read the patent citation data and, for each patent, find all the patents that cite it, merging them into one record.
public class FindCitedPatentsAndOrder extends Configured implements Tool {

    public static class MapClass extends MapReduceBase implements Mapper<Text, Text, Text, Text> {

        @Override
        public void map(Text key, Text value, OutputCollector<Text, Text> output, Reporter reporter)
                throws IOException {
            output.collect(value, key);  // key step: swap key and value so the reducer groups citing patents by the cited patent
        }
    }

    public static class ReduceClass extends MapReduceBase implements Reducer<Text, Text, Text, Text> {

        @Override
        public void reduce(Text key, Iterator<Text> values, OutputCollector<Text, Text> output,
                           Reporter reporter) throws IOException {
            String csv = "";
            while (values.hasNext()) {
                if (csv.length() > 0) csv += ",";
                csv += values.next().toString();
            }
            output.collect(key, new Text(csv));
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        JobConf job = new JobConf(getConf(), getClass());

        Path in = new Path(args[0]);
        Path out = new Path(args[1]);
        FileInputFormat.addInputPath(job, in);
        FileOutputFormat.setOutputPath(job, out);

        job.setJobName("FindCitedPatentsAndOrder");
        job.setMapperClass(MapClass.class);
        job.setReducerClass(ReduceClass.class);

        job.setInputFormat(KeyValueTextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.set("key.value.separator.in.input.line", ",");

        JobClient.runJob(job);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new FindCitedPatentsAndOrder(), args);
        System.exit(exitCode);
    }
}
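
A hedged usage sketch (the jar name and HDFS paths are assumptions, not from the original post): after packaging the class into a jar, the job can be launched with something like

    hadoop jar patent-examples.jar FindCitedPatentsAndOrder /input/cite75-99.txt /output/cited-patents

Each line of the result has the form cited_patent<TAB>citing1,citing2,..., and with a single reducer the file is sorted by the cited patent number (as text).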

Requirement 2

  • For each patent, count the number of times it is cited (its citation count)

Approach

  • Building on FindCitedPatentsAndOrder, modify the reduce function to count the values instead of concatenating them
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.util.Iterator;

// For each patent, count the number of times it is cited.
public class CitedPatentsNumberCounter extends Configured implements Tool {
    public static class MapClass extends MapReduceBase implements Mapper<Text, Text, Text, Text> {

        @Override
        public void map(Text key, Text value, OutputCollector<Text, Text> output, Reporter reporter)
                throws IOException {
            output.collect(value, key);  // key step: swap key and value so the reducer groups by the cited patent
        }
    }

    public static class ReduceClass extends MapReduceBase implements Reducer<Text, Text, Text, IntWritable> {

        @Override
        public void reduce(Text key, Iterator<Text> values, OutputCollector<Text, IntWritable> output,
                           Reporter reporter) throws IOException {
            int count = 0;
            while (values.hasNext()){
                values.next();
                count++;
            }
            output.collect(key, new IntWritable(count));
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        JobConf job = new JobConf(getConf(), getClass());

        Path in = new Path(args[0]);
        Path out = new Path(args[1]);
        FileInputFormat.addInputPath(job, in);
        FileOutputFormat.setOutputPath(job, out);

        job.setJobName("CitedPatentsNumberCounter");
        job.setMapperClass(MapClass.class);
        job.setReducerClass(ReduceClass.class);

        job.setInputFormat(KeyValueTextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);  // output key/value classes apply to both K2,V2 and K3,V3 unless overridden
        job.set("key.value.separator.in.input.line", ",");  // the raw citation file is comma-separated

        JobClient.runJob(job);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new CitedPatentsNumberCounter(), args);
        System.exit(exitCode);
    }
}
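
Each output line of this job has the form patent<TAB>citation_count. Because KeyValueTextInputFormat splits each line on the first tab by default, Requirement 3 below can consume this output directly. A hedged sketch of chaining the two jobs (jar name and paths are assumptions):

    hadoop jar patent-examples.jar CitedPatentsNumberCounter /input/cite75-99.txt /output/citation-counts
    hadoop jar patent-examples.jar CitationFrequencyStatistics /output/citation-counts /output/citation-frequency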

Requirement 3

  • Using the citation-count data produced by Requirement 2, count how often each citation count occurs

Approach

  • Simply replace the input data with the output of Requirement 2 (a small worked example follows this list)
  • To reuse the Mapper's output key and output value objects, initialize them once as class members
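As a toy illustration (hypothetical patents and counts, not real data): if Requirement 2 produced the counts A→1, B→1, C→2, the mapper below emits (1, 1), (1, 1) and (2, 1), and the reducer then outputs 1→2 and 2→1, i.e. two patents were cited once and one patent was cited twice.
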
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.util.Iterator;

public class CitationFrequencyStatistics extends Configured implements Tool {
    public static class MapClass extends MapReduceBase implements Mapper<Text, Text, IntWritable, IntWritable> {
        private final static IntWritable UNO = new IntWritable(1);  // the constant 1, emitted once per patent
        private IntWritable citationCount = new IntWritable();


        @Override
        public void map(Text key, Text value, OutputCollector<IntWritable, IntWritable> output, Reporter reporter)
                throws IOException {
            citationCount.set(Integer.parseInt(value.toString()));
            output.collect(citationCount, UNO);  // key step: emit (citation count, 1) so the reducer can count how many patents share that count
        }
    }

    public static class ReduceClass extends MapReduceBase implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {

        @Override
        public void reduce(IntWritable key, Iterator<IntWritable> values, OutputCollector<IntWritable, IntWritable> output,
                           Reporter reporter) throws IOException {
            int count = 0;
            while (values.hasNext()){
                values.next();
                count++;
            }
            output.collect(key, new IntWritable(count));
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        JobConf job = new JobConf(getConf(), getClass());

        Path in = new Path(args[0]);
        Path out = new Path(args[1]);
        FileInputFormat.addInputPath(job, in);
        FileOutputFormat.setOutputPath(job, out);

        job.setJobName("CitationFrequencyStatistics");
        job.setMapperClass(MapClass.class);
        job.setReducerClass(ReduceClass.class);

        job.setInputFormat(KeyValueTextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntWritable.class);  // output key/value classes apply to both K2,V2 and K3,V3

        JobClient.runJob(job);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new CitationFrequencyStatistics(), args);
        System.exit(exitCode);
    }
}

The New Hadoop API

The API in MapReduce Release 0.20.0, the latest Hadoop version at the time, includes a brand-new MapReduce Java API, sometimes referred to as the "context objects" API.

  The new API is not type-compatible with the old one, so existing applications need to be rewritten to take advantage of it.

  There are several notable differences between the new and the old API.

  • The new API favors abstract classes over interfaces, because they are easier to evolve. For example, a method (with a default implementation) can be added to an abstract class without breaking existing implementations of the class. In the new API, Mapper and Reducer are abstract classes.
  • The new API lives in the org.apache.hadoop.mapreduce package (and subpackages); the earlier API is in org.apache.hadoop.mapred.
  • The new API makes extensive use of context objects, which allow user code to communicate with the MapReduce system. For example, MapContext essentially unifies the roles of JobConf, OutputCollector, and Reporter.
  • The new API supports both "push" and "pull" styles of iteration. In both APIs, key/value record pairs are pushed to the mapper, but in addition, the new API lets records be pulled from within the map() method; the same applies to the reducer. A useful example of the "pull" style is processing records in batches rather than one at a time (see the sketch after this list).
  • The new API unifies configuration. The old API has a special JobConf object for job configuration, an extension of Hadoop's generic Configuration object. In the new API this distinction is gone, and job configuration is done through a Configuration. Job control is performed through the Job class rather than JobClient, which no longer exists in the new API.
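
A minimal sketch (not from the book) of the "pull" style mentioned above: in the new API a Mapper may override run() and pull records from the Context itself, for example to process them in batches. The class name and batch size here are assumptions for illustration.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Sketch of a "pull"-style mapper: run() pulls records from the Context itself
// and hands them to the processing logic in batches instead of one at a time.
public class BatchingCitationMapper extends Mapper<LongWritable, Text, Text, Text> {

    private static final int BATCH_SIZE = 1000;  // assumed batch size, for illustration only

    @Override
    public void run(Context context) throws IOException, InterruptedException {
        setup(context);
        List<String> batch = new ArrayList<String>();
        while (context.nextKeyValue()) {                       // pull the next record explicitly
            batch.add(context.getCurrentValue().toString());   // copy the value; Hadoop reuses the object
            if (batch.size() >= BATCH_SIZE) {
                processBatch(batch, context);
                batch.clear();
            }
        }
        if (!batch.isEmpty()) {
            processBatch(batch, context);                      // flush the final partial batch
        }
        cleanup(context);
    }

    // Same key/value inversion as MyJob below, applied to a whole batch at once.
    private void processBatch(List<String> batch, Context context)
            throws IOException, InterruptedException {
        for (String line : batch) {
            String[] citation = line.split(",");
            context.write(new Text(citation[1]), new Text(citation[0]));
        }
    }
}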

A basic Hadoop program template rewritten with the new API

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;

public class MyJob extends Configured implements Tool {

    public static class MapClass extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String[] citation = value.toString().split(",");
            context.write(new Text(citation[1]), new Text(citation[0]));
        }
    }

    public static class ReduceClass extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            String csv = "";
            for (Text val: values) {
                if (csv.length() > 0) csv += ",";
                csv += val.toString();
            }
            context.write(key, new Text(csv));
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();

        Job job = new Job(conf, "MyJob");
        job.setJarByClass(MyJob.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setMapperClass(MapClass.class);
        job.setReducerClass(ReduceClass.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // Submit the job and wait for it to finish
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new MyJob(), args);
        System.exit(exitCode);
    }
}
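
One note on the constructor used above: in later Hadoop releases (2.x and newer) the new Job(conf, name) constructor is deprecated in favor of a static factory method, so the job would typically be created with:

    Job job = Job.getInstance(conf, "MyJob");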