hadoop map reduce自定義數據類型時注意順序,否則報錯。

自定義數據類型,實現Writable接口,重寫write方法和readFields方法時,在操作字段時,必須保證順序一致:如果在write方法中先寫id字段,則在readFields中也必須先讀id字段,否則報錯。

package com.my.hadoop;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Objects;

import org.apache.hadoop.io.Writable;

/**
 * A custom Hadoop {@link Writable} pairing an int {@code id} with a String {@code name}.
 *
 * <p>Serialization contract: {@link #write(DataOutput)} and {@link #readFields(DataInput)}
 * must access the fields in the same order (id first, then name); mismatched order
 * corrupts deserialization and causes runtime errors.
 */
public class PairWritable implements Writable {

    private int id;
    private String name;

    /** No-arg constructor required by Hadoop's reflective instantiation. */
    public PairWritable() {
    }

    /**
     * Creates a pair with the given values.
     *
     * @param id   the numeric identifier
     * @param name the associated name
     */
    public PairWritable(int id, String name) {
       set(id,name);
    }

    /**
     * Sets both fields at once.
     *
     * @param id   the numeric identifier
     * @param name the associated name
     */
    public void set(int id,String name){
        this.setId(id);
        this.setName(name);
    }

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    @Override
    public String toString() {
        return id+"\t"+name;
    }

    /**
     * Serializes the fields. The write order here (id, then name) must match the
     * read order in {@link #readFields(DataInput)} exactly, or deserialization fails.
     *
     * @param dataOutput sink to write the fields to
     * @throws IOException if the underlying stream fails
     */
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeInt(id);
        dataOutput.writeUTF(name);
    }

    /**
     * Deserializes the fields in the same order they were written: id, then name.
     *
     * @param dataInput source to read the fields from
     * @throws IOException if the underlying stream fails
     */
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        this.id=dataInput.readInt();
        this.name=dataInput.readUTF();
    }

    // Value-based equality over (id, name). The previous super delegation kept
    // identity semantics, which breaks grouping/deduplication when this type is
    // used as a MapReduce key.
    @Override
    public int hashCode() {
        return Objects.hash(id, name);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof PairWritable)) {
            return false;
        }
        PairWritable other = (PairWritable) obj;
        return id == other.id && Objects.equals(name, other.name);
    }
}
相關文章
相關標籤/搜索