Content-Based Recommendation Algorithm Implementation
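
This post implements a simple content-based recommender as a chain of four Hadoop MapReduce jobs. Job1 and Job2 multiply the user-to-movie rating matrix by the movie feature (tag) matrix to obtain each user's preference weight per tag; Job3 and Job4 then score every (user, movie) pair by the cosine similarity between the user's tag-preference vector and the movie's tag vector. Inferred from the parsing in Job1, the input under /moviecf/input is assumed to consist of tab-separated rows like the following (values hypothetical):

u1	m1_3.0	m2_4.5
m1	t1_0.5	t2_0.8

Rows prefixed with "u" carry a user's ratings as movieID_rating pairs; rows prefixed with "m" carry a movie's tag weights as tagID_weight pairs.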


Job1.java

package com.oracle.moviecf;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Together with Job2, this completes:
 *   user-to-movie rating matrix × movie feature matrix = each user's preference weight per feature
 * (a matrix multiplication)
 */
public class Job1 {
    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            String[] split;
            String val;
            String mvID;
            String userID;
            
            // User rating rows: "userID \t movieID_rating \t ..."
            // -> emit (movieID, "userID_rating")
            if (line.startsWith("u")) {
                split = line.split("\t");
                userID = split[0];
                for (int i = 1; i < split.length; i++) {
                    val = split[i].substring(split[i].indexOf("_") + 1);
                    mvID = split[i].substring(0, split[i].indexOf("_"));
                    context.write(new Text(mvID), new Text(userID + "_" + val));
                }
            }
            // Movie tag rows: "movieID \t tagID_weight \t ..."
            // -> emit (movieID, "tagID_weight") unchanged
            if (line.startsWith("m")) {
                split = line.split("\t");
                for (int i = 1; i < split.length; i++) {
                    context.write(new Text(split[0]), new Text(split[i]));
                }
            }
        }
    }
    
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Partition the values grouped under this movie key into
            // user ratings ("userID_rating") and tag weights ("tagID_weight").
            List<String> list_user_val = new ArrayList<>();
            List<String> list_tag_val = new ArrayList<>();
            for (Text i : values) {
                if (i.toString().startsWith("u")) {
                    list_user_val.add(i.toString());
                } else {
                    list_tag_val.add(i.toString());
                }
            }
            
            // Emit one partial product per (user, tag) pair:
            // key "userID_tagID", value rating * weight.
            String userID;
            double userVal;
            String tagID;
            double tagVal;
            for (String i : list_user_val) {
                userID = i.substring(0, i.indexOf("_"));
                userVal = Double.parseDouble(i.substring(i.indexOf("_") + 1));
                for (String j : list_tag_val) {
                    tagID = j.substring(0, j.indexOf("_"));
                    tagVal = Double.parseDouble(j.substring(j.indexOf("_") + 1));
                    context.write(new Text(userID + "_" + tagID),
                            new Text(String.valueOf(userVal * tagVal)));
                }
            }
        }
    }
    
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        
        Job job = Job.getInstance(conf);
        job.setJarByClass(Job1.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        
        FileInputFormat.setInputPaths(job, new Path("/moviecf/input"));
        FileOutputFormat.setOutputPath(job, new Path("/moviecf/output1"));
        
        job.waitForCompletion(true);
    }
}
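
To make the data flow concrete, here is a minimal local sketch (plain Java, no Hadoop) of what Job1's reducer computes for a single movie key; the sample values are hypothetical:

public class Job1Trace {
    public static void main(String[] args) {
        // Values grouped under movie key "m1":
        // user entries are "userID_rating", tag entries are "tagID_weight".
        String[] userVals = {"u1_3.0", "u2_4.0"};
        String[] tagVals = {"t1_0.5", "t2_0.8"};
        for (String u : userVals) {
            String userID = u.substring(0, u.indexOf("_"));
            double rating = Double.parseDouble(u.substring(u.indexOf("_") + 1));
            for (String t : tagVals) {
                String tagID = t.substring(0, t.indexOf("_"));
                double weight = Double.parseDouble(t.substring(t.indexOf("_") + 1));
                // Job1 emits key "userID_tagID" with one partial product of
                // the matrix multiplication, e.g. "u1_t1\t1.5".
                System.out.println(userID + "_" + tagID + "\t" + (rating * weight));
            }
        }
    }
}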


Job2.java

package com.oracle.moviecf;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Together with Job1, this completes:
 *   user-to-movie rating matrix × movie feature matrix = each user's preference weight per feature
 */
public class Job2 {
    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            
            // Input from Job1: "userID_tagID \t partialProduct".
            String line = value.toString();
            String[] split = line.split("\t");
            
            // Regroup by user so the reducer can sum the partial products per tag.
            String userID = split[0].substring(0, split[0].indexOf("_"));
            String tagID = split[0].substring(split[0].indexOf("_") + 1);
            context.write(new Text(userID), new Text(tagID + "_" + split[1]));
        }
    }
    
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            
            // Sum the partial products per tag to get this user's total
            // preference weight for each tag.
            HashMap<String, Double> map = new HashMap<>();
            List<String> list = new ArrayList<>();
            String tagID;
            double val;
            
            for (Text i : values) {
                tagID = i.toString().substring(0, i.toString().indexOf("_"));
                val = Double.parseDouble(i.toString().substring(i.toString().indexOf("_") + 1));
                if (!map.containsKey(tagID)) {
                    map.put(tagID, val);
                    list.add(tagID);
                } else {
                    map.put(tagID, val + map.get(tagID));
                }
            }
            
            // Emit one row per user, tags sorted: "userID \t tagID_weight \t ...".
            StringBuilder line = new StringBuilder();
            Collections.sort(list);
            for (String i : list) {
                line.append(i).append("_").append(map.get(i)).append("\t");
            }
            
            context.write(key, new Text(line.toString()));
        }
    }
    
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        
        Job job = Job.getInstance(conf);
        job.setJarByClass(Job2.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        
        FileInputFormat.setInputPaths(job, new Path("/moviecf/output1"));
        FileOutputFormat.setOutputPath(job, new Path("/moviecf/output2"));
        
        job.waitForCompletion(true);
    }
}
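
And a matching local sketch of Job2's per-user aggregation (values again hypothetical). A TreeMap stands in for the reducer's HashMap-plus-sorted-list combination and yields the same sorted output:

import java.util.TreeMap;

public class Job2Trace {
    public static void main(String[] args) {
        // Values grouped under user key "u1", as emitted by Job1:
        // "tagID_partialProduct".
        String[] values = {"t1_1.5", "t1_2.0", "t2_0.8"};
        TreeMap<String, Double> sums = new TreeMap<>();
        for (String v : values) {
            String tag = v.substring(0, v.indexOf("_"));
            double val = Double.parseDouble(v.substring(v.indexOf("_") + 1));
            sums.merge(tag, val, Double::sum); // sum partial products per tag
        }
        System.out.println("u1\t" + sums); // u1	{t1=3.5, t2=0.8}
    }
}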


Job3.java

package com.oracle.moviecf;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Similarity. Used together with Job4.
 * From the user tag-preference matrix (Job2's output) and the movie feature
 * matrix, compute the cosine similarity between each user and each movie,
 * i.e. a score for how much each user is predicted to like each movie.
 */
public class Job3 {
    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            
            // Each input row is either a user preference vector
            // ("userID \t tagID_weight \t ...") from Job2's output, or a movie
            // feature vector ("movieID \t tagID_weight \t ...") from data1.txt.
            String line = value.toString();
            String[] split = line.split("\t");
            String rowID = split[0];
            String tagID;
            double val;
            double a = 0;   // squared norm of this row (part of the cosine denominator)
            
            for (int i = 1; i < split.length; i++) {
                val = Double.parseDouble(split[i].substring(split[i].indexOf("_") + 1));
                a += val * val;
            }
            
            // Emit one record per tag, carrying the row's squared norm along.
            for (int i = 1; i < split.length; i++) {
                tagID = split[i].substring(0, split[i].indexOf("_"));
                val = Double.parseDouble(split[i].substring(split[i].indexOf("_") + 1));
                context.write(new Text(tagID), new Text(rowID + "_" + val + "_" + a));
            }
        }
    }
    
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            
            
            // Split the records for this tag into user rows and movie rows.
            List<String> list_user = new ArrayList<>();
            List<String> list_movie = new ArrayList<>();
            String user_a;
            String mv_a;
            double user_val;
            double mv_val;
            String userID;
            String mvID;
            
            for (Text i : values) {
                if (i.toString().startsWith("u")) {
                    list_user.add(i.toString());
                } else {
                    list_movie.add(i.toString());
                }
            }
            
            // For every (user, movie) pair sharing this tag, emit the product of
            // the two tag weights along with both squared norms, so Job4 can
            // finish the cosine computation:
            // key "userID_movieID", value "product_userNorm2_movieNorm2".
            String[] split;
            for (String i : list_user) {
                split = i.split("_");
                userID = split[0];
                user_val = Double.parseDouble(split[1]);
                user_a = split[2];
                for (String j : list_movie) {
                    split = j.split("_");
                    mvID = split[0];
                    mv_val = Double.parseDouble(split[1]);
                    mv_a = split[2];
                    context.write(new Text(userID + "_" + mvID),
                            new Text((mv_val * user_val) + "_" + user_a + "_" + mv_a));
                }
            }
        }
    }
    
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        
        Job job = Job.getInstance(conf);
        job.setJarByClass(Job3.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        
        
        FileInputFormat.setInputPaths(job, new Path("/moviecf/output2"));
        FileInputFormat.addInputPath(job, new Path("/moviecf/input/data1.txt"));
        FileOutputFormat.setOutputPath(job, new Path("/moviecf/output3"));
        
        job.waitForCompletion(true);
    }
}
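
For reference, the quantity Job3 and Job4 jointly compute for each (user, movie) pair is the cosine similarity cos(u, m) = Σ u_t·m_t / (√(Σ u_t²)·√(Σ m_t²)) over the shared tags. A straight-line, non-distributed sketch of the same computation (dense double[] vectors are a hypothetical simplification; the jobs carry sparse tagID_weight pairs instead):

public class CosineReference {
    static double cosine(double[] user, double[] movie) {
        double dot = 0, normU = 0, normM = 0;
        for (int i = 0; i < user.length; i++) {
            dot += user[i] * movie[i];     // summed per pair by Job4 (map_b)
            normU += user[i] * user[i];    // Job3 mapper's "a" for the user row
            normM += movie[i] * movie[i];  // Job3 mapper's "a" for the movie row
        }
        return dot / Math.sqrt(normU * normM); // Job4: map_b / sqrt(user_a * mv_a)
    }

    public static void main(String[] args) {
        // Hypothetical preference and feature vectors over two tags.
        System.out.println(cosine(new double[]{3.5, 0.8}, new double[]{0.5, 0.8}));
    }
}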


Job4.java

package com.oracle.moviecf;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Used together with Job3.
 * From the user tag-preference matrix and the movie feature matrix, compute the
 * cosine similarity between each user and each movie, i.e. a score for how much
 * each user is predicted to like each movie.
 */

public class Job4 {
    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Input from Job3: "userID_movieID \t product_userNorm2_movieNorm2".
            String line = value.toString();
            String[] split = line.split("\t");
            String userID = split[0].substring(0, split[0].indexOf("_"));
            String mvID = split[0].substring(split[0].indexOf("_") + 1);
            
            // Regroup by user; the reducer aggregates per movie.
            context.write(new Text(userID), new Text(mvID + "\t" + split[1]));
        }
    }
    
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // map_a: per movie, the cosine denominator sqrt(userNorm2 * movieNorm2)
            // (identical on every record of a pair, so it is stored once).
            // map_b: per movie, the running sum of weight products (the dot product).
            HashMap<String, Double> map_a = new HashMap<>();
            HashMap<String, Double> map_b = new HashMap<>();
            List<String> keyList = new ArrayList<>();
            
            String[] split;
            String[] stemp;
            double a;
            double b;
            for (Text i : values) {
                split = i.toString().split("\t");
                stemp = split[1].split("_");
                a = Math.sqrt(Double.parseDouble(stemp[1]) * Double.parseDouble(stemp[2]));
                b = Double.parseDouble(stemp[0]);
                if (!map_a.containsKey(split[0])) {
                    map_a.put(split[0], a);
                    map_b.put(split[0], b);
                    keyList.add(split[0]);
                } else {
                    map_b.put(split[0], b + map_b.get(split[0]));
                }
            }
            
            // Emit one row per user: "userID \t movieID_score \t ...",
            // where score = dotProduct / denominator, i.e. the cosine similarity.
            Collections.sort(keyList);
            StringBuilder result = new StringBuilder();
            double val;
            for (String i : keyList) {
                val = map_b.get(i) / map_a.get(i);
                result.append(i).append("_").append(String.format("%.1f", val)).append("\t");
            }
            context.write(key, new Text(result.toString()));
        }
    }
    
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        
        Job job = Job.getInstance(conf);
        job.setJarByClass(Job4.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        
        
        FileInputFormat.setInputPaths(job, new Path("/moviecf/output3"));
        FileOutputFormat.setOutputPath(job, new Path("/moviecf/output4"));
        
        job.waitForCompletion(true);
    }
}
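
Since every main() hard-codes its own HDFS paths and blocks on waitForCompletion, the pipeline can be run end to end with a small driver like this hypothetical one:

public class MovieCFDriver {
    public static void main(String[] args) throws Exception {
        Job1.main(args); // rating matrix × tag matrix -> partial products
        Job2.main(args); // sum partials -> user tag-preference vectors
        Job3.main(args); // join preferences with movie tags -> dot-product terms
        Job4.main(args); // aggregate -> cosine score per (user, movie)
    }
}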