1. Quick Hadoop configuration and startup (single-node setup for getting started quickly):
Hadoop download address: http://mirror.bit.edu.cn/apache/hadoop/
Step 1: Edit the configuration files:
core-site.xml
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:9000</value>
    </property>
</configuration>
hdfs-site.xml
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
mapred-site.xml
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
yarn-site.xml
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
hadoop-env.sh
export JAVA_HOME=/usr/java/jdk1.8.0_121
Step 2: Format the filesystem
./hdfs namenode -format
Step 3: Start the NameNode and DataNode daemons
./sbin/start-dfs.sh
Start the ResourceManager and NodeManager daemons
./sbin/start-yarn.sh
Or simply use
./sbin/start-all.sh
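To confirm the daemons started, you can run the JDK's jps tool; on a single-node setup it should list roughly NameNode, DataNode, SecondaryNameNode, ResourceManager and NodeManager (the exact list may vary slightly with your Hadoop version):
jps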
2. Testing
2.1 HDFS test
View the HDFS directory in a browser via the NameNode web UI, which listens on port 50070:
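For example, assuming the default Hadoop 2.x NameNode UI address, open http://localhost:50070 in a browser, or check from the shell that it is reachable:
curl -s http://localhost:50070 | head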
The materials used in the following steps can be downloaded here:
https://pan.baidu.com/s/1hs62YTe
Go into the bin directory of the extracted Hadoop distribution and create the HDFS directories:
./hdfs dfs -mkdir /wordcount
./hdfs dfs -mkdir /wordcount/result
./hadoop fs -rmr /wordcount/result
(The result directory is removed again because MapReduce requires the job's output directory not to exist when the job starts.)
Copy the input folder into the HDFS directory:
./hdfs dfs -put /opt/input /wordcount
List the files:
./hadoop fs -ls /wordcount/input
2.2 MapReduce test
This is the wordcount experiment from the official documentation. Compile and package the wordcount code (a sketch of this step follows below), place the resulting jar in a directory on the server (/opt/testsource; note that this is a local directory, not an HDFS one),
and put the files to be word-counted into the /wordcount/input directory on HDFS.
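A minimal sketch of the compile-and-package step, assuming the hadoop command is on the PATH and the source is saved as hadoop/WordCount.java; the jar name learning.jar and the /opt/testsource path simply match what the job command below expects:
mkdir -p /opt/testsource/classes
javac -classpath "$(hadoop classpath)" -d /opt/testsource/classes hadoop/WordCount.java
jar -cvf /opt/testsource/learning.jar -C /opt/testsource/classes .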
Run the Hadoop job:
./hadoop jar /opt/testsource/learning.jar hadoop.WordCount /wordcount/input /wordcount/result
Verify the result:
hdfs dfs -cat /wordcount/result/*
Appendix: the wordcount code:
package hadoop;

/** Created by BD-PC11 on 2017/3/29. */

import java.io.IOException;
import java.util.*;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;

// WordCount using the classic org.apache.hadoop.mapred API.
public class WordCount {

    // Mapper: splits each input line into tokens and emits (word, 1) for each token.
    public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }

    // Reducer (also used as the combiner): sums the counts for each word.
    public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
        public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output,
                Reporter reporter) throws IOException {
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordcount");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        // args[0] = HDFS input directory, args[1] = HDFS output directory (must not already exist).
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}