How to Perform Data Cleaning
Create the Mapper class and enter the following code:
package org.example;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

// Extend Mapper: input is <offset, line>, output is <word, 1>
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Take one line of input and split it into words on spaces
        String[] words = value.toString().split(" ");
        // For each word, emit a <word, 1> key-value pair
        for (String word : words) {
            context.write(new Text(word), new LongWritable(1));
        }
    }
}
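The Driver below also wires in a WordCountReducer class that is not shown in this section. A minimal sketch of a matching Reducer, assuming the standard sum-per-key WordCount logic:

package org.example;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

// Sketch of a Reducer matching the Mapper above: input <word, [1, 1, ...]>, output <word, count>
public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        long sum = 0;
        // Sum all the 1s emitted for this word
        for (LongWritable value : values) {
            sum += value.get();
        }
        context.write(key, new LongWritable(sum));
    }
}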
Create the Driver class and enter the following code:
package org.example;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

// MapReduce Driver: configures the job and submits it
public class WordCountDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. Get the configuration and create the job object
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2. Associate the local jar
        job.setJarByClass(WordCountDriver.class);
        // 3. Associate the Mapper and Reducer
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4. Set the Mapper output KV types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        // 5. Set the final output KV types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("input"));
        FileOutputFormat.setOutputPath(job, new Path("output"));
        // 7. Submit the job and exit with its status
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
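Once packaged, the job can be submitted with the standard hadoop jar command, e.g. hadoop jar wordcount.jar org.example.WordCountDriver (the jar name here is hypothetical).

For a pure data-cleaning job, as this section's title suggests, no Reducer is needed: the Mapper drops or rewrites bad records, and the number of reduce tasks is set to 0 so that Mapper output is written out directly. A minimal sketch of such a cleaning Mapper, assuming a hypothetical comma-separated input where lines with fewer than 3 fields are considered malformed:

package org.example;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

// Hypothetical cleaning Mapper: keeps only well-formed lines, drops the rest
public class CleanMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString().trim();
        // Drop empty lines and lines with fewer than 3 comma-separated fields (assumed schema)
        if (line.isEmpty() || line.split(",").length < 3) {
            return;
        }
        // Pass the cleaned line through unchanged
        context.write(new Text(line), NullWritable.get());
    }
}

In the Driver for this variant you would skip setReducerClass and call job.setNumReduceTasks(0); the job then writes one output file per map task, with no shuffle or reduce phase.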