Here is my Hadoop word count Java map and reduce source code:
In the map function, I have gotten to the point where I can output every word that starts with the letter "c" together with the total number of times that word appears. What I actually want, though, is to output just the total number of words starting with the letter "c", and I am a bit stuck on getting that total. Any help would be greatly appreciated, thanks.
Example
The output I am getting:
could 2
can 3
cat 5
What I would like to get:
Total 10
public static class MapClass extends MapReduceBase
        implements Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(LongWritable key, Text value,
                    OutputCollector<Text, IntWritable> output,
                    Reporter reporter) throws IOException {
        String line = value.toString();
        StringTokenizer itr = new StringTokenizer(line);
        while (itr.hasMoreTokens()) {
            word.set(itr.nextToken());
            if (word.toString().startsWith("c")) {
                output.collect(word, one);
            }
        }
    }
}

public static class Reduce extends MapReduceBase
        implements Reducer<Text, IntWritable, Text, IntWritable> {

    public void reduce(Text key, Iterator<IntWritable> values,
                       OutputCollector<Text, IntWritable> output,
                       Reporter reporter) throws IOException {
        int sum = 0;
        while (values.hasNext()) {
            sum += values.next().get(); // add up all the counts for this word
        }
        output.collect(key, new IntWritable(sum)); // emit the word and its total
    }
}
Chris Gerken's answer is right.
If you output the word itself as the key, that will only help you count the number of unique words starting with "c", not the total count of all words starting with "c". So you need to emit a single, shared key from your mapper.
while (itr.hasMoreTokens()) {
    String token = itr.nextToken();
    if (token.startsWith("c")) {
        word.set("C_Count");       // constant key: every c-word counts toward one total
        output.collect(word, one);
    }
}
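With that single constant key, every 1 the mappers emit lands on the same reducer, and the unchanged sum-reducer then produces the grand total, e.g. C_Count 10 for the sample input above.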
Here is an example using the new API.
Driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "wordcount");
        FileSystem fs = FileSystem.get(conf);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Remove the output directory if it already exists so the job can rerun.
        if (fs.exists(new Path(args[1])))
            fs.delete(new Path(args[1]), true);

        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setJarByClass(WordCount.class);
        job.waitForCompletion(true);
    }
}
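One optional tweak, not part of the original answer: because the reduction here is a plain associative sum, the same Reduce class can also be registered as a combiner so that map output is pre-aggregated locally before the shuffle:

// Optional: reuse the reducer as a combiner (safe because integer
// addition is associative and commutative).
job.setCombinerClass(Reduce.class);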
Mapper class
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class Map extends Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        StringTokenizer itr = new StringTokenizer(line);
        while (itr.hasMoreTokens()) {
            String token = itr.nextToken();
            if (token.startsWith("c")) {
                word.set("C_Count"); // single shared key for every c-word
                context.write(word, one);
            }
        }
    }
}
Reducer class
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {

    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
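As an aside, if the single total is the only output you ever need, a Hadoop counter can replace the shuffle and reduce phase entirely. Here is a minimal sketch against the new API; the group and counter names ("WordStats", "C_WORDS") and the class name CounterMap are made up for illustration:

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class CounterMap extends Mapper<LongWritable, Text, NullWritable, NullWritable> {

    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        StringTokenizer itr = new StringTokenizer(value.toString());
        while (itr.hasMoreTokens()) {
            if (itr.nextToken().startsWith("c")) {
                // Increment a job-wide counter instead of writing key/value pairs;
                // the framework aggregates counter values across all map tasks.
                context.getCounter("WordStats", "C_WORDS").increment(1);
            }
        }
    }
}

With this approach you would set job.setNumReduceTasks(0) in the driver, and after job.waitForCompletion(true) read the total with job.getCounters().findCounter("WordStats", "C_WORDS").getValue().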