Java map reduce - count attributes in reduce

I am working on MapReduce. I have two data sets, and I have to combine them based on an ID and count the number of occurrences of the ID separately for each context. (For example, if the data comes from a travel agency that operates in a few states, the output I need has the format: user ID - count of visits in NY, count of visits in IL.) The data set contains the field state: 'NY', and I have a predefined set of states (NY, IL).
While reducing, I am always getting the count as zero even though there is data.
My output is UID 0 0 for all IDs.
Below is my code:
import java.io.IOException;
import java.util.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
public class myMap {
/* Map*/
public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, Text> {
public void map(LongWritable key, Text value, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
String line = value.toString();
StringTokenizer tokens = new StringTokenizer(line, ",");
Boolean eventFlag = false;
String UID = "", state = "";
while (tokens.hasMoreTokens()) {
String currToken = tokens.nextToken();
String[] keyValue = currToken.split(":");
if (keyValue[0].equals( "state")) {
state = keyValue[1].trim();
}
if (keyValue[0].equalsIgnoreCase( "user")) {
UID = keyValue[1];
}
}
output.collect(new Text(UID), new Text(state));
}
}
/* Reducer*/
public static class Reduce extends MapReduceBase implements Reducer<Text, Text, Text,Text> {
public void reduce(Text key, Iterator<Text> values, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
int nyCnt = 0;
int ilCnt = 0;
String currValue = new String();
while (values.hasNext()) {
currValue = values.next().toString();
if (currValue.equalsIgnoreCase("NY")) {
nyCnt+=1;
}
if (currValue.equalsIgnoreCase("IL")) {
ilCnt+=1;
}
output.collect(key , new Text(currValue));
}
String counts = Integer.toString(nyCnt) + " " + Integer.toString(ilCnt);
output.collect(key, new Text(counts) );
}
}
public static void main(String[] args) throws Exception {
JobConf conf = new JobConf(myMap.class);
conf.setJobName("myMap");
conf.setJarByClass(myMap.class);
conf.setMapperClass(Map.class);
conf.setCombinerClass(Reduce.class);
conf.setReducerClass(Reduce.class);
conf.setMapOutputKeyClass(Text.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(conf, new Path(args[0]));
FileOutputFormat.setOutputPath(conf, new Path(args[1]));
JobClient.runJob(conf);
}
}
Any help regarding what is wrong will be useful. Thank you.
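A few things may be worth double-checking here, as guesses without seeing the real input. The map side compares keyValue[0] with equals("state") without trimming, so a leading space after the comma (", state: ...") never matches and state stays empty; the state value may also still carry its quotes (e.g. 'NY'), which then fails equalsIgnoreCase("NY") in the reducer; and the driver registers Reduce as a combiner even though it emits a different value format ("nyCnt ilCnt") than it consumes, which is unsafe. A more defensive sketch of the map method under those assumptions (a drop-in replacement for the map method above, illustrative only):
public void map(LongWritable key, Text value, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
    String line = value.toString();
    String UID = "", state = "";
    for (String token : line.split(",")) {
        String[] keyValue = token.split(":", 2);
        if (keyValue.length < 2) {
            continue; // skip malformed key:value pairs
        }
        String k = keyValue[0].trim();
        // strip surrounding single quotes, e.g. 'NY' -> NY (assumption about the input format)
        String v = keyValue[1].trim().replaceAll("^'|'$", "");
        if (k.equalsIgnoreCase("state")) {
            state = v;
        } else if (k.equalsIgnoreCase("user")) {
            UID = v;
        }
    }
    output.collect(new Text(UID), new Text(state));
}
If the counts still come out wrong, try removing conf.setCombinerClass(Reduce.class) and re-running, since this reducer is not a valid combiner.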

Related

Duplicate "values" for some key in map-reduce java program

I am new to MapReduce and Hadoop (Hadoop 3.2.3 and Java 8).
I am trying to separate lines based on a symbol in each line.
Example: "q1,a,q0," should return ('a', "q1,a,q0,") as (key, value).
My dataset contains ten (10) lines, five (5) for key 'a' and five for key 'b'.
I expect to get 5 lines for each key, but I always get five for 'a' and 10 for 'b'.
Data
A,q0,a,q1;A,q0,b,q0;A,q1,a,q1;A,q1,b,q2;A,q2,a,q1;A,q2,b,q0;B,s0,a,s0;B,s0,b,s1;B,s1,a,s1;B,s1,b,s0
Mapper class:
import java.io.IOException;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class MyMapper extends Mapper<LongWritable, Text, ByteWritable ,Text>{
private ByteWritable key1 = new ByteWritable();
//private int n ;
private int count =0 ;
private Text wordObject = new Text();
@Override
public void map(LongWritable key, Text value, Context context)throws IOException, InterruptedException {
String ftext = value.toString();
for (String line: ftext.split(";")) {
wordObject = new Text();
if (line.split(",")[2].equals("b")) {
key1.set((byte) 'b');
wordObject.set(line) ;
context.write(key1,wordObject);
continue ;
}
key1.set((byte) 'a');
wordObject.set(line) ;
context.write(key1,wordObject);
}
}
}
Reducer class:
import java.io.IOException;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Reducer.Context;
public class MyReducer extends Reducer<ByteWritable, Text, ByteWritable ,Text>{
private Integer count=0 ;
@Override
public void reduce(ByteWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
for(Text val : values ) {
count++ ;
}
Text symb = new Text(count.toString()) ;
context.write(key , symb);
}
}
Driver class:
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class MyDriver extends Configured implements Tool {
public int run(String[] args) throws Exception {
if (args.length != 2) {
System.out.printf("Usage: %s [generic options] <inputdir> <outputdir>\n", getClass().getSimpleName());
return -1;
}
@SuppressWarnings("deprecation")
Job job = new Job(getConf());
job.setJarByClass(MyDriver.class);
job.setJobName("separation ");
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
job.setMapperClass(MyMapper.class);
job.setReducerClass(MyReducer.class);
job.setMapOutputKeyClass(ByteWritable.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(ByteWritable.class);
job.setOutputValueClass(Text.class);
boolean success = job.waitForCompletion(true);
return success ? 0 : 1;
}
public static void main(String[] args) throws Exception {
int exitCode = ToolRunner.run(new Configuration(), new MyDriver(), args);
System.exit(exitCode);
}
}
The problem was solved by declaring the variable "count" inside the reduce() function.
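For reference, a minimal sketch of that fix: count declared inside reduce() starts at zero for every key, instead of carrying over values from earlier keys handled by the same reducer instance.
import java.io.IOException;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class MyReducer extends Reducer<ByteWritable, Text, ByteWritable, Text> {
    @Override
    public void reduce(ByteWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        int count = 0; // local to this call, so it resets for each key
        for (Text val : values) {
            count++;
        }
        context.write(key, new Text(Integer.toString(count)));
    }
}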
Does your input contain more than one line, with 5 more b's? I cannot reproduce this with just that one line, but your code can be cleaned up.
For the following code, I get output as
a 5
b 5
static class Mapper extends org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, ByteWritable, Text> {
final ByteWritable keyOut = new ByteWritable();
final Text valueOut = new Text();
@Override
protected void map(LongWritable key, Text value, org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, ByteWritable, Text>.Context context) throws IOException, InterruptedException {
String line = value.toString();
if (line.isEmpty()) {
return;
}
StringTokenizer tokenizer = new StringTokenizer(line, ";");
while (tokenizer.hasMoreTokens()) {
String token = tokenizer.nextToken();
String[] parts = token.split(",");
String keyStr = parts[2];
if (keyStr.matches("[ab]")) {
keyOut.set((byte) keyStr.charAt(0));
valueOut.set(token);
context.write(keyOut, valueOut);
}
}
}
}
static class Reducer extends org.apache.hadoop.mapreduce.Reducer<ByteWritable, Text, Text, LongWritable> {
static final Text keyOut = new Text();
static final LongWritable valueOut = new LongWritable();
@Override
protected void reduce(ByteWritable key, Iterable<Text> values, org.apache.hadoop.mapreduce.Reducer<ByteWritable, Text, Text, LongWritable>.Context context)
throws IOException, InterruptedException {
keyOut.set(new String(new byte[]{key.get()}, StandardCharsets.UTF_8));
valueOut.set(StreamSupport.stream(values.spliterator(), true)
.mapToLong(v -> 1).sum());
context.write(keyOut, valueOut);
}
}

Map reduce example beside word count

I followed the example here step by step: https://www.tutorialspoint.com/hadoop/hadoop_mapreduce.htm
I want to find the max for each year in a file like the following:
1320 23
1221 60
1320 33
1221 66
And the result that I expected is:
1320 33
1221 66
And I did the following in Java:
import java.util.*;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;
public class ProcessUnits {
//Mapper class
public static class E_EMapper extends MapReduceBase implements
Mapper<LongWritable ,/*Input key Type */
Text, /*Input value Type*/
Text, /*Output key Type*/
IntWritable> /*Output value Type*/
{
//Map function
public void map(LongWritable key, Text value,
OutputCollector<Text, IntWritable> output,
Reporter reporter) throws IOException {
String line = value.toString();
String lasttoken = null;
StringTokenizer s = new StringTokenizer(line," ");
String year = s.nextToken();
while(s.hasMoreTokens()) {
lasttoken = s.nextToken();
}
int avgprice = Integer.parseInt(lasttoken);
output.collect(new Text(year), new IntWritable(avgprice));
}
}
//Reducer class
public static class E_EReduce extends MapReduceBase implements Reducer< Text, IntWritable, Text, IntWritable > {
//Reduce function
public void reduce( Text key, Iterator <IntWritable> values,
OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
int maxavg = 0 ;
int val = Integer.MIN_VALUE;
while (values.hasNext()) {
val = values.next().get();
if(val > maxavg) {
maxavg = val ;
}
}
output.collect(key, new IntWritable(maxavg));
}
}
//Main function
public static void main(String args[])throws Exception {
JobConf conf = new JobConf(ProcessUnits.class);
conf.setJobName("max_eletricityunits");
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(IntWritable.class);
conf.setMapperClass(E_EMapper.class);
conf.setCombinerClass(E_EReduce.class);
conf.setReducerClass(E_EReduce.class);
conf.setInputFormat(TextInputFormat.class);
conf.setOutputFormat(TextOutputFormat.class);
FileInputFormat.setInputPaths(conf, new Path(args[0]));
FileOutputFormat.setOutputPath(conf, new Path(args[1]));
JobClient.runJob(conf);
}
}
The error I get when I execute this program is the following:
Error: java.util.NoSuchElementException
at java.util.StringTokenizer.nextToken(StringTokenizer.java:349)
at ProcessUnits$E_EMapper.map(ProcessUnits.java:28)
at ProcessUnits$E_EMapper.map(ProcessUnits.java:14)
at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:54)
at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:465)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:349)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1845)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
I know this problem is because my program can't map the file line by line; it maps the entire file:
String line = value.toString();
String lasttoken = null;
StringTokenizer s = new StringTokenizer(line," ");
String year = s.nextToken();
while(s.hasMoreTokens()) {
lasttoken = s.nextToken();
}
int avgprice = Integer.parseInt(lasttoken);
output.collect(new Text(year), new IntWritable(avgprice));
Any ideas on how to solve this problem?
Try reading each line from the file once and splitting its values. Map each year to its corresponding price. Then, in the reduce function, compare each price against the current maximum for that year and keep the larger value.
// E_Mapper.java
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class E_Mapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    public void map(LongWritable ikey, Text ivalue, Context context)
            throws IOException, InterruptedException {
        String line = ivalue.toString().trim();
        // Skip blank or malformed lines so parsing cannot throw
        if (line.isEmpty()) {
            return;
        }
        String[] values = line.split("\\s+"); // [year, price]
        if (values.length < 2) {
            return;
        }
        String year = values[0];
        int price = Integer.parseInt(values[1]);
        context.write(new Text(year), new IntWritable(price));
    }
}
// E_Reducer.java
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class E_Reducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int max = Integer.MIN_VALUE;
        for (IntWritable val : values) {
            if (val.get() > max) {
                max = val.get();
            }
        }
        context.write(key, new IntWritable(max));
    }
}
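For completeness, a minimal driver sketch for the new API, assuming the E_Mapper and E_Reducer classes above and the input and output paths passed as args[0] and args[1] (the class name MaxPerYearDriver is just a placeholder):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class MaxPerYearDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "max per year");
        job.setJarByClass(MaxPerYearDriver.class);
        job.setMapperClass(E_Mapper.class);
        // max is associative, so the reducer can also serve as a combiner
        job.setCombinerClass(E_Reducer.class);
        job.setReducerClass(E_Reducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}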

Hadoop map-reduce mapper programming

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
public class ADDMapper extends MapReduceBase implements Mapper<LongWritable,
Text,Text,LongWritable>
{
@Override
public void map(LongWritable key, Text value,OutputCollector<Text, LongWritable> output, Reporter r)throws IOException
{
String s=value.toString();
char[] words=s.toCharArray();
int wno=0;
int ino=0;
for(int i=0;i<words.length;i++)
{
String temp="";
for(int j=ino;j<words.length;j++)
{
if(words[j]!=' ')
{ temp+=words[j];
}
else
{
wno=j;
if(temp!="")
{
ino=ino + key; //////POINT OF ERROR
output.collect(new Text(temp),new LongWritable(ino));
}
temp="";
ino=wno+1;
break;
}
}
}
}
}
I want to get the index value of every string, sorted by string.
The above code is neither giving the index value nor shuffling the strings.
For example, given the input file:
hi how are you
hi i am right.
how is your job.
hi are you ok.
the expected output is:
am 50
are 7,33
hi 0,30,44
how 3,14
...
Please run the code below; it runs fine and gives your expected output.
Provide the input and output paths as command-line arguments (args[0], args[1]).
import java.io.IOException;
import java.util.*;
import java.util.Map.Entry;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
public class IndexCount {
public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
String str=value.toString();
String[] tokens = str.split(" "); //split into words
//create hashmap for unique word
HashMap<String,Integer> uniqueString = new HashMap<String,Integer>();
for(int i=0;i<tokens.length;i++){
uniqueString.put(tokens[i],1);
}
//for sorting create TreeMap from above hash map
TreeMap<String, Integer> map = new TreeMap<String,Integer>(uniqueString);
for (Entry<String, Integer> entry : map.entrySet()) {
int index=0;
//find the index of the word
index = str.indexOf((String)entry.getKey());
while (index >= 0) {
output.collect(new Text((String)entry.getKey()),new IntWritable(index));
index = str.indexOf((String)entry.getKey(), index + 1);
}
}
}
}
public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
while (values.hasNext()) {
output.collect(key, new IntWritable(values.next().get()));
}
}
}
public static void main(String[] args) throws Exception {
JobConf conf = new JobConf(IndexCount.class);
conf.setJobName("indexfinder");
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(IntWritable.class);
conf.setMapperClass(Map.class);
conf.setCombinerClass(Reduce.class);
conf.setReducerClass(Reduce.class);
conf.setInputFormat(TextInputFormat.class);
conf.setOutputFormat(TextOutputFormat.class);
FileInputFormat.setInputPaths(conf, new Path(args[0]));
FileOutputFormat.setOutputPath(conf, new Path(args[1]));
JobClient.runJob(conf);
}
}
Hi Shivendra, I wrote the mapper logic below, which will help you find the index of each string with sorted output.
The output of this code is each sorted string with its index; you can then run a reducer on this output.
String str=value.toString();
String[] tokens = str.split(" "); //split into words
//create hashmap for unique word
Map<String,Integer> uniqueString = new HashMap<String,Integer>();
for(int i=0;i<tokens.length;i++){
uniqueString.put(tokens[i],1);
}
//for sorting create TreeMap from above hash map
Map<String,Integer> map = new TreeMap<String,Integer>(uniqueString);
for (Map.Entry entry : map.entrySet()) {
int index=0;
//find the index of the word
index = str.indexOf((String)entry.getKey());
while (index >= 0) {
output.collect(new Text((String)entry.getKey()),new LongWritable(index));
index = str.indexOf((String)entry.getKey(), index + 1);
}
}
output of this logic:
am:20,
are:7,
are:50,
hi:0,
hi:15,
hi:47,
how:3,
how:30,
i:1,
i:16,
i:18,
i:24,
i:34,
i:48,
is:34,
job.:42,
ok.:58,
right.:23,
you:11,
you:37,
you:54,
your:37
It might help you.
Please run the code below; it gives the expected output.
import java.io.IOException;
import java.util.*;
import java.util.Map.Entry;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
public class Index {
public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String str=value.toString();
String[] tokens = str.split(" "); //split into words
//create hashmap for unique word
HashMap<String,Integer> uniqueString = new HashMap<String,Integer>();
for(int i=0;i<tokens.length;i++){
uniqueString.put(tokens[i],1);
}
//for sorting create TreeMap from above hash map
TreeMap<String, Integer> map = new TreeMap<String,Integer>(uniqueString);
Configuration conf=context.getConfiguration();
int strIndex = 0;
for (Entry<String, Integer> entry : map.entrySet()) {
//int index=0;
strIndex=conf.getInt("index", 0);
//find the index of the word
int index = str.indexOf((String)entry.getKey());
while (index >= 0) {
index+=strIndex;
context.write(new Text((String)entry.getKey()),new IntWritable(index));
index = str.indexOf((String)entry.getKey(), index + 1);
}
}
conf.setInt("index", strIndex+str.length());
}
}
public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {
public void reduce(Text key, Iterable<IntWritable> values, Context context)
throws IOException, InterruptedException {
for (IntWritable val : values) {
context.write(key, new IntWritable(val.get()));
}
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
conf.setInt("index", 0);
Job job = new Job(conf, "index");
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
FileInputFormat.addInputPath(job, new Path("input"));
FileOutputFormat.setOutputPath(job, new Path("output"));
job.waitForCompletion(true);
}
}
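One caveat on the Configuration-based running offset in the code above: values set on the Configuration inside map() are only visible within that mapper task, so with more than one mapper the offsets will not be global. Since TextInputFormat already passes each line's byte offset within the file as the LongWritable key, an alternative mapper sketch (class and field names are mine, not from the question) derives file-wide positions from that key instead:
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class OffsetIndexMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    private final Text word = new Text();
    private final LongWritable position = new LongWritable();
    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        long lineOffset = key.get(); // byte offset of this line within the input file
        int index = 0;
        for (String token : line.split("\\s+")) {
            if (token.isEmpty()) {
                continue; // split can produce an empty leading token
            }
            index = line.indexOf(token, index);
            word.set(token);
            position.set(lineOffset + index); // exact for single-byte (ASCII) input
            context.write(word, position);
            index += token.length();
        }
    }
}
The shuffle then groups and sorts the words, and a pass-through reducer (with LongWritable values) emits each word with all of its positions.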

FileAlreadyExistsException while running MapReduce code

This program is supposed to run two MapReduce jobs, where the output of the first job is taken as the input of the second job.
When I run it, I get two errors:
Exception in thread "main" org.apache.hadoop.mapred.FileAlreadyExistsException
The mapping part is running 100% but the reducer is not running.
Here's my code:
import java.io.IOException;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.io.LongWritable;
public class MaxPubYear {
public static class FrequencyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
Text word = new Text();
String delim = ";";
Integer year = 0;
String tokens[] = value.toString().split(delim);
if (tokens.length >= 4) {
year = TryParseInt(tokens[3].replace("\"", "").trim());
if (year > 0) {
word = new Text(year.toString());
context.write(word, new IntWritable(1));
}
}
}
}
public static class FrequencyReducer extends
Reducer<Text, IntWritable, Text, IntWritable> {
public void reduce(Text key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable value : values) {
sum += value.get();
}
context.write(key, new IntWritable(sum));
}
}
public static class MaxPubYearMapper extends
Mapper<LongWritable, Text, IntWritable, Text> {
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
String delim = "\t";
Text valtosend = new Text();
String tokens[] = value.toString().split(delim);
if (tokens.length == 2) {
valtosend.set(tokens[0] + ";" + tokens[1]);
context.write(new IntWritable(1), valtosend);
}
}
}
public static class MaxPubYearReducer extends
Reducer<IntWritable, Text, Text, IntWritable> {
public void reduce(IntWritable key, Iterable<Text> values,
Context context) throws IOException, InterruptedException {
int maxiValue = Integer.MIN_VALUE;
String maxiYear = "";
for (Text value : values) {
String token[] = value.toString().split(";");
if (token.length == 2
&& TryParseInt(token[1]).intValue() > maxiValue) {
maxiValue = TryParseInt(token[1]);
maxiYear = token[0];
}
}
context.write(new Text(maxiYear), new IntWritable(maxiValue));
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
Job job = new Job(conf, "Frequency");
job.setJarByClass(MaxPubYear.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(FrequencyMapper.class);
job.setCombinerClass(FrequencyReducer.class);
job.setReducerClass(FrequencyReducer.class);
job.setOutputFormatClass(TextOutputFormat.class);
job.setInputFormatClass(TextInputFormat.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1] + "_temp"));
int exitCode = job.waitForCompletion(true) ? 0 : 1;
if (exitCode == 0) {
Job SecondJob = new Job(conf, "Maximum Publication year");
SecondJob.setJarByClass(MaxPubYear.class);
SecondJob.setOutputKeyClass(Text.class);
SecondJob.setOutputValueClass(IntWritable.class);
SecondJob.setMapOutputKeyClass(IntWritable.class);
SecondJob.setMapOutputValueClass(Text.class);
SecondJob.setMapperClass(MaxPubYearMapper.class);
SecondJob.setReducerClass(MaxPubYearReducer.class);
FileInputFormat.addInputPath(SecondJob, new Path(args[1] + "_temp"));
FileOutputFormat.setOutputPath(SecondJob, new Path(args[1]));
System.exit(SecondJob.waitForCompletion(true) ? 0 : 1);
}
}
public static Integer TryParseInt(String trim) {
// TODO Auto-generated method stub
return(0);
}
}
Exception in thread "main" org.apache.hadoop.mapred.FileAlreadyExistsException
A MapReduce job does not overwrite the contents of an existing directory. The output path given to an MR job must be a directory that does not yet exist; the job will create a directory at the specified path with its output files inside it.
In your code:
FileOutputFormat.setOutputPath(job, new Path(args[1] + "_temp"));
Make sure this path does not exist when you run the MR job.
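If you rerun the job often, a common pattern is to delete the old output directories up front. A small sketch, assuming the default FileSystem and the same args[1]-based paths as in the code above (drop it into main() before the first job is submitted):
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
// ... inside main(), after the Configuration is created:
FileSystem fs = FileSystem.get(conf);
Path tempOut = new Path(args[1] + "_temp");
Path finalOut = new Path(args[1]);
if (fs.exists(tempOut)) {
    fs.delete(tempOut, true); // true = recursive delete
}
if (fs.exists(finalOut)) {
    fs.delete(finalOut, true);
}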

Hadoop WordCount sorted by word occurrences

I need to run WordCount, which will give me all the words and their occurrences, but sorted by the occurrences and not alphabetically.
I understand that I need to create two jobs for this and run one after the other.
I used the mapper and the reducer from Sorted word count using Hadoop MapReduce
package org.myorg;
import java.io.IOException;
import java.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapreduce.Job;
public class WordCount {
public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
String line = value.toString();
StringTokenizer tokenizer = new StringTokenizer(line);
while (tokenizer.hasMoreTokens()) {
word.set(tokenizer.nextToken());
output.collect(word, one);
}
}
}
public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
int sum = 0;
while (values.hasNext()) {
sum += values.next().get();
}
output.collect(key, new IntWritable(sum));
}
}
class Map1 extends MapReduceBase implements Mapper<Object, Text, IntWritable, Text> {
public void map(Object key, Text value, OutputCollector<IntWritable, Text> collector, Reporter arg3) throws IOException {
String line = value.toString();
StringTokenizer stringTokenizer = new StringTokenizer(line);
{
int number = 999;
String word = "empty";
if (stringTokenizer.hasMoreTokens()) {
String str0 = stringTokenizer.nextToken();
word = str0.trim();
}
if (stringTokenizer.hasMoreElements()) {
String str1 = stringTokenizer.nextToken();
number = Integer.parseInt(str1.trim());
}
collector.collect(new IntWritable(number), new Text(word));
}
}
}
class Reduce1 extends MapReduceBase implements Reducer<IntWritable, Text, IntWritable, Text> {
public void reduce(IntWritable key, Iterator<Text> values, OutputCollector<IntWritable, Text> arg2, Reporter arg3) throws IOException {
while ((values.hasNext())) {
arg2.collect(key, values.next());
}
}
}
public static void main(String[] args) throws Exception {
JobConf conf = new JobConf(WordCount.class);
conf.setJobName("wordCount");
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(IntWritable.class);
conf.setMapperClass(Map.class);
conf.setCombinerClass(Reduce.class);
conf.setReducerClass(Reduce.class);
conf.setInputFormat(TextInputFormat.class);
conf.setOutputFormat(TextOutputFormat.class);
FileInputFormat.setInputPaths(conf, new Path(args[0]));
FileOutputFormat.setOutputPath(conf, new Path("/tmp/temp"));
//JobClient.runJob(conf);
//------------------------------------------------------------------
JobConf conf2 = new JobConf(WordCount.class);
conf2.setJobName("WordCount1");
conf2.setOutputKeyClass(Text.class);
conf2.setOutputValueClass(IntWritable.class);
conf2.setMapperClass(Map1.class);
conf2.setCombinerClass(Reduce1.class);
conf2.setReducerClass(Reduce1.class);
conf2.setInputFormat(TextInputFormat.class);
conf2.setOutputFormat(TextOutputFormat.class);
FileInputFormat.setInputPaths(conf2, new Path("/tmp/temp/part-00000"));
FileOutputFormat.setOutputPath(conf2, new Path(args[1]));
Job job1 = new Job(conf);
Job job2 = new Job(conf2);
job1.submit();
if (job1.waitForCompletion(true)) {
job2.submit();
job1.waitForCompletion(true);
}
}
}
It's not working. What should I change here, and why isn't it working?
If the program runs until:
INFO input.FileInputFormat: Total input paths to process : 1
then the problem lies in your last line:
job2.submit();
the job has been submitted but is never waited on, so it does not get processed. Try this:
job1.submit();
if (job1.waitForCompletion(true)) {
job2.submit();
job2.waitForCompletion(true);
}
so that your sorter MR job gets processed. I've tried your code with the new MR API and the flow works.
Just add the last line.
