Re: issue about class not found when running MR job
No, that shouldn't be needed; hadoop can run a class directly. I tried it on another
box and there it works fine. On this box it fails:
# hadoop com/test/demo/WordCount
Error: Could not find or load main class com.test.demo.WordCount
[root@CHBM224 test]# hadoop classpath
/etc/hadoop/conf:/usr/lib/hadoop/lib/*:/usr/lib/hadoop/.//*:/usr/lib/hadoop-hdfs/./:/usr/lib/hadoop-hdfs/lib/*:/usr/lib/hadoop-hdfs/.//*:/usr/lib/hadoop-yarn/lib/*:/usr/lib/hadoop-yarn/.//*:/usr/lib/hadoop-mapreduce/lib/*:/usr/lib/hadoop-mapreduce/.//*
[root@CHBM224 test]# echo $CLASSPATH
.:/usr/java/jdk1.7.0_25/lib/dt.jar:/usr/java/jdk1.7.0_25/lib/tools.jar
--- I copied the same class files to another box, and there it works fine:

# cd test/
[root@CH22 test]# hadoop com/test/demo/WordCount
teragen <num rows> <output dir>
[root@CH22 test]# hadoop classpath
/etc/hadoop/conf:/usr/lib/hadoop/lib/*:/usr/lib/hadoop/.//*::/usr/lib/hadoop/lib:/usr/lib/hadoop-hdfs/./:/usr/lib/hadoop-hdfs/lib/*:/usr/lib/hadoop-hdfs/.//*:/usr/lib/hadoop-yarn/lib/*:/usr/lib/hadoop-yarn/.//*:/usr/lib/hadoop-mapreduce/lib/*:/usr/lib/hadoop-mapreduce/.//*
[root@CH22 test]# echo $CLASSPATH
.:/usr/java/jdk1.6.0_35/lib/dt.jar:/usr/java/jdk1.6.0_35/lib/tools.jar
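
Note that the working box's hadoop classpath contains an extra empty entry ("::") plus /usr/lib/hadoop/lib, so the current directory ends up on the classpath there, while the failing box's does not; that may explain the difference. For reference, a sketch of the conventional invocations, assuming the compiled classes live under ./com/test/demo/ as in the transcript above:

# put the current directory on the Hadoop classpath and use the
# dot-separated fully qualified class name
cd test/
export HADOOP_CLASSPATH=.
hadoop com.test.demo.WordCount

# or package the classes into a jar and launch it via "hadoop jar"
jar cf wordcount.jar com/
hadoop jar wordcount.jar com.test.demo.WordCount

Either way, the hadoop script simply hands the class name to the JVM, so the class has to be resolvable on the classpath just as with plain java.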
On Fri, Dec 13, 2013 at 10:17 AM, Tao Xiao <[EMAIL PROTECTED]> wrote:

> How did you package and compile your jar? Did you specify the main class
> for the JAR file you generated?
>
>
> 2013/12/13 ch huang <[EMAIL PROTECTED]>
>
>> hi, maillist:
>>
>>              I rewrote WordCount.java and tried to compile and run it, but
>> it says it cannot find the main class. Why?
>>
>> [root@CHBM224 myMR]# cat WordCount.java
>> import java.io.IOException;
>> import java.util.StringTokenizer;
>> import org.apache.hadoop.conf.Configuration;
>> import org.apache.hadoop.conf.Configured;
>> import org.apache.hadoop.fs.Path;
>> import org.apache.hadoop.io.IntWritable;
>> import org.apache.hadoop.io.Text;
>> import org.apache.hadoop.mapreduce.Job;
>> import org.apache.hadoop.mapreduce.Mapper;
>> import org.apache.hadoop.mapreduce.Reducer;
>> import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
>> import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
>> import org.apache.hadoop.util.GenericOptionsParser;
>> import org.apache.hadoop.util.ToolRunner;
>> import org.apache.hadoop.util.Tool;
>> public class WordCount extends Configured implements Tool {
>>   public static class TokenizerMapper
>>        extends Mapper<Object, Text, Text, IntWritable>{
>>     private final static IntWritable one = new IntWritable(1);
>>     private Text word = new Text();
>>     public void map(Object key, Text value, Context context
>>                     ) throws IOException, InterruptedException {
>>       StringTokenizer itr = new StringTokenizer(value.toString());
>>       while (itr.hasMoreTokens()) {
>>         word.set(itr.nextToken());
>>         context.write(word, one);
>>       }
>>     }
>>   }
>>   public static class IntSumReducer
>>        extends Reducer<Text,IntWritable,Text,IntWritable> {
>>     private IntWritable result = new IntWritable();
>>     public void reduce(Text key, Iterable<IntWritable> values,
>>                        Context context
>>                        ) throws IOException, InterruptedException {
>>       int sum = 0;
>>       for (IntWritable val : values) {
>>         sum += val.get();
>>       }
>>       result.set(sum);
>>       context.write(key, result);
>>     }
>>   }
>>   private static void usage() throws IOException {
>>     System.err.println("teragen <num rows> <output dir>");
>>   }
>>   public int run(String[] args) throws IOException, InterruptedException,
>>       ClassNotFoundException {
>>         Job job = Job.getInstance(getConf());
>>         if (args.length != 2) {
>>               usage();
>>               return 2;
>>         }
>>         job.setJobName("wordcount");
>>         job.setJarByClass(WordCount.class);
>>         job.setMapperClass(TokenizerMapper.class);
>>         job.setCombinerClass(IntSumReducer.class);
>>         job.setReducerClass(IntSumReducer.class);
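
The quoted listing is cut off at this point in the archive. For reference, a minimal sketch of how the remainder of run() and a ToolRunner-driven main() typically look in this pattern; the output key/value classes, the use of args[0]/args[1] as input and output paths, and the exit codes are assumptions, not the poster's original code:

        // sketch only: a typical completion of run(), not the original message
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        return job.waitForCompletion(true) ? 0 : 1;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner lets GenericOptionsParser strip -D/-libjars options
    // before the remaining arguments reach run()
    int exitCode = ToolRunner.run(new Configuration(), new WordCount(), args);
    System.exit(exitCode);
  }
}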