Home | About | Sematext search-lucene.com search-hadoop.com
NEW: Monitor These Apps!
elasticsearch, apache solr, apache hbase, hadoop, redis, cassandra, amazon cloudwatch, mysql, memcached, apache kafka, apache zookeeper, apache storm, ubuntu, CentOS, red hat, debian, puppet labs, java, senseiDB
 Search Hadoop and all its subprojects:

Switch to Plain View
HBase >> mail # user >> [Error]Finding average using hbase hadoop


+
manish dunani 2013-08-16, 10:42
+
Jean-Marc Spaggiari 2013-08-16, 11:32
+
manish dunani 2013-08-16, 13:09
+
Jean-Marc Spaggiari 2013-08-16, 13:48
+
Ted Yu 2013-08-16, 13:57
+
Ted Yu 2013-08-16, 16:34
+
manish dunani 2013-08-17, 07:10
+
Jean-Marc Spaggiari 2013-08-17, 11:54
Copy link to this message
-
Re: [Error]Finding average using hbase hadoop
Thanks a lot!!
Jean.

I am very thankful to you. And of course, Ted is also doing a very good job.
*

Revised Code ::*

Package com.maddy;
>
> import java.io.IOException;
>
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.fs.Path;
> import org.apache.hadoop.hbase.HBaseConfiguration;
> import org.apache.hadoop.hbase.client.Put;
> import org.apache.hadoop.hbase.client.Result;
> import org.apache.hadoop.hbase.client.Scan;
> import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
> import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
> import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
> import org.apache.hadoop.hbase.mapreduce.TableMapper;
> import org.apache.hadoop.hbase.mapreduce.TableReducer;
> import org.apache.hadoop.hbase.util.Bytes;
> //import org.apache.hadoop.io.DoubleWritable;
> import org.apache.hadoop.io.FloatWritable;
> import org.apache.hadoop.mapreduce.Job;
> import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
>
>
> public class openaveragestock
> {
>     public static class map extends
> TableMapper<ImmutableBytesWritable,FloatWritable>
>     {
>         private static String col_family="stocks";
>         private static String qul="open";
>
>         private static String col_family1="stocks";
>         private static String qul1="symbol";
>
>         private static byte[] colfamily2=Bytes.toBytes(col_family);
>         private static byte[] qul2=Bytes.toBytes(qul);
>
>         private static byte[] colfamily3=Bytes.toBytes(col_family1);
>         private static byte[] qul3=Bytes.toBytes(qul1);
>
> //        public static float toFloat(int qul2)
> //        {
> //            return Float.intBitsToFloat((qul2));
> //
> //        }
> //
>
>
>
>         public void map(ImmutableBytesWritable row,Result value,Context
> context) throws IOException
>         {
>
>
>             //byte[]
> val1=(value.getValue("stocks".getBytes(),"symbol".getBytes()));
>            byte[] val=value.getValue(colfamily2,qul2);
>
>
>             ImmutableBytesWritable stock_symbol=new
> ImmutableBytesWritable(qul3);
>
>
>
>             try
>             {
>
>                 context.write(stock_symbol,new
> FloatWritable(Float.parseFloat(Bytes.toString(val))));
>             }
>
>             catch(InterruptedException e)
>
>             {
>                  throw new IOException(e);
>             }
>
>
>         }
>
>
>     }
>
>
>     public static class reduce extends
> TableReducer<ImmutableBytesWritable,FloatWritable,ImmutableBytesWritable>
>     {
>
>         @Override
>         public void reduce(ImmutableBytesWritable
> key,Iterable<FloatWritable>values,Context context) throws IOException,
> InterruptedException
>         {
>             float sum=0;
>             int count=0;
>             float average=0;
>             for(FloatWritable val:values)
>             {
>                 sum+=val.get();
>                 count++;
>             }
>             average=(sum/count);
>             Put put=new Put(key.get());
>
> put.add(Bytes.toBytes("stocks_output"),Bytes.toBytes("average"),Bytes.toBytes(average));
>             System.out.println("For\t"+count+"\t average is:"+average);
>             context.write(key,put);
>
>         }
>
>     }
>
>     public static void main(String args[]) throws IOException,
> ClassNotFoundException, InterruptedException
>     {
>         Configuration config=HBaseConfiguration.create();
>         config.addResource("/home/manish/workspace/hbase
> project/bin/hbase-site.xml");
>         Job job=new Job(config,"openstockaverage1");
>
>
>         Scan scan=new Scan();
>         scan.addFamily("stocks".getBytes());
>         scan.setFilter(new FirstKeyOnlyFilter());
>
>         TableMapReduceUtil.initTableMapperJob("nyse4",
>                 scan,
>                 map.class,
>                 ImmutableBytesWritable.class,
>                 FloatWritable.class,
>                 job);
>
>         TableMapReduceUtil.initTableReducerJob("nyse5",
>                 reduce.class,
*Sample output  at my eclipse:::*

13/08/17 07:27:21 INFO mapred.Merger: Merging 1 sorted segments

*
*
*Question::: (please don't laugh at me if I ask you a silly question)

*
Here in the code I set an output directory. But when I looked in the HDFS
directory, it does not contain any part-0000 file. It contains only a SUCCESS file.*
*
May I ask why this happens?*

*
On Sat, Aug 17, 2013 at 5:24 PM, Jean-Marc Spaggiari <
[EMAIL PROTECTED]> wrote:
Regards

*Manish Dunani*
*Contact No* : +91 9408329137
*skype id* : manish.dunani*
*
+
Jean-Marc Spaggiari 2013-08-17, 18:29
+
manish dunani 2013-08-18, 02:25
+
manish dunani 2013-08-18, 02:58
+
manish dunani 2013-08-19, 12:32
+
Jean-Marc Spaggiari 2013-08-19, 13:46
+
manish dunani 2013-08-19, 14:01
+
Jean-Marc Spaggiari 2013-08-19, 14:10
NEW: Monitor These Apps!
elasticsearch, apache solr, apache hbase, hadoop, redis, cassandra, amazon cloudwatch, mysql, memcached, apache kafka, apache zookeeper, apache storm, ubuntu, CentOS, red hat, debian, puppet labs, java, senseiDB