Home | About | Sematext search-lucene.com search-hadoop.com
NEW: Monitor These Apps!
elasticsearch, apache solr, apache hbase, hadoop, redis, cassandra, amazon cloudwatch, mysql, memcached, apache kafka, apache zookeeper, apache storm, ubuntu, centOS, red hat, debian, puppet labs, java, senseiDB
 Search Hadoop and all its subprojects:

Switch to Threaded View
HBase >> mail # user >> [Error]Finding average using hbase hadoop


Copy link to this message
-
Re: [Error]Finding average using hbase hadoop
hello,
jean

Did you find it?
On Sun, Aug 18, 2013 at 8:28 AM, manish dunani <[EMAIL PROTECTED]> wrote:

> But i want my output likewise::
>
>
> ROW                    COLUMN+CELL
>
> QXM                      column=stocks_output:average,
> timestamp=XXXXXXXXXX, val
>                       ue=XXXX
> QTM                      column=stocks_output:average,
> timestamp=XXXXXXXXXX, val
>                       ue=XXXX
>
>
> *sample dataset in hbase::**(table name:nyse4)*
>
>
>  2010-02-04           column=stocks:open, timestamp=1376567559424,
> value=2.5
>  2010-02-04           column=stocks:symbol, timestamp=1376567559424,
> value=QXM
>  2010-02-05           column=stocks:open, timestamp=1376567559429,
> value=2.42
>  2010-02-05           column=stocks:symbol, timestamp=1376567559429,
> value=QXM
>
>
> ===>>In my previous output i didn't get any symbol (qualifier) values in
> my table as Row key..
>
>
>
> hbase(main):004:0> scan 'nyse5'
> ROW                   COLUMN+CELL
> symbol               column=stocks_output:average,
> timestamp=1376749641978, val
>                       ue=@\xC6o\x11
>
> *So, that I changed my program likewise::*
>
>
> package com.maddy;
>
> import java.io.IOException;
>
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.fs.Path;
> import org.apache.hadoop.hbase.HBaseConfiguration;
> import org.apache.hadoop.hbase.client.Result;
> import org.apache.hadoop.hbase.client.Scan;
> import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
> import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
> import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
> import org.apache.hadoop.hbase.mapreduce.TableMapper;
> import org.apache.hadoop.hbase.util.Bytes;
> //import org.apache.hadoop.io.DoubleWritable;
> import org.apache.hadoop.io.FloatWritable;
> import org.apache.hadoop.io.Text;
> import org.apache.hadoop.mapreduce.Job;
> import org.apache.hadoop.mapreduce.Reducer;
>
> import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
>
>
> public class openaveragestock
> {
>     public static class map extends TableMapper<Text,FloatWritable>
>     {
>         private static String col_family="stocks";
>         private static String qul="open";
>
>         private static String col_family1="stocks";
>         private static String qul1="symbol";
>
>         private static byte[] colfamily2=Bytes.toBytes(col_family);
>         private static byte[] qul2=Bytes.toBytes(qul);
>
>         private static byte[] colfamily3=Bytes.toBytes(col_family1);
>         private static byte[] qul3=Bytes.toBytes(qul1);
>
> //        public static float toFloat(int qul2)
> //        {
> //            return Float.intBitsToFloat((qul2));
> //
> //        }
> //
>         private static Text k1=new Text();
>
>
>
>         public void map(ImmutableBytesWritable row,Result value,Context
> context) throws IOException
>         {
>
>
>             //byte[]
> val1=(value.getValue("stocks".getBytes(),"symbol".getBytes()));
>             //String
> k=Bytes.toString(value.getValue(Bytes.toBytes("stocks"),Bytes.toBytes("symbol")));
>            byte[] val=value.getValue(colfamily2,qul2);
>           String k=Bytes.toString(value.getValue(colfamily3,qul3));
>
>             //ImmutableBytesWritable stock_symbol=new
> ImmutableBytesWritable(qul3);
>
>         k1.set(k);
>
>             try
>             {
>
>                 context.write(k1,new
> FloatWritable(Float.parseFloat(Bytes.toString(val))));
>
>             }
>
>             catch(InterruptedException e)
>
>             {
>                  throw new IOException(e);
>             }
>
>
>         }
>
>
>     }
>
>
>     public static class Reduce extends Reducer<Text,FloatWritable,
> Text,FloatWritable>
>     {
>
>         public void reduce(Text key,Iterable<FloatWritable>values,Context
> context) throws IOException, InterruptedException
>
>         {
>             float sum=0;
>             int count=0;
>             float average=0;
>             for(FloatWritable val:values)
Regards

*Manish Dunani*
*Contact No* : +91 9408329137
*skype id* : manish.dunani*
*
NEW: Monitor These Apps!
elasticsearch, apache solr, apache hbase, hadoop, redis, cassandra, amazon cloudwatch, mysql, memcached, apache kafka, apache zookeeper, apache storm, ubuntu, centOS, red hat, debian, puppet labs, java, senseiDB