Re: " No FileSystem for scheme: hdfs " in namenode HA
You need the hadoop-hdfs dependency for the HDFS FileSystem to get
initialized. Your issue lies in how you're running the application, not
in your code. If you use Maven, include the "hadoop-client" dependency
to pull in all the dependencies a Hadoop client program requires.
Otherwise, run your program with "hadoop jar", after ensuring that the
output of "hadoop classpath" indeed includes your HDFS directories too.
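
For example, with Maven, a minimal dependency entry would look roughly
like the snippet below (the version shown is only a placeholder; use the
release your cluster actually runs):

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <!-- placeholder version; match your cluster's Hadoop release -->
      <version>2.0.5-alpha</version>
    </dependency>

hadoop-client pulls in hadoop-hdfs transitively, which provides the
DistributedFileSystem implementation behind the hdfs:// scheme.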

On Fri, Aug 16, 2013 at 12:03 PM, ch huang <[EMAIL PROTECTED]> wrote:
> hi, all. I set up a namenode HA hadoop cluster
>
> and wrote some demo code:
>
> import java.io.FileNotFoundException;
> import java.io.IOException;
> import java.net.URI;
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.fs.FSDataOutputStream;
> import org.apache.hadoop.fs.FileStatus;
> import org.apache.hadoop.fs.FileSystem;
> import org.apache.hadoop.fs.Path;
>
> public class TestConnect {
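>  // Append 'content' to the existing HDFS file at 'dst'.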
>  private static void appendToHdfs(String content, String dst) throws
> FileNotFoundException, IOException {
>
>     Configuration conf = new Configuration();
>     conf.set("dfs.replication", "2");
>   //  System.out.println("append is : "+conf.get("dfs.support.append"));
>   //  System.out.println("append is : "+conf.get("dfs.name.dir"));
>     FileSystem fs = FileSystem.get(URI.create(dst), conf);
>     FSDataOutputStream out = fs.append(new Path(dst));
>     int readLen = content.getBytes().length;
>
>     out.write(content.getBytes(), 0, readLen);
>
>     out.close();
>     fs.close();
>    }
>
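>     // Create a new file at 'toCreateFilePath' and write 'content' into it.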
>     public static void createNewHDFSFile(String toCreateFilePath, String
> content) throws IOException
>      {
>          Configuration config = new Configuration();
>          FileSystem hdfs = FileSystem.get(URI.create(toCreateFilePath), config);
>
>          FSDataOutputStream os = hdfs.create(new Path(toCreateFilePath));
>          os.write(content.getBytes("UTF-8"));
>
>          os.close();
>
>          hdfs.close();
>      }
>
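>     // List the immediate children of 'dir' and print each path.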
>     public static void listAll(String dir) throws IOException
>      {
>          Configuration conf = new Configuration();
>          FileSystem fs = FileSystem.get(URI.create(dir),conf);
>
>          FileStatus[] stats = fs.listStatus(new Path(dir));
>
>          for(int i = 0; i < stats.length; ++i)
>          {
>              if (stats[i].isFile())
>              {
>                  // regular file
>                  System.out.println(stats[i].getPath().toString());
>              }
>              else if (stats[i].isDirectory())
>              {
>                  // dir
>                  System.out.println(stats[i].getPath().toString());
>              }
>              else if(stats[i].isSymlink())
>              {
>                  // symlink
>                  System.out.println(stats[i].getPath().toString());
>              }
>
>          }
>          fs.close();
>      }
>  public static void main(String[] args) {
>
>   // TODO Auto-generated method stub
>   try {
>
>    createNewHDFSFile("hdfs://mycluster/alex","mycluster");
>    listAll("hdfs://mycluster/alex");
>    Configuration config = new Configuration();
>    System.out.println("append is : "+config.get("dfs.hosts"));
>   } catch (FileNotFoundException e) {
>    // TODO Auto-generated catch block
>    e.printStackTrace();
>   } catch (IOException e) {
>    // TODO Auto-generated catch block
>    e.printStackTrace();
>   }
>  }
>
> }
> and the client configuration file, hdfs-site.xml:
>
> <property>
>  <name>fs.defaultFS</name>
>  <value>hdfs://mycluster</value>
> </property>
> <property>
>       <name>ha.zookeeper.quorum</name>
>       <value>node1:2181,node2:2181,node3:2181</value>
> </property>
>
> <property>
>         <name>dfs.nameservices</name>
>         <value>mycluster</value>
> </property>
> <property>
>         <name>dfs.ha.namenodes.mycluster</name>
>         <value>nn1,nn2</value>
> </property>
> <property>
>         <name>dfs.namenode.rpc-address.mycluster.nn1</name>
>         <value>node1:8020</value>
> </property>
> <property>
>         <name>dfs.namenode.rpc-address.mycluster.nn2</name>

Harsh J