" No FileSystem for scheme: hdfs " in namenode HA
Hi all, I set up a NameNode HA Hadoop cluster and wrote some demo code:

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestConnect {
 private static void appendToHdfs(String content, String dst) throws FileNotFoundException, IOException {

    Configuration conf = new Configuration();
    conf.set("dfs.replication", "2");
  //  System.out.println("append is : "+conf.get("dfs.support.append"));
  //  System.out.println("append is : "+conf.get("dfs.name.dir"));
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    FSDataOutputStream out = fs.append(new Path(dst));
    int readLen = content.getBytes().length;

    out.write(content.getBytes(), 0, readLen);

    out.close();
    fs.close();
   }

    public static void createNewHDFSFile(String toCreateFilePath, String content) throws IOException
     {
         Configuration config = new Configuration();
         FileSystem hdfs = FileSystem.get(URI.create(toCreateFilePath), config);

         FSDataOutputStream os = hdfs.create(new Path(toCreateFilePath));
         os.write(content.getBytes("UTF-8"));

         os.close();

         hdfs.close();
     }

    public static void listAll(String dir) throws IOException
     {
         Configuration conf = new Configuration();
         FileSystem fs = FileSystem.get(URI.create(dir),conf);

         FileStatus[] stats = fs.listStatus(new Path(dir));

         for(int i = 0; i < stats.length; ++i)
         {
             if (stats[i].isFile())
             {
                 // regular file
                 System.out.println(stats[i].getPath().toString());
             }
             else if (stats[i].isDirectory())
             {
                 // dir
                 System.out.println(stats[i].getPath().toString());
             }
             else if(stats[i].isSymlink())
             {
                 // symlink
                 System.out.println(stats[i].getPath().toString());
             }

         }
         fs.close();
     }
 public static void main(String[] args) {

  // TODO Auto-generated method stub
  try {

   createNewHDFSFile("hdfs://mycluster/alex","mycluster");
   listAll("hdfs://mycluster/alex");
   Configuration config = new Configuration();
   System.out.println("append is : "+config.get("dfs.hosts"));
  } catch (FileNotFoundException e) {
   // TODO Auto-generated catch block
   e.printStackTrace();
  } catch (IOException e) {
   // TODO Auto-generated catch block
   e.printStackTrace();
  }
 }

}
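
One way to see which FileSystem class the client resolves for the hdfs scheme is FileSystem.getFileSystemClass, the same lookup that fails in the stack trace below; a minimal sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class CheckScheme {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Ask the FileSystem factory which class serves the hdfs scheme;
        // this is the lookup that throws "No FileSystem for scheme: hdfs"
        // when no implementation is registered.
        Class<? extends FileSystem> cls = FileSystem.getFileSystemClass("hdfs", conf);
        System.out.println("hdfs is served by " + cls.getName());
    }
}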
Here is the client configuration file, hdfs-site.xml:

<property>
 <name>fs.defaultFS</name>
 <value>hdfs://mycluster</value>
</property>
<property>
      <name>ha.zookeeper.quorum</name>
      <value>node1:2181,node2:2181,node3:2181</value>
</property>

<property>
        <name>dfs.nameservices</name>
        <value>mycluster</value>
</property>
<property>
        <name>dfs.ha.namenodes.mycluster</name>
        <value>nn1,nn2</value>
</property>
<property>
        <name>dfs.namenode.rpc-address.mycluster.nn1</name>
        <value>node1:8020</value>
</property>
<property>
        <name>dfs.namenode.rpc-address.mycluster.nn2</name>
        <value>node2:8020</value>
</property>
<property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://node1:8485;node2:8485;node3:8485/mycluster</value>
</property>

<property>
        <name>dfs.client.failover.proxy.provider.mycluster</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
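
As far as I know, new Configuration() only picks up core-site.xml and hdfs-site.xml that are on the client classpath; if the file above lives somewhere else, the HA properties never reach the client and hdfs://mycluster cannot be resolved. A minimal sketch of loading the file explicitly (the /etc/hadoop/conf path is just an example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class LoadConf {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // By default only resources named core-site.xml / hdfs-site.xml on the
        // classpath are read; add the client-side HA configuration explicitly.
        conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
        // Sanity check: should print "mycluster" if the file was picked up.
        System.out.println("dfs.nameservices = " + conf.get("dfs.nameservices"));
    }
}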

When I run the test, I get the following error. Can anyone help?

log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
java.io.IOException: No FileSystem for scheme: hdfs
 at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2296)
 at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2303)
 at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:87)
 at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2342)
 at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2324)
 at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:351)
 at TestConnect.createNewHDFSFile(TestConnect.java:35)
 at TestConnect.main(TestConnect.java:80)
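
From the stack trace, FileSystem.getFileSystemClass finds no implementation registered for the hdfs scheme. This usually means the hadoop-hdfs jar is missing from the client classpath, or, when building a shaded/fat jar, that the META-INF/services/org.apache.hadoop.fs.FileSystem service files were overwritten during packaging. A minimal sketch of a workaround, assuming hadoop-hdfs is in fact on the classpath, is to map the scheme to its implementation class explicitly:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SchemeWorkaround {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Name the implementation class for the hdfs:// scheme directly,
        // bypassing the (missing) ServiceLoader registration. This only helps
        // if hadoop-hdfs, which provides DistributedFileSystem, is on the
        // classpath in the first place.
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        FileSystem fs = FileSystem.get(URI.create("hdfs://mycluster/alex"), conf);
        System.out.println("Connected to: " + fs.getUri());
        fs.close();
    }
}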