Configuration Notes for Accessing the Hadoop Distributed File System (HDFS) from Java
Configuration files
Replace m103 with the address of your HDFS service.
To access files on HDFS from a Java client, the configuration file hadoop-0.20.2/conf/core-site.xml deserves special mention. This is where I initially went wrong, which is why I could not connect to HDFS at all and could neither create nor read files.
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- global properties -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/zhangzk/hadoop</value>
    <description>A base for other temporary directories.</description>
  </property>
  <!-- file system properties -->
  <property>
    <name>fs.default.name</name>
    <value>hdfs://linux-zzk-113:9000</value>
  </property>
</configuration>
Configuration item hadoop.tmp.dir: on the NameNode this is the directory where metadata is stored; on a DataNode it is the directory where that node stores file data.
Configuration item fs.default.name: the host and port of the NameNode; the default value is file:///. A Java API client must use the URL configured here to connect to HDFS, and DataNodes also use this URL to reach the NameNode.
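As a minimal sketch of what this means for a client (the host linux-zzk-113 and the path /user/zhangzk are taken from the configuration above; everything else is illustrative), you can either place core-site.xml on the classpath or set fs.default.name programmatically before obtaining the FileSystem:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsConnectionSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Must match fs.default.name in core-site.xml; otherwise the client
        // silently falls back to the local file system (file:///).
        conf.set("fs.default.name", "hdfs://linux-zzk-113:9000");
        FileSystem fs = FileSystem.get(conf);
        System.out.println("Connected to: " + fs.getUri());
        System.out.println("Exists /user/zhangzk: " + fs.exists(new Path("/user/zhangzk")));
        fs.close();
    }
}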
hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<!-- Autogenerated by Cloudera Manager -->
<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///mnt/sdc1/dfs/nn</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address</name>
    <value>m103:8022</value>
  </property>
  <property>
    <name>dfs.https.address</name>
    <value>m103:50470</value>
  </property>
  <property>
    <name>dfs.https.port</name>
    <value>50470</value>
  </property>
  <property>
    <name>dfs.namenode.http-address</name>
    <value>m103:50070</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
  </property>
  <property>
    <name>dfs.client.use.datanode.hostname</name>
    <value>false</value>
  </property>
  <property>
    <name>fs.permissions.umask-mode</name>
    <value>022</value>
  </property>
  <property>
    <name>dfs.namenode.acls.enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.block.local-path-access.user</name>
    <value>cloudera-scm</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.domain.socket.path</name>
    <value>/var/run/hdfs-sockets/dn</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit.skip.checksum</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.client.domain.socket.data.traffic</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>fs.http.impl</name>
    <value>com.scistor.datavision.fs.HTTPFileSystem</value>
  </property>
</configuration>
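Note that dfs.replication and dfs.blocksize above are only cluster-side defaults; a client may override both when creating an individual file. A minimal sketch, assuming the cluster configuration files are on the classpath (the target path /tmp/replicated.txt is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateWithCustomLayout {
    public static void main(String[] args) throws Exception {
        // Picks up core-site.xml/hdfs-site.xml from the classpath, so the
        // cluster defaults (dfs.replication=3, dfs.blocksize=128 MB) apply.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path file = new Path("/tmp/replicated.txt"); // illustrative path
        // Override the defaults for this one file: 2 replicas, 64 MB blocks.
        FSDataOutputStream out =
                fs.create(file, true, 4096, (short) 2, 64L * 1024 * 1024);
        out.writeUTF("written with a per-file replication factor and block size");
        out.close();
        fs.close();
    }
}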
mapred-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<!-- Autogenerated by Cloudera Manager -->
<configuration>
  <property>
    <name>mapreduce.job.split.metainfo.maxsize</name>
    <value>10000000</value>
  </property>
  <property>
    <name>mapreduce.job.counters.max</name>
    <value>120</value>
  </property>
  <property>
    <name>mapreduce.output.fileoutputformat.compress</name>
    <value>true</value>
  </property>
  <property>
    <name>mapreduce.output.fileoutputformat.compress.type</name>
    <value>BLOCK</value>
  </property>
  <property>
    <name>mapreduce.output.fileoutputformat.compress.codec</name>
    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
  </property>
  <property>
    <name>mapreduce.map.output.compress.codec</name>
    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
  </property>
  <property>
    <name>mapreduce.map.output.compress</name>
    <value>true</value>
  </property>
  <property>
    <name>zlib.compress.level</name>
    <value>DEFAULT_COMPRESSION</value>
  </property>
  <property>
    <name>mapreduce.task.io.sort.factor</name>
    <value>64</value>
  </property>
  <property>
    <name>mapreduce.map.sort.spill.percent</name>
    <value>0.8</value>
  </property>
  <property>
    <name>mapreduce.reduce.shuffle.parallelcopies</name>
    <value>10</value>
  </property>
  <property>
    <name>mapreduce.task.timeout</name>
    <value>600000</value>
  </property>
  <property>
    <name>mapreduce.client.submit.file.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>mapreduce.job.reduces</name>
    <value>24</value>
  </property>
  <property>
    <name>mapreduce.task.io.sort.mb</name>
    <value>256</value>
  </property>
  <property>
    <name>mapreduce.map.speculative</name>
    <value>false</value>
  </property>
  <property>
    <name>mapreduce.reduce.speculative</name>
    <value>false</value>
  </property>
  <property>
    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
    <value>0.8</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>m103:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>m103:19888</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.https.address</name>
    <value>m103:19890</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.admin.address</name>
    <value>m103:10033</value>
  </property>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.staging-dir</name>
    <value>/user</value>
  </property>
  <property>
    <name>mapreduce.am.max-attempts</name>
    <value>2</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.resource.mb</name>
    <value>2048</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.resource.cpu-vcores</name>
    <value>1</value>
  </property>
  <property>
    <name>mapreduce.job.ubertask.enable</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.command-opts</name>
    <value>-Djava.net.preferIPv4Stack=true -Xmx1717986918</value>
  </property>
  <property>
    <name>mapreduce.map.java.opts</name>
    <value>-Djava.net.preferIPv4Stack=true -Xmx1717986918</value>
  </property>
  <property>
    <name>mapreduce.reduce.java.opts</name>
    <value>-Djava.net.preferIPv4Stack=true -Xmx2576980378</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.admin.user.env</name>
    <value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
  </property>
  <property>
    <name>mapreduce.map.memory.mb</name>
    <value>2048</value>
  </property>
  <property>
    <name>mapreduce.map.cpu.vcores</name>
    <value>1</value>
  </property>
  <property>
    <name>mapreduce.reduce.memory.mb</name>
    <value>3072</value>
  </property>
  <property>
    <name>mapreduce.reduce.cpu.vcores</name>
    <value>1</value>
  </property>
  <property>
    <name>mapreduce.application.classpath</name>
    <value>$HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,$MR2_CLASSPATH,$CDH_HCAT_HOME/share/hcatalog/*,$CDH_HIVE_HOME/lib/*,/etc/hive/conf,/opt/cloudera/parcels/CDH/lib/udps/*</value>
  </property>
  <property>
    <name>mapreduce.admin.user.env</name>
    <value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
  </property>
  <property>
    <name>mapreduce.shuffle.max.connections</name>
    <value>80</value>
  </property>
</configuration>
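If these three files are not on the client's classpath, the client will not see the cluster settings at all. They can also be loaded explicitly. A minimal sketch, assuming the XML files have been copied to a local conf/ directory (that local path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LoadClusterConfig {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Local copies of the cluster configuration; paths are illustrative.
        conf.addResource(new Path("conf/core-site.xml"));
        conf.addResource(new Path("conf/hdfs-site.xml"));
        conf.addResource(new Path("conf/mapred-site.xml"));
        FileSystem fs = FileSystem.get(conf);
        System.out.println("Default file system: " + fs.getUri());
        fs.close();
    }
}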
Accessing files and directories on HDFS with the Java API
package com.demo.hdfs;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;

/**
 * @author zhangzk
 */
public class FileCopyToHdfs {

    public static void main(String[] args) throws Exception {
        try {
            // uploadToHdfs();
            // deleteFromHdfs();
            // getDirectoryFromHdfs();
            appendToHdfs();
            readFromHdfs();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // Note: the finally block runs even when an exception was caught above.
            System.out.println("SUCCESS");
        }
    }
    /** Upload a local file to HDFS. */
    private static void uploadToHdfs() throws FileNotFoundException, IOException {
        String localSrc = "d://qq.txt";
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        OutputStream out = fs.create(new Path(dst), new Progressable() {
            public void progress() {
                System.out.print(".");
            }
        });
        // Copy with a 4 KB buffer and close both streams when done.
        IOUtils.copyBytes(in, out, 4096, true);
    }
    /** Read a file from HDFS and save it to the local file system. */
    private static void readFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FSDataInputStream hdfsInStream = fs.open(new Path(dst));
        OutputStream out = new FileOutputStream("d:/qq-hdfs.txt");
        byte[] ioBuffer = new byte[1024];
        int readLen = hdfsInStream.read(ioBuffer);
        while (-1 != readLen) {
            out.write(ioBuffer, 0, readLen);
            readLen = hdfsInStream.read(ioBuffer);
        }
        out.close();
        hdfsInStream.close();
        fs.close();
    }
    /**
     * Append content to the end of a file on HDFS.
     * Note: appending requires adding
     * <property><name>dfs.support.append</name><value>true</value></property>
     * to hdfs-site.xml, otherwise fs.append() fails.
     */
    private static void appendToHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FSDataOutputStream out = fs.append(new Path(dst));
        // Write the bytes once; the original version looped forever because
        // readLen was never updated inside the loop.
        byte[] content = "zhangzk add by hdfs java api".getBytes();
        out.write(content, 0, content.length);
        out.close();
        fs.close();
    }
    /** Delete a file from HDFS. */
    private static void deleteFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq-bak.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        // deleteOnExit only marks the path; it is removed when the FileSystem is closed.
        fs.deleteOnExit(new Path(dst));
        fs.close();
    }
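    /**
     * Alternative sketch (not part of the original article): FileSystem#delete removes
     * the path immediately instead of deferring removal to close time. The method name
     * is hypothetical; the target path mirrors the one used above.
     */
    private static void deleteImmediatelyFromHdfs() throws IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq-bak.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        boolean deleted = fs.delete(new Path(dst), false); // false: do not delete recursively
        System.out.println("deleted: " + deleted);
        fs.close();
    }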
    /** List the files and directories under a directory on HDFS. */
    private static void getDirectoryFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FileStatus[] fileList = fs.listStatus(new Path(dst));
        int size = fileList.length;
        for (int i = 0; i < size; i++) {
            System.out.println("name:" + fileList[i].getPath().getName()
                    + "\t\tsize:" + fileList[i].getLen());
        }
        fs.close();
    }
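    /**
     * Alternative sketch (not part of the original article): walk a directory tree
     * recursively using the same listStatus() call shown above. The method name is
     * hypothetical; the caller supplies an open FileSystem and a starting directory.
     */
    private static void listRecursively(FileSystem fs, Path dir) throws IOException {
        for (FileStatus status : fs.listStatus(dir)) {
            System.out.println(status.getPath() + "\tsize:" + status.getLen());
            if (status.isDir()) { // descend into subdirectories
                listRecursively(fs, status.getPath());
            }
        }
    }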
}
Note: the append operation is no longer supported starting with hadoop-0.21; for more on append, see the write-up on Javaeye.
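For versions where append is unavailable or disabled, a common workaround is to rewrite the file with the extra bytes attached at the end. A minimal sketch, assuming the same HDFS URL style as above (the helper name and the temporary ".tmp" path are illustrative):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class AppendByRewrite {
    /** Re-create the file with its old bytes followed by the new bytes. */
    public static void appendByRewrite(String dst, byte[] extra) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        Path src = new Path(dst);
        Path tmp = new Path(dst + ".tmp");        // illustrative temporary path
        FSDataInputStream in = fs.open(src);
        FSDataOutputStream out = fs.create(tmp, true);
        IOUtils.copyBytes(in, out, 4096, false);  // copy existing content, keep out open
        in.close();
        out.write(extra);                         // then add the new bytes
        out.close();
        fs.delete(src, false);                    // replace the original with the rewrite
        fs.rename(tmp, src);
        fs.close();
    }
}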