Project author: chenxingxing6

Project description:
A distributed network disk system built on Hadoop + HBase + Spring Boot
Primary language: JavaScript
Repository: git://github.com/chenxingxing6/disk.git
Created: 2019-01-23T06:37:27Z
Project community: https://github.com/chenxingxing6/disk

License:


Distributed Network Disk System

This version is fairly clean: once the Hadoop and HBase environments have been set up, the whole demo can be started.


Technology Stack

1. Hadoop
2. HBase
3. Spring Boot
……
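For orientation, here is a minimal sketch of the Spring Boot entry point such a stack needs; the class name DiskApplication is hypothetical, while the package name com.netpan is taken from the code shown later in this README.

package com.netpan;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Hypothetical entry point: Spring Boot bootstraps the web layer,
// while HDFS/HBase access goes through the singleton connection
// classes shown below (HdfsConn, HbaseConn).
@SpringBootApplication
public class DiskApplication {
    public static void main(String[] args) {
        SpringApplication.run(DiskApplication.class, args);
    }
}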


Implemented Features

1. User login and registration
2. User network disk management
3. Online file preview
4. File upload and download
……
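As an illustration of how the upload and download features can sit on top of HDFS, here is a hedged sketch of a Spring MVC controller. The controller name, URL paths, and request parameters are made up; HdfsConn is the singleton connection class shown further below.

package com.netpan.controller;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import javax.servlet.http.HttpServletResponse;
import java.io.OutputStream;

import com.netpan.dao.conn.HdfsConn;

// Hypothetical controller sketch: stores uploaded files in HDFS and
// streams them back for download via the shared FileSystem instance.
@RestController
@RequestMapping("/file")
public class FileController {

    @PostMapping("/upload")
    public String upload(@RequestParam("file") MultipartFile file,
                         @RequestParam("dir") String dir) throws Exception {
        FileSystem fs = HdfsConn.getFileSystem();
        Path target = new Path(dir, file.getOriginalFilename());
        // Copy the uploaded stream into HDFS, overwriting any existing file.
        try (OutputStream out = fs.create(target, true)) {
            IOUtils.copyBytes(file.getInputStream(), out, 4096, false);
        }
        return target.toString();
    }

    @GetMapping("/download")
    public void download(@RequestParam("path") String path,
                         HttpServletResponse response) throws Exception {
        FileSystem fs = HdfsConn.getFileSystem();
        response.setContentType("application/octet-stream");
        // Stream the HDFS file directly into the HTTP response.
        try (FSDataInputStream in = fs.open(new Path(path))) {
            IOUtils.copyBytes(in, response.getOutputStream(), 4096, false);
        }
    }
}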




HBase table creation statements



hbase-daemon.sh start master   ## start HBase
create 'email_user','user'
create 'user_id','id'
create 'gid_disk','gid'
create 'user_file','file'
create 'file','file'
create 'follow','name'
create 'followed','userid'
create 'share','content'
create 'shareed','shareid'
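The same tables can also be created from Java through the HBase Admin API. The helper below is only a sketch (the class and method names are hypothetical), using the same pre-2.0 HTableDescriptor/HColumnDescriptor API that the code in this README relies on.

package com.netpan.dao.conn;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import java.io.IOException;

// Hypothetical helper: creates a table with a single column family,
// mirroring the "create 'table','family'" shell statements above.
public class TableInitializer {

    public static void createIfAbsent(Admin admin, String table, String family)
            throws IOException {
        TableName name = TableName.valueOf(table);
        if (!admin.tableExists(name)) {
            HTableDescriptor desc = new HTableDescriptor(name);
            desc.addFamily(new HColumnDescriptor(family));
            admin.createTable(desc);
        }
    }
}

For example, it could be driven with TableInitializer.createIfAbsent(HbaseConn.getConn().getAdmin(), "email_user", "user"), reusing the HbaseConn singleton shown below.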


HdfsConn

package com.netpan.dao.conn;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import java.io.IOException;

public class HdfsConn {
    private FileSystem fileSystem = null;
    private Configuration configuration = null;

    // Lazily initialized singleton: the holder class is loaded on first access.
    private static class SingletonHolder {
        private static final HdfsConn INSTANCE = new HdfsConn();
    }

    private HdfsConn() {
        try {
            configuration = new Configuration();
            // HDFS NameNode address
            configuration.set("fs.defaultFS", "hdfs://localhost:9000/");
            // Access HDFS as the root user and disable permission checks
            System.setProperty("HADOOP_USER_NAME", "root");
            configuration.set("dfs.permissions", "false");
            fileSystem = FileSystem.get(configuration);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static FileSystem getFileSystem() {
        return SingletonHolder.INSTANCE.fileSystem;
    }

    public static Configuration getConfiguration() {
        return SingletonHolder.INSTANCE.configuration;
    }
}
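A short usage sketch (the class name and directory path are made up) showing how a DAO might use this singleton to create a per-user directory in HDFS and list its contents:

package com.netpan.dao;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import com.netpan.dao.conn.HdfsConn;

// Hypothetical example: create a user directory in HDFS and list it.
public class HdfsConnExample {
    public static void main(String[] args) throws Exception {
        FileSystem fs = HdfsConn.getFileSystem();
        Path userDir = new Path("/netpan/demo-user");   // made-up path
        fs.mkdirs(userDir);
        for (FileStatus status : fs.listStatus(userDir)) {
            System.out.println(status.getPath() + "  " + status.getLen());
        }
    }
}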

HbaseConn

package com.netpan.dao.conn;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class HbaseConn {
    private Connection conn;
    private Admin admin;

    // Lazily initialized singleton, same pattern as HdfsConn.
    private static class SingletonHolder {
        private static final HbaseConn INSTANCE = new HbaseConn();
    }

    private HbaseConn() {
        try {
            Configuration conf = HBaseConfiguration.create(new Configuration());
            conf.set("hbase.zookeeper.quorum", "localhost");         // HBase/ZooKeeper host
            conf.set("hbase.zookeeper.property.clientPort", "2181"); // ZooKeeper client port
            conn = ConnectionFactory.createConnection(conf);
            admin = conn.getAdmin();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Get the shared connection
    public static Connection getConn() {
        return SingletonHolder.INSTANCE.conn;
    }

    // List the names of all tables in HBase
    public List<String> getAllTables() {
        List<String> tables = null;
        if (admin != null) {
            try {
                HTableDescriptor[] allTable = admin.listTables();
                if (allTable.length > 0) {
                    tables = new ArrayList<String>();
                    for (HTableDescriptor hTableDescriptor : allTable) {
                        tables.add(hTableDescriptor.getNameAsString());
                        System.out.println(hTableDescriptor.getNameAsString());
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return tables;
    }
}
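Finally, a hedged sketch of how a DAO could use HbaseConn to write and read one row of the email_user table created above; the class name, row key, and column qualifier are made up, while the column family 'user' comes from the create statement.

package com.netpan.dao;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import com.netpan.dao.conn.HbaseConn;

// Hypothetical example: put and get a single cell in the 'email_user' table.
public class HbaseConnExample {
    public static void main(String[] args) throws Exception {
        Connection conn = HbaseConn.getConn();
        try (Table table = conn.getTable(TableName.valueOf("email_user"))) {
            // Write row key "demo@example.com", column user:name = "demo".
            Put put = new Put(Bytes.toBytes("demo@example.com"));
            put.addColumn(Bytes.toBytes("user"), Bytes.toBytes("name"), Bytes.toBytes("demo"));
            table.put(put);

            // Read the same cell back.
            Result result = table.get(new Get(Bytes.toBytes("demo@example.com")));
            byte[] value = result.getValue(Bytes.toBytes("user"), Bytes.toBytes("name"));
            System.out.println(Bytes.toString(value));
        }
    }
}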