GlusterFS Setup

#### Environment

gluster-server: 192.168.10.91 192.168.10.92 192.168.10.93 192.168.10.94
client: 192.168.10.95

#### Install GlusterFS

  • Prepare the disk. We use the logical volume /dev/mapper/vg_ops1-lv_gfs (an fstab sketch for persisting the mount follows the commands below)

# mkfs.ext4 /dev/mapper/vg_ops1-lv_gfs

# mkdir -p /media/gfs && mount /dev/mapper/vg_ops1-lv_gfs /media/gfs
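To make the brick mount persist across reboots, it can also be added to /etc/fstab (a minimal sketch, not part of the original steps; adjust the device path and options to your environment):

# echo '/dev/mapper/vg_ops1-lv_gfs /media/gfs ext4 defaults 0 0' >> /etc/fstab
# mount -a     # verify the entry mounts without errors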

  • Install GlusterFS

# yum install http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-6.6/x86_64/glusterfs{,-server,-fuse,-geo-replication,-libs,-api,-cli,-client-xlators}-3.7.11-1.el6.x86_64.rpm -y
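To confirm the packages landed, the installed version can be checked with standard commands (an added sanity check, not in the original write-up):

# rpm -qa | grep glusterfs
# glusterfs --version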

  • Start GlusterFS

# /etc/init.d/glusterd start
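On EL6 the daemon can also be enabled at boot time (assuming a stock SysV init setup, as on CentOS 6):

# chkconfig glusterd on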

#### Configure Gluster

  • Add peer nodes from 192.168.10.91

# gluster

gluster> peer probe 192.168.10.92
peer probe: success. 
gluster> peer probe 192.168.10.93
peer probe: success. 
gluster> peer probe 192.168.10.94
peer probe: success. 
gluster> peer status
Number of Peers: 3

Hostname: 192.168.10.92
Uuid: bfda894d-d644-415c-8a96-d5e28402da9e
State: Peer in Cluster (Connected)

Hostname: 192.168.10.93
Uuid: 8db3897b-8c11-4abf-9730-d3d254288e21
State: Peer in Cluster (Connected)

Hostname: 192.168.10.94
Uuid: 43e2d410-11b9-49d2-ad47-1943ae24c997
State: Peer in Cluster (Connected)
gluster> pool list
UUID                                    Hostname        State
bfda894d-d644-415c-8a96-d5e28402da9e    192.168.10.92   Connected 
8db3897b-8c11-4abf-9730-d3d254288e21    192.168.10.93   Connected 
43e2d410-11b9-49d2-ad47-1943ae24c997    192.168.10.94   Connected 
a43aa927-7a70-44ad-b5d1-14dc616b561c    localhost       Connected
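The same commands can also be run non-interactively from the shell instead of the gluster console, which is handy for scripting:

# gluster peer probe 192.168.10.92
# gluster peer status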

  • Configure the volume

An error is returned because /media/gfs is itself a mount point; retrying with force succeeds (a sub-directory brick avoids the warning entirely, as sketched after the output below). Note that the number of bricks must be a multiple of the configured replica count.

gluster> volume create vol1 replica 2 192.168.10.91:/media/gfs 192.168.10.92:/media/gfs
volume create: vol1: failed: The brick 192.168.10.91:/media/gfs is a mount point. Please create a sub-directory under the mount point and use that as the brick directory. Or use 'force' at the end of the command if you want to override this behavior.
gluster> volume create vol1 replica 2 192.168.10.91:/media/gfs 192.168.10.92:/media/gfs force
volume create: vol1: success: please start the volume to access data
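Instead of force, the warning can be avoided as the error message suggests: create a sub-directory on each brick mount and use it as the brick path (a sketch; the brick1 directory name is illustrative):

# mkdir /media/gfs/brick1      # run on each server
gluster> volume create vol1 replica 2 192.168.10.91:/media/gfs/brick1 192.168.10.92:/media/gfs/brick1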
  • View volume information
gluster> volume info  # show volume information
 
Volume Name: vol1
Type: Replicate
Volume ID: b085c24d-162e-4fc2-9de9-554299f2aa18
Status: Created
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: 192.168.10.91:/media/gfs
Brick2: 192.168.10.92:/media/gfs
Options Reconfigured:
performance.readdir-ahead: on
  • Start the volume
gluster> volume start vol1     # the volume must be started before it can be mounted
volume start: vol1: success

#### Install FUSE on the client and use the volume

  • Install the FUSE client

# yum install http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-6.6/x86_64/glusterfs{,-fuse,-libs,-client-xlators}-3.7.11-1.el6.x86_64.rpm -y

  • Mount vol1; multiple clients can mount it and read/write concurrently (a persistent-mount sketch follows the command below)

# mkdir -p /medis/gfs-vol1 && mount.glusterfs 192.168.10.92:vol1 /medis/gfs-vol1
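For a mount that survives reboots, a glusterfs entry can be added to /etc/fstab (a sketch; _netdev delays mounting until the network is up):

# echo '192.168.10.92:vol1 /medis/gfs-vol1 glusterfs defaults,_netdev 0 0' >> /etc/fstab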

  • You can cd into /medis/gfs-vol1 and work with files directly
[root@ops5 gfs-vol1]# echo 112233 > d
[root@ops5 gfs-vol1]# cat d
112233

All files:

# ls
a  b  c  d  file1  file10  file11  file2  file3  file4  file5  file6  file7  file8  file9  lost+found

File distribution on each node (this listing reflects the 2 x 2 layout after the expansion described below):

[root@ops1 ~]# ls /media/gfs/
file1  file2  file5  file6  file8  lost+found
[root@ops2 ~]# ls /media/gfs/
file1  file2  file5  file6  file8  lost+found
[root@ops3 ~]# ls /media/gfs/
a  b  c  d  file10  file11  file3  file4  file7  file9  lost+found
[root@ops4 gfs-vol1]# ls /media/gfs/
a  b  c  d  file10  file11  file3  file4  file7  file9  lost+found
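The split follows GlusterFS's distributed hash translator (DHT): each replica pair owns part of the hash range, so every file lands on exactly one pair and is mirrored within it. The layout can be inspected on a brick through its extended attributes (getfattr comes from the attr package; run this on a server, not the client):

# getfattr -d -m . -e hex /media/gfs/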

#### Other Gluster operations

##### Volume expansion

gluster> volume add-brick vol1 192.168.10.93:/media/gfs 192.168.10.94:/media/gfs force
volume add-brick: success
gluster> volume info
 
Volume Name: vol1
Type: Distributed-Replicate           # note the change here
Volume ID: b085c24d-162e-4fc2-9de9-554299f2aa18
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 192.168.10.91:/media/gfs
Brick2: 192.168.10.92:/media/gfs
Brick3: 192.168.10.93:/media/gfs
Brick4: 192.168.10.94:/media/gfs
Options Reconfigured:
performance.readdir-ahead: on

##### Volume rebalancing

gluster> volume rebalance vol1 start 
volume rebalance: vol1: success: Rebalance on vol1 has been started successfully. Use rebalance status command to check status of the rebalance process.
ID: b10dd77e-a1b6-4c7c-abcc-b8b96792c1b2
gluster> volume rebalance vol1 status
                                    Node Rebalanced-files          size       scanned      failures       skipped               status  run time in h:m:s
                               ---------      -----------   -----------   -----------   -----------   -----------         ------------     --------------
                               localhost                3        5Bytes             3             0             0            completed        0:0:0
                           192.168.10.92                0        0Bytes             0             0             0            completed        0:0:0
                           192.168.10.93                0        0Bytes             3             0             0            completed        0:0:0
                           192.168.10.94                0        0Bytes             0             0             0            completed        0:0:0
volume rebalance: vol1: success

##### List all volumes

gluster> volume list
vol1

##### Check volume status

Appending detail shows capacity, clients shows client connections and bytes transferred, and tasks shows running tasks; see help for the rest (examples follow the status output below).

gluster> volume status vol1      # omit the volume name to show all volumes
Status of volume: vol1
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 192.168.10.91:/media/gfs              49152     0          Y       20959
Brick 192.168.10.92:/media/gfs              49152     0          Y       5288 
Brick 192.168.10.93:/media/gfs              49152     0          Y       15863
Brick 192.168.10.94:/media/gfs              49152     0          Y       3840 
NFS Server on localhost                     N/A       N/A        N       N/A  
Self-heal Daemon on localhost               N/A       N/A        Y       21207
NFS Server on 192.168.10.92                 N/A       N/A        N       N/A  
Self-heal Daemon on 192.168.10.92           N/A       N/A        Y       5359 
NFS Server on 192.168.10.94                 N/A       N/A        N       N/A  
Self-heal Daemon on 192.168.10.94           N/A       N/A        Y       3868 
NFS Server on 192.168.10.93                 N/A       N/A        N       N/A  
Self-heal Daemon on 192.168.10.93           N/A       N/A        Y       15891
 
Task Status of Volume vol1
------------------------------------------------------------------------------
Task                 : Rebalance           
ID                   : b10dd77e-a1b6-4c7c-abcc-b8b96792c1b2
Status               : completed
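
The variants mentioned above take the same form (output omitted here):

gluster> volume status vol1 detail
gluster> volume status vol1 clients
gluster> volume status vol1 tasks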