(1)、配置hosts
10.1.10.129 node1.cluster.local node1
10.1.10.130 node2.cluster.local node2
10.1.10.128 master.cluster.local master
(2)、安装
# yum install centos-release-gluster -y
# yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma
(3)、启动并配置开机自启动
# systemctl start glusterd.service && systemctl enable glusterd.service
# iptables -I INPUT -p tcp --dport 24007 -j ACCEPT
(4)、将节点添加入集群
# gluster peer probe master
# gluster peer probe node1
# gluster peer probe node2
(5)、查看集群状态
# gluster peer status
Number of Peers: 2

Hostname: node1
Uuid: 67c49963-79b2-4fda-acf8-019491164dcf
State: Peer in Cluster (Connected)

Hostname: node2
Uuid: 78e0bf3b-5b3a-4663-928b-1830e16fe0d9
State: Peer in Cluster (Connected)
(6)、安装client测试
# yum install -y glusterfs glusterfs-fuse
# 创建数据目录, 所有节点都要操作
# mkdir /data/gluster/data -p
(7)、创建volume
# gluster volume create models replica 3 master:/data/gluster/data node1:/data/gluster/data node2:/data/gluster/data force
(8)、查看volume
# gluster volume info

Volume Name: models
Type: Replicate
Volume ID: 53bdad7b-d40f-4160-bd42-4b70c8278506
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: master:/data/gluster/data
Brick2: node1:/data/gluster/data
Brick3: node2:/data/gluster/data
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
(9)、启动models
# gluster volume start models
(10)、挂载
# 注意: 挂载点不要覆盖 brick 所在目录(/data/gluster/data 在 /data 下),
# 建议挂载到独立目录, 例如 /mnt/models
# mount -t glusterfs master:models /data
(1)、调优
# 以下以 k8s-volume 为例, 实际操作时请替换为自己的卷名(如上文的 models)

# 开启 指定 volume 的配额
$ gluster volume quota k8s-volume enable
# 限制 指定 volume 的配额
$ gluster volume quota k8s-volume limit-usage / 1TB
# 设置 cache 大小, 默认32MB
$ gluster volume set k8s-volume performance.cache-size 4GB
# 设置 io 线程, 太大会导致进程崩溃
$ gluster volume set k8s-volume performance.io-thread-count 16
# 设置 网络检测时间, 默认42s
$ gluster volume set k8s-volume network.ping-timeout 10
# 设置 写缓冲区的大小, 默认1M
$ gluster volume set k8s-volume performance.write-behind-window-size 1024MB
