Kubernetes GlusterFS Configuration

cainiaoit 2018-03-16 14:25

############################################
# The difference between a PVC and a PV
# A PV can be thought of as a disk; there can be many PVs of different sizes, e.g. three PVs of 10G, 50G and 100G
# If a PVC requests 40G, it is bound to the 50G PV, so the PVC actually ends up with 50G rather than the 40G it asked for
# Run the following on all nodes
# GlusterFS references:
#http://www.cnblogs.com/jicki/p/5801712.html
#https://jimmysong.io/blogs/kubernetes-with-glusterfs/
yum install -y centos-release-gluster
yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma
# Create the glusterd working directory
mkdir -p /data/glusterd
# Point glusterd's working directory at /data/glusterd instead of /var/lib/glusterd
sed -i 's#var/lib#data#g' /etc/glusterfs/glusterd.vol
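# To confirm the change, the working-directory option in glusterd.vol should now point at the new path
# (the expected line is shown as a comment below; the rest of the file is untouched)
grep working-directory /etc/glusterfs/glusterd.vol
# option working-directory /data/glusterd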
# Start glusterd
systemctl start glusterd.service
# Enable it at boot
systemctl enable glusterd.service
# Check its status
systemctl status glusterd.service
# Open the GlusterFS ports, and only to the cluster node IPs
# (24007 is glusterd; bricks listen on ports starting at 49152, one port per brick)
for i in 192.168.1.1 192.168.1.2 192.168.1.3
do
    iptables -I INPUT -s $i -p tcp -m multiport --dports 24007,49152 -j ACCEPT
done

# Create the brick directory (on every node)
mkdir /data/gfs_data
# Add the peers; run this on the master host
gluster peer probe minion-1
gluster peer probe minion-2
# Check peer status
gluster peer status
# Create a replicated volume with 3 replicas
gluster volume create test-volume replica 3 transport tcp master:/data/gfs_data minion-1:/data/gfs_data minion-2:/data/gfs_data
# Restrict client access to the cluster IPs (set after the volume exists)
gluster volume set test-volume auth.allow 192.168.1.1,192.168.1.2,192.168.1.3
# To allow all clients again, reset the option:
#gluster volume reset test-volume auth.allow
# Start the volume
gluster volume start test-volume
# Check volume info
gluster volume info
# Set a quota on the volume root
gluster volume quota test-volume enable
gluster volume quota test-volume limit-usage / 300GB
# Tuning: set the cache size (too large a cache risks data loss on a sudden reboot or power failure)
gluster volume set test-volume performance.cache-size 2GB
# Set the number of IO threads
gluster volume set test-volume performance.io-thread-count 16
# Set the network ping timeout (seconds)
gluster volume set test-volume network.ping-timeout 10
# Set the write-behind buffer size
gluster volume set test-volume performance.write-behind-window-size 512MB
# Edit glusterfs-endpoints.json: give each GlusterFS node IP its own addresses entry and change the port to 24007
# Register the GlusterFS node IPs and port with kubectl apply
kubectl apply -f ./kubernetes/examples/volumes/glusterfs/glusterfs-endpoints.json
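# A sketch of the edited glusterfs-endpoints.json, assuming the three node IPs used above
# (one addresses entry per node, all on port 24007; field names follow the Kubernetes Endpoints API):
{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": { "name": "glusterfs-cluster" },
  "subsets": [
    { "addresses": [{ "ip": "192.168.1.1" }], "ports": [{ "port": 24007 }] },
    { "addresses": [{ "ip": "192.168.1.2" }], "ports": [{ "port": 24007 }] },
    { "addresses": [{ "ip": "192.168.1.3" }], "ports": [{ "port": 24007 }] }
  ]
}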
# Create the glusterfs-cluster service that backs the endpoints
kubectl apply -f ./kubernetes/examples/volumes/glusterfs/glusterfs-service.json
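# A sketch of glusterfs-service.json; the service only exists so that the endpoints persist,
# and its port is assumed here to match the endpoints port:
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": { "name": "glusterfs-cluster" },
  "spec": {
    "ports": [{ "port": 24007 }]
  }
}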
# kubectl apply -f demo.yaml: the two snippets below mount the volume at /data;
# once demo.yaml is deployed, run df -h inside the pod to check whether /data shows the 300G volume
# Add this inside the container definition (under containers) in the YAML
"volumes": [
            "volumeMounts": [
                {
                    "mountPath": "/data",
                    "name": "glusterfdata"
                }
            ]
          }
# Add this at the same level as containers (under the pod spec)
"volumes": [
            {
                "name": "glusterfdata",
                "glusterfs": {
                    "endpoints": "glusterfs-cluster",
                    "path": "test-volume",
                    "readOnly": false
                }
            }
        ],
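# Put together, a minimal demo.yaml might look like this (pod name, container name and image are
# hypothetical placeholders; the volume wiring matches the two snippets above):
apiVersion: v1
kind: Pod
metadata:
  name: glusterfs-demo
spec:
  containers:
  - name: demo
    image: nginx
    volumeMounts:
    - mountPath: /data
      name: glusterfdata
  volumes:
  - name: glusterfdata
    glusterfs:
      endpoints: glusterfs-cluster
      path: test-volume
      readOnly: false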
# The volume can also be mounted directly on a physical host
mount.glusterfs 192.168.1.1:/test-volume /data/mnt
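# To make the mount survive a reboot, an fstab entry can be added
# (a sketch; _netdev delays the mount until the network is up):
echo '192.168.1.1:/test-volume /data/mnt glusterfs defaults,_netdev 0 0' >> /etc/fstab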
# Create the PV
cat << EOF > glusterfs-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gluster-disk-1
spec:
  capacity:
    storage: 300Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: "glusterfs-cluster"
    path: "test-volume"
    readOnly: false
EOF
kubectl apply -f glusterfs-pv.yaml
kubectl get pv
# Create the PVC
cat << EOF > glusterfs-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterfs-disk-1
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 300Gi
EOF
kubectl apply -f glusterfs-pvc.yaml
kubectl get pvc
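# With the PVC bound, the volumes section of demo.yaml can reference the claim
# instead of pointing at the endpoints directly (a sketch, assuming the claim name above):
  volumes:
  - name: glusterfdata
    persistentVolumeClaim:
      claimName: glusterfs-disk-1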
kubectl apply -f demo.yaml
