
Using GlusterFS as storage for Kubernetes

Setting up a replicated volume
# Run on all nodes:
yum install -y glusterfs-server glusterfs-fuse
# On CentOS the systemd unit is named glusterd:
systemctl start glusterd
systemctl enable glusterd
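
Sanity-check on each node that the daemon came up:
gluster --version
systemctl status glusterd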

# Run on the 211 node (probing the local node itself is harmless but unnecessary):
gluster peer probe 192.168.2.211
gluster peer probe 192.168.2.212
gluster peer probe 192.168.2.213
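
Confirm the peers joined the trusted storage pool:
gluster peer status   # the other two nodes should show State: Peer in Cluster (Connected)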

# Create the volume (replica 2 needs two bricks on two different nodes; force is required because the bricks sit on the root partition)
gluster volume create test-volume replica 2 192.168.2.211:/home/glusterfs/data 192.168.2.212:/home/glusterfs/data force
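
The volume must be started before it can be mounted:
gluster volume start test-volume
gluster volume info test-volume   # Status should read Started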

# Mount (create the mount point first)
mkdir -p /mnt/mytest
mount -t glusterfs 192.168.2.211:/test-volume /mnt/mytest
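
Check that the mount succeeded:
df -hT /mnt/mytest   # the filesystem type should be fuse.glusterfs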


To modify files, mount the volume on a client and edit them through the mount. Do not edit files directly in the brick directories (e.g. /home/glusterfs/data) on the three nodes; writing into a brick bypasses GlusterFS and leaves the replicas out of sync.
Mount command: mount -t glusterfs k8s-master2-node2:test /mnt
Format:        mount -t glusterfs <hostname>:<volume-name> /mnt
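
To make the mount survive reboots, a typical /etc/fstab entry (a sketch; adjust the server, volume name, and mount point to your setup) looks like:
192.168.2.211:/test-volume /mnt/mytest glusterfs defaults,_netdev 0 0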

# Create the Endpoints
cat >gluster-ep.yaml <<'EOF'
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
subsets:
- addresses:
  - ip: 192.168.2.211
  - ip: 192.168.2.212
  - ip: 192.168.2.213
  ports:
  - port: 1990
    protocol: TCP
---
kind: Service
apiVersion: v1
metadata:
  name: glusterfs-cluster
spec:
  ports:
  - port: 1990
EOF
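The port value in the Endpoints appears to be required only to satisfy the API; the GlusterFS mount itself uses the standard Gluster ports, so any nonzero value works. After applying, you can inspect the result with:
kubectl describe ep glusterfs-cluster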
# Apply the manifests
[root@k8s-master1-node1 tmp]# kubectl  apply -f gluster-ep.yaml 
endpoints/glusterfs-cluster created
service/glusterfs-cluster created

# Verify
[root@k8s-master1-node1 tmp]# kubectl  get ep,svc|grep glusterfs
endpoints/glusterfs-cluster   192.168.2.211:1990,192.168.2.212:1990,192.168.2.213:1990   9m56s
service/glusterfs-cluster   ClusterIP   172.0.0.243   <none>        1990/TCP       9m56s


# Create the PV and PVC
cat >gluster-pvc.yaml<<'EOF'
---
apiVersion: v1
kind: PersistentVolume   # pv
metadata:
  name: test-pv
  labels:
    alicloud-pvname: test-pv
spec:     # PV attributes
  capacity:         # capacity
    storage: 5Gi   # storage size
  accessModes:    # access modes: ReadWriteOnce, ReadOnlyMany, or ReadWriteMany
    - ReadWriteMany
  glusterfs:
    endpoints: 'glusterfs-cluster'
    path: 'models'   # name of an existing GlusterFS volume; change this to your own (e.g. test-volume)
    readOnly: false
  persistentVolumeReclaimPolicy: Retain   # Recycle is only supported for NFS and HostPath, not GlusterFS
---
kind: PersistentVolumeClaim  # pvc
apiVersion: v1
metadata:
  name: test-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
  selector:
    matchLabels:
      alicloud-pvname: test-pv
EOF
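The path in the PV must name an existing GlusterFS volume (not a brick directory). Before applying, confirm the volume exists:
gluster volume list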
# Apply the PV and PVC
[root@k8s-master1-node1 tmp]# kubectl  apply -f gluster-pvc.yaml 
persistentvolume/test-pv created
persistentvolumeclaim/test-pvc created

# Verify the resulting status
[root@k8s-master1-node1 tmp]# kubectl  get pv,pvc
NAME                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM              STORAGECLASS   REASON   AGE
persistentvolume/test-pv   5Gi        RWX            Retain           Bound    default/test-pvc                           50s

NAME                             STATUS   VOLUME    CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/test-pvc   Bound    test-pv   5Gi        RWX                           50s
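
The PVC binds to this specific PV through the matchLabels selector rather than by name; to see which claim took the PV:
kubectl get pv test-pv -o jsonpath='{.spec.claimRef.name}'   # prints test-pvc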
# Create a pod to test the volume
cat >ng-deploy.yaml<<'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    run: nginx01
  name: nginx01
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-pod1  
  template:
    metadata:
      labels:
        app: nginx-pod1
    spec:
      containers:
      - name: nginx11
        image: nginx
        imagePullPolicy: Always
        volumeMounts:
        - mountPath: /usr/share/nginx/html   # mount path inside the container
          name: glu
      restartPolicy: Always
      volumes: 
      - name: glu   # volume definition
        persistentVolumeClaim:
          claimName: test-pvc    # use the PVC created above
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30003
  selector:
    app: nginx-pod1
  type: NodePort
EOF

# Create the pod
[root@k8s-master1-node1 tmp]# kubectl  apply -f ng-deploy.yaml 
deployment.apps/nginx01 created
service/nginx1 created

# Verify
[root@k8s-master1-node1 tmp]# kubectl  get pod,svc|grep nginx
pod/nginx01-65c889f87c-5w26q                  1/1     Running   0          5m5s
service/nginx1              NodePort    172.0.0.105   <none>        80:30003/TCP   6m55s
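
You can also confirm the mount from inside the pod (pod name taken from the output above):
kubectl exec nginx01-65c889f87c-5w26q -- df -hT /usr/share/nginx/html   # should show a fuse.glusterfs filesystem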

# Mount the GlusterFS volume that the PV references (adjust the host and volume name to match your setup)
mount -t glusterfs k8s-master2-node2:test /mnt


# Test
# Create index.html through the client mount and write some content into it
[root@k8s-master1-node1 data]# cd /mnt/
[root@k8s-master1-node1 mnt]# echo uio > index.html
[root@k8s-master1-node1 mnt]# ls
index.html
[root@k8s-master1-node1 mnt]# cat index.html 
uio
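
Finally, confirm nginx serves the file through the GlusterFS-backed volume by hitting the NodePort (assuming 192.168.2.211 is one of the cluster nodes; any node IP works):
curl http://192.168.2.211:30003   # expected response: uio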