Connecting k8s to the Ceph File System

Ceph server

1. Install the MDS

ceph-deploy mds create ceph01 ceph02 ceph03
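
Before moving on, it's worth confirming that the MDS daemons have registered with the cluster:

ceph mds stat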

2. Create the CephFS data pool and metadata pool, which will provide the file system service for k8s

ceph osd pool create cephfs_data 64        # 64 is the pg_num
ceph osd pool create cephfs_metadata 64

3. Create the file system

ceph fs new k8s-cephfs cephfs_metadata cephfs_data
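
To verify the file system and its pool bindings, list it on the Ceph server; it should show k8s-cephfs backed by cephfs_metadata and cephfs_data:

ceph fs ls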

4. Associate the pool with the application

[root@ceph01 ~]# ceph osd pool application enable cephfs_data cephfs
enabled application 'cephfs' on pool 'cephfs_data'

5. Set a quota

ceph osd pool set-quota cephfs_data max_bytes 100G
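
To confirm the quota took effect, read it back:

ceph osd pool get-quota cephfs_data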

6. Create a user

[root@ceph01 kubernetes]# ceph auth get-or-create client.cephfs mon 'allow r' mds 'allow r, allow rw path=/' osd 'allow rw pool=cephfs_data'
[client.cephfs]
        key = AQCoW0dgQk4qGhAAwayKv70OSyyWB3XpZ1JLYQ==

k8s nodes

1. Download the configuration files
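
For example, the v3.2.0 source archive can be fetched from GitHub (URL assumed; if the node has no internet access, download it on another machine and upload it in the next step):

wget -O ceph-csi-3.2.0.zip https://github.com/ceph/ceph-csi/archive/refs/tags/v3.2.0.zip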

2. Upload the configuration archive and extract it

unzip ceph-csi-3.2.0.zip

3. Create a namespace for managing ceph-csi

kubectl create ns ceph-csi

4. Edit ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-config-map.yaml

Get the cluster information (run on the Ceph admin node):

[root@ceph01 ~]# ceph mon dump
    dumped monmap epoch 2
    epoch 2
    fsid b1c2511e-a1a5-4d6d-a4be-0e7f0d6d4294
    last_changed 2021-02-22 14:36:08.199609
    created 2021-02-22 14:27:26.357269
    min_mon_release 14 (nautilus)
    0: [v2:192.168.1.69:3300/0,v1:192.168.1.69:6789/0] mon.ceph01
    1: [v2:192.168.1.70:3300/0,v1:192.168.1.70:6789/0] mon.ceph02
    2: [v2:192.168.1.71:3300/0,v1:192.168.1.71:6789/0] mon.ceph03
  • b1c2511e-a1a5-4d6d-a4be-0e7f0d6d4294 is the cluster ID
  • Monitor addresses: 192.168.1.69:6789, 192.168.1.70:6789, 192.168.1.71:6789

Edit the file:

vim ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-config-map.yaml

After editing, csi-config-map.yaml reads as follows:

---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "b1c2511e-a1a5-4d6d-a4be-0e7f0d6d4294",
        "monitors": [
          "192.168.1.69:6789",
          "192.168.1.70:6789",
          "192.168.1.71:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config

5. Create the csi-config-map

kubectl -n ceph-csi apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-config-map.yaml

6. Create the csi-cephfs-secret

  • Create
cat <<EOF > ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-cephfs-secret.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
  namespace: ceph-csi
stringData:
  # Required for statically provisioned volumes
  userID: admin
  userKey: AQCJe+Bfb6JtOhAANdn/FmcTj179PW6EI4KTng==

  # Required for dynamically provisioned volumes
  adminID: admin
  adminKey: AQCJe+Bfb6JtOhAANdn/FmcTj179PW6EI4KTng==
EOF

AQCoW0dgQk4qGhAAwayKv70OSyyWB3XpZ1JLYQ== is the key for client.cephfs and can be obtained by running ceph auth get client.cephfs on the Ceph server; make sure the key in the Secret matches the user it is paired with (the example above uses the admin user's key).
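
If you only need the bare key to paste into the Secret, ceph auth get-key prints it without the surrounding keyring:

ceph auth get-key client.cephfs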

  • Apply
kubectl apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-cephfs-secret.yaml

7. Change namespace in the manifests from default to ceph-csi

sed -i "s/namespace: default/namespace: ceph-csi/g" $(grep -rl "namespace: default" ./ceph-csi-3.2.0/deploy/cephfs/kubernetes)
# Note: in the command below, press Enter after "namespace: ceph-csi" and type the closing }" on the next line
sed -i -e "/^kind: ServiceAccount/{N;N;a\  namespace: ceph-csi
}" $(egrep -rl "^kind: ServiceAccount" ./ceph-csi-3.2.0/deploy/cephfs/kubernetes)

8. Create the ServiceAccount and RBAC ClusterRole/ClusterRoleBinding resources

kubectl apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml
kubectl apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml

9. Create the PodSecurityPolicy objects

kubectl apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-provisioner-psp.yaml
kubectl apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-nodeplugin-psp.yaml

10. Adjust csi-cephfsplugin-provisioner.yaml and csi-cephfsplugin.yaml

  • In csi-cephfsplugin.yaml, change the image fields to registry addresses reachable from your cluster:
k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1
quay.io/cephcsi/cephcsi:v3.2.0
  • In csi-cephfsplugin-provisioner.yaml, change the image fields to registry addresses reachable from your cluster:
quay.io/cephcsi/cephcsi:v3.2.0
k8s.gcr.io/sig-storage/csi-provisioner:v2.0.4
k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.2
k8s.gcr.io/sig-storage/csi-attacher:v3.0.2
k8s.gcr.io/sig-storage/csi-resizer:v1.0.1

11. Deploy csi-cephfsplugin-provisioner.yaml and csi-cephfsplugin.yaml

kubectl -n ceph-csi apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
kubectl -n ceph-csi apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml

12. Check the running status

kubectl get pod -n ceph-csi
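
To block until all ceph-csi pods are ready instead of polling, kubectl wait is a convenient alternative:

kubectl -n ceph-csi wait --for=condition=Ready pod --all --timeout=300s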

13. Generate the StorageClass configuration file

b1c2511e-a1a5-4d6d-a4be-0e7f0d6d4294 is this cluster's ID; be sure to substitute your own. RBD-only parameters such as imageFeatures and csi.storage.k8s.io/fstype do not apply to CephFS and are omitted below.

  • Generate the configuration file
cat <<EOF > ceph-csi-3.2.0/deploy/cephfs/kubernetes/storageclass.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-csi-cephfs-sc
provisioner: cephfs.csi.ceph.com
parameters:
  clusterID: b1c2511e-a1a5-4d6d-a4be-0e7f0d6d4294
  fsName: k8s-cephfs
  pool: cephfs_data
  csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi
  csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: ceph-csi
  csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi
reclaimPolicy: Delete
allowVolumeExpansion: true
EOF

14. Create the StorageClass

kubectl apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/storageclass.yaml

15. Set the default storage class (a default already exists here, so edit it if you need to change it)

kubectl patch storageclass ceph-csi-rbd-sc -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

16. View the storage classes

[root@ceph01 ~]# kubectl get sc
NAME                        PROVISIONER           RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
ceph-csi-cephfs-sc          cephfs.csi.ceph.com   Delete          Immediate              true                   5s
ceph-csi-rbd-sc (default)   rbd.csi.ceph.com      Delete          Immediate              true                   27h

17. Create a PVC to verify availability

  • Generate the configuration
cat <<EOF > ceph-csi-3.2.0/deploy/cephfs/kubernetes/pvc-demo.yaml 
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-pvc-demo
  namespace: default
spec:
  storageClassName: ceph-csi-cephfs-sc
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
EOF
  • Create
kubectl apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/pvc-demo.yaml
  • View
[root@ceph01 ~]# kubectl get pvc
NAME                                   STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS         AGE
ceph-pvc-demo                          Bound    pvc-360f8c5b-0f82-4f17-957b-a7eb5cf93f7e   20Gi       RWO            ceph-csi-rbd-sc      28h
cephfs-pvc-demo                        Bound    pvc-9400e0ab-2e44-4ce6-af39-a403441931e5   1Gi        RWX            ceph-csi-cephfs-sc   71s

18. Expand the PVC

  • Generate the configuration
cat <<EOF > ceph-csi-3.2.0/deploy/cephfs/kubernetes/nginx-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: cephfs-testpv
  labels:
    role: web-frontend
spec:
  containers:
  - name: web
    image: nginx
    ports:
      - name: web
        containerPort: 80
    volumeMounts:
      - name: cephfs-pvc-demo
        mountPath: "/usr/share/nginx/html"
  volumes:
  - name: cephfs-pvc-demo
    persistentVolumeClaim:
      claimName: cephfs-pvc-demo
EOF
  • Deploy
kubectl apply -f ceph-csi-3.2.0/deploy/cephfs/kubernetes/nginx-demo.yaml
  • View the PVC
[root@ceph01 ~]# kubectl get pvc cephfs-pvc-demo
NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS         AGE
cephfs-pvc-demo   Bound    pvc-9400e0ab-2e44-4ce6-af39-a403441931e5   1Gi        RWX            ceph-csi-cephfs-sc   7m2s
  • Edit the PVC
kubectl edit pvc cephfs-pvc-demo
  • Modify the following, changing storage: 1Gi to storage: 10Gi
...
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
...
  • View the PVC again
[root@ceph01 ~]# kubectl get pvc cephfs-pvc-demo
NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS         AGE
cephfs-pvc-demo   Bound    pvc-9400e0ab-2e44-4ce6-af39-a403441931e5   10Gi       RWX            ceph-csi-cephfs-sc   9m

Unlike Ceph RBD, expanding the PVC does not require restarting the backend application.
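
You can confirm the new capacity from inside the running pod, for example:

kubectl exec cephfs-testpv -- df -h /usr/share/nginx/html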
