基于ceph的rbd的pvc配置
https://kubernetes.io/docs/concepts/storage/storage-classes/
https://github.com/ceph/ceph-container/tree/master/examples/kubernetes/rbd-provisioner
环境
k8s:1.14.2
os:centos7
ceph: 13.2.6
ceph-monitors:10.0.3.43:6789,10.0.3.44:6789,10.0.3.45:6789
获取ceph相关配置
- 所有节点安装ceph包
yum install -y ceph-common
获取cephadmin的配置,ceph集群上面操作
# Extract the client.admin key. Fix: the original had a quoting typo —
# awk '{print $3'} — the closing brace must be INSIDE the single quotes.
ceph auth get client.admin 2>&1 | grep "key = " | awk '{print $3}' | xargs echo -n
# Remember the output and save it to the file kube_ceph_admin.secret:
# [root@k8s-master-01.novalocal 14:54 ~/k8s/ceph/ceph-rbd/rbac]
# cat kube_ceph_admin.secret
# AQAikS1dwG5qMRAAo1/X31vsna24nAFhaW8dxw==
配置kube用户
# Create the "kube" pool and a client.kube user restricted to it,
# then print the key and mark the pool for rbd use.
ceph osd pool create kube 128 128
ceph auth add client.kube mon 'allow r' osd 'allow rwx pool=kube'
ceph auth get-key client.kube
ceph osd pool application enable kube rbd
# Remember the get-key output and save it to the file kube_ceph.secret:
# [root@k8s-master-01.novalocal 14:54 ~/k8s/ceph/ceph-rbd/rbac]
# cat kube_ceph.secret
# AQBosi1dwMXkBxAAi3EUA0Rx7FlsHPy3Pd/jtg==
配置ns
# [root@k8s-master-01.novalocal 14:44 ~/k8s/ceph/ceph-rbd/rbac]
# cat ceph-rbd-ns.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: ceph-rbd
  labels:
    name: ceph-rbd
配置serviceaccount
说明:由于使用动态存储,controller-manager需要使用rbd创建image,所以controller-manager需要能执行rbd命令。官方的controller-manager镜像中没有rbd命令,所以在执行的时候会失败。具体可以查看https://github.com/kubernetes/kubernetes/issues/38923 。所以采用kubeadm部署的集群需要配置rbac,并部署相关的rbd-provisioner
serviceaccount的配置
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: ceph-rbd
- 配置clusterrole
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # Fix: ClusterRole is cluster-scoped — the "namespace: ceph-rbd" field the
  # original carried is invalid there and is ignored by the API server.
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns", "coredns"]
    verbs: ["list", "get"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "create", "delete"]
- 配置cluster绑定
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: ceph-rbd
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
- 配置role

```
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: ceph-rbd
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
- 配置role绑定
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  # Fix: the RoleBinding must live in the same namespace as the Role and the
  # ServiceAccount; without metadata.namespace it would be created in "default"
  # and the binding would not take effect for the provisioner pod.
  namespace: ceph-rbd
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: ceph-rbd
```
配置deployment
# Fix: extensions/v1beta1 Deployments are deprecated (removed in k8s 1.16);
# apps/v1 additionally requires an explicit spec.selector, and the pod-level
# "serviceAccount" field is deprecated in favor of "serviceAccountName".
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: ceph-rbd
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
        - name: rbd-provisioner
          image: "dchub.sreblog.com/k8s/rbd-provisioner:v2.1.1-k8s1.11"
          env:
            # Must match the "provisioner:" value of the StorageClass
            - name: PROVISIONER_NAME
              value: ceph.com/rbd
      serviceAccountName: rbd-provisioner
- 配置密钥 ``` 对key进行base64编码(注意去掉末尾的换行符,否则解码后的key带换行,rbd挂载时会鉴权失败) #cat kube_ceph_admin.secret | tr -d '\n' | base64 #cat kube_ceph.secret | tr -d '\n' | base64 创建secret文件
[root@k8s-master-01.novalocal 14:56 ~/k8s/ceph/ceph-rbd/rbac]
cat kube_ceph_secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-in-secret
  namespace: kube-system
type: "kubernetes.io/rbd"
data:
  # Fix: the original value ended in "PQo=", i.e. it encoded "<key>\n";
  # a trailing newline in the decoded key makes rbd authentication fail.
  # Regenerate with: tr -d '\n' < kube_ceph_admin.secret | base64
  key: QVFBaWtTMWR3RzVxTVJBQW8xL1gzMXZzbmEyNG5BRmhhVzhkeHc9PQ==
apiVersion: v1
kind: Secret
metadata:
  name: ceph-in-secret
  namespace: kube-system
type: "kubernetes.io/rbd"
data:
  # Fix: the original value ended in "PQo=", i.e. it encoded "<key>\n";
  # a trailing newline in the decoded key makes rbd authentication fail.
  # Regenerate with: tr -d '\n' < kube_ceph.secret | base64
  key: QVFCb3NpMWR3TVhrQnhBQWkzRVVBMFJ4N0Zsc0hQeTNQZC9qdGc9PQ==
- 配置storageclass(修改provisioner的配置)
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-rbd
# Fix: the original manifest listed "provisioner:" twice (ceph.com/rbd and
# kubernetes.io/rbd) — duplicate keys are invalid and the last one would
# silently win. Keep ceph.com/rbd: it matches PROVISIONER_NAME in the
# rbd-provisioner Deployment (and the verified `kubectl get sc` output).
# kubernetes.io/rbd is the in-tree provisioner, which fails on kubeadm
# clusters because kube-controller-manager lacks the rbd binary.
provisioner: ceph.com/rbd
parameters:
  monitors: 10.0.3.43:6789,10.0.3.44:6789,10.0.3.45:6789
  adminId: admin
  adminSecretName: ceph-admin-in-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-in-secret
  userSecretNamespace: kube-system
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
- 部署上线
kubectl apply -f .
- 验证
[root@k8s-master-01.novalocal 15:06 ~/k8s/ceph/ceph-rbd/rbac]
kubectl get sc
NAME PROVISIONER AGE ceph-rbd ceph.com/rbd 22m
[root@k8s-master-01.novalocal 15:06 ~/k8s/ceph/ceph-rbd/rbac]
kubectl get pod -n ceph-rbd
NAME READY STATUS RESTARTS AGE rbd-provisioner-5b67f487fd-g6k59 1/1 Running 0 10s
## 测试
- 创建pvc
[root@k8s-master-01.novalocal 15:06 ~/k8s/ceph/ceph-rbd/rbac/test]
cat test-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-rbd
  resources:
    requests:
      storage: 3Gi
- 创建pod
[root@k8s-master-01.novalocal 15:06 ~/k8s/ceph/ceph-rbd/rbac/test]
cat test-ceph-pvc.yaml
# Fix: extensions/v1beta1 Deployments are deprecated (removed in k8s 1.16);
# apps/v1 additionally requires an explicit spec.selector.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ceph-rbd-pvc-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ceph-rbd-pvc-test
  template:
    metadata:
      labels:
        app: ceph-rbd-pvc-test
    spec:
      containers:
        - name: ceph-rbd-pvc-test
          image: busybox:latest
          # Keep the pod alive so the mounted volume can be inspected
          command:
            - sleep
            - "360000"
          volumeMounts:
            - name: ceph-rbd-test
              mountPath: /data/ceph-rbd
              readOnly: false
      volumes:
        - name: ceph-rbd-test
          persistentVolumeClaim:
            claimName: test-pvc
- 验证
kubectl apply -f .
[root@k8s-master-01.novalocal 15:07 ~/k8s/ceph/ceph-rbd/rbac/test]
kubectl get pvc,pv
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE persistentvolumeclaim/test-pvc Bound pvc-3537d99e-a9f0-11e9-9496-fa163ee96380 3Gi RWO ceph-rbd 26m
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE persistentvolume/pvc-3537d99e-a9f0-11e9-9496-fa163ee96380 3Gi RWO Delete Bound default/test-pvc ceph-rbd 26m