Installing and configuring ceph-csi RBD on Kubernetes
Environment
os: CentOS 8; k8s: v1.17.5; ceph-csi (RBD): v3.0.1; ceph: 14.0.2 (Nautilus)
Configure Ceph
- Get the fsid
[root@xk-ceph-1 ~]# ceph mon dump
dumped monmap epoch 3
epoch 3
fsid 412523a5-88e6-4786-9da2-1f4eea2e5142
last_changed 2020-08-17 15:59:37.653380
created 2020-08-17 15:35:46.492288
min_mon_release 14 (nautilus)
0: [v2:10.0.4.171:3300/0,v1:10.0.4.171:6789/0] mon.xk-ceph-1
1: [v2:10.0.4.196:3300/0,v1:10.0.4.196:6789/0] mon.xk-ceph-2
2: [v2:10.0.4.183:3300/0,v1:10.0.4.183:6789/0] mon.xk-ceph-3
Note the fsid and the monitors' IPs and ports; both are needed for the CSI configuration below. To print just the fsid:
# ceph fsid
- Create a pool (the two 20s are pg_num and pgp_num)
# ceph osd pool create wh-k8s 20 20
[root@xk-ceph-1 ~]# ceph osd lspools
2 .rgw.root
3 default.rgw.control
4 default.rgw.meta
5 default.rgw.log
6 default.rgw.buckets.index
7 default.rgw.buckets.data
8 rbd
10 rbd-clone
11 kubernetes
12 wh-k8s
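Per the standard Ceph documentation, a newly created pool should also be initialized for RBD before first use (this step is implied rather than shown in the original session):
# rbd pool init wh-k8s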
- Create a CephX user
[root@xk-ceph-1 ~]# ceph auth get-or-create client.wh-k8s-user mon 'profile rbd' osd 'profile rbd pool=wh-k8s' mgr 'profile rbd pool=wh-k8s'
[client.wh-k8s-user]
key = AQANCGhftYLTBxAAZIboUDd1lVsZcf2N9aWXYQ==
[root@xk-ceph-1 ~]# ceph auth get client.wh-k8s-user
exported keyring for client.wh-k8s-user
[client.wh-k8s-user]
key = AQANCGhftYLTBxAAZIboUDd1lVsZcf2N9aWXYQ==
caps mgr = "profile rbd pool=wh-k8s"
caps mon = "profile rbd"
caps osd = "profile rbd pool=wh-k8s"
The client.wh-k8s-user account is now ready; its key goes into the Kubernetes Secret below.
Configure CSI
Configure the provisioner
- Clone the repository
# git clone https://github.com/ceph/ceph-csi.git
- Configure the ConfigMap
The manifests live under ceph-csi-master/deploy/rbd/kubernetes:
# cat csi-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "412523a5-88e6-4786-9da2-1f4eea2e5142",
        "monitors": [
          "10.0.4.171:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
  namespace: ceph-csi
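Only one monitor is listed above for brevity; for resilience, all three monitors from the ceph mon dump output can be listed instead:
"monitors": [
  "10.0.4.171:6789",
  "10.0.4.196:6789",
  "10.0.4.183:6789"
]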
- Configure the Secret
# cat csi-rbd-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: ceph-csi
stringData:
  userID: wh-k8s-user
  userKey: AQANCGhftYLTBxAAZIboUDd1lVsZcf2N9aWXYQ==
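All of the manifests in this walkthrough target the ceph-csi namespace, which the upstream files do not create; if it does not exist yet, create it first:
# kubectl create namespace ceph-csi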
Create the ConfigMap and Secret:
# kubectl apply -f csi-config-map.yaml
# kubectl apply -f csi-rbd-secret.yaml
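A quick sanity check that both objects landed in the right namespace:
# kubectl get configmap,secret -n ceph-csi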
Create the RBAC permissions
# cat csi-provisioner-rbac.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-provisioner
  namespace: ceph-csi
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-external-provisioner-runner
  namespace: ceph-csi
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    namespace: ceph-csi
roleRef:
  kind: ClusterRole
  name: rbd-external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: ceph-csi
  name: rbd-external-provisioner-cfg
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role-cfg
  # replace with non-default namespace name
  namespace: ceph-csi
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    # replace with non-default namespace name
    namespace: ceph-csi
roleRef:
  kind: Role
  name: rbd-external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io
# kubectl apply -f csi-provisioner-rbac.yaml
# cat csi-nodeplugin-rbac.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-nodeplugin
  namespace: ceph-csi
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
  namespace: ceph-csi
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
  namespace: ceph-csi
subjects:
  - kind: ServiceAccount
    name: rbd-csi-nodeplugin
    namespace: ceph-csi
roleRef:
  kind: ClusterRole
  name: rbd-csi-nodeplugin
  apiGroup: rbac.authorization.k8s.io
# kubectl apply -f csi-nodeplugin-rbac.yaml
Create the pod security policies (optional):
# kubectl apply -f csi-provisioner-psp.yaml -n ceph-csi
# kubectl apply -f csi-nodeplugin-psp.yaml -n ceph-csi
Deploy csi-rbdplugin-provisioner.yaml
# cat csi-rbdplugin-provisioner.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: csi-rbdplugin-provisioner
  namespace: ceph-csi
  labels:
    app: csi-metrics
spec:
  selector:
    app: csi-rbdplugin-provisioner
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8680
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-rbdplugin-provisioner
  namespace: ceph-csi
spec:
  replicas: 3
  selector:
    matchLabels:
      app: csi-rbdplugin-provisioner
  template:
    metadata:
      labels:
        app: csi-rbdplugin-provisioner
    spec:
      serviceAccount: rbd-csi-provisioner
      containers:
        - name: csi-provisioner
          image: quay.io/k8scsi/csi-provisioner:v1.6.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--retry-interval-start=500ms"
            - "--enable-leader-election=true"
            - "--leader-election-type=leases"
            - "--feature-gates=Topology=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-snapshotter
          image: quay.io/k8scsi/csi-snapshotter:v2.1.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          securityContext:
            privileged: true
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-attacher
          image: quay.io/k8scsi/csi-attacher:v2.1.1
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election=true"
            - "--retry-interval-start=500ms"
          env:
            - name: ADDRESS
              value: /csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: quay.io/k8scsi/csi-resizer:v0.5.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--csiTimeout=150s"
            - "--leader-election"
            - "--retry-interval-start=500ms"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-rbdplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
          # upstream ships the "canary" tag here; pinned to the release used in this guide
          image: quay.io/cephcsi/cephcsi:v3.0.1
          args:
            - "--nodeid=$(NODE_ID)"
            - "--type=rbd"
            - "--controllerserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--pidlimit=-1"
            - "--rbdhardmaxclonedepth=8"
            - "--rbdsoftmaxclonedepth=4"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            #- name: ceph-csi-encryption-kms-config
            #  mountPath: /etc/ceph-csi-encryption-kms-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
        - name: liveness-prometheus
          image: quay.io/cephcsi/cephcsi:v3.0.1
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8680"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: socket-dir
          emptyDir: {
            medium: "Memory"
          }
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        #- name: ceph-csi-encryption-kms-config
        #  configMap:
        #    name: ceph-csi-encryption-kms-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }
The KMS entries are commented out above: KMS is only used for volume encryption, which we do not need.
# kubectl apply -f csi-rbdplugin-provisioner.yaml -n ceph-csi
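Once applied, the three provisioner replicas should come up (pod names will differ per cluster):
# kubectl get pods -n ceph-csi -l app=csi-rbdplugin-provisioner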
Next, deploy the per-node plugin as a DaemonSet:
# cat csi-rbdplugin.yaml
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-rbdplugin
  namespace: ceph-csi
spec:
  selector:
    matchLabels:
      app: csi-rbdplugin
  template:
    metadata:
      labels:
        app: csi-rbdplugin
    spec:
      serviceAccount: rbd-csi-nodeplugin
      hostNetwork: true
      hostPID: true
      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
      # resolved through k8s service, set dns policy to cluster first
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
          # This is necessary only for systems with SELinux, where
          # non-privileged sidecar containers cannot access unix domain socket
          # created by privileged CSI driver container.
          securityContext:
            privileged: true
          image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock"
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: csi-rbdplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          # upstream ships the "canary" tag here; pinned to the release used in this guide
          image: quay.io/cephcsi/cephcsi:v3.0.1
          args:
            - "--nodeid=$(NODE_ID)"
            - "--type=rbd"
            - "--nodeserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            # If topology based provisioning is desired, configure required
            # node labels representing the nodes topology domain
            # and pass the label names below, for CSI to consume and advertise
            # its equivalent topology domain
            # - "--domainlabels=failure-domain/region,failure-domain/zone"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /run/mount
              name: host-mount
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            #- name: ceph-csi-encryption-kms-config
            #  mountPath: /etc/ceph-csi-encryption-kms-config/
            - name: plugin-dir
              mountPath: /var/lib/kubelet/plugins
              mountPropagation: "Bidirectional"
            - name: mountpoint-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
        - name: liveness-prometheus
          securityContext:
            privileged: true
          image: quay.io/cephcsi/cephcsi:v3.0.1
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8680"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
        - name: mountpoint-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: host-mount
          hostPath:
            path: /run/mount
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        #- name: ceph-csi-encryption-kms-config
        #  configMap:
        #    name: ceph-csi-encryption-kms-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }
---
# This is a service to expose the liveness metrics
apiVersion: v1
kind: Service
metadata:
  name: csi-metrics-rbdplugin
  namespace: ceph-csi
  labels:
    app: csi-metrics
spec:
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8680
  selector:
    app: csi-rbdplugin
# kubectl apply -f csi-rbdplugin.yaml -n ceph-csi
As with the provisioner, leave the KMS configuration commented out.
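The DaemonSet should then report one ready pod per schedulable node:
# kubectl get ds csi-rbdplugin -n ceph-csi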
Configure the StorageClass and test
- Configure the StorageClass
# cat csi-rbd-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: 412523a5-88e6-4786-9da2-1f4eea2e5142
  pool: wh-k8s
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: ceph-csi
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi
  csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
# kubectl apply -f csi-rbd-sc.yaml
storageclass.storage.k8s.io/csi-rbd-sc created
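The new class should list rbd.csi.ceph.com as its provisioner:
# kubectl get sc csi-rbd-sc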
- Test
The test manifests live under examples/rbd in the repository; the PVC used here looks roughly like the sketch below.
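A minimal claim against the new StorageClass (a sketch; the stock examples/rbd/pvc.yaml in your checkout may use a different object name):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc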
# kubectl apply -f pvc.yaml
persistentvolumeclaim/pvc created
# kubectl apply -f pod.yaml
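Before going into the pod, confirm the claim is Bound:
# kubectl get pvc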
Exec into the pod:
# kubectl exec -it csi-rbd-demo-pod /bin/sh
# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
loop0 7:0 0 1G 0 loop
sda 8:0 0 100G 0 disk
`-sda1 8:1 0 100G 0 part /etc/hosts
rbd0 253:0 0 1G 0 disk /var/lib/www/html
rbd1 253:16 0 1G 0 disk
# df -hT
Filesystem Type Size Used Avail Use% Mounted on
overlay overlay 100G 17G 84G 17% /
tmpfs tmpfs 64M 0 64M 0% /dev
tmpfs tmpfs 16G 0 16G 0% /sys/fs/cgroup
/dev/sda1 xfs 100G 17G 84G 17% /etc/hosts
shm tmpfs 64M 0 64M 0% /dev/shm
/dev/rbd0 ext4 976M 2.6M 958M 1% /var/lib/www/html
tmpfs tmpfs 16G 12K 16G 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs tmpfs 16G 0 16G 0% /proc/acpi
tmpfs tmpfs 16G 0 16G 0% /proc/scsi
tmpfs tmpfs 16G 0 16G 0% /sys/firmware
Verify on the Ceph side:
[root@k8s-ceph-test-1.novalocal 12:11 ~]
# rbd ls -p wh-k8s
csi-vol-38c319d8-fd51-11ea-9877-925310eaa78d
[root@k8s-ceph-test-1.novalocal 12:11 ~]
# rbd info -p wh-k8s csi-vol-38c319d8-fd51-11ea-9877-925310eaa78d
rbd image 'csi-vol-38c319d8-fd51-11ea-9877-925310eaa78d':
        size 1 GiB in 256 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 1bf8dfe410baa
        block_name_prefix: rbd_data.1bf8dfe410baa
        format: 2
        features: layering
        op_features:
        flags:
        create_timestamp: Wed Sep 23 11:59:47 2020
        access_timestamp: Wed Sep 23 11:59:47 2020
        modify_timestamp: Wed Sep 23 11:59:47 2020
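Since the StorageClass sets allowVolumeExpansion: true, the volume can later be grown simply by raising the PVC request (2Gi below is a hypothetical new size; the resizer sidecar and node plugin expand the RBD image and filesystem online):
# kubectl patch pvc pvc -p '{"spec":{"resources":{"requests":{"storage":"2Gi"}}}}'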