Environment

Kubernetes cluster

[root@master ceph]# kubectl get nodes
NAME     STATUS   ROLES    AGE   VERSION
master   Ready    master   23h   v1.19.4
work     Ready    node     23h   v1.19.4

Ceph cluster

[root@cephcluster-ceph01 ~]# ceph -s
  cluster:
    id:     2c44c0fd-4969-4f31-8cd8-359e710827e8
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum cephcluster-ceph01,cephcluster-ceph02,cephcluster-ceph03 (age 19h)
    mgr: cephcluster-ceph02(active, since 2w), standbys: cephcluster-ceph03, cephcluster-ceph01
    mds: cephfs:1 {0=cephcluster-ceph01=up:active} 2 up:standby
    osd: 10 osds: 10 up (since 19h), 10 in (since 3d)
    rgw: 3 daemons active (cephcluster-ceph01, cephcluster-ceph02, cephcluster-ceph03)

  task status:
    scrub status:
        mds.cephcluster-ceph01: idle

  data:
    pools:   13 pools, 1664 pgs
    objects: 47.89k objects, 59 GiB
    usage:   182 GiB used, 318 GiB / 500 GiB avail
    pgs:     1664 active+clean

  io:
    client:   2.7 MiB/s wr, 0 op/s rd, 187 op/s wr
[root@cephcluster-ceph01 ~]# ceph --version
ceph version 14.2.10 (b340acf629a010a74d90da5782a2c5fe0b54ac20) nautilus (stable)

Install ceph-common on the work node

[root@work ~]# vim /etc/yum.repos.d/ceph.repo 
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/$basearch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[root@work ~]# yum install -y epel-release
[root@work ~]# yum install -y ceph-common-14.2.10
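
The kubelet on any node that mounts RBD volumes shells out to the rbd binary to map images, which is why ceph-common goes on the work node. A quick sanity check; the version reported should match the pinned package:

[root@work ~]# which rbd
/usr/bin/rbd
[root@work ~]# rbd --version
ceph version 14.2.10 (b340acf629a010a74d90da5782a2c5fe0b54ac20) nautilus (stable)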

Create the StorageClass

The StorageClass needs the Ceph monitor addresses, a pool, and the client key. Grab the admin key and create a pool on the Ceph side:

[root@cephcluster-ceph01 ~]# ceph auth get-key client.admin
AQAJjmxfz8qQGRAABfjR32C9aUW3Oc/8GKQgfA==
[root@cephcluster-ceph01 ~]# ceph osd pool create tenx-pool 128
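
On Nautilus, a freshly created pool should also be initialized for RBD use, which tags it with the rbd application (otherwise ceph -s warns about a pool with no application enabled). The monitor addresses referenced in sc.yaml below can be read from the mon map; the v1 addresses on port 6789 are the ones the RBD plugin expects:

[root@cephcluster-ceph01 ~]# rbd pool init tenx-pool
[root@cephcluster-ceph01 ~]# ceph mon dump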

Configure the StorageClass

[root@master ceph]# echo -n "AQAJjmxfz8qQGRAABfjR32C9aUW3Oc/8GKQgfA=="|base64
QVFBSmpteGZ6OHFRR1JBQUJmalIzMkM5YVVXM09jLzhHS1FnZkE9PQ==
[root@master ceph]# vim ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: kube-system
data:
  key: QVFBSmpteGZ6OHFRR1JBQUJmalIzMkM5YVVXM09jLzhHS1FnZkE9PQ==
type: kubernetes.io/rbd
[root@master ceph]# kubectl apply -f ceph-secret.yaml
secret/ceph-secret created
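
The manual base64 step can be skipped: kubectl encodes literals itself. An equivalent one-liner:

[root@master ceph]# kubectl -n kube-system create secret generic ceph-secret \
    --type=kubernetes.io/rbd \
    --from-literal=key='AQAJjmxfz8qQGRAABfjR32C9aUW3Oc/8GKQgfA=='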

[root@master ceph]# vim sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: cephrbd
provisioner: ceph.com/rbd
parameters:
  monitors: 192.168.3.179:6789,192.168.3.180:6789,192.168.3.181:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: tenx-pool
  userId: admin
  userSecretName: ceph-secret
  userSecretNamespace: kube-system
  fsType: xfs
  imageFormat: "2"
  imageFeatures: "layering"
[root@master ceph]# kubectl apply -f sc.yaml
storageclass.storage.k8s.io/cephrbd created
[root@master ceph]# kubectl get sc
NAME      PROVISIONER    RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
cephrbd   ceph.com/rbd   Delete          Immediate           false                  19m
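
Note the RECLAIMPOLICY column: with the default Delete, removing a PVC also removes the PV and the backing RBD image. If images should survive claim deletion, set the policy explicitly in sc.yaml, at the top level alongside provisioner:

reclaimPolicy: Retain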

Create a PVC to test

[root@master ceph]# vim ceph-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc
spec:
  storageClassName: cephrbd
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
[root@master ceph]# kubectl apply -f ceph-pvc.yaml
persistentvolumeclaim/ceph-pvc created

[root@master ceph]# kubectl describe pvc ceph-pvc
Name:          ceph-pvc
Namespace:     default
StorageClass:  cephrbd
Status:        Pending
Volume:
Labels:        <none>
Annotations:   volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/rbd
Finalizers:    [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode:    Filesystem
Mounted By:    <none>
Events:
  Type     Reason              Age               From                         Message
  ----     ------              ---               ----                         -------
  Warning  ProvisioningFailed  4s (x3 over 32s)  persistentvolume-controller  Failed to provision volume with StorageClass "cephrbd": failed to create rbd image: executable file not found in $PATH, command output:

Provisioning fails: when a PV is created dynamically through this path, the work happens inside kube-controller-manager, whose image does not bundle ceph-common. The rbd command is therefore missing, and the controller cannot create an RBD image for the pod.

Solution: deploy the out-of-tree rbd-provisioner. It registers itself as ceph.com/rbd, the provisioner named in the StorageClass above, and its image ships the rbd client:

[root@master ceph]# git clone https://github.com/kubernetes-incubator/external-storage.git
[root@master ceph]# cd external-storage/ceph/rbd/deploy
[root@master deploy]# NAMESPACE=kube-system
[root@master deploy]# sed -r -i "s/namespace: [^ ]+/namespace: $NAMESPACE/g" ./rbac/clusterrolebinding.yaml ./rbac/rolebinding.yaml
[root@master deploy]# kubectl -n $NAMESPACE apply -f ./rbac
clusterrole.rbac.authorization.k8s.io/rbd-provisioner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner created
deployment.apps/rbd-provisioner created
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
serviceaccount/rbd-provisioner created
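
Check that the provisioner pod is running before retrying (app=rbd-provisioner is the label the upstream deployment manifest uses), then delete the stuck Pending claim so it can be provisioned afresh:

[root@master ceph]# kubectl -n kube-system get pods -l app=rbd-provisioner
[root@master ceph]# kubectl delete pvc ceph-pvc
persistentvolumeclaim "ceph-pvc" deleted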

[root@master ceph]# kubectl apply -f ceph-pvc.yaml
persistentvolumeclaim/ceph-pvc created
[root@master ceph]# kubectl get pvc
NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
ceph-pvc   Bound    pvc-0c2e350b-9d56-4d7f-bb2c-91bdc263a09e   1Gi        RWO            cephrbd        6s
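
On the Ceph side there should now be a matching image in tenx-pool (the external provisioner typically names images kubernetes-dynamic-pvc-<uuid>):

[root@cephcluster-ceph01 ~]# rbd ls tenx-pool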

Test the mount

[root@master ceph]# vim ceph-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
    volumeMounts:
    - name: ceph-rbd
      mountPath: /usr/share/nginx/html
  volumes:
  - name: ceph-rbd
    persistentVolumeClaim:
      claimName: ceph-pvc
[root@master ceph]# kubectl apply -f ceph-pod.yaml
pod/nginx created
[root@master ceph]# kubectl exec -ti nginx -- /bin/sh -c 'echo this is from Ceph RBD!!! > /usr/share/nginx/html/index.html'
[root@master ceph]# kubectl get po -owide
NAME    READY   STATUS    RESTARTS   AGE     IP             NODE   NOMINATED NODE   READINESS GATES
nginx   1/1     Running   0          2m10s   10.244.80.13   work   <none>           <none>
[root@master ceph]# curl 10.244.80.13
this is from Ceph RBD!!!
[root@master ceph]# kubectl exec -it nginx -- sh
/ # df -h|grep nginx
/dev/rbd0 1014.0M 32.4M 981.6M 3% /usr/share/nginx/html
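
To tear down, delete in reverse order; with the Delete reclaim policy shown earlier, removing the PVC also removes the PV and the backing RBD image (verify with rbd ls afterwards):

[root@master ceph]# kubectl delete pod nginx
[root@master ceph]# kubectl delete pvc ceph-pvc
[root@cephcluster-ceph01 ~]# rbd ls tenx-pool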