Integrating k8s with Ceph (RBD and CephFS)
1. Create an RBD pool and image in Ceph
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool create mypool 32 32
pool 'mypool' created
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool ls
device_health_metrics
mypool
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool application enable mypool rbd
enabled application 'rbd' on pool 'mypool'
cephadmin@ceph-deploy:~/ceph-cluster$ rbd pool init -p mypool
cephadmin@ceph-deploy:~/ceph-cluster$ rbd create myimg2 --size 3G --pool mypool --image-format 2 --image-feature layering
cephadmin@ceph-deploy:~/ceph-cluster$ rbd ls --pool mypool
myimg2
cephadmin@ceph-deploy:~/ceph-cluster$ rbd --image myimg2 --pool mypool info
rbd image 'myimg2':
size 3 GiB in 768 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 376dfc557a8d
block_name_prefix: rbd_data.376dfc557a8d
format: 2
features: layering
op_features:
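The image is created with only the layering feature because the kernel RBD client used by kubelet does not support the newer image features. As an optional sanity check (not part of the original run; it requires a host that already has ceph-common and the chuan keyring from steps 2-4 below), the image can be mapped and unmapped by hand:
root@k8s:~# rbd map mypool/myimg2 --id chuan --keyring /etc/ceph/ceph.client.chuan.keyring   # prints a device such as /dev/rbd0
root@k8s:~# rbd unmap /dev/rbd0   # use the device printed above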
2. Install ceph-common on the master and node hosts
root@k8s:~# wget -q -O- 'http://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
root@k8s:~# cat /etc/apt/sources.list
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific focal main
In the Ceph repo line above, debian-pacific selects the Ceph release (pacific) and focal the Ubuntu release.
root@k8s:~# apt install ceph-common -y
root@k8s:~# cat /etc/ceph/rbdmap
# RbdDevice Parameters
#poolname/imagename id=client,keyring=/etc/ceph/ceph.client.keyring
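rbdmap is only needed if the node itself should map images at boot; the Kubernetes RBD volume plugin does not use it. Purely as an illustration of the file format, an entry for the image created above would look like this (hypothetical, not part of the original setup):
mypool/myimg2   id=chuan,keyring=/etc/ceph/ceph.client.chuan.keyring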
3. Copy the Ceph authentication files to the Kubernetes nodes
cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth get-or-create client.chuan mon 'allow r' osd 'allow * pool=mypool'
[client.chuan]
key = AQCetoZhASwdJBAAFmKO4+HM+/26PG/kGbWDfw==
cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth get client.chuan
[client.chuan]
key = AQCetoZhASwdJBAAFmKO4+HM+/26PG/kGbWDfw==
caps mon = "allow r"
caps osd = "allow * pool=mypool"
exported keyring for client.chuan
cephadmin@ceph-deploy:~$ ceph auth get client.chuan -o ceph.client.chuan.keyring
exported keyring for client.chuan
cephadmin@ceph-deploy:/etc/ceph$ scp /etc/ceph/ceph.conf ceph.client.chuan.keyring root@192.168.192.15x:/root
4. Verify the non-admin Ceph user from a Kubernetes node
root@master001:~# cp ceph.client.chuan.keyring /etc/ceph/
root@master001:~# ceph --user chuan -s
cluster:
id: d2cca32b-57dc-409f-9605-b19a373ce759
health: HEALTH_OK
services:
mon: 1 daemons, quorum ceph-mon01-mgr01 (age 2h)
mgr: ceph-mon01-mgr01(active, since 2h)
osd: 9 osds: 9 up (since 2h), 9 in (since 3h)
data:
pools: 2 pools, 256 pgs
objects: 4 objects, 35 B
usage: 70 MiB used, 450 GiB / 450 GiB avail
root@master001:~# rbd --id chuan ls --pool=mypool
myimg2
root@k8s:~# cp ceph.conf /etc/ceph/
Append the Ceph hosts to /etc/hosts on every Kubernetes node:
192.168.192.171 ceph-deploy
192.168.192.172 ceph-mon01-mgr01
192.168.192.173 ceph-node01
192.168.192.174 ceph-node02
192.168.192.175 ceph-node03
5. Two ways to mount an RBD image: with the keyring file or with a Kubernetes Secret
case1-case2: mount with the keyring file
root@slave002:~/cephcase# cat case1-busybox-keyring.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox
    command:
    - sleep
    - "3600"
    imagePullPolicy: Always
    name: busybox
    #restartPolicy: Always
    volumeMounts:
    - name: rbd-data1
      mountPath: /data
  volumes:
  - name: rbd-data1
    rbd:
      monitors:
      - '192.168.192.172:6789'
      pool: mypool
      image: myimg2
      fsType: ext4
      readOnly: false
      user: chuan
      keyring: /etc/ceph/ceph.client.chuan.keyring
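The manifest above (and each later case file) is applied in the usual way before the exec that follows; the command is implied by the transcript rather than shown in the original notes:
root@slave002:~/cephcase# kubectl apply -f case1-busybox-keyring.yaml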
root@master001:/etc/ceph# kubectl exec -it busybox sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # cd /data/
/data # ls
lost+found
root@master001:/etc/ceph# ll
-rwxrwxrwx+ 1 root root ceph.client.chuan.keyring*
-rwxrwxrwx 1 root root ceph.conf
root@slave002:~/cephcase# cat case2-nginx-keyring.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
        - name: rbd-data1
          mountPath: /data
      volumes:
      - name: rbd-data1
        rbd:
          monitors:
          - '192.168.192.172:6789'
          pool: mypool
          image: myimg2
          fsType: ext4
          readOnly: false
          user: chuan
          keyring: /etc/ceph/ceph.client.chuan.keyring
root@master001:~# kubectl exec -it nginx-deployment-549df9d478-4dvlv bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-549df9d478-4dvlv:/# cd /data/
root@nginx-deployment-549df9d478-4dvlv:/data# ls
lost+found
root@nginx-deployment-549df9d478-4dvlv:/data# echo "chuan" > chuan.txt
root@nginx-deployment-549df9d478-4dvlv:/data# cat chuan.txt
chuan
root@slave001:~# rbd showmapped
id  pool    namespace  image   snap  device
0   mypool             myimg2  -     /dev/rbd0
root@slave001:~# df -h
/dev/rbd0       2.9G  9.1M  2.9G   1% /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/mypool-image-myimg2
Mount via a Kubernetes Secret
case3-case4: store the chuan key in a Secret and reference it with secretRef
root@slave002:~/cephcase# cat /etc/ceph/ceph.client.chuan.keyring
[client.chuan]
        key = AQCetoZhASwdJBAAFmKO4+HM+/26PG/kGbWDfw==
        caps mon = "allow r"
        caps osd = "allow * pool=mypool"
root@slave002:~/cephcase# echo AQCetoZhASwdJBAAFmKO4+HM+/26PG/kGbWDfw== | base64    # use base64 -d to decode it back
QVFDZXRvWmhBU3dkSkJBQUZtS080K0hNKy8yNlBHL2tHYldEZnc9PQo=
root@slave002:~/cephcase# cat case3-secret-client-chuan.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-chuan
type: "kubernetes.io/rbd"
data:
  key: QVFDZXRvWmhBU3dkSkJBQUZtS080K0hNKy8yNlBHL2tHYldEZnc9PQo=
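An equivalent way to produce the same Secret without hand-encoding the key is kubectl create secret (a convenience sketch, not from the original notes). Note that echo without -n also encodes the trailing newline into the base64 value; echo -n avoids that:
root@slave002:~/cephcase# kubectl create secret generic ceph-secret-chuan --type=kubernetes.io/rbd \
      --from-literal=key='AQCetoZhASwdJBAAFmKO4+HM+/26PG/kGbWDfw=='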
root@slave002:~/cephcase# kubectl exec -it nginx-deployment-74b6ccb686-678xr bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-74b6ccb686-678xr:/# cd /data/
root@nginx-deployment-74b6ccb686-678xr:/data# ls
chuan.txt lost+found
root@nginx-deployment-74b6ccb686-678xr:/data# cat chuan.txt
chuan
root@slave002:~/cephcase# cat case3-secret-client-shijie.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-chuan
type: "kubernetes.io/rbd"
data:
  key: QVFDZXRvWmhBU3dkSkJBQUZtS080K0hNKy8yNlBHL2tHYldEZnc9PQo=
root@slave002:~/cephcase# cat case4-nginx-secret.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
        - name: rbd-data1
          mountPath: /data
      volumes:
      - name: rbd-data1
        rbd:
          monitors:
          - '192.168.192.172:6789'
          pool: mypool
          image: myimg2
          fsType: ext4
          readOnly: false
          user: chuan
          secretRef:
            name: ceph-secret-chuan
6. Dynamic provisioning with a StorageClass (case3, case5, case6, case7, case8)
The k8s master node uses the ceph admin credentials to create PVs (RBD images) in the Ceph cluster.
root@slave002:~/cephcase# cat case3-secret-client-shijie.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-chuan
type: "kubernetes.io/rbd"
data:
  key: QVFDZXRvWmhBU3dkSkJBQUZtS080K0hNKy8yNlBHL2tHYldEZnc9PQo=
root@slave002:~/cephcase# cat case5-secret-admin.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFBYkpZWmhQNFc3RmhBQTRrTzA3alFWVDFEbmxTcGw4dTRxOUE9PQo=
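The value above is simply the base64 of the client.admin key; assuming the admin keyring is readable on the deploy node, it can be generated directly:
cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth print-key client.admin | base64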
root@slave002:~/cephcase# cat case6-ceph-storage-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-storage-class-chuan
  annotations:
    storageclass.kubernetes.io/is-default-class: "true" # set as the default StorageClass
provisioner: kubernetes.io/rbd
parameters:
  monitors: 192.168.192.172:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default
  pool: mypool
  userId: chuan
  userSecretName: ceph-secret-chuan
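One caveat with the in-tree kubernetes.io/rbd provisioner: kube-controller-manager (for provisioning) and kubelet (for mapping) shell out to the rbd CLI, which is why ceph-common was installed on the masters and nodes in step 2. If the controller-manager runs as a container, the rbd binary must be available inside that container instead. A quick check on each host (it should resolve to /usr/bin/rbd after the ceph-common install):
root@k8s:~# which rbd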
root@slave002:~/cephcase# cat case7-mysql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data-pvc
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: ceph-storage-class-chuan
  resources:
    requests:
      storage: '5Gi'
root@slave002:~/cephcase# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-02f88cf0-7ded-4405-8616-4d102c767795 5Gi RWO Delete Bound default/mysql-data-pvc ceph-storage-class-chuan 4m32s
root@slave002:~/cephcase# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
mysql-data-pvc Bound pvc-02f88cf0-7ded-4405-8616-4d102c767795 5Gi RWO ceph-storage-class-chuan 4m34s
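On the Ceph side each dynamically provisioned PV corresponds to a new RBD image in mypool (the in-tree provisioner names them kubernetes-dynamic-pvc-<uuid>); it can be listed next to myimg2:
cephadmin@ceph-deploy:~/ceph-cluster$ rbd ls --pool mypool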
root@slave002:~/cephcase# cat case8-mysql-single.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: 192.168.192.155:80/chuan/mysql:5.6.46
        name: mysql
        env:
        # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: chuan123456
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-data-pvc
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: mysql-service-label
  name: mysql-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    protocol: TCP
    targetPort: 3306
    nodePort: 43306
  selector:
    app: mysql
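With the NodePort Service in place, MySQL should be reachable on any node IP at port 43306 (a port this high assumes the cluster's node-port range was extended beyond the default 30000-32767). A hypothetical connection test from outside the cluster, reusing a node IP that appears later in these notes:
root@k8s:~# mysql -h 192.168.192.152 -P 43306 -uroot -pchuan123456 -e 'show databases;'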
root@slave002:~/cephcase# kubectl exec -it mysql-6ff497f6d5-cdl4m bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@mysql-6ff497f6d5-cdl4m:/# ls
bin boot dev docker-entrypoint-initdb.d entrypoint.sh etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
root@mysql-6ff497f6d5-cdl4m:/# cd /var/lib/mysql
root@mysql-6ff497f6d5-cdl4m:/var/lib/mysql# ls
auto.cnf ib_logfile0 ib_logfile1 ibdata1 lost+found mysql performance_schema
root@mysql-6ff497f6d5-cdl4m:/var/lib/mysql# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/rbd0 4.9G 136M 4.8G 3% /var/lib/mysql
root@ceph-deploy:~# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 450 GiB 449 GiB 521 MiB 521 MiB 0.11
TOTAL 450 GiB 449 GiB 521 MiB 521 MiB 0.11
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
device_health_metrics 1 128 0 B 0 0 B 0 142 GiB
mypool 2 128 124 MiB 60 371 MiB 0.08 142 GiB
7. CephFS
Create the CephFS pools and file system so that multiple pods can mount the same data.
cephadmin@ceph-mon01-mgr01:~$ sudo apt install ceph-mds
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mds create ceph-mon01-mgr01
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool create cephfs-metadata 32 32
pool 'cephfs-metadata' created
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool create cephfs-data 64 64
pool 'cephfs-data' created
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs new mycephfs cephfs-metadata cephfs-data
new fs with metadata pool 4 and data pool 5
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs ls
name: mycephfs, metadata pool: cephfs-metadata, data pools: [cephfs-data ]
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs status mycephfs
mycephfs - 0 clients
========
RANK  STATE            MDS              ACTIVITY     DNS    INOS   DIRS   CAPS
 0    active  ceph-mon01-mgr01  Reqs:    0 /s    10     13     12      0
      POOL         TYPE     USED  AVAIL
cephfs-metadata  metadata  96.0k   150G
  cephfs-data      data       0    142G
MDS version: ceph version 16.2.6 (ee28fb57e47e9f88813e24bbf4c14496ca299d31) pacific (stable)
cephadmin@ceph-deploy:~/ceph-cluster$ ceph mds stat
mycephfs:1 {0=ceph-mon01-mgr01=up:active}
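Before wiring CephFS into Kubernetes, the file system can be mounted by hand from any host with ceph-common, as a quick sanity check (a sketch, not part of the original run; replace <admin-key> with the decoded client.admin key and pick any mount point):
root@k8s:~# mkdir -p /mnt/cephfs
root@k8s:~# mount -t ceph 192.168.192.172:6789:/ /mnt/cephfs -o name=admin,secret=<admin-key>
root@k8s:~# df -h /mnt/cephfs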
cephadmin@ceph-deploy:/etc/ceph$ scp ceph.client.admin.keyring root@k8sx:/etc/ceph
root@slave002:~/cephcase# cat case5-secret-admin.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFBYkpZWmhQNFc3RmhBQTRrTzA3alFWVDFEbmxTcGw4dTRxOUE9PQo=
root@slave002:~/cephcase# cat case9-nginx-cephfs.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
        - name: chuan-staticdata-cephfs
          mountPath: /usr/share/nginx/html/
      volumes:
      - name: chuan-staticdata-cephfs
        cephfs:
          monitors:
          - '192.168.192.172:6789'
          path: /
          user: admin
          secretRef:
            name: ceph-secret-admin
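The manifest declares replicas: 1, but the pod listing below shows three replicas; presumably the deployment was scaled up to demonstrate that every pod mounts the same CephFS path (commands implied, not shown in the original notes):
root@slave002:~/cephcase# kubectl apply -f case9-nginx-cephfs.yaml
root@slave002:~/cephcase# kubectl scale deployment nginx-deployment --replicas=3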
root@slave002:~/cephcase# kubectl get po -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deployment-5b687774f4-2nggp 1/1 Running 0 7s 10.200.111.58 192.168.192.153
nginx-deployment-5b687774f4-b7l4x 1/1 Running 0 48s 10.200.122.193 192.168.192.152
nginx-deployment-5b687774f4-qpqv7 1/1 Running 0 7s 10.200.122.194 192.168.192.152
root@nginx-deployment-5b687774f4-b7l4x:/# df -hT
Filesystem Type Size Used Avail Use% Mounted on
192.168.192.172:6789:/ ceph 143G 0 143G 0% /usr/share/nginx/html
root@nginx-deployment-5b687774f4-qpqv7:/usr/share/nginx/html# cat aa.txt
chuan
root@nginx-deployment-5b687774f4-zq7jg:/usr/share/nginx/html# cat aa.txt
chuan
root@nginx-deployment-5b687774f4-b7l4x:/usr/share/nginx/html# cat aa.txt
chuan