Ceph cluster deployment (cephadm)
https://docs.ceph.com/en/pacific/cephadm
https://www.cnblogs.com/st2021/p/14970266.html
https://blog.csdn.net/get_set/article/details/108092248
ceph node planning:
hostname | role | hardware (CPU / RAM / disks) | eth0 (public) | eth1 | eth2 (cluster) | eth3 | gateway |
---|---|---|---|---|---|---|---|
vm-201 | ceph-mon | 1 core / 2 GB / 20 GB | 192.168.100.201 | 10.0.100.201 | 10.0.110.201 | 10.0.120.201 | 192.168.100.1 |
vm-202 | ceph-mon | 1 core / 2 GB / 20 GB | 192.168.100.202 | 10.0.100.202 | 10.0.110.202 | 10.0.120.202 | 192.168.100.1 |
vm-203 | ceph-mon | 1 core / 2 GB / 20 GB | 192.168.100.203 | 10.0.100.203 | 10.0.110.203 | 10.0.120.203 | 192.168.100.1 |
vm-204 | ceph-osd | 4 cores / 4 GB / 20 GB + 10 GB x2 + 30 GB x4 | 192.168.100.204 | 10.0.100.204 | 10.0.110.204 | 10.0.120.204 | 192.168.100.1 |
vm-205 | ceph-osd | 4 cores / 4 GB / 20 GB + 10 GB x2 + 30 GB x4 | 192.168.100.205 | 10.0.100.205 | 10.0.110.205 | 10.0.120.205 | 192.168.100.1 |
vm-206 | ceph-osd | 4 cores / 4 GB / 20 GB + 10 GB x2 + 30 GB x4 | 192.168.100.206 | 10.0.100.206 | 10.0.110.206 | 10.0.120.206 | 192.168.100.1 |
OS: CentOS Linux release 7.9.2009 (Core)
1. SSH key-based (passwordless) login
ssh-keygen -b 1024 -t rsa -P '' -f ~/.ssh/id_rsa
for i in {202..206}; do ssh-copy-id -i .ssh/id_rsa.pub 192.168.100.$i; done
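Optional sanity check from the admin node: BatchMode makes ssh fail instead of prompting, so any host that still asks for a password shows up immediately.
for i in {202..206}; do ssh -o BatchMode=yes 192.168.100.$i 'hostname; uptime'; done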
2. Static name resolution (/etc/hosts)
cat > /etc/hosts << EOF
127.0.0.1   localhost localhost.localdomain
::1         localhost localhost.localdomain
192.168.100.201 vm-201
192.168.100.202 vm-202
192.168.100.203 vm-203
192.168.100.204 vm-204
192.168.100.205 vm-205
192.168.100.206 vm-206
EOF
3. docker
cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable - \$basearch
baseurl=https://mirrors.nju.edu.cn/docker-ce/linux/centos/\$releasever/\$basearch/stable
enabled=1
gpgcheck=0
gpgkey=https://mirrors.nju.edu.cn/docker-ce/linux/centos/gpg
EOF
yum install -y docker-ce && systemctl restart docker
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://registry.docker-cn.com", "http://hub-mirror.c.163.com", "https://docker.mirrors.ustc.edu.cn"],
"insecure-registries": ["https://192.168.100.198:5000"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker && systemctl enable docker
docker version && docker info
for i in {202..206}; do scp /etc/yum.repos.d/docker-ce.repo vm-$i:/etc/yum.repos.d; done
for i in {202..206}; do ssh vm-$i 'yum install -y docker-ce && systemctl restart docker'; done
for i in {202..206}; do scp /etc/docker/daemon.json vm-$i:/etc/docker/; done
for i in {202..206}; do ssh vm-$i 'systemctl enable docker && systemctl restart docker'; done
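Optional check that every node ended up with the same Docker setup; the --format keys below are standard docker info fields.
for i in {202..206}; do ssh vm-$i 'docker info --format "{{.ServerVersion}} {{.CgroupDriver}} {{.RegistryConfig.Mirrors}}"'; done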
4. python
pip3 install pip -U -i https://pypi.tuna.tsinghua.edu.cn/simple
pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
for i in {202..206}; do ssh vm-$i 'pip3 install pip -U -i https://pypi.tuna.tsinghua.edu.cn/simple; pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple'; done
5. time & lvm2
yum install -y chrony lvm2; systemctl enable chronyd && systemctl restart chronyd
for i in {202..206}; do ssh vm-$i 'yum install -y chrony lvm2; systemctl enable chronyd && systemctl restart chronyd'; done
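Optional check that chrony is actually synchronized on every node before bootstrapping (clock skew is a common source of HEALTH_WARN).
for i in {202..206}; do ssh vm-$i 'chronyc tracking | grep -E "Stratum|System time"'; done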
6. cephadm
curl -k https://raw.fastgit.org/ceph/ceph/v15.2.15/src/cephadm/cephadm -o /usr/sbin/cephadm
chmod 755 /usr/sbin/cephadm
sed -e 's|quay.io/ceph/ceph:v15|192.168.100.198:5000/ceph/ceph:v15.2.13|g' \
-e 's|quay.io/prometheus/prometheus:v2.18.1|192.168.100.198:5000/prometheus/prometheus:2.31.1|g' \
-e 's|quay.io/prometheus/node-exporter:v0.18.1|192.168.100.198:5000/prometheus/node-exporter:1.2.2|g' \
-e 's|quay.io/prometheus/alertmanager:v0.20.0|192.168.100.198:5000/prometheus/alertmanager:0.23.0|g' \
-e 's|quay.io/ceph/ceph-grafana:6.7.4|192.168.100.198:5000/ceph/ceph-grafana:6.7.4|g' \
-i /usr/sbin/cephadm
# head -n 10 /usr/sbin/cephadm
#!/usr/bin/python3
# Default container images -----------------------------------------------------
DEFAULT_IMAGE = '192.168.100.198:5000/ceph/ceph:v15.2.13'
DEFAULT_IMAGE_IS_MASTER = False
DEFAULT_PROMETHEUS_IMAGE = '192.168.100.198:5000/prometheus/prometheus:2.31.1'
DEFAULT_NODE_EXPORTER_IMAGE = '192.168.100.198:5000/prometheus/node-exporter:1.2.2'
DEFAULT_ALERT_MANAGER_IMAGE = '192.168.100.198:5000/prometheus/alertmanager:0.23.0'
DEFAULT_GRAFANA_IMAGE = '192.168.100.198:5000/ceph/ceph-grafana:6.7.4'
# ------------------------------------------------------------------------------
for i in {202..203}; do scp /usr/sbin/cephadm vm-$i:/usr/sbin; done
for i in {202..203}; do ssh vm-$i 'chmod 755 /usr/sbin/cephadm'; done
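Optionally pre-pull the main ceph image from the private registry on every node so the later orchestration steps do not block on downloads; the image tag matches the sed replacement above.
docker pull 192.168.100.198:5000/ceph/ceph:v15.2.13
for i in {202..206}; do ssh vm-$i 'docker pull 192.168.100.198:5000/ceph/ceph:v15.2.13'; done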
7. bootstrap
# cephadm bootstrap --mon-ip 192.168.100.201
Ceph Dashboard is now available at:
URL: https://vm-201:8443/
User: admin
Password: fkrf1t3p89
You can access the Ceph CLI with:
sudo /usr/sbin/cephadm shell --fsid 75600af8-477e-11ec-85f6-f2b532235db6 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
Please consider enabling telemetry to help improve Ceph:
ceph telemetry on
For more information see:
https://docs.ceph.com/docs/master/mgr/telemetry/
Bootstrap complete.
# cephadm shell -- ceph -s
Inferring fsid 75600af8-477e-11ec-85f6-f2b532235db6
Inferring config /var/lib/ceph/75600af8-477e-11ec-85f6-f2b532235db6/mon.vm-201/config
Using recent ceph image 192.168.100.198:5000/ceph/ceph@sha256:0368cf225b3a13b7bdeb3d81ecf370a62931ffa5ff87af880d66aebae74f910a
  cluster:
    id:     75600af8-477e-11ec-85f6-f2b532235db6
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
  services:
    mon: 1 daemons, quorum vm-201 (age 8m)
    mgr: vm-201.feujdg(active, since 8m)
    osd: 0 osds: 0 up, 0 in
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
# Alternative: install ceph-common on the host itself instead of going through "cephadm shell":
cephadm add-repo --release octopus
sed -e 's|download.ceph.com|mirrors.nju.edu.cn/ceph|g' -i /etc/yum.repos.d/ceph.repo
cephadm install ceph-common
ceph -s
ceph config set global public_network 192.168.100.0/24
ceph config set global cluster_network 10.0.110.0/24
ceph config set global public_network_interface eth0
ceph config set global cluster_network_interface eth2
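The values can be read back to confirm they were stored in the monitor config database:
ceph config get mon public_network
ceph config get osd cluster_network
ceph config dump | grep network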
8. unmanaged
ceph orch apply mon --unmanaged
ceph orch apply mgr --unmanaged
ceph orch apply osd --all-available-devices --unmanaged
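With unmanaged set, cephadm stops scheduling these daemons on its own; ceph orch ls should now list the mon/mgr/osd specs as unmanaged, and nothing is deployed until the explicit steps below.
ceph orch ls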
9. add hosts
ceph cephadm get-pub-key > ~/ceph.pub
for i in {202..206}; do ssh-copy-id -f -i ~/ceph.pub root@vm-$i; done
ceph orch host add vm-201 192.168.100.201 --labels mon mgr rgw
ceph orch host add vm-202 192.168.100.202 --labels mon mgr rgw
ceph orch host add vm-203 192.168.100.203 --labels mon mgr rgw
ceph orch host add vm-204 192.168.100.204 --labels osd
ceph orch host add vm-205 192.168.100.205 --labels osd
ceph orch host add vm-206 192.168.100.206 --labels osd
# ceph orch host ls
HOST ADDR LABELS STATUS
vm-201 192.168.100.201 mon
vm-202 192.168.100.202 mon
vm-203 192.168.100.203 mon
vm-204 192.168.100.204 osd
vm-205 192.168.100.205 osd
vm-206 192.168.100.206 osd
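The listing only shows the first label per host; if the mgr and rgw labels from the add commands are also wanted, they can be attached with ceph orch host label add:
for h in vm-201 vm-202 vm-203; do ceph orch host label add $h mgr; ceph orch host label add $h rgw; done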
10. deploy mon
ceph orch apply mon vm-201,vm-202,vm-203
11. deploy mgr
ceph orch apply mgr vm-201,vm-202,vm-203
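A quick way to confirm that three mons and three mgrs landed on the intended hosts:
ceph orch ls mon
ceph orch ls mgr
ceph orch ps | grep -E 'mon|mgr'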
12. deploy osd
# ceph orch device ls --wide
Hostname Path Type Transport RPM Vendor Model Serial Size Health Ident Fault Available Reject Reasons
vm-204 /dev/sdb hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi6 32.2G Unknown N/A N/A Yes
vm-204 /dev/sdc hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi5 32.2G Unknown N/A N/A Yes
vm-204 /dev/sdd hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi4 32.2G Unknown N/A N/A Yes
vm-204 /dev/sde hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi3 32.2G Unknown N/A N/A Yes
vm-204 /dev/sdf hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi2 10.7G Unknown N/A N/A Yes
vm-204 /dev/sdg hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi1 10.7G Unknown N/A N/A Yes
vm-205 /dev/sdb hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi6 32.2G Unknown N/A N/A Yes
vm-205 /dev/sdc hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi5 32.2G Unknown N/A N/A Yes
vm-205 /dev/sdd hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi4 32.2G Unknown N/A N/A Yes
vm-205 /dev/sde hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi3 32.2G Unknown N/A N/A Yes
vm-205 /dev/sdf hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi2 10.7G Unknown N/A N/A Yes
vm-205 /dev/sdg hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi1 10.7G Unknown N/A N/A Yes
vm-206 /dev/sdb hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi6 32.2G Unknown N/A N/A Yes
vm-206 /dev/sdc hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi5 32.2G Unknown N/A N/A Yes
vm-206 /dev/sdd hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi4 32.2G Unknown N/A N/A Yes
vm-206 /dev/sde hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi3 32.2G Unknown N/A N/A Yes
vm-206 /dev/sdf hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi2 10.7G Unknown N/A N/A Yes
vm-206 /dev/sdg hdd Unknown Unknown QEMU QEMU HARDDISK drive-scsi1 10.7G Unknown N/A N/A Yes
cat > /tmp/osds.yaml << EOF
service_type: osd
service_id: osd_using_paths
placement:
  hosts:
    - vm-204
    - vm-205
    - vm-206
spec:
  data_devices:
    paths:
      - /dev/sdb
      - /dev/sdc
      - /dev/sdd
      - /dev/sde
  db_devices:
    paths:
      - /dev/sdf
  wal_devices:
    paths:
      - /dev/sdg
EOF
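Optionally preview the spec first; this assumes the installed Octopus release supports --dry-run for OSD specs (it reports which OSDs would be created on which devices without touching them):
ceph orch apply osd -i /tmp/osds.yaml --dry-run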
ceph orch apply -i /tmp/osds.yaml
# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.38031 root default
-5 0.12677 host vm-204
1 hdd 0.03169 osd.1 up 1.00000 1.00000
4 hdd 0.03169 osd.4 up 1.00000 1.00000
7 hdd 0.03169 osd.7 up 1.00000 1.00000
10 hdd 0.03169 osd.10 up 1.00000 1.00000
-3 0.12677 host vm-205
0 hdd 0.03169 osd.0 up 1.00000 1.00000
3 hdd 0.03169 osd.3 up 1.00000 1.00000
6 hdd 0.03169 osd.6 up 1.00000 1.00000
9 hdd 0.03169 osd.9 up 1.00000 1.00000
-7 0.12677 host vm-206
2 hdd 0.03169 osd.2 up 1.00000 1.00000
5 hdd 0.03169 osd.5 up 1.00000 1.00000
8 hdd 0.03169 osd.8 up 1.00000 1.00000
11 hdd 0.03169 osd.11 up 1.00000 1.00000
# ceph osd df
ID CLASS WEIGHT REWEIGHT SIZE RAW USE DATA OMAP META AVAIL %USE VAR PGS STATUS
1 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 4 KiB 1024 MiB 29 GiB 10.79 1.00 0 up
4 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 3 KiB 1024 MiB 29 GiB 10.79 1.00 0 up
7 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 4 KiB 1024 MiB 29 GiB 10.79 1.00 1 up
10 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 5 KiB 1024 MiB 29 GiB 10.79 1.00 0 up
0 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 4 KiB 1024 MiB 29 GiB 10.79 1.00 0 up
3 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 2 KiB 1024 MiB 29 GiB 10.79 1.00 0 up
6 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 3 KiB 1024 MiB 29 GiB 10.79 1.00 0 up
9 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 4 KiB 1024 MiB 29 GiB 10.79 1.00 1 down
2 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 3 KiB 1024 MiB 29 GiB 10.79 1.00 0 up
5 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 5 KiB 1024 MiB 29 GiB 10.79 1.00 1 up
8 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 3 KiB 1024 MiB 29 GiB 10.79 1.00 0 up
11 hdd 0.03169 1.00000 32 GiB 3.5 GiB 11 MiB 4 KiB 1024 MiB 29 GiB 10.79 1.00 0 up
TOTAL 390 GiB 42 GiB 132 MiB 50 KiB 12 GiB 348 GiB 10.79
MIN/MAX VAR: 1.00/1.00 STDDEV: 0
13. deploy rgw
radosgw-admin realm create --rgw-realm=default-realm --default
radosgw-admin zonegroup create --rgw-zonegroup=default-zonegroup --master --default
radosgw-admin zone create --rgw-zonegroup=default-zonegroup --rgw-zone=default-zone --master --default
radosgw-admin period update --rgw-realm=default-realm --commit
ceph orch apply rgw default-realm default-zone --unmanaged
ceph orch daemon add rgw default-realm default-zone --placement="vm-201 vm-202 vm-203"
# ceph -s
  cluster:
    id:     cc84d9b4-4830-11ec-a506-f2b532235db6
    health: HEALTH_WARN
            1 failed cephadm daemon(s)
  services:
    mon: 3 daemons, quorum vm-201,vm-202,vm-203 (age 65m)
    mgr: vm-201.zgeeaz(active, since 94m), standbys: vm-202.tzqyjz, vm-203.tosmgb
    osd: 12 osds: 12 up (since 18m), 12 in (since 19m)
    rgw: 3 daemons active (default-realm.default-zone.vm-201.gbechp, default-realm.default-zone.vm-202.ojodvg, default-realm.default-zone.vm-203.ecllzd)
  task status:
  data:
    pools:   5 pools, 105 pgs
    objects: 201 objects, 7.4 KiB
    usage:   42 GiB used, 348 GiB / 390 GiB avail
    pgs:     105 active+clean
  io:
    client: 43 KiB/s rd, 170 B/s wr, 43 op/s rd, 23 op/s wr
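A minimal smoke test of the rgw endpoints, assuming cephadm's default of serving rgw on port 80; the user name below is arbitrary:
radosgw-admin user create --uid=smoke-test --display-name="smoke test"
curl -s http://vm-201:80
# an anonymous ListAllMyBuckets XML response indicates the gateway is serving requests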
pool (delete a pool)
ceph config set mon mon_allow_pool_delete true
ceph osd pool rm .rgw.root .rgw.root --yes-i-really-really-mean-it
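mon_allow_pool_delete can be switched back off afterwards so pools stay protected against accidental deletion:
ceph config set mon mon_allow_pool_delete false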
service (remove / redeploy a service)
ceph orch rm node-exporter
ceph orch apply node-exporter '*'
ceph orch redeploy node-exporter
* cluster network: no working solution found yet
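A possible starting point for debugging this (not a verified fix): check what the OSDs actually bound to; back_addr should be on 10.0.110.0/24 if cluster_network took effect, and OSDs only pick the setting up on restart.
ceph config get osd cluster_network
ceph osd metadata 0 | grep -E 'front_addr|back_addr'
ceph orch daemon restart osd.0    # repeat per OSD after changing the network settings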