Ceph cluster expansion + RGW deployment


# Add three new servers
root@ceph-deploy:~# cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 ubuntu.example.local ubuntu

# The following lines are desirable for IPv6 capable hosts
::1     localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

192.168.192.171 ceph-deploy
192.168.192.172 ceph-mon01-mgr01 ceph-mds01
192.168.192.173 ceph-node01
192.168.192.174 ceph-node02
192.168.192.175 ceph-node03
192.168.192.176 ceph-mon02-mgr02 ceph-mds02
192.168.192.177 ceph-mon03 ceph-mds03
192.168.192.178 ceph-mds04
root@ceph-mon02-mgr02_ceph-mds02:~# groupadd -r -g 2021 cephadmin && useradd -r -m -s /bin/bash -u 2021 -g 2021 cephadmin && echo cephadmin:123456 | chpasswd
root@ceph-mon02-mgr02_ceph-mds02:~# echo "cephadmin ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
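Before ceph-deploy can manage the new nodes, the deploy node needs passwordless SSH to each of them as cephadmin. A minimal sketch, assuming the cephadmin key pair already exists on ceph-deploy:

cephadmin@ceph-deploy:~$ ssh-copy-id cephadmin@ceph-mon02-mgr02
cephadmin@ceph-deploy:~$ ssh-copy-id cephadmin@ceph-mon03
cephadmin@ceph-deploy:~$ ssh-copy-id cephadmin@ceph-mds04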
root@ceph-mon02-mgr02_ceph-mds02:~# cat /etc/apt/sources.list
# Source (deb-src) mirrors are commented out by default to speed up apt update; uncomment them if needed
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
deb http://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic main
wget -q -O- 'http://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
apt update
apt install -y iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip openjdk-8-jdk
cat > /etc/sysctl.conf <<EOF
...
EOF
cat > /etc/security/limits.conf <<EOF
...
EOF
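The heredoc bodies above are elided. As a rough sketch, tuning for a Ceph node often looks like the following; the specific keys and values here are assumptions for illustration, not taken from the original:

cat > /etc/sysctl.conf <<EOF
# assumed example values
net.core.somaxconn = 32768
vm.swappiness = 10
fs.file-max = 1000000
EOF
sysctl -p

cat > /etc/security/limits.conf <<EOF
# assumed example values
*    soft    nofile    1000000
*    hard    nofile    1000000
EOF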
apt install python2.7 -y
ln -sv /usr/bin/python2.7 /usr/bin/python2

Expanding Ceph cluster high availability

1. Expand the mon nodes

ceph-mon natively supports self-election among monitors for high availability, so the number of mon nodes is usually odd.

root@ceph-mon02-mgr02_ceph-mds02:~# apt install ceph-mon -y
root@ceph-mon03_ceph-mds03:~# apt install ceph-mon -y
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mon add ceph-mon02-mgr02
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mon add ceph-mon03
cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     d2cca32b-57dc-409f-9605-b19a373ce759
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon01-mgr01,ceph-mon02-mgr02_ceph-mds02,ceph-mon03_ceph-mds03 (age 2m)
    mgr: ceph-mon01-mgr01(active, since 54m)
    osd: 9 osds: 9 up (since 53m), 9 in (since 2d)
 
  data:
    pools:   3 pools, 384 pgs
    objects: 19 objects, 9.4 MiB
    usage:   122 MiB used, 450 GiB / 450 GiB avail
    pgs:     384 active+clean
cephadmin@ceph-deploy:~/ceph-cluster$ ceph quorum_status --format json-pretty

{
    "election_epoch": 16,
    "quorum": [
        0,
        1,
        2
    ],
    "quorum_names": [
        "ceph-mon01-mgr01",
        "ceph-mon02-mgr02_ceph-mds02",
        "ceph-mon03_ceph-mds03"
    ],
    "quorum_leader_name": "ceph-mon01-mgr01", #当前mon的lead
    "quorum_age": 206,
    "features": {
        "quorum_con": "4540138297136906239",
        "quorum_mon": [
            "kraken",
            "luminous",
            "mimic",
            "osdmap-prune",
            "nautilus",
            "octopus",
            "pacific",
            "elector-pinging"
        ]
    },
    "monmap": {
        "epoch": 3,
        "fsid": "d2cca32b-57dc-409f-9605-b19a373ce759",
        "modified": "2021-11-08T14:37:40.323082Z",
        "created": "2021-11-06T06:47:54.550256Z",
        "min_mon_release": 16,
        "min_mon_release_name": "pacific",
        "election_strategy": 1,
        "disallowed_leaders: ": "",
        "stretch_mode": false,
        "features": {
            "persistent": [
                "kraken",
                "luminous",
                "mimic",
                "osdmap-prune",
                "nautilus",
                "octopus",
                "pacific",
                "elector-pinging"
            ],
            "optional": []
        },
        "mons": [
            {
                "rank": 0,  ##ceph-mon01的等级
                "name": "ceph-mon01-mgr01", ##mon的节点名称
                "public_addrs": {
                    "addrvec": [
                        {
                            "type": "v2",
                            "addr": "192.168.192.172:3300", ##监控地址
                            "nonce": 0
                        },
                        {
                            "type": "v1",
                            "addr": "192.168.192.172:6789", ##监控地址
                            "nonce": 0
                        }
                    ]
                },
                "addr": "192.168.192.172:6789/0",
                "public_addr": "192.168.192.172:6789/0",
                "priority": 0,
                "weight": 0,
                "crush_location": "{}"
            },
            {
                "rank": 1,
                "name": "ceph-mon02-mgr02_ceph-mds02",
                "public_addrs": {
                    "addrvec": [
                        {
                            "type": "v2",
                            "addr": "192.168.192.176:3300",
                            "nonce": 0
                        },
                        {
                            "type": "v1",
                            "addr": "192.168.192.176:6789",
                            "nonce": 0
                        }
                    ]
                },
                "addr": "192.168.192.176:6789/0",
                "public_addr": "192.168.192.176:6789/0",
                "priority": 0,
                "weight": 0,
                "crush_location": "{}"
            },
            {
                "rank": 2,
                "name": "ceph-mon03_ceph-mds03",
                "public_addrs": {
                    "addrvec": [
                        {
                            "type": "v2",
                            "addr": "192.168.192.177:3300",
                            "nonce": 0
                        },
                        {
                            "type": "v1",
                            "addr": "192.168.192.177:6789",
                            "nonce": 0
                        }
                    ]
                },
                "addr": "192.168.192.177:6789/0",
                "public_addr": "192.168.192.177:6789/0",
                "priority": 0,
                "weight": 0,
                "crush_location": "{}"
            }
        ]
    }
}
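With three mons in quorum the cluster tolerates the loss of any one of them. A quick, hedged way to watch the self-election in action (only on a cluster where a brief mon outage is acceptable):

root@ceph-mon01-mgr01:~# systemctl stop ceph-mon@ceph-mon01-mgr01.service
cephadmin@ceph-deploy:~/ceph-cluster$ ceph quorum_status --format json-pretty | grep quorum_leader_name   # one of the surviving mons is elected leader
root@ceph-mon01-mgr01:~# systemctl start ceph-mon@ceph-mon01-mgr01.service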

2. Expand the mgr nodes

root@ceph-mon02-mgr02_ceph-mds02:~# apt install -y ceph-mgr
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mgr create ceph-mon02-mgr02
cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     d2cca32b-57dc-409f-9605-b19a373ce759
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon01-mgr01,ceph-mon02-mgr02_ceph-mds02,ceph-mon03_ceph-mds03 (age 10m)
    mgr: ceph-mon01-mgr01(active, since 62m), standbys: ceph-mon02-mgr02 # standby ceph-mon02-mgr02 has been added
    osd: 9 osds: 9 up (since 61m), 9 in (since 2d)
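mgr failover can be exercised the same way: ceph mgr fail asks the active mgr to step down so a standby takes over. A hedged sketch:

cephadmin@ceph-deploy:~/ceph-cluster$ ceph mgr fail ceph-mon01-mgr01
cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s | grep mgr   # ceph-mon02-mgr02 should now be active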

3. Deploy CephFS

To use CephFS, the MDS service must be deployed first. Here the MDS daemons are co-located with the mon nodes (deploying them on separate hosts is preferable).

Install ceph-mds on all the ceph-mon nodes and on ceph-mds04:

root@ceph-mds04:/etc/netplan# apt install ceph-mds -y
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mds create ceph-mds01
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mds create ceph-mds02
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mds create ceph-mds03
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mds create ceph-mds04
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool create cephfs-metadata 32 32
pool 'cephfs-metadata' created
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool create cephfs-data 64 64
pool 'cephfs-data' created
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs new mycephfs cephfs-metadata cephfs-data
new fs with metadata pool 4 and data pool 5
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs ls
name: mycephfs, metadata pool: cephfs-metadata, data pools: [cephfs-data ]
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs status mycephfs
mycephfs - 0 clients
========
RANK  STATE      MDS         ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  ceph-mds04  Reqs:    0 /s    10     13     12      0   
      POOL         TYPE     USED  AVAIL  
cephfs-metadata  metadata  96.0k   142G  
  cephfs-data      data       0    142G  
STANDBY MDS  
 ceph-mds01  
 ceph-mds02  
 ceph-mds03  
MDS version: ceph version 16.2.6 (ee28fb57e47e9f88813e24bbf4c14496ca299d31) pacific (stable)
cephadmin@ceph-deploy:~/ceph-cluster$ ceph mds stat
mycephfs:1 {0=ceph-mds04=up:active} 3 up:standby
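With an active MDS the file system can be mounted from a client. A minimal sketch using the kernel client and the admin key (the client host below is hypothetical; in practice create a dedicated cephx user instead of using client.admin):

root@client:~# apt install ceph-common -y
root@client:~# ceph auth get-key client.admin > /etc/ceph/admin.secret   # requires a copy of ceph.conf and the admin keyring
root@client:~# mkdir -p /mnt/cephfs
root@client:~# mount -t ceph 192.168.192.172:6789:/ /mnt/cephfs -o name=admin,secretfile=/etc/ceph/admin.secret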

Each Rank can be given a standby MDS, so that if the Rank's current MDS fails, the standby takes over immediately. There are several ways to configure standbys; the common options are as follows.

mds_standby_replay: true or false. When true, replay mode is enabled and the standby continuously replays the active MDS's journal in real time, so failover is fast if the active MDS dies. When false, the journal is only caught up at failover time, which causes a longer interruption.
 
mds_standby_for_name: make this MDS daemon stand by only for the MDS with the specified name.
 
mds_standby_for_rank: make this MDS daemon stand by only for the specified Rank number. When multiple CephFS file systems exist, mds_standby_for_fscid can additionally be used to pick the file system.
 
mds_standby_for_fscid: specifies a CephFS file system ID and takes effect together with mds_standby_for_rank. If mds_standby_for_rank is set, the standby covers that Rank of the given file system; if not, it covers all Ranks of that file system.
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs get mycephfs | grep max_mds
max_mds    1
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs set mycephfs max_mds 2
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs status
mycephfs - 0 clients
========
RANK  STATE      MDS         ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  ceph-mds04  Reqs:    0 /s    10     13     12      0   
 1    active  ceph-mds03  Reqs:    0 /s    10     13     11      0   
      POOL         TYPE     USED  AVAIL  
cephfs-metadata  metadata   168k   142G  
  cephfs-data      data       0    142G  
STANDBY MDS  
 ceph-mds01  
 ceph-mds02  
MDS version: ceph version 16.2.6 (ee28fb57e47e9f88813e24bbf4c14496ca299d31) pacific (stable)

Currently ceph-mds03 and ceph-mds04 are active while ceph-mds01 and ceph-mds02 are standby. We can now make ceph-mds03 the standby for ceph-mds01 and ceph-mds04 the standby for ceph-mds02, so that every active has a fixed standby role. Modify the configuration file as follows.

cephadmin@ceph-deploy:~/ceph-cluster$ cat ceph.conf 
[global]
fsid = d2cca32b-57dc-409f-9605-b19a373ce759
public_network = 192.168.192.0/24
cluster_network = 192.168.227.0/24
mon_initial_members = ceph-mon01-mgr01
mon_host = 192.168.192.172
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

[mds.ceph-mds01]
#mds_standby_for_fscid = mycephfs
mds_standby_for_name = ceph-mds03
mds_standby_replay = true
[mds.ceph-mds02]
mds_standby_for_name = ceph-mds04
mds_standby_replay = true
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy --overwrite-conf config push ceph-mds01 ceph-mds02 ceph-mds03 ceph-mds04
# then restart the ceph-mds service on each node, ceph-mds01 and ceph-mds02 first, e.g.:
root@ceph-mds04:~# systemctl restart ceph-mds@ceph-mds04.service
root@ceph-deploy:/etc# ceph fs status
mycephfs - 0 clients
========
RANK  STATE      MDS         ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  ceph-mds01  Reqs:    0 /s    10     13     12      0   
 1    active  ceph-mds02  Reqs:    0 /s    10     13     11      0   
      POOL         TYPE     USED  AVAIL  
cephfs-metadata  metadata   180k   142G  
  cephfs-data      data       0    142G  
STANDBY MDS  
 ceph-mds03  
 ceph-mds04  
MDS version: ceph version 16.2.6 (ee28fb57e47e9f88813e24bbf4c14496ca299d31) pacific (stable)
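Note that on Pacific the mds_standby_for_* options used above are legacy: upstream removed per-daemon standby pinning in Nautilus in favor of a per-filesystem flag, so on current releases the rough equivalent is:

cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs set mycephfs allow_standby_replay true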

4. Deploy the radosgw service (not required when only RBD or CephFS is used)

RGW exposes a REST interface, and clients interact with it over HTTP to create, read, update, and delete data. radosgw is only needed when Ceph data must be accessed through a RESTful API, so deployments that use just RBD or CephFS can leave it disabled.

root@ceph-mon01-mgr01:~# apt install radosgw -y
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy rgw create ceph-mon01-mgr01
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool ls   # the .rgw pools are created by the gateway
device_health_metrics
mypool
rbdpool
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
cephadmin@ceph-deploy:~/ceph-cluster$ curl http://192.168.192.172:7480
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>
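With the gateway answering on port 7480, objects can be managed through the S3-compatible API. A hedged sketch of creating a user and pointing an S3 client at the gateway (the uid, display name, and bucket below are made up for illustration):

cephadmin@ceph-deploy:~/ceph-cluster$ radosgw-admin user create --uid=testuser --display-name="test user"   # note the access_key/secret_key in the JSON output
cephadmin@ceph-deploy:~/ceph-cluster$ apt install s3cmd -y
cephadmin@ceph-deploy:~/ceph-cluster$ s3cmd --access_key=<access_key> --secret_key=<secret_key> --host=192.168.192.172:7480 --host-bucket=192.168.192.172:7480 --no-ssl mb s3://testbucket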