Environment

Hostname   IP             Components                            Disks
ceph01     192.168.1.30   ceph-deploy & mon & mgr & osd & ntp   3 x 20 GB
ceph02     192.168.1.32   mon & mgr & osd & ntp                 3 x 20 GB
ceph03     192.168.1.34   mon & mgr & osd & ntp                 3 x 20 GB

Passwordless SSH

[root@ceph01 ~]# vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.30 ceph01
192.168.1.32 ceph02
192.168.1.34 ceph03
[root@ceph01 ~]# ssh-keygen -t rsa
[root@ceph01 ~]# ssh-copy-id root@ceph02
[root@ceph01 ~]# ssh-copy-id root@ceph03
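
To confirm that passwordless login works before moving on, run a quick remote command from ceph01 against each node:
[root@ceph01 ~]# ssh ceph02 hostname
[root@ceph01 ~]# ssh ceph03 hostname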

Configure time synchronization

[root@ceph01 ~]# yum install -y ntp
[root@ceph01 ~]# vim /etc/ntp.conf
driftfile /var/lib/ntp/drift
restrict default nomodify
restrict 127.0.0.1
restrict ::1
# Enable public key cryptography.
#crypto
includefile /etc/ntp/crypto/pw
# Key file containing the keys and key identifiers used when operating
# with symmetric key cryptography.
keys /etc/ntp/keys
# Use the local clock as the reference and advertise it at stratum 10,
# so the other nodes can sync from ceph01 even without Internet access.
server 127.127.1.0
fudge 127.127.1.0 stratum 10

Other nodes

# yum install -y ntp
# vim /etc/ntp.conf
driftfile /var/lib/ntp/drift
restrict default nomodify
restrict 127.0.0.1
restrict ::1
# Enable public key cryptography.
#crypto
includefile /etc/ntp/crypto/pw
# Key file containing the keys and key identifiers used when operating
# with symmetric key cryptography.
keys /etc/ntp/keys
server 192.168.1.30    # IP of the primary node (ceph01)

Start the service (all nodes)

# systemctl enable ntpd
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpd.service to /usr/lib/systemd/system/ntpd.service.
# systemctl start ntpd
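
Once ntpd is running everywhere, it is worth checking that ceph02 and ceph03 have picked ceph01 as their time source; ntpq lists the peers, and the line marked with * is the currently selected source:
# ntpq -p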

Ceph installation

Configure the Ceph repository (all nodes)

[root@ceph01 ~]# vim /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/$basearch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
# yum makecache
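
As a quick sanity check, the two repositories should now show up in the enabled repo list:
# yum repolist enabled | grep -i ceph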

Install Ceph (all nodes)

[root@ceph01 ~]# yum install ceph -y
[root@ceph01 ~]# ceph -v
ceph version 14.2.10 (b340acf629a010a74d90da5782a2c5fe0b54ac20) nautilus (stable)

Install ceph-deploy on the primary node

[root@ceph01 ~]# yum install ceph-deploy -y
[root@ceph01 ~]# ceph-deploy --version
2.0.1

Deploy the Ceph cluster

Create the cluster

[root@ceph01 ~]# mkdir ceph-cluster
[root@ceph01 ~]# cd ceph-cluster
[root@ceph01 ceph-cluster]# ceph-deploy new ceph01 ceph02 ceph03
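
ceph-deploy new writes the initial cluster configuration into the working directory; with ceph-deploy 2.0.1 this is typically an initial ceph.conf, a deployment log, and the monitor keyring:
[root@ceph01 ceph-cluster]# ls
ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring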

Deploy the monitors (mon)

[root@ceph01 ceph-cluster]# vim ceph.conf
# add the following line under the [global] section:
public_network=192.168.1.0/24
[root@ceph01 ceph-cluster]# ceph-deploy mon create-initial

Check the cluster status

Distribute the keyring files to each node:
[root@ceph01 ceph-cluster]# ceph-deploy admin ceph01 ceph02 ceph03
[root@ceph01 ceph-cluster]# ceph -s
  cluster:
    id:     ffdf23eb-6b2a-4596-baa5-c9441e5ba07c
    health: HEALTH_OK
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   0B used, 0B / 0B avail
    pgs:

Create the OSDs

Check the block devices:
[root@ceph01 ceph-cluster]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 200M 0 part /boot
└─sda2 8:2 0 19.8G 0 part
└─centos_centos7-root 253:0 0 19.8G 0 lvm /
sdb 8:16 0 20G 0 disk
sdc 8:32 0 20G 0 disk
sdd 8:48 0 20G 0 disk
sr0 11:0 1 4.4G 0 rom
[root@ceph01 ceph-cluster]# ceph-deploy osd create ceph01 --bluestore --data /dev/sdb --block-db /dev/sdc --block-wal /dev/sdd
[root@ceph01 ceph-cluster]# ceph-deploy osd create ceph02 --bluestore --data /dev/sdb --block-db /dev/sdc --block-wal /dev/sdd
[root@ceph01 ceph-cluster]# ceph-deploy osd create ceph03 --bluestore --data /dev/sdb --block-db /dev/sdc --block-wal /dev/sdd
Check the OSD tree:
[root@ceph01 ceph-cluster]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.11728 root default
-3 0.03909 host ceph01
0 hdd 0.03909 osd.0 up 1.00000 1.00000
-5 0.03909 host ceph02
1 hdd 0.03909 osd.1 up 1.00000 1.00000
-7 0.03909 host ceph03
2 hdd 0.03909 osd.2 up 1.00000 1.00000

[root@ceph01 ceph-cluster]# ceph -s
  cluster:
    id:     a9f8cf77-cd0c-439e-990c-0a417166cd6a
    health: HEALTH_WARN
            no active mgr
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 4m)
    mgr: no daemons active
    osd: 3 osds: 3 up (since 21s), 3 in (since 21s)
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
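
Each OSD above keeps its data on /dev/sdb and puts the RocksDB metadata (--block-db) and the WAL (--block-wal) on the two other disks. To see how a given OSD was actually laid out, ceph-volume on the OSD's host lists the devices behind it (output omitted here):
[root@ceph01 ceph-cluster]# ceph-volume lvm list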

Deploy the managers (mgr)

[root@ceph01 ceph-cluster]# ceph-deploy mgr create ceph01 ceph02 ceph03
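
After the mgr daemons come up, the "no active mgr" warning from the previous ceph -s should clear; re-running ceph -s (or ceph mgr dump for more detail) should show one active mgr and the other two as standbys:
[root@ceph01 ceph-cluster]# ceph -s
[root@ceph01 ceph-cluster]# ceph mgr dump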

Enable the dashboard

[root@ceph01 ceph-cluster]# ceph mgr module enable dashboard
Error ENOENT: all mgr daemons do not support module 'dashboard', pass --force to force enablement
[root@ceph01 ceph-cluster]# yum install ceph-mgr-dashboard (all nodes)
After the package has been installed on every node, run ceph mgr module enable dashboard again; if it still reports the module as unsupported, restart the mgr daemons first (systemctl restart ceph-mgr.target).
By default, all HTTP connections to the dashboard are secured with SSL/TLS.
The built-in command below generates and installs a self-signed certificate:
[root@ceph01 ceph-cluster]# ceph dashboard create-self-signed-cert
Self-signed certificate created
Create a user with the administrator role:
[root@ceph01 ceph-cluster]# ceph dashboard set-login-credentials admin admin
******************************************************************
*** WARNING: this command is deprecated. ***
*** Please use the ac-user-* related commands to manage users. ***
******************************************************************
Username and password updated
Check the ceph-mgr services:
[root@ceph01 ceph-cluster]# ceph mgr services
{
"dashboard": "https://ceph02:8443/"
}

Open https://ip:8443/ in a browser to see the dashboard. Note: ip is the address of the host running the active mgr (ceph02 in the output above).
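
The deprecation warning above points at the ac-user-* commands. On this Nautilus release an equivalent administrator account could be created roughly as below; the user name and password are placeholders, and newer point releases require supplying the password via a file with -i instead of on the command line:
[root@ceph01 ceph-cluster]# ceph dashboard ac-user-create admin admin administrator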

Deploy the metadata servers (mds)

[root@ceph01 ceph-cluster]# ceph-deploy mds create ceph01 ceph02 ceph03
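
The mds daemons stay idle in standby until a CephFS filesystem exists. A minimal follow-up sketch (the pool names and PG counts here are illustrative, not part of the original deployment):
[root@ceph01 ceph-cluster]# ceph osd pool create cephfs_data 64
[root@ceph01 ceph-cluster]# ceph osd pool create cephfs_metadata 32
[root@ceph01 ceph-cluster]# ceph fs new cephfs cephfs_metadata cephfs_data
[root@ceph01 ceph-cluster]# ceph fs status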