Share CephFS storage through nfs-ganesha. An nfs-ganesha export is consumed by clients in exactly the same way as a regular NFS share.

nfs-ganesha_wiki

Create pools

[root@ceph01 ceph-cluster]# ceph osd pool create cephfs_data 128
pool 'cephfs_data' created
[root@ceph01 ceph-cluster]# ceph osd pool create cephfs_metadata 128
pool 'cephfs_metadata' created
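
To double-check what was just created (pool names and placement-group counts), the standard pool commands can be used; the output is omitted here since it is cluster-specific:

# list all pools with their settings, and read back pg_num of the data pool
[root@ceph01 ceph-cluster]# ceph osd pool ls detail
[root@ceph01 ceph-cluster]# ceph osd pool get cephfs_data pg_num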

Create the filesystem

[root@ceph01 ceph-cluster]# ceph fs new k8s-cephfs cephfs_metadata cephfs_data
new fs with metadata pool 10 and data pool 8
[root@ceph01 ceph-cluster]# ceph fs ls
name: k8s-cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
[root@ceph01 ceph-cluster]# ceph mds stat
k8s-cephfs:1 {0=ceph02=up:active} 2 up:standby
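
For a one-screen summary of the new filesystem (active and standby MDS ranks plus pool usage), ceph fs status can be used as an additional check:

# summary of MDS ranks, standbys and pool usage for the filesystem
[root@ceph01 ceph-cluster]# ceph fs status k8s-cephfs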

Test mounting CephFS

[root@ceph01 ceph-cluster]# cat ceph.client.admin.keyring
[client.admin]
        key = AQC4ovheG6t4AhAAPQiE19+kG7hSlraPAjhWRg==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
[root@ceph01 ceph-cluster]# echo 'AQC4ovheG6t4AhAAPQiE19+kG7hSlraPAjhWRg==' >admin.secret
[root@ceph01 ceph-cluster]# mount -t ceph ceph01:6789,ceph02:6789,ceph03:6789:/ /opt/ceph -o name=admin,secretfile=admin.secret
mount error 110 = Connection timed out

Troubleshooting:

The mount times out; /var/log/messages shows:

Jun 24 18:50:07 ceph01 kernel: libceph: mon1 192.168.1.32:6789 feature set mismatch, my 107b84a842aca < server's 40107b84a842aca, missing 400000000000000
Jun 24 18:50:07 ceph01 kernel: libceph: mon1 192.168.1.32:6789 missing required protocol features
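
The feature negotiation above is done by the kernel CephFS client, so it is worth confirming the client kernel version first (CentOS 7 ships 3.10 by default, which predates several Ceph feature bits):

# show the running kernel version on the client
[root@ceph01 ~]# uname -r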

1. Solution 1: upgrade the kernel

According to https://tracker.ceph.com/issues/21220?journal_id=98261, the missing feature bit 400000000000000 (CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING) cannot be resolved by reconfiguring the cluster, which means the client kernel has to be upgraded (to 4.5 or later).
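
As a sketch only (the ELRepo repository is an assumption about the environment and was not used on this cluster), a mainline kernel can be installed on CentOS 7 roughly like this:

# add the ELRepo repository and install a mainline kernel (well above 4.5)
[root@ceph01 ~]# yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
[root@ceph01 ~]# yum --enablerepo=elrepo-kernel install -y kernel-ml
# make the newly installed kernel the default boot entry, then reboot
[root@ceph01 ~]# grub2-set-default 0
[root@ceph01 ~]# reboot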

2. Solution 2: adjust the CRUSH tunables

# ceph osd crush tunables legacy
With this setting, ceph reports the warning crush map has straw_calc_version=0:
[root@ceph01 ~]# ceph -s
  cluster:
    id:     a9f8cf77-cd0c-439e-990c-0a417166cd6a
    health: HEALTH_WARN
            crush map has straw_calc_version=0

  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 2h)
    mgr: ceph01(active, since 10h), standbys: ceph02, ceph03
    mds: k8s-cephfs:1 {0=ceph02=up:active} 2 up:standby
    osd: 4 osds: 4 up (since 10h), 4 in (since 10h)

  task status:
    scrub status:
        mds.ceph02: idle

  data:
    pools:   2 pools, 256 pgs
    objects: 22 objects, 16 KiB
    usage:   84 GiB used, 76 GiB / 160 GiB avail
    pgs:     256 active+clean

[root@ceph01 ~]# ceph osd crush tunables optimal
adjusted tunables profile to optimal
[root@ceph01 ~]# ceph -s
  cluster:
    id:     a9f8cf77-cd0c-439e-990c-0a417166cd6a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 2h)
    mgr: ceph01(active, since 10h), standbys: ceph02, ceph03
    mds: k8s-cephfs:1 {0=ceph02=up:active} 2 up:standby
    osd: 4 osds: 4 up (since 10h), 4 in (since 10h)

  task status:
    scrub status:
        mds.ceph02: idle

  data:
    pools:   2 pools, 256 pgs
    objects: 22 objects, 16 KiB
    usage:   84 GiB used, 76 GiB / 160 GiB avail
    pgs:     256 active+clean
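
If you want to see exactly which tunables profile the cluster ended up with (including straw_calc_version and the minimum client version it implies), the active values can be dumped at any time:

# dump the currently active CRUSH tunables
[root@ceph01 ~]# ceph osd crush show-tunables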

Test the mount again

[root@ceph01 ceph-cluster]# mount -t ceph ceph01:6789,ceph02:6789,ceph03:6789:/ /opt/ceph -o name=admin,secretfile=admin.secret
[root@ceph01 ceph-cluster]# df -h|grep /opt/ceph
192.168.1.30:6789,192.168.1.32:6789,192.168.1.34:6789:/ 160G 85G 76G 53% /opt/ceph
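
To make this kernel mount persistent across reboots, an /etc/fstab entry along the following lines can be added; the secretfile path is an assumption (point it at wherever admin.secret was actually saved):

# hypothetical fstab entry for the CephFS kernel client
ceph01:6789,ceph02:6789,ceph03:6789:/  /opt/ceph  ceph  name=admin,secretfile=/root/ceph-cluster/admin.secret,noatime,_netdev  0 0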

Configure ganesha

Install ganesha

[root@ceph01 ~]# vim /etc/yum.repos.d/nfs-ganesha.repo
[nfs-ganesha]
name=nfs-ganesha
baseurl=https://mirrors.aliyun.com/ceph/nfs-ganesha/rpm-V3.3-stable/octopus/el7/x86_64/
enabled=1
gpgcheck=0
[root@ceph01 ~]# yum install nfs-ganesha nfs-ganesha-ceph -y
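
A quick sanity check that both packages (the daemon and its Ceph FSAL plugin) were installed from the repository above:

# confirm the installed nfs-ganesha packages and their versions
[root@ceph01 ~]# rpm -q nfs-ganesha nfs-ganesha-ceph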

Configuration file

[root@ceph01 ~]# vim /etc/ganesha/ganesha.conf
EXPORT
{
    Export_ID = 1;               # export ID
    Path = "/";                  # the / directory of cephfs
    Pseudo = /cephfs;            # pseudo path that ganesha exports (virtual, does not exist on disk)
    Access_Type = RW;            # access permissions
    Protocols = 4;               # NFS protocol version
    Transports = TCP;
    Squash = no_root_squash;     # whether to squash (remap) the root user
    FSAL {                       # export through the Ceph FSAL
        Name = CEPH;
    }
}
LOG {                            # logging module
    Facility {
        name = FILE;
        destination = "/var/log/ganesha/ganesha.log";
        enable = active;
    }
}
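
FSAL_CEPH reaches the cluster through /etc/ceph/ceph.conf and a local keyring, which are already present here because ceph01 is a cluster node. If you prefer not to let ganesha use client.admin, the FSAL block also accepts a dedicated cephx user; the client.ganesha name below is hypothetical and shown only as a sketch:

# hypothetical dedicated cephx user for ganesha, authorized for rw on the filesystem root
[root@ceph01 ~]# ceph fs authorize k8s-cephfs client.ganesha / rw
# then reference it in the FSAL block of /etc/ganesha/ganesha.conf:
#   FSAL {
#       Name = CEPH;
#       User_Id = "ganesha";                         # cephx user without the "client." prefix
#       Secret_Access_Key = "<key printed by the command above>";
#   }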

Start the service

[root@ceph01 ~]# systemctl start nfs-ganesha
[root@ceph01 ~]# systemctl enable nfs-ganesha
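
Before attempting a client mount, it helps to confirm that ganesha is running and listening on the NFS port:

# service state plus a check for a listener on port 2049
[root@ceph01 ~]# systemctl status nfs-ganesha
[root@ceph01 ~]# ss -tnlp | grep 2049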

Test the mount

[root@ceph01 ~]# yum install -y nfs-utils
[root@ceph01 ~]# systemctl start rpcbind
[root@ceph01 ~]# mount -t nfs ceph01:/cephfs /opt/ganesha/
[root@ceph01 ~]# df -h|grep opt
192.168.1.30:6789,192.168.1.32:6789,192.168.1.34:6789:/ 160G 85G 76G 53% /opt/ceph
ceph01:/cephfs 23G 0 23G 0% /opt/ganesha
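
The export can be mounted from any other client on the network in the same way; for a persistent client-side mount, an /etc/fstab entry like the following can be used (assuming the client has nfs-utils installed and can resolve ceph01):

# hypothetical fstab entry on an NFS client
ceph01:/cephfs  /opt/ganesha  nfs4  defaults,_netdev  0 0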