/etc/hosts on every node (name resolution for the four hosts):
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.253.135 controller
192.168.253.194 compute
192.168.253.15 storage
192.168.253.10 dlp
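A quick sanity check that each name resolves to the address above (the host list is taken from this table):
for h in dlp controller compute storage; do ping -c1 -W1 $h >/dev/null && echo "$h ok" || echo "$h FAILED"; done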
useradd cent && echo "123" | passwd --stdin cent
echo -e 'Defaults:cent !requiretty\ncent ALL = (root) NOPASSWD:ALL' | tee /etc/sudoers.d/ceph
chmod 440 /etc/sudoers.d/ceph
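The cent user, the sudoers entry, and the 440 permission above are needed on every node that ceph-deploy will manage, not just on dlp. A minimal sketch for repeating the three commands from a host that still has root SSH access to the others (host names taken from the /etc/hosts table above):
for node in controller compute storage; do
  ssh root@$node 'useradd cent; echo "123" | passwd --stdin cent; echo -e "Defaults:cent !requiretty\ncent ALL = (root) NOPASSWD:ALL" > /etc/sudoers.d/ceph; chmod 440 /etc/sudoers.d/ceph'
done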
[cent@dlp ~]$ ssh-keygen
[cent@dlp ~]$ ssh-copy-id dlp
[cent@dlp ~]$ ssh-copy-id controller
[cent@dlp ~]$ ssh-copy-id compute
[cent@dlp ~]$ ssh-copy-id storage
(4) On the deployment node, switch to the cent user and, in the cent user's home directory, create the SSH client configuration file: vi ~/.ssh/config
The file contents:
Host dlp
    Hostname dlp
    User cent
Host controller
    Hostname controller
    User cent
Host compute
    Hostname compute
    User cent
Host storage
    Hostname storage
    User cent
Then set the permissions on it:
chmod 600 ~/.ssh/config
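A quick check that key-based login and the config work before running ceph-deploy (host names from the table above); each command should print the remote hostname without prompting for a password:
for h in controller compute storage; do ssh $h hostname; done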
wget https://mirrors.aliyun.com/centos/7/storage/x86_64/ceph-jewel/
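The Aliyun URL above is the mirror directory holding the ceph-jewel RPMs; rather than fetching it with wget, it is normally written into a yum repository on every node so that ceph-deploy install can pull the packages. A sketch of such a repo file (the filename and gpgcheck setting are assumptions; the baseurl is the URL above):
cat <<'EOF' > /etc/yum.repos.d/ceph-jewel.repo
[ceph-jewel]
name=CentOS 7 Ceph Jewel (Aliyun mirror)
baseurl=https://mirrors.aliyun.com/centos/7/storage/x86_64/ceph-jewel/
enabled=1
gpgcheck=0
EOF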
ceph-deploy new controller compute storage
vim ceph.conf
Add: osd_pool_default_size = 2
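For reference, after `ceph-deploy new` and the edit above, the [global] section of ceph.conf in the deploy directory should look roughly like the sketch below; the fsid and monitor addresses are the ones reported by `ceph -s` later in this article, and only the last line is the manual addition:
[global]
fsid = 8e03f0d7-06cb-49c6-b0fa-b9764e85e61a
mon_initial_members = controller, compute, storage
mon_host = 192.168.253.135,192.168.253.194,192.168.253.15
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd_pool_default_size = 2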
While ceph-deploy was creating the monitors, the error "monitor is not yet in quorum" appeared.
This happens when the firewall is still running; stop the firewall on every node.
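A minimal way to do that, assuming firewalld (the CentOS 7 default) on each of dlp, controller, compute and storage:
systemctl stop firewalld && systemctl disable firewalld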
Then run again:
ceph-deploy --overwrite-conf mon create-initial
[ceph_deploy.mon][ERROR ] RuntimeError: config file /etc/ceph/ceph.conf exists with different content; use --overwrite-conf to overwrite
[ceph_deploy][ERROR ] GenericError: Failed to create 3 monitors
Cause: ceph.conf in the deploy directory was edited, but the updated file was never pushed to the other nodes, so it has to be pushed out.
Fix: ceph-deploy --overwrite-conf config push node1-4
or: ceph-deploy --overwrite-conf mon create node1-4
ceph-deploy osd prepare controller:/dev/sdb1 compute:/dev/sdb1 storage:/dev/sdc1
ceph-deploy osd activate controller:/dev/sdb1 compute:/dev/sdb1 storage:/dev/sdc1
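If `osd prepare` refuses a device because it already carries data or an old partition table, jewel-era ceph-deploy can wipe it first with `disk zap`; a sketch using whole-disk names guessed from the partitions above (destructive, double-check the devices before running):
ceph-deploy disk zap controller:/dev/sdb compute:/dev/sdb storage:/dev/sdc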
ceph-deploy admin dlp controller compute storage
sudo chmod 644 /etc/ceph/ceph.client.admin.keyring    (on each of the other nodes)
[root@controller old]# ceph -s
cluster 8e03f0d7-06cb-49c6-b0fa-b9764e85e61a
health HEALTH_OK
monmap e1: 3 mons at {compute=192.168.253.194:6789/0,controller=192.168.253.135:6789/0,storage=192.168.253.15:6789/0}
election epoch 6, quorum 0,1,2 storage,controller,compute
osdmap e14: 3 osds: 3 up, 3 in
flags sortbitwise,require_jewel_osds
pgmap v2230: 64 pgs, 1 pools, 0 bytes data, 0 objects
24995 MB used, 27186 MB / 52182 MB avail
64 active+clean
Create an RBD image: rbd create disk01 --size 10G --image-feature layering    Delete it: rbd rm disk01
List RBD images: rbd ls -l
[cent@dlp ceph]$ rbd create disk01 --size 10G --image-feature layering
[cent@dlp ceph]$ rbd ls -l
NAME SIZE PARENT FMT PROT LOCK
disk01 10240M 2
Map the RBD image: sudo rbd map disk01    Unmap it: sudo rbd unmap disk01
[root@controller ~]# rbd map disk01    # as root no sudo is needed; as any other user, prefix the command with sudo
/dev/rbd0
rbd0 is now mapped under /dev/, but it has no filesystem and no mount point yet; lsblk shows the device without a MOUNTPOINT until it is formatted and mounted.
Show current mappings: rbd showmapped
Format disk01 with an XFS filesystem: sudo mkfs.xfs /dev/rbd0
[root@controller ~]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=512 agcount=17, agsize=162816 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=2621440, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Mount the device: sudo mount /dev/rbd0 /mnt
[root@controller ~]# mount /dev/rbd0 /mnt
[root@controller ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 19G 0 part
├─centos-root 253:0 0 17G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 20G 0 disk
└─sdb1 8:17 0 20G 0 part /var/lib/ceph/osd/ceph-0
sdc 8:32 0 10G 0 disk
└─sdc1 8:33 0 10G 0 part /var/lib/ceph/osd/ceph-3
sr0 11:0 1 4.2G 0 rom /mnt
rbd0 252:0 0 10G 0 disk /mnt
Verify the mount succeeded: df -hT
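Before tearing the cluster down in the following steps, the test image can be released again by reversing the mount and map above:
sudo umount /mnt            # detach the filesystem
sudo rbd unmap /dev/rbd0    # drop the kernel mapping
rbd rm disk01               # delete the image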
If an MDS was deployed (for CephFS), stop it and mark it failed before removing its pools:
systemctl stop ceph-mds@node1
ceph mds fail 0
List the storage pools:
ceph osd lspools
Output: 0 rbd,
Delete a pool (the pool name must be given twice as confirmation):
ceph osd pool rm rbd rbd --yes-i-really-really-mean-it
Wipe the cluster (remove Ceph packages, data and keys):
ceph-deploy purge dlp node1 node2 node3 controller
ceph-deploy purgedata dlp node1 node2 node3 controller
ceph-deploy forgetkeys
rm -rf ceph*
Source: https://www.cnblogs.com/xiaoxiaotiejiang/p/11084614.html