Create a storage pool
# Fewer than 5 OSDs: set pg_num to 128
# 5 to 10 OSDs: set pg_num to 512
# 10 to 50 OSDs: set pg_num to 4096
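For other cluster sizes, a common rule of thumb from the Ceph docs is total PGs ~= (OSD count * 100) / replica count, rounded up to the next power of two. A quick shell sketch of that arithmetic (3 OSDs and 3 replicas here are just example numbers):

# pg_num = next power of two >= osds * 100 / size
osds=3; size=3
target=$(( osds * 100 / size ))                            # 100
pg=1; while [ $pg -lt $target ]; do pg=$(( pg * 2 )); done
echo $pg                                                   # 128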

# Create the pool; the two 64s are pg_num and pgp_num
ceph osd pool create ceph-demo 64 64
# List pools
ceph osd lspools
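Since Luminous, a new pool should also be tagged with the application that will use it, otherwise ceph health reports a warning. For RBD, either of these works:

rbd pool init ceph-demo
# or equivalently
ceph osd pool application enable ceph-demo rbd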
View pool parameters
[root@node-1 ~]# ceph osd pool get ceph-demo pg_num
pg_num: 64
[root@node-1 ~]# ceph osd pool get ceph-demo pgp_num
pgp_num: 64
[root@node-1 ~]# ceph osd pool get ceph-demo size
size: 3
Modify pool parameters
# keep pgp_num equal to pg_num so placement actually rebalances; decreasing pg_num (PG merging) requires Nautilus or later
ceph osd pool set ceph-demo pg_num 32
ceph osd pool set ceph-demo pgp_num 32
Create an image
rbd create -p ceph-demo --image rbd-demo.img --size 10G
# or equivalently
rbd create ceph-demo/rbd-demo-1.img --size 10G
[root@node-1 ~]# rbd -p ceph-demo ls
rbd-demo.img
rbd-demo-1.img
View image details
[root@node-1 ~]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
    size 10 GiB in 2560 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1134749f26f5
    block_name_prefix: rbd_data.1134749f26f5
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
    op_features: 
    flags: 
    create_timestamp: Tue Sep 22 15:39:03 2020
    access_timestamp: Tue Sep 22 15:39:03 2020
    modify_timestamp: Tue Sep 22 15:39:03 2020
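The size and order lines agree: order 22 means each object is 2^22 bytes = 4 MiB, and 10 GiB / 4 MiB = 2560 objects.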
Delete an image
rbd rm ceph-demo/rbd-demo-1.img
[root@node-1 ~]# rbd -p ceph-demo ls
rbd-demo.img
Map the block device
rbd map ceph-demo/rbd-demo.img
rbd: sysfs write failed
RBD image feature set mismatch. Try disabling features unsupported by the kernel with "rbd feature disable".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address

The error above occurs because the CentOS 7 kernel does not support some of the enabled image features, so they have to be disabled before the image can be mapped:

[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img deep-flatten
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img fast-diff
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img object-map
rbd: failed to update image features: (22) Invalid argument
2020-09-25 14:03:56.251 7efe8d341c80 -1 librbd::Operations: one or more requested features are already disabled
# harmless: as the message says, object-map was already disabled at this point
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img exclusive-lock
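To avoid this dance on future images, the kernel-incompatible features can be left out at creation time, or turned off by default in ceph.conf (a sketch; the image name here is just an example):

# create with only the layering feature
rbd create ceph-demo/rbd-demo-2.img --size 10G --image-feature layering
# or set the default for all new images in /etc/ceph/ceph.conf under [global]:
# rbd_default_features = 1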
Map it again
[root@node-1 ~]# rbd map ceph-demo/rbd-demo.img
/dev/rbd0
# List mapped devices
[root@node-1 ~]# rbd device list 
id pool      namespace image        snap device    
0  ceph-demo           rbd-demo.img -    /dev/rbd0
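For reference, the reverse operation once the device is no longer needed:

rbd unmap /dev/rbd0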
Format the device
mkfs.ext4 /dev/rbd0
Check the devices
[root@node-1 ~]# lsblk 
NAME                                                                       MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda                                                                          8:0    0  100G  0 disk 
├─sda1                                                                       8:1    0    1G  0 part /boot
└─sda2                                                                       8:2    0   99G  0 part 
  └─centos-root                                                            253:0    0   99G  0 lvm  /
sdb                                                                          8:16   0   50G  0 disk 
└─ceph--82366968--ae58--446f--88db--57f5246aa08d-osd--block--19d85d44--9015--4580--bf2d--eae1afe0c0d6
                                                                           253:1    0   50G  0 lvm  
sdc                                                                          8:32   0   50G  0 disk 
sr0                                                                         11:0    1  792M  0 rom  
rbd0                                                                       252:0    0   10G  0 disk 
Mount the block device
mkdir /mnt/rbd-demo
mount /dev/rbd0 /mnt/rbd-demo
[root@node-1 ~]# df -h /dev/rbd0
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0       9.8G   37M  9.2G   1% /mnt/rbd-demo
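Note that neither the mapping nor the mount survives a reboot. One common approach (a sketch, assuming the default admin keyring path) is the rbdmap service plus a noauto fstab entry:

# /etc/ceph/rbdmap
ceph-demo/rbd-demo.img id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
# /etc/fstab (udev creates the /dev/rbd/<pool>/<image> symlink)
/dev/rbd/ceph-demo/rbd-demo.img /mnt/rbd-demo ext4 noauto 0 0
# then enable the service
systemctl enable rbdmap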
Grow the RBD image
[root@node-1 rbd-demo]# rbd resize ceph-demo/rbd-demo.img --size 20G
Resizing image: 100% complete...done.
[root@node-1 rbd-demo]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
    size 20 GiB in 5120 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1134749f26f5
    block_name_prefix: rbd_data.1134749f26f5
    format: 2
    features: layering
    op_features: 
    flags: 
    create_timestamp: Tue Sep 22 15:39:03 2020
    access_timestamp: Tue Sep 22 15:39:03 2020
    modify_timestamp: Tue Sep 22 15:39:03 2020
# The image now reports 20 GiB
Grow the filesystem on the host
[root@node-1 rbd-demo]# df -h /dev/rbd0
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0       9.8G   37M  9.2G   1% /mnt/rbd-demo
# df still shows the old size: the filesystem itself has not been grown yet
[root@node-1 rbd-demo]# resize2fs /dev/rbd0
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/rbd0 is mounted on /mnt/rbd-demo; on-line resizing required
old_desc_blocks = 2, new_desc_blocks = 3
The filesystem on /dev/rbd0 is now 5242880 blocks long.
# Check the size again
[root@node-1 rbd-demo]# df -h /dev/rbd0
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0        20G   44M   19G   1% /mnt/rbd-demo
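resize2fs is ext4-specific; if the device had been formatted as XFS, the equivalent online grow would be:

xfs_growfs /mnt/rbd-demo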
Inspect the data objects
# The grep pattern is the block_name_prefix value from rbd info; it prefixes every object belonging to this image. RBD is thin-provisioned, so only objects that have actually been written exist.
rados -p ceph-demo ls | grep rbd_data.1134749f26f5
Stat a single object
[root@node-1 rbd-demo]# rados -p ceph-demo stat rbd_data.1134749f26f5.0000000000000e00
ceph-demo/rbd_data.1134749f26f5.0000000000000e00 mtime 2020-09-25 14:19:39.000000, size 4096
# Each object can hold up to 4 MiB (order 22); this one currently contains only 4096 bytes
See where an object is stored
[root@node-1 rbd-demo]# ceph osd map ceph-demo rbd_data.1134749f26f5.0000000000000120
osdmap e35 pool 'ceph-demo' (1) object 'rbd_data.1134749f26f5.0000000000000120' -> pg 1.b2f3e87f (1.3f) -> up ([1,2,0], p1) acting ([1,2,0], p1)

This object maps to PG 1.3f, which is placed on OSDs [1,2,0] (osd.1 is the primary); run ceph osd tree to see which host each OSD lives on.
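To see how the whole image is spread across PGs and OSDs, the same lookup can be run over every object (a small sketch):

for obj in $(rados -p ceph-demo ls | grep rbd_data.1134749f26f5); do
    ceph osd map ceph-demo $obj
done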

Test writing data
# write 1 GiB into the mounted filesystem
dd if=/dev/zero of=/mnt/rbd-demo/test.image bs=1M count=1024
rados -p ceph-demo ls | grep rbd_data.1134749f26f5 | wc -l
299
# The number of backing objects grows as data is written
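rbd can also report provisioned versus actually used space per image; with fast-diff disabled (as above) it falls back to a slower full scan but still works:

rbd du ceph-demo/rbd-demo.img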
Handling a pool-full error
[root@node-1 ~]# ceph health detail
HEALTH_WARN 1 pool(s) full
POOL_FULL 1 pool(s) full
    pool 'ceph-demo' is full (running out of quota)
    

# Each object holds at most 4 MiB by default, so size the pool's max_objects quota from the capacity you want
ceph osd pool set-quota ceph-demo max_objects 12800
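12800 objects * 4 MiB = 50 GiB of logical capacity. To remove the quota again, set it back to 0 (0 means unlimited):

ceph osd pool set-quota ceph-demo max_objects 0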
