OSD Operations
[root@test-ceph1 ~]
true
[root@test-ceph1 ~]
[root@test-ceph1 ~]
ceph orch device zap test-ceph1 /dev/nvme0n4 --force
ceph orch apply osd --all-available-devices
ceph orch apply osd --all-available-devices --unmanaged=true
ceph orch daemon add osd test-ceph1:/dev/nvme0n4
ceph orch daemon stop osd.1
ceph orch daemon rm osd.1
ceph osd rm 1
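The removal commands above can be chained into a small script when retiring a disk. A minimal sketch, assuming the OSD being removed is osd.1 on test-ceph1 backed by /dev/nvme0n4:

OSD_ID=1
HOST=test-ceph1
DEV=/dev/nvme0n4
ceph orch daemon stop osd.${OSD_ID}            # stop the OSD daemon
ceph orch daemon rm osd.${OSD_ID} --force      # remove the daemon from cephadm management
ceph osd rm ${OSD_ID}                          # remove the OSD from the cluster map
ceph orch device zap ${HOST} ${DEV} --force    # wipe the device so it can be reused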
service_type: osd
service_id: osd_size_and_model
placement:
  host_pattern: '*'
data_devices:
  size: '100G'
db_devices:
  model: My-Disk
wal_devices:
  size: '10G:20G'
unmanaged: true
---
service_type: osd
service_id: osd_host_and_path
placement:
  host_pattern: 'test-ceph[7-10]'
data_devices:
  paths:
    - /dev/nvme0n4
db_devices:
  paths:
    - /dev/nvme0n3
wal_devices:
  paths:
    - /dev/nvme0n2
encrypted: true
ceph orch apply -i service_spec.yaml
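Before applying a spec for real, the orchestrator can preview it. A hedged example, reusing the service_spec.yaml file name from above:

ceph orch apply -i service_spec.yaml --dry-run   # preview which devices would become OSDs, change nothing
ceph orch ls --service-type osd                  # verify the resulting OSD services afterwards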
ceph-volume lvm create --bluestore --data /dev/nvme0n2
ceph-volume lvm prepare --bluestore --data /dev/nvme0n2
ceph-volume lvm list
ceph-volume lvm activate <osd-id> <osd-fsid>
ceph-volume lvm batch --bluestore /dev/nvme0n2 /dev/nvme0n3 /dev/nvme0n4
ceph-volume inventory
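ceph-volume can also place the BlueStore DB on a separate device. A minimal sketch, assuming /dev/nvme0n2 holds the data and /dev/nvme0n3 the DB:

ceph-volume lvm prepare --bluestore --data /dev/nvme0n2 --block.db /dev/nvme0n3
ceph-volume lvm list                   # note the osd id and osd fsid that were assigned
ceph-volume lvm activate --all         # or: ceph-volume lvm activate <osd-id> <osd-fsid>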
Creating BlueStore OSDs with Logical Volumes
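The exercise below consumes raw devices; to build a BlueStore OSD from an LVM logical volume instead, a sketch (the VG and LV names are assumptions, not from the lab):

vgcreate ceph-vg /dev/nvme0n2                 # volume group on a free disk
lvcreate -n osd-lv -l 100%FREE ceph-vg        # one LV spanning the whole VG
ceph-volume lvm create --bluestore --data ceph-vg/osd-lv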
ceph -s
[root@test-ceph1 ~]# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
ssd 1.5 TiB 1.5 TiB 4.3 GiB 4.3 GiB 0.28
TOTAL  1.5 TiB  1.5 TiB  4.3 GiB   4.3 GiB       0.28

--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
.mgr 1 1 449 KiB 2 1.3 MiB 0 474 GiB
[root@test-ceph1 ~]# ceph osd tree
ID   CLASS  WEIGHT   TYPE NAME            STATUS  REWEIGHT  PRI-AFF
-1          1.46530  root default
-7          0.29306      host test-ceph1
 1    ssd   0.09769          osd.1            up   1.00000  1.00000
 7    ssd   0.09769          osd.7            up   1.00000  1.00000
12    ssd   0.09769          osd.12           up   1.00000  1.00000
-9          0.29306      host test-ceph2
 3    ssd   0.09769          osd.3            up   1.00000  1.00000
 8    ssd   0.09769          osd.8            up   1.00000  1.00000
13    ssd   0.09769          osd.13           up   1.00000  1.00000
-11         0.29306      host test-ceph3
 4    ssd   0.09769          osd.4            up   1.00000  1.00000
 9    ssd   0.09769          osd.9            up   1.00000  1.00000
14    ssd   0.09769          osd.14           up   1.00000  1.00000
-3          0.29306      host test-ceph4
 0    ssd   0.09769          osd.0            up   1.00000  1.00000
 5    ssd   0.09769          osd.5            up   1.00000  1.00000
10    ssd   0.09769          osd.10           up   1.00000  1.00000
-5          0.29306      host test-ceph6
 2    ssd   0.09769          osd.2            up   1.00000  1.00000
 6    ssd   0.09769          osd.6            up   1.00000  1.00000
11    ssd   0.09769          osd.11           up   1.00000  1.00000
ceph orch device ls
ceph orch device ls | awk '/test-ceph/' | grep Yes
ceph orch daemon add osd test-ceph1:/dev/nvme0n2
ceph orch daemon add osd test-ceph1:/dev/nvme0n3
ceph orch ps |grep -ie osd.1 -ie osd.7
ceph df
ceph osd tree
ceph orch apply osd --all-available-devices
ceph orch ls |grep all-available-devices
ceph osd tree
ceph device ls | grep 'test-ceph1'
ceph orch daemon stop osd.ID
ceph orch daemon rm osd.ID --force
ceph osd rm ID
ceph orch osd rm status
ceph orch osd rm status
ceph orch device zap --force test-ceph1 /dev/nvme0n2
ceph orch device ls | grep test-ceph1
ceph device ls | grep 'test-ceph1:nvme0n2'
ceph orch ps |grep osd.ID
ceph orch ls --service-type osd --format yaml
ceph orch apply osd --all-available-devices --unmanaged=true
ceph device ls | grep 'test-ceph2:nvme0n4'
ceph orch daemon stop osd.ID
ceph orch daemon rm osd.ID --force
ceph orch osd rm status
ceph osd rm ID
ceph orch device zap --force test-ceph2 /dev/nvme0n4
ceph orch device ls | awk '/test-ceph2/'
Creating and Configuring Storage Pools: Replicated Pools
ceph -s
[root@test-ceph1 ~]# ceph osd pool create replpool1
pool 'replpool1' created
[root@test-ceph1 ~]# ceph osd pool get replpool1 pg_autoscale_mode
pg_autoscale_mode: on
[root@test-ceph1 ~]
on
[root@test-ceph1 ~]# ceph osd lspools
1 .mgr
2 replpool1
[root@test-ceph1 ~]# ceph osd pool autoscale-status
POOL SIZE TARGET SIZE RATE RAW CAPACITY RATIO TARGET RATIO EFFECTIVE RATIO BIAS PG_NUM NEW PG_NUM AUTOSCALE BULK
.mgr 452.0k 3.0 1499G 0.0000 1.0 1 on False
replpool1 0 3.0 1499G 0.0000 1.0 64 on False
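The TARGET RATIO and BULK columns shown above can be set per pool to steer the autoscaler. A hedged example (the 0.2 ratio is arbitrary):

ceph osd pool set replpool1 target_size_ratio 0.2   # expected share of total raw capacity
ceph osd pool set replpool1 bulk true               # allocate the full PG budget up front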
[root@test-ceph1 ~]# ceph osd pool set replpool1 size 4
set pool 2 size to 4
[root@test-ceph1 ~]# ceph osd pool set replpool1 min_size 2
set pool 2 min_size to 2
[root@test-ceph1 ~]# ceph osd pool application enable replpool1 rbd
enabled application 'rbd' on pool 'replpool1'
[root@test-ceph1 ~]# ceph osd pool ls detail
pool 1 '.mgr' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 32 flags hashpspool stripe_width 0 pg_num_max 32 pg_num_min 1 application mgr
pool 2 'replpool1' replicated size 4 min_size 2 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode on last_change 54 flags hashpspool stripe_width 0 application rbd
[root@test-ceph1 ~]# ceph osd pool get replpool1 size
size: 4
[root@test-ceph1 ~]# ceph osd pool rename replpool1 newpool
pool 'replpool1' renamed to 'newpool'
[root@test-ceph1 ~]# ceph osd pool ls
.mgr
newpool
[root@test-ceph1 ~]# ceph tell mon.* config set mon_allow_pool_delete true
mon.test-ceph1: {"success": "mon_allow_pool_delete = 'true' "}
mon.test-ceph3: {"success": "mon_allow_pool_delete = 'true' "}
mon.test-ceph2: {"success": "mon_allow_pool_delete = 'true' "}
[root@test-ceph1 ~]
[root@test-ceph1 ~]# ceph osd pool delete newpool newpool --yes-i-really-really-mean-it
pool 'newpool' removed
[root@test-ceph1 ~]
Error EPERM: WARNING: this will *PERMANENTLY DESTROY* all data stored in pool newpool. If you are *ABSOLUTELY CERTAIN* that is what you want, pass the pool name *twice*, followed by --yes-i-really-really-mean-it.
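Putting the two deletion safeguards together (assuming the pool is still named newpool), and re-arming the guard afterwards:

ceph tell mon.* config set mon_allow_pool_delete true
ceph osd pool delete newpool newpool --yes-i-really-really-mean-it
ceph tell mon.* config set mon_allow_pool_delete false   # turn the safety back on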
Creating and Configuring Storage Pools: Erasure-Coded Pools
ceph -s
[root@test-ceph1 ~]# ceph osd erasure-code-profile ls
default
[root@test-ceph1 ~]# ceph osd erasure-code-profile get default
k=2
m=2
plugin=jerasure
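For comparison, the custom profile used below sets k=4 and m=2: each object is split into 4 data chunks plus 2 coding chunks stored on 6 different OSDs, so any 2 OSDs can be lost; raw-space overhead is (k+m)/k = 6/4 = 1.5x, and min_size becomes k+1 = 5, as the pool detail output below confirms.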
[root@test-ceph1 ~]# ceph osd erasure-code-profile set ecprofile-k4-m2 k=4 m=2
[root@test-ceph1 ~]# ceph osd pool create ecpool1 erasure ecprofile-k4-m2
pool 'ecpool1' created
[root@test-ceph1 ~]# ceph osd pool application enable ecpool1 rgw
enabled application 'rgw' on pool 'ecpool1'
[root@test-ceph1 ~]# ceph osd pool ls detail
pool 1 '.mgr' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 32 flags hashpspool stripe_width 0 pg_num_max 32 pg_num_min 1 application mgr
pool 3 'ecpool1' erasure profile ecprofile-k4-m2 size 6 min_size 5 crush_rule 1 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode on last_change 62 flags hashpspool stripe_width 16384 application rgw
[root@test-ceph1 ~]# ceph osd pool set ecpool1 allow_ec_overwrites true
set pool 3 allow_ec_overwrites to true
[root@test-ceph1 ~]# ceph osd pool delete ecpool1 ecpool1 --yes-i-really-really-mean-it
pool 'ecpool1' removed
Pool Namespaces
rados -p mytestpool -N system put srv /etc/services
rados -p mytestpool -N system ls
rados -p mytestpool --all ls
rados -p mytestpool --all ls --format=json-pretty
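To read an object back from a specific namespace (the local output path here is just an example):

rados -p mytestpool -N system get srv /tmp/srv.copy
rados -p mytestpool -N system stat srv      # size and mtime of the object in that namespace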
User Authentication
[root@test-ceph1 ~]# ceph osd pool create replpool1
pool 'replpool1' created
[root@test-ceph1 ~]# ceph auth get-or-create client.docedit \
mon 'allow r' osd 'allow rw pool=replpool1 namespace=docs' \
> /etc/ceph/ceph.client.docedit.keyring
[root@test-ceph1 ~]# ceph auth get-or-create client.docget \
mon 'allow r' osd 'allow r pool=replpool1 namespace=docs' \
> /etc/ceph/ceph.client.docget.keyring
[root@test-ceph1 ~]
client.docedit
        key: AQAGHrRo1ERcBRAAcVS07Xpp00PbMZljAJL3Kw==
        caps: [mon] allow r
        caps: [osd] allow rw pool=replpool1 namespace=docs
client.docget
        key: AQANHrRoU5+HNBAAuc44PrhlDpZj5IrLktaF6g==
        caps: [mon] allow r
        caps: [osd] allow r pool=replpool1 namespace=docs
[root@test-ceph1 ~]
[root@test-ceph1 ~]
[root@test-ceph1 ~]
[root@test-ceph6 ~]
[root@test-ceph6 ~]
[root@test-ceph6 ~]
[root@test-ceph6 ~]
error putting replpool1/writetest: (1) Operation not permitted
[root@test-ceph1 ~]# ceph auth caps client.docget mon 'allow r' \
osd 'allow rw pool=replpool1 namespace=docs, allow rw pool=docarchive'
updated caps for client.docget
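To confirm the expanded capabilities took effect, the entity can be printed again:

ceph auth get client.docget     # shows the key plus the updated mon/osd caps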
[root@test-ceph6 ~]
[root@test-ceph6 ~]
[root@test-ceph1 ~]
[root@test-ceph1 ~]
[root@test-ceph6 ~]
[root@test-ceph1 ~]
[root@test-ceph1 ~]
[root@test-ceph1 ~]# ceph auth get-or-create client.forrbd \
mon 'profile rbd' osd 'profile rbd'
[client.forrbd]
        key = AQAsIrRoEwY1ABAAz3Hx7wbajIUQE8ErrAMtyw==
[root@test-ceph1 ~]# ceph auth get-or-create client.formyapp3 \
mon 'allow r' \
osd 'allow rw pool=myapp-pool object_prefix pref-'
[client.formyapp3]
        key = AQCWIrRo6R25HRAAwey6HjKp/Az3uFX2GX7KFA==
[root@test-ceph1 ~]# ceph auth get-or-create client.operator1 \
mon 'allow r,allow command "auth get-or-create",allow command "auth list"'
[client.operator1]
        key = AQBPIrRoeVoXEBAACsAC1JM8Ygzmq6mE3quEYg==
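To actually run commands as the restricted operator1 user, its key first has to land where the ceph CLI looks for keyrings. A sketch, assuming the default keyring path; client.app1 and somepool are hypothetical names used only to exercise the caps:

ceph auth get client.operator1 -o /etc/ceph/ceph.client.operator1.keyring
ceph -n client.operator1 auth get-or-create client.app1 mon 'allow r'   # permitted by the "auth get-or-create" command cap
ceph -n client.operator1 osd pool create somepool                       # expected to fail: no write caps on mon/osd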