# Tiering-eviction setup: create a base ("slow") pool and a cache ("fast")
# pool, attach the cache as a writeback tier of the base.
# NOTE(review): 'local' implies this runs inside a function whose definition
# is outside this view.
# fix: removed stray diff '+ ' marker that made the application-enable line
# invalid shell.
local slow=slow_eviction
local fast=fast_eviction
ceph osd pool create $slow 1 1
ceph osd pool application enable $slow rados
ceph osd pool create $fast 1 1
ceph osd tier add $slow $fast
ceph osd tier cache-mode $fast writeback
{
# tiering: create two base pools and two cache pools, then attach one
# cache as a tier of one base.
# fix: removed stray diff '+ ' markers that made the application-enable
# lines invalid shell.
ceph osd pool create slow 2
ceph osd pool application enable slow rados
ceph osd pool create slow2 2
ceph osd pool application enable slow2 rados
ceph osd pool create cache 2
ceph osd pool create cache2 2
ceph osd tier add slow cache
{
# make sure we can't clobber snapshot state: a pool that already has a
# pool snapshot must be rejected as a tier.
# fix: removed stray diff '+ ' marker on the application-enable line.
ceph osd pool create snap_base 2
ceph osd pool application enable snap_base rados
ceph osd pool create snap_cache 2
ceph osd pool mksnap snap_cache snapname
expect_false ceph osd tier add snap_base snap_cache
{
# make sure we can't create snapshot on tier
ceph osd pool create basex 2
ceph osd pool application enable basex rados
ceph osd pool create cachex 2
ceph osd tier add basex cachex
# fix: snapshot the tier created here (cachex), not 'cache' from an
# unrelated test block — the intent is to reject mksnap on a tier pool.
expect_false ceph osd pool mksnap cachex snapname
# an erasure-coded pool cannot serve as a cache tier, and lowering the
# required compat client below the tiering feature set must fail
ceph osd pool create eccache 2 2 erasure
expect_false ceph osd set-require-min-compat-client bobtail
ceph osd pool create repbase 2
ceph osd pool application enable repbase rados
expect_false ceph osd tier add repbase eccache
ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
{
# convenient add-cache command: one command should create the tier
# relationship and seed default hit-set / target-size settings, which we
# verify in the osd dump output.
# fix: removed stray diff '+ ' marker on the application-enable line.
ceph osd pool create slow 2
ceph osd pool application enable slow rados
ceph osd pool create cache3 2
ceph osd tier add-cache slow cache3 1024000
ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
{
# check add-cache whether work: the tier created by add-cache should
# accept a cache-mode change afterwards.
# fix: removed stray diff '+ ' marker on the application-enable line.
ceph osd pool create datapool 2
ceph osd pool application enable datapool rados
ceph osd pool create cachepool 2
ceph osd tier add-cache datapool cachepool 1024000
ceph osd tier cache-mode cachepool writeback
{
# protection against pool removal when used as tiers
# fix: removed the stray diff '+ ' markers throughout this section; every
# other token is unchanged.
ceph osd pool create datapool 2
ceph osd pool application enable datapool rados
ceph osd pool create cachepool 2
ceph osd tier add-cache datapool cachepool 1024000
# deletion of an in-use tier is expected to fail; stderr is captured for
# inspection and '|| true' keeps the script running past the failure
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
## check health check
# disable the tier agent so the cache pool stays in a warnable state
ceph osd set notieragent
ceph osd pool create datapool 2
ceph osd pool application enable datapool rados
ceph osd pool create cache4 2
ceph osd tier add-cache datapool cache4 1024000
ceph osd tier cache-mode cache4 writeback
# results in a 'pool foo is now (or already was) not a tier of bar'
#
ceph osd pool create basepoolA 2
ceph osd pool application enable basepoolA rados
ceph osd pool create basepoolB 2
ceph osd pool application enable basepoolB rados
# capture the numeric pool ids (column 2 of 'osd dump') for later use
poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
ceph osd ls
# basic pool listing and object-to-pg mapping (with and without namespace)
ceph osd pool create data 10
ceph osd pool application enable data rados
ceph osd lspools | grep data
ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
# osd pool
#
# pool snapshot create/list/remove round-trip, then rename
ceph osd pool create data 10
ceph osd pool application enable data rados
ceph osd pool mksnap data datasnap
rados -p data lssnap | grep datasnap
ceph osd pool rmsnap data datasnap
ceph osd pool delete data data --yes-i-really-really-mean-it
ceph osd pool create data2 10
ceph osd pool application enable data2 rados
ceph osd pool rename data2 data3
ceph osd lspools | grep data3
ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
# re-creating an existing pool with the same type succeeds (no-op forms)
ceph osd pool create replicated 12 12 replicated
ceph osd pool create replicated 12 12 # default is replicated
ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
ceph osd pool application enable replicated rados
# should fail because the type is not the same
expect_false ceph osd pool create replicated 12 12 erasure
ceph osd lspools | grep replicated
ceph osd pool create ec_test 1 1 erasure
ceph osd pool application enable ec_test rados
set +e
# NOTE(review): this 'if' has no 'fi' in view — the rest of the branch is
# outside this fragment
ceph osd metadata | grep osd_objectstore_type | grep -qc bluestore
if [ $? -eq 0 ]; then
# create tmp pool
ceph osd pool create tmp-quota-pool 36
ceph osd pool application enable tmp-quota-pool rados
#
# set erroneous quotas
#
{
# pool get/set round-trips on a dedicated test pool
# fix: removed stray diff '+ ' markers on the application-enable lines.
TEST_POOL_GETSET=pool_getset
ceph osd pool create $TEST_POOL_GETSET 1
ceph osd pool application enable $TEST_POOL_GETSET rados
wait_for_clean
ceph osd pool get $TEST_POOL_GETSET all
# NOTE(review): $old_size is assigned outside this fragment — presumably
# captured from 'pool get ... size' earlier; verify against full file
ceph osd pool set $TEST_POOL_GETSET size $old_size
# erasure-coded pools reject a size change
ceph osd pool create pool_erasure 1 1 erasure
ceph osd pool application enable pool_erasure rados
wait_for_clean
set +e
ceph osd pool set pool_erasure size 4444 2>$TMPFILE
# this is not a tier pool, so tier-only options must be rejected
ceph osd pool create fake-tier 2
ceph osd pool application enable fake-tier rados
wait_for_clean
expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
# RAW USED The near raw used per pool in raw total
ceph osd pool create cephdf_for_test 32 32 replicated
ceph osd pool application enable cephdf_for_test rados
ceph osd pool set cephdf_for_test size 2
dd if=/dev/zero of=./cephdf_for_test bs=4k count=1