From fa90be842e67891542f7b4224435125d075b1ef9 Mon Sep 17 00:00:00 2001
From: Jason Dillaman
Date: Mon, 26 Jun 2017 21:15:57 -0400
Subject: [PATCH] test: enable pool applications for new pools

Signed-off-by: Jason Dillaman
---
 .../all/export-after-evict.yaml               |  1 +
 .../singleton-nomsgr/all/full-tiering.yaml    |  1 +
 .../all/multi-backfill-reject.yaml            |  1 +
 .../all/pg-removal-interruption.yaml          |  1 +
 .../thrash_cache_writeback_proxy_none.yaml    |  1 +
 .../thrash/d-require-luminous/at-end.yaml     |  1 +
 .../thrash/workloads/cache-agent-big.yaml     |  1 +
 .../thrash/workloads/cache-agent-small.yaml   |  1 +
 .../workloads/cache-pool-snaps-readproxy.yaml |  1 +
 .../thrash/workloads/cache-pool-snaps.yaml    |  1 +
 .../rados/thrash/workloads/cache-snaps.yaml   |  1 +
 qa/suites/rados/thrash/workloads/cache.yaml   |  1 +
 qa/suites/rbd/cli/pool/ec-data-pool.yaml      |  1 +
 .../rbd/cli/pool/replicated-data-pool.yaml    |  1 +
 .../rbd/librbd/pool/replicated-data-pool.yaml |  1 +
 qa/suites/rbd/qemu/pool/ec-cache-pool.yaml    |  1 +
 qa/suites/rbd/qemu/pool/ec-data-pool.yaml     |  1 +
 .../rbd/qemu/pool/replicated-data-pool.yaml   |  1 +
 .../smoke/basic/tasks/rados_cache_snaps.yaml  |  1 +
 .../0-create-base-tier/create-ec-pool.yaml    |  1 +
 .../create-replicated-pool.yaml               |  1 +
 qa/workunits/ceph-helpers-root.sh             |  1 +
 qa/workunits/ceph-helpers.sh                  |  1 +
 qa/workunits/cephtool/test.sh                 | 22 +++++++++++++++++++
 qa/workunits/mon/rbd_snaps_ops.sh             |  2 ++
 qa/workunits/rados/test_alloc_hint.sh         |  2 ++
 qa/workunits/rados/test_cache_pool.sh         |  2 ++
 qa/workunits/rados/test_pool_quota.sh         |  2 ++
 qa/workunits/rbd/cli_generic.sh               |  1 +
 qa/workunits/rbd/krbd_data_pool.sh            |  4 ++++
 qa/workunits/rbd/permissions.sh               |  2 ++
 qa/workunits/rbd/verify_pool.sh               |  1 +
 32 files changed, 61 insertions(+)

diff --git a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
index 1b777ab0f00..e7fafe9ac48 100644
--- a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -18,6 +18,7 @@ tasks:
 - exec:
     client.0:
     - ceph osd pool create base-pool 4
+    - ceph osd pool application enable base-pool rados
     - ceph osd pool create cache-pool 4
     - ceph osd tier add base-pool cache-pool
     - ceph osd tier cache-mode cache-pool writeback
diff --git a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
index 5eb42f4dd63..b811199d1ea 100644
--- a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -20,6 +20,7 @@ tasks:
     client.0:
     - ceph osd pool create ec-ca 1 1
     - ceph osd pool create ec 1 1 erasure default
+    - ceph osd pool application enable ec rados
     - ceph osd tier add ec ec-ca
     - ceph osd tier cache-mode ec-ca readproxy
     - ceph osd tier set-overlay ec ec-ca
diff --git a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
index cadf3044a1d..3a9dbde3164 100644
--- a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -26,6 +26,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create foo 64
+    - sudo ceph osd pool application enable foo rados
     - rados -p foo bench 60 write -b 1024 --no-cleanup
     - sudo ceph osd pool set foo size 3
     - sudo ceph osd out 0 1
diff --git a/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
index 856b08dd437..563e0b0e010 100644
--- a/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
+++ b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
@@ -22,6 +22,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create foo 128 128
+    - sudo ceph osd pool application enable foo rados
     - sleep 5
     - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
     - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
diff --git a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
index 02fee3e88ea..f6b7d2b535c 100644
--- a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
+++ b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
@@ -23,6 +23,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create base 4
+    - sudo ceph osd pool application enable base rados
     - sudo ceph osd pool create cache 4
     - sudo ceph osd tier add base cache
     - sudo ceph osd tier cache-mode cache writeback
diff --git a/qa/suites/rados/thrash/d-require-luminous/at-end.yaml b/qa/suites/rados/thrash/d-require-luminous/at-end.yaml
index bb1d7073ccc..de21c635e7f 100644
--- a/qa/suites/rados/thrash/d-require-luminous/at-end.yaml
+++ b/qa/suites/rados/thrash/d-require-luminous/at-end.yaml
@@ -6,6 +6,7 @@ tasks:
 - exec:
     mon.a:
     - ceph osd require-osd-release luminous
+    - ceph osd pool application enable base rados || true
 # make sure osds have latest map
     - rados -p rbd bench 5 write -b 4096
 - ceph.healthy:
diff --git a/qa/suites/rados/thrash/workloads/cache-agent-big.yaml b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
index 492ab8d458d..bbfe7bf50d1 100644
--- a/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
+++ b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
@@ -7,6 +7,7 @@ tasks:
     client.0:
     - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
     - sudo ceph osd pool create base 4 4 erasure teuthologyprofile
+    - sudo ceph osd pool application enable base rados
     - sudo ceph osd pool set base min_size 2
     - sudo ceph osd pool create cache 4
     - sudo ceph osd tier add base cache
diff --git a/qa/suites/rados/thrash/workloads/cache-agent-small.yaml b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
index efa84193719..10d47356a7e 100644
--- a/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
+++ b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
@@ -7,6 +7,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create base 4
+    - sudo ceph osd pool application enable base rados
     - sudo ceph osd pool create cache 4
     - sudo ceph osd tier add base cache
     - sudo ceph osd tier cache-mode cache writeback
diff --git a/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml b/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
index 007775cee07..43497431b8d 100644
--- a/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
+++ b/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
@@ -6,6 +6,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create base 4
+    - sudo ceph osd pool application enable base rados
     - sudo ceph osd pool create cache 4
     - sudo ceph osd tier add base cache
     - sudo ceph osd tier cache-mode cache readproxy
diff --git a/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
index a568a340265..dc3385cb9ab 100644
--- a/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
+++ b/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
@@ -6,6 +6,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create base 4
+    - sudo ceph osd pool application enable base rados
     - sudo ceph osd pool create cache 4
     - sudo ceph osd tier add base cache
     - sudo ceph osd tier cache-mode cache writeback
diff --git a/qa/suites/rados/thrash/workloads/cache-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-snaps.yaml
index f4e2ffe133c..486d6dbfafa 100644
--- a/qa/suites/rados/thrash/workloads/cache-snaps.yaml
+++ b/qa/suites/rados/thrash/workloads/cache-snaps.yaml
@@ -6,6 +6,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create base 4
+    - sudo ceph osd pool application enable base rados
     - sudo ceph osd pool create cache 4
     - sudo ceph osd tier add base cache
     - sudo ceph osd tier cache-mode cache writeback
diff --git a/qa/suites/rados/thrash/workloads/cache.yaml b/qa/suites/rados/thrash/workloads/cache.yaml
index 4c5c1b6057d..d63018f0f74 100644
--- a/qa/suites/rados/thrash/workloads/cache.yaml
+++ b/qa/suites/rados/thrash/workloads/cache.yaml
@@ -6,6 +6,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create base 4
+    - sudo ceph osd pool application enable base rados
     - sudo ceph osd pool create cache 4
     - sudo ceph osd tier add base cache
     - sudo ceph osd tier cache-mode cache writeback
diff --git a/qa/suites/rbd/cli/pool/ec-data-pool.yaml b/qa/suites/rbd/cli/pool/ec-data-pool.yaml
index 32dd2ab9079..9e90369ef50 100644
--- a/qa/suites/rbd/cli/pool/ec-data-pool.yaml
+++ b/qa/suites/rbd/cli/pool/ec-data-pool.yaml
@@ -4,6 +4,7 @@ tasks:
     - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
     - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
     - sudo ceph osd pool set datapool allow_ec_overwrites true
+    - rbd pool init datapool
 
 overrides:
   thrashosds:
diff --git a/qa/suites/rbd/cli/pool/replicated-data-pool.yaml b/qa/suites/rbd/cli/pool/replicated-data-pool.yaml
index 91c8c01e201..c5647dba1c6 100644
--- a/qa/suites/rbd/cli/pool/replicated-data-pool.yaml
+++ b/qa/suites/rbd/cli/pool/replicated-data-pool.yaml
@@ -2,6 +2,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create datapool 4
+    - rbd pool init datapool
 
 overrides:
   ceph:
diff --git a/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml b/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml
index 91c8c01e201..c5647dba1c6 100644
--- a/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml
+++ b/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml
@@ -2,6 +2,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create datapool 4
+    - rbd pool init datapool
 
 overrides:
   ceph:
diff --git a/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml b/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml
index 09e8bc3f24b..cfa0fcd7025 100644
--- a/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml
+++ b/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml
@@ -17,3 +17,4 @@ tasks:
     - sudo ceph osd pool set cache hit_set_count 8
     - sudo ceph osd pool set cache hit_set_period 60
     - sudo ceph osd pool set cache target_max_objects 250
+    - rbd pool init rbd
diff --git a/qa/suites/rbd/qemu/pool/ec-data-pool.yaml b/qa/suites/rbd/qemu/pool/ec-data-pool.yaml
index 75dfc6a4553..a63ab270340 100644
--- a/qa/suites/rbd/qemu/pool/ec-data-pool.yaml
+++ b/qa/suites/rbd/qemu/pool/ec-data-pool.yaml
@@ -4,6 +4,7 @@ tasks:
     - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
     - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
     - sudo ceph osd pool set datapool allow_ec_overwrites true
+    - rbd pool init datapool
 
 overrides:
   thrashosds:
diff --git a/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml b/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml
index 91c8c01e201..c5647dba1c6 100644
--- a/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml
+++ b/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml
@@ -2,6 +2,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create datapool 4
+    - rbd pool init datapool
 
 overrides:
   ceph:
diff --git a/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml b/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml
index fa593f49685..e1512cf735e 100644
--- a/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml
+++ b/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml
@@ -11,6 +11,7 @@ tasks:
 - exec:
     client.0:
     - sudo ceph osd pool create base 4
+    - sudo ceph osd pool application enable base rados
     - sudo ceph osd pool create cache 4
     - sudo ceph osd tier add base cache
     - sudo ceph osd tier cache-mode cache writeback
diff --git a/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
index f0043afbdf1..f0e22bf82ec 100644
--- a/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
+++ b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
@@ -3,3 +3,4 @@ tasks:
     client.0:
     - ceph osd erasure-code-profile set t-profile crush-failure-domain=osd k=2 m=1
     - ceph osd pool create base-pool 4 4 erasure t-profile
+    - ceph osd pool application enable base-pool rados
diff --git a/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
index 5a1358149f7..36dc06d91b8 100644
--- a/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
+++ b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
@@ -2,3 +2,4 @@ tasks:
 - exec:
     client.0:
     - ceph osd pool create base-pool 4
+    - ceph osd pool application enable base-pool rados
diff --git a/qa/workunits/ceph-helpers-root.sh b/qa/workunits/ceph-helpers-root.sh
index a622bd5ab96..f65f591f4c8 100755
--- a/qa/workunits/ceph-helpers-root.sh
+++ b/qa/workunits/ceph-helpers-root.sh
@@ -76,6 +76,7 @@ function pool_read_write() {
     ceph osd pool create $test_pool 4 || return 1
     ceph osd pool set $test_pool size $size || return 1
     ceph osd pool set $test_pool min_size $size || return 1
+    ceph osd pool application enable $test_pool rados
 
     echo FOO > $dir/BAR
     timeout $timeout rados --pool $test_pool put BAR $dir/BAR || return 1
diff --git a/qa/workunits/ceph-helpers.sh b/qa/workunits/ceph-helpers.sh
index 47174ef3ceb..265e8a94372 100755
--- a/qa/workunits/ceph-helpers.sh
+++ b/qa/workunits/ceph-helpers.sh
@@ -419,6 +419,7 @@ function run_mon() {
         ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
         ceph osd pool create rbd $PG_NUM || return 1
         ceph osd set-backfillfull-ratio .99
+        rbd pool init rbd
     fi
 }
 
diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 4c8c9e1f319..5ac3911dc7e 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -264,6 +264,7 @@ function test_tiering_agent()
   local slow=slow_eviction
   local fast=fast_eviction
   ceph osd pool create $slow 1 1
+  ceph osd pool application enable $slow rados
   ceph osd pool create $fast 1 1
   ceph osd tier add $slow $fast
   ceph osd tier cache-mode $fast writeback
@@ -306,7 +307,9 @@ function test_tiering_1()
 {
   # tiering
   ceph osd pool create slow 2
+  ceph osd pool application enable slow rados
   ceph osd pool create slow2 2
+  ceph osd pool application enable slow2 rados
   ceph osd pool create cache 2
   ceph osd pool create cache2 2
   ceph osd tier add slow cache
@@ -392,6 +395,7 @@ function test_tiering_2()
 {
   # make sure we can't clobber snapshot state
   ceph osd pool create snap_base 2
+  ceph osd pool application enable snap_base rados
   ceph osd pool create snap_cache 2
   ceph osd pool mksnap snap_cache snapname
   expect_false ceph osd tier add snap_base snap_cache
@@ -403,6 +407,7 @@ function test_tiering_3()
 {
   # make sure we can't create snapshot on tier
   ceph osd pool create basex 2
+  ceph osd pool application enable basex rados
   ceph osd pool create cachex 2
   ceph osd tier add basex cachex
   expect_false ceph osd pool mksnap cache snapname
@@ -417,6 +422,7 @@
   ceph osd pool create eccache 2 2 erasure
   expect_false ceph osd set-require-min-compat-client bobtail
   ceph osd pool create repbase 2
+  ceph osd pool application enable repbase rados
   expect_false ceph osd tier add repbase eccache
   ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
   ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
@@ -426,6 +432,7 @@ function test_tiering_5()
 {
   # convenient add-cache command
   ceph osd pool create slow 2
+  ceph osd pool application enable slow rados
   ceph osd pool create cache3 2
   ceph osd tier add-cache slow cache3 1024000
   ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
@@ -443,6 +450,7 @@ function test_tiering_6()
 {
   # check add-cache whether work
   ceph osd pool create datapool 2
+  ceph osd pool application enable datapool rados
   ceph osd pool create cachepool 2
   ceph osd tier add-cache datapool cachepool 1024000
   ceph osd tier cache-mode cachepool writeback
@@ -460,6 +468,7 @@ function test_tiering_7()
 {
   # protection against pool removal when used as tiers
   ceph osd pool create datapool 2
+  ceph osd pool application enable datapool rados
   ceph osd pool create cachepool 2
   ceph osd tier add-cache datapool cachepool 1024000
   ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
@@ -477,6 +486,7 @@ function test_tiering_8()
   ## check health check
   ceph osd set notieragent
   ceph osd pool create datapool 2
+  ceph osd pool application enable datapool rados
   ceph osd pool create cache4 2
   ceph osd tier add-cache datapool cache4 1024000
   ceph osd tier cache-mode cache4 writeback
@@ -503,7 +513,9 @@ function test_tiering_9()
   # results in a 'pool foo is now (or already was) not a tier of bar'
   #
   ceph osd pool create basepoolA 2
+  ceph osd pool application enable basepoolA rados
   ceph osd pool create basepoolB 2
+  ceph osd pool application enable basepoolB rados
 
   poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
   poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
@@ -1581,6 +1593,7 @@ function test_mon_osd()
 
   ceph osd ls
   ceph osd pool create data 10
+  ceph osd pool application enable data rados
  ceph osd lspools | grep data
   ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
   ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
@@ -1640,6 +1653,7 @@ function test_mon_osd_pool()
   # osd pool
   #
   ceph osd pool create data 10
+  ceph osd pool application enable data rados
   ceph osd pool mksnap data datasnap
   rados -p data lssnap | grep datasnap
   ceph osd pool rmsnap data datasnap
@@ -1647,6 +1661,7 @@
 
   ceph osd pool delete data data --yes-i-really-really-mean-it
   ceph osd pool create data2 10
+  ceph osd pool application enable data2 rados
   ceph osd pool rename data2 data3
   ceph osd lspools | grep data3
   ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
@@ -1655,10 +1670,12 @@ function test_mon_osd_pool()
   ceph osd pool create replicated 12 12 replicated
   ceph osd pool create replicated 12 12 # default is replicated
   ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
+  ceph osd pool application enable replicated rados
   # should fail because the type is not the same
   expect_false ceph osd pool create replicated 12 12 erasure
   ceph osd lspools | grep replicated
   ceph osd pool create ec_test 1 1 erasure
+  ceph osd pool application enable ec_test rados
   set +e
   ceph osd metadata | grep osd_objectstore_type | grep -qc bluestore
   if [ $? -eq 0 ]; then
@@ -1681,6 +1698,7 @@ function test_mon_osd_pool_quota()
 
   # create tmp pool
   ceph osd pool create tmp-quota-pool 36
+  ceph osd pool application enable tmp-quota-pool rados
   #
   # set erroneous quotas
   #
@@ -1857,6 +1875,7 @@ function test_mon_osd_pool_set()
 {
   TEST_POOL_GETSET=pool_getset
   ceph osd pool create $TEST_POOL_GETSET 1
+  ceph osd pool application enable $TEST_POOL_GETSET rados
   wait_for_clean
   ceph osd pool get $TEST_POOL_GETSET all
 
@@ -1871,6 +1890,7 @@
   ceph osd pool set $TEST_POOL_GETSET size $old_size
 
   ceph osd pool create pool_erasure 1 1 erasure
+  ceph osd pool application enable pool_erasure rados
   wait_for_clean
   set +e
   ceph osd pool set pool_erasure size 4444 2>$TMPFILE
@@ -2058,6 +2078,7 @@ function test_mon_osd_tiered_pool_set()
 
   # this is not a tier pool
   ceph osd pool create fake-tier 2
+  ceph osd pool application enable fake-tier rados
   wait_for_clean
 
   expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
@@ -2295,6 +2316,7 @@ function test_mon_cephdf_commands()
   # RAW USED The near raw used per pool in raw total
 
   ceph osd pool create cephdf_for_test 32 32 replicated
+  ceph osd pool application enable cephdf_for_test rados
   ceph osd pool set cephdf_for_test size 2
 
   dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
diff --git a/qa/workunits/mon/rbd_snaps_ops.sh b/qa/workunits/mon/rbd_snaps_ops.sh
index a11172d1014..3ff7e760337 100755
--- a/qa/workunits/mon/rbd_snaps_ops.sh
+++ b/qa/workunits/mon/rbd_snaps_ops.sh
@@ -23,11 +23,13 @@ expect 'ceph osd pool create test 256 256' 0
 expect 'ceph osd pool mksnap test snapshot' 0
 expect 'ceph osd pool rmsnap test snapshot' 0
 
+expect 'rbd --pool=test pool init' 0
 expect 'rbd --pool=test --rbd_validate_pool=false create --size=102400 image' 0
 expect 'rbd --pool=test snap create image@snapshot' 22
 
 expect 'ceph osd pool delete test test --yes-i-really-really-mean-it' 0
 expect 'ceph osd pool create test 256 256' 0
+expect 'rbd --pool=test pool init' 0
 expect 'rbd --pool=test create --size=102400 image' 0
 expect 'rbd --pool=test snap create image@snapshot' 0
 expect 'rbd --pool=test snap ls image' 0
diff --git a/qa/workunits/rados/test_alloc_hint.sh b/qa/workunits/rados/test_alloc_hint.sh
index 85be5d75c1f..3e246949df4 100755
--- a/qa/workunits/rados/test_alloc_hint.sh
+++ b/qa/workunits/rados/test_alloc_hint.sh
@@ -109,6 +109,7 @@ setup_osd_data
 POOL="alloc_hint-rep"
 ceph osd pool create "${POOL}" "${NUM_PG}"
 ceph osd pool set "${POOL}" size "${NUM_OSDS}"
+ceph osd pool application enable "${POOL}" rados
 
 OBJ="foo"
 setup_pgid "${POOL}" "${OBJ}"
@@ -156,6 +157,7 @@ POOL="alloc_hint-ec"
 ceph osd erasure-code-profile set "${PROFILE}" k=2 m=1 crush-failure-domain=osd
 ceph osd erasure-code-profile get "${PROFILE}" # just so it's logged
 ceph osd pool create "${POOL}" "${NUM_PG}" "${NUM_PGP}" erasure "${PROFILE}"
+ceph osd pool application enable "${POOL}" rados
 
 OBJ="baz"
 setup_pgid "${POOL}" "${OBJ}"
diff --git a/qa/workunits/rados/test_cache_pool.sh b/qa/workunits/rados/test_cache_pool.sh
index 308cb3c03a1..5975893600a 100755
--- a/qa/workunits/rados/test_cache_pool.sh
+++ b/qa/workunits/rados/test_cache_pool.sh
@@ -10,6 +10,7 @@ expect_false()
 
 # create pools, set up tier relationship
 ceph osd pool create base_pool 2
+ceph osd pool application enable base_pool rados
 ceph osd pool create partial_wrong 2
 ceph osd pool create wrong_cache 2
 ceph osd tier add base_pool partial_wrong
@@ -89,6 +90,7 @@ ceph osd pool delete partial_wrong partial_wrong --yes-i-really-really-mean-it
 
 ## set of base, cache
 ceph osd pool create base 8
+ceph osd pool application enable base rados
 ceph osd pool create cache 8
 ceph osd tier add base cache
 
diff --git a/qa/workunits/rados/test_pool_quota.sh b/qa/workunits/rados/test_pool_quota.sh
index 71a9e52fe0c..0eacefc64b1 100755
--- a/qa/workunits/rados/test_pool_quota.sh
+++ b/qa/workunits/rados/test_pool_quota.sh
@@ -5,6 +5,7 @@ p=`uuidgen`
 # objects
 ceph osd pool create $p 12
 ceph osd pool set-quota $p max_objects 10
+ceph osd pool application enable $p rados
 
 for f in `seq 1 10` ; do
   rados -p $p put obj$f /etc/passwd
@@ -41,6 +42,7 @@ rados -p $p put three /etc/passwd
 
 pp=`uuidgen`
 ceph osd pool create $pp 12
+ceph osd pool application enable $pp rados
 
 # set objects quota
 ceph osd pool set-quota $pp max_objects 10
diff --git a/qa/workunits/rbd/cli_generic.sh b/qa/workunits/rbd/cli_generic.sh
index 24f2439a590..1c839d23428 100755
--- a/qa/workunits/rbd/cli_generic.sh
+++ b/qa/workunits/rbd/cli_generic.sh
@@ -290,6 +290,7 @@ test_pool_image_args() {
 
     ceph osd pool delete test test --yes-i-really-really-mean-it || true
     ceph osd pool create test 100
+    rbd pool init test
     truncate -s 1 /tmp/empty /tmp/empty@snap
 
     rbd ls | wc -l | grep 0
diff --git a/qa/workunits/rbd/krbd_data_pool.sh b/qa/workunits/rbd/krbd_data_pool.sh
index 9c37ff2f81c..7d728821339 100755
--- a/qa/workunits/rbd/krbd_data_pool.sh
+++ b/qa/workunits/rbd/krbd_data_pool.sh
@@ -99,11 +99,15 @@ function get_num_clones() {
 }
 
 ceph osd pool create repdata 24 24
+rbd pool init repdata
 ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
 ceph osd pool create ecdata 24 24 erasure teuthologyprofile
+rbd pool init ecdata
 ceph osd pool set ecdata allow_ec_overwrites true
 ceph osd pool create rbdnonzero 24 24
+rbd pool init rbdnonzero
 ceph osd pool create clonesonly 24 24
+rbd pool init clonesonly
 
 for pool in rbd rbdnonzero; do
     rbd create --size 200 --image-format 1 $pool/img0
diff --git a/qa/workunits/rbd/permissions.sh b/qa/workunits/rbd/permissions.sh
index 643b9740e63..a435a67bb92 100755
--- a/qa/workunits/rbd/permissions.sh
+++ b/qa/workunits/rbd/permissions.sh
@@ -4,7 +4,9 @@ IMAGE_FEATURES="layering,exclusive-lock,object-map,fast-diff"
 
 create_pools() {
     ceph osd pool create images 100
+    rbd pool init images
     ceph osd pool create volumes 100
+    rbd pool init volumes
 }
 
 delete_pools() {
diff --git a/qa/workunits/rbd/verify_pool.sh b/qa/workunits/rbd/verify_pool.sh
index 9033343f46f..48d069160d3 100755
--- a/qa/workunits/rbd/verify_pool.sh
+++ b/qa/workunits/rbd/verify_pool.sh
@@ -11,6 +11,7 @@ set_up () {
     tear_down
     ceph osd pool create $POOL_NAME $PG_NUM
     ceph osd pool mksnap $POOL_NAME snap
+    rbd pool init images
 }
 
 trap tear_down EXIT HUP INT
-- 
2.39.5