From 7294e8c4df6df9d0898f82bb6e0839ed98149310 Mon Sep 17 00:00:00 2001
From: John Spray
Date: Tue, 27 May 2014 11:04:43 +0100
Subject: [PATCH] test/qa: update for MDSMonitor changes

Accommodate changes:
 * data and metadata pools no longer exist by default
 * filesystem-using tests must use `fs new` to create the filesystem
   first.

Signed-off-by: John Spray
---
 qa/workunits/cephtool/test.sh          | 106 +++++++++++++++----------
 src/test/ceph-disk.sh                  |   7 +-
 src/test/cli/osdmaptool/clobber.t      |   8 +-
 src/test/cli/osdmaptool/create-print.t |  10 +--
 src/test/cli/osdmaptool/create-racks.t |  10 +--
 src/test/cli/osdmaptool/pool.t         |  10 +--
 src/test/cli/osdmaptool/test-map-pgs.t |  10 +--
 src/test/mon/misc.sh                   |  12 +--
 src/test/omap_bench.h                  |   2 +-
 src/test/osd/TestOSDMap.cc             |  14 +++-
 src/test/osd/TestRados.cc              |   2 +-
 src/test/pybind/test_ceph_argparse.py  |  13 +++
 src/test/pybind/test_rados.py          |   2 -
 src/test/test_backfill.sh              |   6 +-
 src/test/test_lost.sh                  |  14 ++--
 src/test/test_missing_unfound.sh       |   6 +-
 src/test/test_rw.sh                    |  12 +--
 src/test/test_split.sh                 |   8 +-
 src/test/test_unfound.sh               |   6 +-
 19 files changed, 150 insertions(+), 108 deletions(-)

diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 3c1387af2b3a9..39f8b7f242912 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -117,11 +117,13 @@ function test_mon_injectargs_SI()
 function test_tiering()
 {
   # tiering
+  ceph osd pool create slow 2
+  ceph osd pool create slow2 2
   ceph osd pool create cache 2
   ceph osd pool create cache2 2
-  ceph osd tier add data cache
-  ceph osd tier add data cache2
-  expect_false ceph osd tier add metadata cache
+  ceph osd tier add slow cache
+  ceph osd tier add slow cache2
+  expect_false ceph osd tier add slow2 cache
   # test some state transitions
   ceph osd tier cache-mode cache writeback
   ceph osd tier cache-mode cache forward
@@ -158,19 +160,19 @@ function test_tiering()
   done
   expect_false ceph osd pool set cache pg_num 4
   ceph osd tier cache-mode cache none
-  ceph osd tier set-overlay data cache
-  expect_false ceph osd tier set-overlay data cache2
-  expect_false ceph osd tier remove data cache
-  ceph osd tier remove-overlay data
-  ceph osd tier set-overlay data cache2
-  ceph osd tier remove-overlay data
-  ceph osd tier remove data cache
-  ceph osd tier add metadata cache
-  expect_false ceph osd tier set-overlay data cache
-  ceph osd tier set-overlay metadata cache
-  ceph osd tier remove-overlay metadata
-  ceph osd tier remove metadata cache
-  ceph osd tier remove data cache2
+  ceph osd tier set-overlay slow cache
+  expect_false ceph osd tier set-overlay slow cache2
+  expect_false ceph osd tier remove slow cache
+  ceph osd tier remove-overlay slow
+  ceph osd tier set-overlay slow cache2
+  ceph osd tier remove-overlay slow
+  ceph osd tier remove slow cache
+  ceph osd tier add slow2 cache
+  expect_false ceph osd tier set-overlay slow cache
+  ceph osd tier set-overlay slow2 cache
+  ceph osd tier remove-overlay slow2
+  ceph osd tier remove slow2 cache
+  ceph osd tier remove slow cache2
 
   # make sure a non-empty pool fails
   rados -p cache2 put /etc/passwd /etc/passwd
@@ -178,18 +180,18 @@ function test_tiering()
     echo waiting for pg stats to flush
     sleep 2
   done
-  expect_false ceph osd tier add data cache2
-  ceph osd tier add data cache2 --force-nonempty
-  ceph osd tier remove data cache2
+  expect_false ceph osd tier add slow cache2
+  ceph osd tier add slow cache2 --force-nonempty
+  ceph osd tier remove slow cache2
 
   ceph osd pool delete cache cache --yes-i-really-really-mean-it
   ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
 
   # convenient add-cache command
   ceph osd pool create cache3 2
-  ceph osd tier add-cache data cache3 1024000
+  ceph osd tier add-cache slow cache3 1024000
   ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
-  ceph osd tier remove data cache3
+  ceph osd tier remove slow cache3
   ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
 
   # protection against pool removal when used as tiers
@@ -291,6 +293,10 @@ function test_mon_misc()
 
 function test_mon_mds()
 {
+  ceph osd pool create fs_data 10
+  ceph osd pool create fs_metadata 10
+  ceph fs new default fs_metadata fs_data
+
   ceph mds cluster_down
   ceph mds cluster_up
 
@@ -308,14 +314,16 @@ function test_mon_mds()
   ceph mds setmap -i $mdsmapfile $epoch
   rm $mdsmapfile
 
-  ceph mds newfs 0 1 --yes-i-really-mean-it
   ceph osd pool create data2 10
-  poolnum=$(ceph osd dump | grep 'pool.*data2' | awk '{print $2;}')
-  ceph mds add_data_pool $poolnum
-  ceph mds add_data_pool rbd
-  ceph mds remove_data_pool $poolnum
-  ceph mds remove_data_pool rbd
+  ceph osd pool create data3 10
+  data2_pool=$(ceph osd dump | grep 'pool.*data2' | awk '{print $2;}')
+  data3_pool=$(ceph osd dump | grep 'pool.*data3' | awk '{print $2;}')
+  ceph mds add_data_pool $data2_pool
+  ceph mds add_data_pool $data3_pool
+  ceph mds remove_data_pool $data2_pool
+  ceph mds remove_data_pool $data3_pool
   ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
+  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
   ceph mds set_max_mds 4
   ceph mds set_max_mds 3
   ceph mds set max_mds 4
@@ -364,6 +372,10 @@ function test_mon_mds()
   # ceph mds rmfailed
   # ceph mds set_state
   # ceph mds stop
+
+  ceph fs rm default --yes-i-really-mean-it
+  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
+  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
 }
 
 function test_mon_mon()
@@ -490,8 +502,10 @@ function test_mon_osd()
   ceph osd rm $id
   ceph osd ls
 
+  ceph osd pool create data 10
   ceph osd lspools | grep data
   ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
+  ceph osd pool delete data data --yes-i-really-really-mean-it
 
   ceph osd pause
   ceph osd dump | grep 'flags pauserd,pausewr'
@@ -507,9 +521,11 @@ function test_mon_osd_pool()
   #
   # osd pool
   #
+  ceph osd pool create data 10
   ceph osd pool mksnap data datasnap
   rados -p data lssnap | grep datasnap
   ceph osd pool rmsnap data datasnap
+  ceph osd pool delete data data --yes-i-really-really-mean-it
 
   ceph osd pool create data2 10
   ceph osd pool rename data2 data3
@@ -599,16 +615,18 @@ function test_mon_pg()
 
 function test_mon_osd_pool_set()
 {
+  TEST_POOL_GETSET=pool_getset
+  ceph osd pool create $TEST_POOL_GETSET 10
   for s in pg_num pgp_num size min_size crash_replay_interval crush_ruleset; do
-    ceph osd pool get data $s
+    ceph osd pool get $TEST_POOL_GETSET $s
   done
 
-  old_size=$(ceph osd pool get data size | sed -e 's/size: //')
+  old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
   (( new_size = old_size + 1 ))
-  ceph osd pool set data size $new_size
-  ceph osd pool get data size | grep "size: $new_size"
-  ceph osd pool set data size $old_size
+  ceph osd pool set $TEST_POOL_GETSET size $new_size
+  ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
+  ceph osd pool set $TEST_POOL_GETSET size $old_size
 
   ceph osd pool create pool_erasure 12 12 erasure
   set +e
@@ -617,17 +635,19 @@ function test_mon_osd_pool_set()
   set -e
 
   auid=5555
-  ceph osd pool set data auid $auid
-  ceph osd pool get data auid | grep $auid
-  ceph --format=xml osd pool get data auid | grep $auid
-  ceph osd pool set data auid 0
-
-  ceph osd pool set data hashpspool true
-  ceph osd pool set data hashpspool false
-  ceph osd pool set data hashpspool 0
-  ceph osd pool set data hashpspool 1
-  expect_false ceph osd pool set data hashpspool asdf
-  expect_false ceph osd pool set data hashpspool 2
+  ceph osd pool set $TEST_POOL_GETSET auid $auid
+  ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
+  ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
+  ceph osd pool set $TEST_POOL_GETSET auid 0
+
+  ceph osd pool set $TEST_POOL_GETSET hashpspool true
+  ceph osd pool set $TEST_POOL_GETSET hashpspool false
+  ceph osd pool set $TEST_POOL_GETSET hashpspool 0
+  ceph osd pool set $TEST_POOL_GETSET hashpspool 1
+  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool asdf
+  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 2
+
+  ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
 
   ceph osd pool set rbd hit_set_type explicit_hash
   ceph osd pool get rbd hit_set_type | grep "hit_set_type: explicit_hash"
diff --git a/src/test/ceph-disk.sh b/src/test/ceph-disk.sh
index 91e99ec4c9c38..cadf43cf92117 100755
--- a/src/test/ceph-disk.sh
+++ b/src/test/ceph-disk.sh
@@ -21,6 +21,7 @@ export PATH=:$PATH # make sure program from sources are prefered
 DIR=test-ceph-disk
 MON_ID=a
 MONA=127.0.0.1:7451
+TEST_POOL=rbd
 FSID=$(uuidgen)
 export CEPH_CONF=/dev/null
 export CEPH_ARGS="--fsid $FSID"
@@ -192,13 +193,13 @@ function test_activate_dir() {
         activate \
         --mark-init=none \
         $osd_data || return 1
-    $timeout $TIMEOUT ./ceph osd pool set data size 1 || return 1
+    $timeout $TIMEOUT ./ceph osd pool set $TEST_POOL size 1 || return 1
     local id=$($cat $osd_data/whoami)
    local weight=1
     ./ceph osd crush add osd.$id $weight root=default host=localhost || return 1
     echo FOO > $DIR/BAR
-    $timeout $TIMEOUT ./rados --pool data put BAR $DIR/BAR || return 1
-    $timeout $TIMEOUT ./rados --pool data get BAR $DIR/BAR.copy || return 1
+    $timeout $TIMEOUT ./rados --pool $TEST_POOL put BAR $DIR/BAR || return 1
+    $timeout $TIMEOUT ./rados --pool $TEST_POOL get BAR $DIR/BAR.copy || return 1
     $diff $DIR/BAR $DIR/BAR.copy || return 1
 }
diff --git a/src/test/cli/osdmaptool/clobber.t b/src/test/cli/osdmaptool/clobber.t
index 5f37f0348efdd..820a47b573c59 100644
--- a/src/test/cli/osdmaptool/clobber.t
+++ b/src/test/cli/osdmaptool/clobber.t
@@ -20,9 +20,7 @@
   modified \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re)
   flags 
 
-  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
-  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
-  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
+  pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
 
   max_osd 3
 
@@ -43,9 +41,7 @@
   modified \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re)
   flags 
 
-  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
-  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 flags hashpspool stripe_width 0
-  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 flags hashpspool stripe_width 0
+  pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 flags hashpspool stripe_width 0
 
   max_osd 1
diff --git a/src/test/cli/osdmaptool/create-print.t b/src/test/cli/osdmaptool/create-print.t
index 9ebd27470b8a0..6389d800d235e 100644
--- a/src/test/cli/osdmaptool/create-print.t
+++ b/src/test/cli/osdmaptool/create-print.t
@@ -75,9 +75,7 @@
   modified \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re)
   flags 
 
-  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
-  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
-  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
+  pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
 
   max_osd 3
 
@@ -86,19 +84,19 @@
   osdmaptool: writing epoch 1 to myosdmap
   $ osdmaptool --print myosdmap | grep 'pool 0'
   osdmaptool: osdmap file 'myosdmap'
-  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 66 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+  pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 66 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
   $ osdmaptool --clobber --createsimple 3 --osd_pool_default_crush_rule 55 myosdmap 2>&1 >/dev/null | sed -e 's/^.* 0 osd_pool_//'
   osdmaptool: osdmap file 'myosdmap'
   default_crush_rule is deprecated use osd_pool_default_crush_replicated_ruleset instead
   default_crush_rule = 55 overrides osd_pool_default_crush_replicated_ruleset = 0
   $ osdmaptool --print myosdmap | grep 'pool 0'
   osdmaptool: osdmap file 'myosdmap'
-  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+  pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
   $ osdmaptool --clobber --createsimple 3 --osd_pool_default_crush_replicated_ruleset 66 --osd_pool_default_crush_rule 55 myosdmap 2>&1 >/dev/null | sed -e 's/^.* 0 osd_pool_//'
   osdmaptool: osdmap file 'myosdmap'
   default_crush_rule is deprecated use osd_pool_default_crush_replicated_ruleset instead
   default_crush_rule = 55 overrides osd_pool_default_crush_replicated_ruleset = 66
   $ osdmaptool --print myosdmap | grep 'pool 0'
   osdmaptool: osdmap file 'myosdmap'
-  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+  pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
   $ rm -f myosdmap
diff --git a/src/test/cli/osdmaptool/create-racks.t b/src/test/cli/osdmaptool/create-racks.t
index 33fa9eefa992c..38a20a560281b 100644
--- a/src/test/cli/osdmaptool/create-racks.t
+++ b/src/test/cli/osdmaptool/create-racks.t
@@ -788,9 +788,7 @@
   modified \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re)
   flags 
 
-  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
-  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
-  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
+  pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
 
   max_osd 239
 
@@ -800,19 +798,19 @@
   osdmaptool: writing epoch 1 to om
   $ osdmaptool --print om | grep 'pool 0'
   osdmaptool: osdmap file 'om'
-  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+  pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
   $ osdmaptool --clobber --create-from-conf --osd_pool_default_crush_rule 55 om -c $TESTDIR/ceph.conf.withracks 2>&1 >/dev/null | sed -e 's/^.* 0 osd_pool_//'
   osdmaptool: osdmap file 'om'
   default_crush_rule is deprecated use osd_pool_default_crush_replicated_ruleset instead
   default_crush_rule = 55 overrides osd_pool_default_crush_replicated_ruleset = 0
   $ osdmaptool --print om | grep 'pool 0'
   osdmaptool: osdmap file 'om'
-  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+  pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
   $ osdmaptool --clobber --create-from-conf --osd_pool_default_crush_replicated_ruleset 66 --osd_pool_default_crush_rule 55 om -c $TESTDIR/ceph.conf.withracks 2>&1 >/dev/null | sed -e 's/^.* 0 osd_pool_//'
   osdmaptool: osdmap file 'om'
   default_crush_rule is deprecated use osd_pool_default_crush_replicated_ruleset instead
   default_crush_rule = 55 overrides osd_pool_default_crush_replicated_ruleset = 66
   $ osdmaptool --print om | grep 'pool 0'
   osdmaptool: osdmap file 'om'
-  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+  pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
   $ rm -f om
diff --git a/src/test/cli/osdmaptool/pool.t b/src/test/cli/osdmaptool/pool.t
index 0adb24088cefc..7a74c51dcf3e5 100644
--- a/src/test/cli/osdmaptool/pool.t
+++ b/src/test/cli/osdmaptool/pool.t
@@ -18,9 +18,9 @@
   There is no pool 123
   [1]
 
-  $ osdmaptool myosdmap --test-map-object foo --pool 2
+  $ osdmaptool myosdmap --test-map-object foo --pool 0
   osdmaptool: osdmap file 'myosdmap'
-  object 'foo' \-\> 2\..* (re)
+  object 'foo' \-\> 0\..* (re)
 
   $ osdmaptool myosdmap --test-map-object foo
   osdmaptool: osdmap file 'myosdmap'
@@ -43,12 +43,10 @@
   There is no pool 123
   [1]
 
-  $ osdmaptool myosdmap --mark-up-in --test-map-pgs --pool 2 | grep pool
+  $ osdmaptool myosdmap --mark-up-in --test-map-pgs --pool 0 | grep pool
   osdmaptool: osdmap file 'myosdmap'
-  pool 2 pg_num .* (re)
+  pool 0 pg_num .* (re)
 
   $ osdmaptool myosdmap --mark-up-in --test-map-pgs | grep pool
   osdmaptool: osdmap file 'myosdmap'
   pool 0 pg_num .* (re)
-  pool 1 pg_num .* (re)
-  pool 2 pg_num .* (re)
diff --git a/src/test/cli/osdmaptool/test-map-pgs.t b/src/test/cli/osdmaptool/test-map-pgs.t
index 3496a86dff47c..13b8c5c090053 100644
--- a/src/test/cli/osdmaptool/test-map-pgs.t
+++ b/src/test/cli/osdmaptool/test-map-pgs.t
@@ -1,5 +1,5 @@
   $ NUM_OSDS=500
-  $ POOL_COUNT=3 # data + metadata + rbd
+  $ POOL_COUNT=1 # rbd only
   $ SIZE=3
   $ PG_BITS=4
   #
@@ -21,11 +21,9 @@
   $ PG_NUM=$(($NUM_OSDS << $PG_BITS))
   $ grep "pg_num $PG_NUM" "$OUT" || cat $OUT
   pool 0 pg_num 8000
-  pool 1 pg_num 8000
-  pool 2 pg_num 8000
   $ TOTAL=$((POOL_COUNT * $PG_NUM))
   $ grep -P "size $SIZE\t$TOTAL" $OUT || cat $OUT
-  size 3\t24000 (esc)
+  size 3\t8000 (esc)
   $ STATS_CRUSH=$(grep '^ avg ' "$OUT")
   #
   # --test-map-pgs --test-random is expected to change nothing regarding the totals
   #
@@ -35,11 +33,9 @@
   $ PG_NUM=$(($NUM_OSDS << $PG_BITS))
   $ grep "pg_num $PG_NUM" "$OUT" || cat $OUT
   pool 0 pg_num 8000
-  pool 1 pg_num 8000
-  pool 2 pg_num 8000
   $ TOTAL=$((POOL_COUNT * $PG_NUM))
   $ grep -P "size $SIZE\t$TOTAL" $OUT || cat $OUT
-  size 3\t24000 (esc)
+  size 3\t8000 (esc)
   $ STATS_RANDOM=$(grep '^ avg ' "$OUT")
   # it is almost impossible to get the same stats with random and crush
   # if they are, it most probably means something went wrong somewhere
diff --git a/src/test/mon/misc.sh b/src/test/mon/misc.sh
index f481a7202d179..1c8caa0a7a41f 100755
--- a/src/test/mon/misc.sh
+++ b/src/test/mon/misc.sh
@@ -35,19 +35,21 @@ function run() {
     teardown $dir || return 1
 }
 
+TEST_POOL=rbd
+
 function TEST_osd_pool_get_set() {
     local dir=$1
 
     ./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
-    ./ceph osd pool set data hashpspool 0 || return 1
+    ./ceph osd pool set $TEST_POOL hashpspool 0 || return 1
     ! ./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
-    ./ceph osd pool set data hashpspool 1 || return 1
+    ./ceph osd pool set $TEST_POOL hashpspool 1 || return 1
     ./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
-    ./ceph osd pool set data hashpspool false || return 1
+    ./ceph osd pool set $TEST_POOL hashpspool false || return 1
     ! ./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
-    ./ceph osd pool set data hashpspool false || return 1
+    ./ceph osd pool set $TEST_POOL hashpspool false || return 1
     # check that setting false twice does not toggle to true (bug)
     ! ./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
-    ./ceph osd pool set data hashpspool true || return 1
+    ./ceph osd pool set $TEST_POOL hashpspool true || return 1
     ./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
 }
diff --git a/src/test/omap_bench.h b/src/test/omap_bench.h
index 12b34f902ae9d..d764a5d9ba35a 100644
--- a/src/test/omap_bench.h
+++ b/src/test/omap_bench.h
@@ -115,7 +115,7 @@ public:
     data_lock("OmapBench::data_lock"),
     busythreads_count(0),
     comp(NULL), safe(aio_is_safe),
-    pool_name("data"),
+    pool_name("rbd"),
     rados_id("admin"),
     prefix(rados_id+".obj."),
     threads(3), objects(100), entries_per_omap(10), key_size(10),
diff --git a/src/test/osd/TestOSDMap.cc b/src/test/osd/TestOSDMap.cc
index 0ff12c80be721..274e17c019328 100644
--- a/src/test/osd/TestOSDMap.cc
+++ b/src/test/osd/TestOSDMap.cc
@@ -50,13 +50,23 @@ public:
     }
     osdmap.apply_incremental(pending_inc);
 
-    // kludge to get an erasure coding rule and pool
+    // Create an EC ruleset and a pool using it
     int r = osdmap.crush->add_simple_ruleset("erasure", "default", "osd", "indep", pg_pool_t::TYPE_ERASURE, &cerr);
-    pg_pool_t *p = (pg_pool_t *)osdmap.get_pg_pool(2);
+
+    OSDMap::Incremental new_pool_inc(osdmap.get_epoch() + 1);
+    new_pool_inc.new_pool_max = osdmap.get_pool_max();
+    new_pool_inc.fsid = osdmap.get_fsid();
+    pg_pool_t empty;
+    uint64_t pool_id = ++new_pool_inc.new_pool_max;
+    pg_pool_t *p = new_pool_inc.get_new_pool(pool_id, &empty);
+    p->size = 3;
+    p->set_pg_num(64);
+    p->set_pgp_num(64);
     p->type = pg_pool_t::TYPE_ERASURE;
     p->crush_ruleset = r;
+    osdmap.apply_incremental(new_pool_inc);
   }
   unsigned int get_num_osds() { return num_osds; }
diff --git a/src/test/osd/TestRados.cc b/src/test/osd/TestRados.cc
index b61ceb3ddfdbd..01a465f9639c2 100644
--- a/src/test/osd/TestRados.cc
+++ b/src/test/osd/TestRados.cc
@@ -261,7 +261,7 @@ int main(int argc, char **argv)
   };
 
   map<TestOpType, unsigned int> op_weights;
-  string pool_name = "data";
+  string pool_name = "rbd";
   bool ec_pool = false;
   bool no_omap = false;
diff --git a/src/test/pybind/test_ceph_argparse.py b/src/test/pybind/test_ceph_argparse.py
index ddf040972c037..a16f72e434609 100755
--- a/src/test/pybind/test_ceph_argparse.py
+++ b/src/test/pybind/test_ceph_argparse.py
@@ -474,6 +474,19 @@ class TestMDS(TestArgparse):
                                              '--yes-i-really-mean-it']))
 
 
+class TestFS(TestArgparse):
+    def test_fs_new(self):
+        self.assert_valid_command(['fs', 'new', 'default', 'metadata', 'data'])
+
+    def test_fs_rm(self):
+        self.assert_valid_command(['fs', 'rm', 'default'])
+        self.assert_valid_command(['fs', 'rm', 'default', '--yes-i-really-mean-it'])
+        assert_equal({}, validate_command(sigdict, ['fs', 'rm', 'default', '--yes-i-really-mean-it', 'toomany']))
+
+    def test_fs_ls(self):
+        self.assert_valid_command(['fs', 'ls'])
+        assert_equal({}, validate_command(sigdict, ['fs', 'ls', 'toomany']))
+
 class TestMon(TestArgparse):
 
     def test_dump(self):
diff --git a/src/test/pybind/test_rados.py b/src/test/pybind/test_rados.py
index 7efed7dfc967c..87a3ccab8e89a 100644
--- a/src/test/pybind/test_rados.py
+++ b/src/test/pybind/test_rados.py
@@ -63,8 +63,6 @@ class TestRados(object):
 
     def list_non_default_pools(self):
         pools = self.rados.list_pools()
-        pools.remove('data')
-        pools.remove('metadata')
         pools.remove('rbd')
         return set(pools)
diff --git a/src/test/test_backfill.sh b/src/test/test_backfill.sh
index 9b187b7839be4..3565724f4738a 100755
--- a/src/test/test_backfill.sh
+++ b/src/test/test_backfill.sh
@@ -1,9 +1,11 @@
 #!/bin/sh
 
+TEST_POOL=rbd
+
 ./stop.sh
 CEPH_NUM_OSD=3 ./vstart.sh -d -n -x -o 'osd min pg log entries = 5'
-./rados -p data bench 15 write -b 4096
+./rados -p $TEST_POOL bench 15 write -b 4096
 ./ceph osd out 0
 ./init-ceph stop osd.0
 ./ceph osd down 0
-./rados -p data bench 600 write -b 4096
+./rados -p $TEST_POOL bench 600 write -b 4096
diff --git a/src/test/test_lost.sh b/src/test/test_lost.sh
index 2cdd991dc17d6..eec7d9ac69c1f 100755
--- a/src/test/test_lost.sh
+++ b/src/test/test_lost.sh
@@ -7,6 +7,8 @@
 # Includes
 source "`dirname $0`/test_common.sh"
 
+TEST_POOL=rbd
+
 # Functions
 setup() {
     export CEPH_NUM_OSD=$1
@@ -21,13 +23,13 @@ setup() {
 
 recovery1_impl() {
     # Write lots and lots of objects
-    write_objects 1 1 200 4000 data
+    write_objects 1 1 200 4000 $TEST_POOL
 
     # Take down osd1
     stop_osd 1
 
     # Continue writing a lot of objects
-    write_objects 2 2 200 4000 data
+    write_objects 2 2 200 4000 $TEST_POOL
 
     # Bring up osd1
     restart_osd 1
@@ -66,13 +68,13 @@ lost1_impl() {
     try_to_fetch_unfound=$1
 
     # Write lots and lots of objects
-    write_objects 1 1 20 8000 data
+    write_objects 1 1 20 8000 $TEST_POOL
 
     # Take down osd1
     stop_osd 1
 
     # Continue writing a lot of objects
-    write_objects 2 2 20 8000 data
+    write_objects 2 2 20 8000 $TEST_POOL
 
     # Bring up osd1
     restart_osd 1
@@ -94,7 +96,7 @@ lost1_impl() {
     # verify we get woken to an error when it's declared lost.
     echo "trying to get one of the unfound objects"
     (
-    ./rados -c ./ceph.conf -p data get obj02 $TEMPDIR/obj02 &&\
+    ./rados -c ./ceph.conf -p $TEST_POOL get obj02 $TEMPDIR/obj02 &&\
       die "expected radostool error"
     ) &
     fi
@@ -108,7 +110,7 @@ lost1_impl() {
 
     # Reading from a lost object gives back an error code.
     # TODO: check error code
-    ./rados -c ./ceph.conf -p data get obj01 $TEMPDIR/obj01 &&\
+    ./rados -c ./ceph.conf -p $TEST_POOL get obj01 $TEMPDIR/obj01 &&\
       die "expected radostool error"
 
     if [ "$try_to_fetch_unfound" -eq 1 ]; then
diff --git a/src/test/test_missing_unfound.sh b/src/test/test_missing_unfound.sh
index 89949e98485a6..d5db1e42d847b 100755
--- a/src/test/test_missing_unfound.sh
+++ b/src/test/test_missing_unfound.sh
@@ -2,7 +2,9 @@
 
 CEPH_NUM_OSD=3 ./vstart.sh -d -n -x -o 'osd recovery max active = 1'
 
-./ceph -c ./ceph.conf osd pool set data size 3
+TEST_POOL=rbd
+
+./ceph -c ./ceph.conf osd pool set $TEST_POOL size 3
 
 sleep 20
 
@@ -11,7 +13,7 @@
 
 for f in `seq 1 100`
 do
-        ./rados -c ./ceph.conf -p data put test_$f /etc/passwd
+        ./rados -c ./ceph.conf -p $TEST_POOL put test_$f /etc/passwd
 done
 
 # zap some objects on both replicas
diff --git a/src/test/test_rw.sh b/src/test/test_rw.sh
index 0cfcec85557da..37e8ebd99283b 100755
--- a/src/test/test_rw.sh
+++ b/src/test/test_rw.sh
@@ -7,9 +7,11 @@
 # Includes
 source "`dirname $0`/test_common.sh"
 
+TEST_POOL=rbd
+
 # Functions
 my_write_objects() {
-    write_objects $1 $2 10 1000000 data
+    write_objects $1 $2 10 1000000 $TEST_POOL
 }
 
 setup() {
@@ -22,16 +24,16 @@ setup() {
 }
 
 read_write_1_impl() {
-    write_objects 1 2 100 8192 data
+    write_objects 1 2 100 8192 $TEST_POOL
     read_objects 2 100 8192
 
-    write_objects 3 3 10 81920 data
+    write_objects 3 3 10 81920 $TEST_POOL
     read_objects 3 10 81920
 
-    write_objects 4 4 100 4 data
+    write_objects 4 4 100 4 $TEST_POOL
     read_objects 4 100 4
 
-    write_objects 1 2 100 8192 data
+    write_objects 1 2 100 8192 $TEST_POOL
     read_objects 2 100 8192
 
     # success
diff --git a/src/test/test_split.sh b/src/test/test_split.sh
index 25223ec6bd89d..4fdedd5cf68fc 100755
--- a/src/test/test_split.sh
+++ b/src/test/test_split.sh
@@ -7,9 +7,11 @@
 # Includes
 source "`dirname $0`/test_common.sh"
 
+TEST_POOL=rbd
+
 # Constants
 my_write_objects() {
-    write_objects $1 $2 10 1000000 data
+    write_objects $1 $2 10 1000000 $TEST_POOL
 }
 
 setup() {
@@ -22,7 +24,7 @@ setup() {
 }
 
 get_pgp_num() {
-    ./ceph -c ./ceph.conf osd pool get data pgp_num > $TEMPDIR/pgp_num
+    ./ceph -c ./ceph.conf osd pool get $TEST_POOL pgp_num > $TEMPDIR/pgp_num
     [ $? -eq 0 ] || die "failed to get pgp_num"
     PGP_NUM=`grep PGP_NUM $TEMPDIR/pgp_num | sed 's/.*PGP_NUM:\([ 0123456789]*\).*$/\1/'`
 }
@@ -37,7 +39,7 @@ split1_impl() {
     # Double the number of PGs
     PGP_NUM=$((PGP_NUM*2))
     echo "doubling PGP_NUM to $PGP_NUM..."
-    ./ceph -c ./ceph.conf osd pool set data pgp_num $PGP_NUM
+    ./ceph -c ./ceph.conf osd pool set $TEST_POOL pgp_num $PGP_NUM
 
     sleep 30
diff --git a/src/test/test_unfound.sh b/src/test/test_unfound.sh
index 4f0c9c5a96803..35e8483fea073 100755
--- a/src/test/test_unfound.sh
+++ b/src/test/test_unfound.sh
@@ -7,9 +7,11 @@
 # Includes
 source "`dirname $0`/test_common.sh"
 
+TEST_POOL=rbd
+
 # Functions
 my_write_objects() {
-    write_objects $1 $2 10 1000000 data
+    write_objects $1 $2 10 1000000 $TEST_POOL
 }
 
 setup() {
@@ -49,7 +51,7 @@ osd_resurrection_1_impl() {
     echo "Got unfound objects."
 
     (
-    ./rados -c ./ceph.conf -p data get obj01 $TEMPDIR/obj01 || die "radostool failed"
+    ./rados -c ./ceph.conf -p $TEST_POOL get obj01 $TEMPDIR/obj01 || die "radostool failed"
     ) &
     sleep 5
     [ -e $TEMPDIR/obj01 ] && die "unexpected error: fetched unfound object?"
-- 
2.39.5
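
The pattern the updated tests follow is to create the pools, create the filesystem with `fs new`, run the test, and then tear everything down again. In outline (a sketch only; the pool names, pg counts, and filesystem name mirror the test_mon_mds hunk above and are illustrative, not required values):

    # The data and metadata pools no longer exist by default,
    # so a filesystem test creates its own pools first.
    ceph osd pool create fs_data 10
    ceph osd pool create fs_metadata 10

    # A filesystem must be created with `fs new` before any
    # filesystem-using test can run.
    ceph fs new default fs_metadata fs_data

    # ... exercise mds/fs functionality here ...

    # Tear down: remove the filesystem first, then delete its pools.
    ceph fs rm default --yes-i-really-mean-it
    ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
    ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it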