function test_tiering()
{
# tiering
+ ceph osd pool create slow 2
+ ceph osd pool create slow2 2
ceph osd pool create cache 2
ceph osd pool create cache2 2
- ceph osd tier add data cache
- ceph osd tier add data cache2
- expect_false ceph osd tier add metadata cache
+ ceph osd tier add slow cache
+ ceph osd tier add slow cache2
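+ # a pool can be a tier of only one base pool, so reusing 'cache' for slow2 must fail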
+ expect_false ceph osd tier add slow2 cache
# test some state transitions
ceph osd tier cache-mode cache writeback
ceph osd tier cache-mode cache forward
done
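# pg_num cannot be changed while 'cache' is acting as a tier (expected to fail)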
expect_false ceph osd pool set cache pg_num 4
ceph osd tier cache-mode cache none
- ceph osd tier set-overlay data cache
- expect_false ceph osd tier set-overlay data cache2
- expect_false ceph osd tier remove data cache
- ceph osd tier remove-overlay data
- ceph osd tier set-overlay data cache2
- ceph osd tier remove-overlay data
- ceph osd tier remove data cache
- ceph osd tier add metadata cache
- expect_false ceph osd tier set-overlay data cache
- ceph osd tier set-overlay metadata cache
- ceph osd tier remove-overlay metadata
- ceph osd tier remove metadata cache
- ceph osd tier remove data cache2
+ ceph osd tier set-overlay slow cache
+ expect_false ceph osd tier set-overlay slow cache2
+ expect_false ceph osd tier remove slow cache
+ ceph osd tier remove-overlay slow
+ ceph osd tier set-overlay slow cache2
+ ceph osd tier remove-overlay slow
+ ceph osd tier remove slow cache
+ ceph osd tier add slow2 cache
+ expect_false ceph osd tier set-overlay slow cache
+ ceph osd tier set-overlay slow2 cache
+ ceph osd tier remove-overlay slow2
+ ceph osd tier remove slow2 cache
+ ceph osd tier remove slow cache2
# make sure a non-empty pool cannot be added as a tier (without --force-nonempty)
rados -p cache2 put /etc/passwd /etc/passwd
echo waiting for pg stats to flush
sleep 2
done
- expect_false ceph osd tier add data cache2
- ceph osd tier add data cache2 --force-nonempty
- ceph osd tier remove data cache2
+ expect_false ceph osd tier add slow cache2
+ ceph osd tier add slow cache2 --force-nonempty
+ ceph osd tier remove slow cache2
ceph osd pool delete cache cache --yes-i-really-really-mean-it
ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
# convenient add-cache command
ceph osd pool create cache3 2
- ceph osd tier add-cache data cache3 1024000
+ ceph osd tier add-cache slow cache3 1024000
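# add-cache should have applied the hit_set defaults verified below: a bloom
# filter with false_positive_probability 0.05 and 1200s x4 periods, plus the
# target_bytes value passed above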
ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
- ceph osd tier remove data cache3
+ ceph osd tier remove slow cache3
ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
# protection against pool removal when used as tiers
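# (the guarded-delete assertions are elided here; a minimal sketch, reusing
# only commands that appear above, might look like:
#   ceph osd tier add slow cache
#   expect_false ceph osd pool delete slow slow --yes-i-really-really-mean-it
#   expect_false ceph osd pool delete cache cache --yes-i-really-really-mean-it
#   ceph osd tier remove slow cache)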
function test_mon_mds()
{
+ ceph osd pool create fs_data 10
+ ceph osd pool create fs_metadata 10
+ ceph fs new default fs_metadata fs_data
+
ceph mds cluster_down
ceph mds cluster_up
ceph mds setmap -i $mdsmapfile $epoch
rm $mdsmapfile
- ceph mds newfs 0 1 --yes-i-really-mean-it
ceph osd pool create data2 10
- poolnum=$(ceph osd dump | grep 'pool.*data2' | awk '{print $2;}')
- ceph mds add_data_pool $poolnum
- ceph mds add_data_pool rbd
- ceph mds remove_data_pool $poolnum
- ceph mds remove_data_pool rbd
+ ceph osd pool create data3 10
+ data2_pool=$(ceph osd dump | grep 'pool.*data2' | awk '{print $2;}')
+ data3_pool=$(ceph osd dump | grep 'pool.*data3' | awk '{print $2;}')
+ ceph mds add_data_pool $data2_pool
+ ceph mds add_data_pool $data3_pool
+ ceph mds remove_data_pool $data2_pool
+ ceph mds remove_data_pool $data3_pool
ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
+ ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
ceph mds set_max_mds 4
ceph mds set_max_mds 3
ceph mds set max_mds 4
# ceph mds rmfailed
# ceph mds set_state
# ceph mds stop
+
+ ceph fs rm default --yes-i-really-mean-it
+ ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
+ ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mon_mon()
ceph osd rm $id
ceph osd ls
+ ceph osd pool create data 10
ceph osd lspools | grep data
ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
+ ceph osd pool delete data data --yes-i-really-really-mean-it
ceph osd pause
ceph osd dump | grep 'flags pauserd,pausewr'
#
# osd pool
#
+ ceph osd pool create data 10
ceph osd pool mksnap data datasnap
rados -p data lssnap | grep datasnap
ceph osd pool rmsnap data datasnap
+ ceph osd pool delete data data --yes-i-really-really-mean-it
ceph osd pool create data2 10
ceph osd pool rename data2 data3
function test_mon_osd_pool_set()
{
+ TEST_POOL_GETSET=pool_getset
+ ceph osd pool create $TEST_POOL_GETSET 10
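# every variable listed below should be readable via 'osd pool get'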
for s in pg_num pgp_num size min_size crash_replay_interval crush_ruleset; do
- ceph osd pool get data $s
+ ceph osd pool get $TEST_POOL_GETSET $s
done
- old_size=$(ceph osd pool get data size | sed -e 's/size: //')
+ old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
(( new_size = old_size + 1 ))
- ceph osd pool set data size $new_size
- ceph osd pool get data size | grep "size: $new_size"
- ceph osd pool set data size $old_size
+ ceph osd pool set $TEST_POOL_GETSET size $new_size
+ ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
+ ceph osd pool set $TEST_POOL_GETSET size $old_size
ceph osd pool create pool_erasure 12 12 erasure
set +e
set -e
auid=5555
- ceph osd pool set data auid $auid
- ceph osd pool get data auid | grep $auid
- ceph --format=xml osd pool get data auid | grep $auid
- ceph osd pool set data auid 0
-
- ceph osd pool set data hashpspool true
- ceph osd pool set data hashpspool false
- ceph osd pool set data hashpspool 0
- ceph osd pool set data hashpspool 1
- expect_false ceph osd pool set data hashpspool asdf
- expect_false ceph osd pool set data hashpspool 2
+ ceph osd pool set $TEST_POOL_GETSET auid $auid
+ ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
+ ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
+ ceph osd pool set $TEST_POOL_GETSET auid 0
+
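+ # hashpspool is a boolean flag: true/false and 1/0 are accepted, anything else is rejected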
+ ceph osd pool set $TEST_POOL_GETSET hashpspool true
+ ceph osd pool set $TEST_POOL_GETSET hashpspool false
+ ceph osd pool set $TEST_POOL_GETSET hashpspool 0
+ ceph osd pool set $TEST_POOL_GETSET hashpspool 1
+ expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool asdf
+ expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 2
+
+ ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
ceph osd pool set rbd hit_set_type explicit_hash
ceph osd pool get rbd hit_set_type | grep "hit_set_type: explicit_hash"
DIR=test-ceph-disk
MON_ID=a
MONA=127.0.0.1:7451
+TEST_POOL=rbd
FSID=$(uuidgen)
export CEPH_CONF=/dev/null
export CEPH_ARGS="--fsid $FSID"
activate \
--mark-init=none \
$osd_data || return 1
- $timeout $TIMEOUT ./ceph osd pool set data size 1 || return 1
+ $timeout $TIMEOUT ./ceph osd pool set $TEST_POOL size 1 || return 1
local id=$($cat $osd_data/whoami)
local weight=1
./ceph osd crush add osd.$id $weight root=default host=localhost || return 1
echo FOO > $DIR/BAR
- $timeout $TIMEOUT ./rados --pool data put BAR $DIR/BAR || return 1
- $timeout $TIMEOUT ./rados --pool data get BAR $DIR/BAR.copy || return 1
+ $timeout $TIMEOUT ./rados --pool $TEST_POOL put BAR $DIR/BAR || return 1
+ $timeout $TIMEOUT ./rados --pool $TEST_POOL get BAR $DIR/BAR.copy || return 1
$diff $DIR/BAR $DIR/BAR.copy || return 1
}
modified \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re)
flags
- pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
- pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
- pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
+ pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
max_osd 3
modified \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re)
flags
- pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
- pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 flags hashpspool stripe_width 0
- pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 flags hashpspool stripe_width 0
+ pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 flags hashpspool stripe_width 0
max_osd 1
modified \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re)
flags
- pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
- pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
- pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
+ pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
max_osd 3
osdmaptool: writing epoch 1 to myosdmap
$ osdmaptool --print myosdmap | grep 'pool 0'
osdmaptool: osdmap file 'myosdmap'
- pool 0 'data' replicated size 3 min_size 2 crush_ruleset 66 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+ pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 66 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
$ osdmaptool --clobber --createsimple 3 --osd_pool_default_crush_rule 55 myosdmap 2>&1 >/dev/null | sed -e 's/^.* 0 osd_pool_//'
osdmaptool: osdmap file 'myosdmap'
default_crush_rule is deprecated use osd_pool_default_crush_replicated_ruleset instead
default_crush_rule = 55 overrides osd_pool_default_crush_replicated_ruleset = 0
$ osdmaptool --print myosdmap | grep 'pool 0'
osdmaptool: osdmap file 'myosdmap'
- pool 0 'data' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+ pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
$ osdmaptool --clobber --createsimple 3 --osd_pool_default_crush_replicated_ruleset 66 --osd_pool_default_crush_rule 55 myosdmap 2>&1 >/dev/null | sed -e 's/^.* 0 osd_pool_//'
osdmaptool: osdmap file 'myosdmap'
default_crush_rule is deprecated use osd_pool_default_crush_replicated_ruleset instead
default_crush_rule = 55 overrides osd_pool_default_crush_replicated_ruleset = 66
$ osdmaptool --print myosdmap | grep 'pool 0'
osdmaptool: osdmap file 'myosdmap'
- pool 0 'data' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+ pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 flags hashpspool stripe_width 0
$ rm -f myosdmap
modified \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re)
flags
- pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
- pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
- pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
+ pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
max_osd 239
osdmaptool: writing epoch 1 to om
$ osdmaptool --print om | grep 'pool 0'
osdmaptool: osdmap file 'om'
- pool 0 'data' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+ pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
$ osdmaptool --clobber --create-from-conf --osd_pool_default_crush_rule 55 om -c $TESTDIR/ceph.conf.withracks 2>&1 >/dev/null | sed -e 's/^.* 0 osd_pool_//'
osdmaptool: osdmap file 'om'
default_crush_rule is deprecated use osd_pool_default_crush_replicated_ruleset instead
default_crush_rule = 55 overrides osd_pool_default_crush_replicated_ruleset = 0
$ osdmaptool --print om | grep 'pool 0'
osdmaptool: osdmap file 'om'
- pool 0 'data' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+ pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
$ osdmaptool --clobber --create-from-conf --osd_pool_default_crush_replicated_ruleset 66 --osd_pool_default_crush_rule 55 om -c $TESTDIR/ceph.conf.withracks 2>&1 >/dev/null | sed -e 's/^.* 0 osd_pool_//'
osdmaptool: osdmap file 'om'
default_crush_rule is deprecated use osd_pool_default_crush_replicated_ruleset instead
default_crush_rule = 55 overrides osd_pool_default_crush_replicated_ruleset = 66
$ osdmaptool --print om | grep 'pool 0'
osdmaptool: osdmap file 'om'
- pool 0 'data' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool crash_replay_interval 45 stripe_width 0
+ pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 55 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 flags hashpspool stripe_width 0
$ rm -f om
There is no pool 123
[1]
- $ osdmaptool myosdmap --test-map-object foo --pool 2
+ $ osdmaptool myosdmap --test-map-object foo --pool 0
osdmaptool: osdmap file 'myosdmap'
- object 'foo' \-\> 2\..* (re)
+ object 'foo' \-\> 0\..* (re)
$ osdmaptool myosdmap --test-map-object foo
osdmaptool: osdmap file 'myosdmap'
There is no pool 123
[1]
- $ osdmaptool myosdmap --mark-up-in --test-map-pgs --pool 2 | grep pool
+ $ osdmaptool myosdmap --mark-up-in --test-map-pgs --pool 0 | grep pool
osdmaptool: osdmap file 'myosdmap'
- pool 2 pg_num .* (re)
+ pool 0 pg_num .* (re)
$ osdmaptool myosdmap --mark-up-in --test-map-pgs | grep pool
osdmaptool: osdmap file 'myosdmap'
pool 0 pg_num .* (re)
- pool 1 pg_num .* (re)
- pool 2 pg_num .* (re)
$ NUM_OSDS=500
- $ POOL_COUNT=3 # data + metadata + rbd
+ $ POOL_COUNT=1 # just rbd; data and metadata pools are no longer created by default
$ SIZE=3
$ PG_BITS=4
#
$ PG_NUM=$(($NUM_OSDS << $PG_BITS))
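# with NUM_OSDS=500 and PG_BITS=4: 500 << 4 = 8000, the per-pool pg_num expected below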
$ grep "pg_num $PG_NUM" "$OUT" || cat $OUT
pool 0 pg_num 8000
- pool 1 pg_num 8000
- pool 2 pg_num 8000
$ TOTAL=$((POOL_COUNT * $PG_NUM))
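# with a single pool this is 1 * 8000 = 8000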
$ grep -P "size $SIZE\t$TOTAL" $OUT || cat $OUT
- size 3\t24000 (esc)
+ size 3\t8000 (esc)
$ STATS_CRUSH=$(grep '^ avg ' "$OUT")
#
# --test-map-pgs --test-random is expected to change nothing regarding the totals
$ PG_NUM=$(($NUM_OSDS << $PG_BITS))
$ grep "pg_num $PG_NUM" "$OUT" || cat $OUT
pool 0 pg_num 8000
- pool 1 pg_num 8000
- pool 2 pg_num 8000
$ TOTAL=$((POOL_COUNT * $PG_NUM))
$ grep -P "size $SIZE\t$TOTAL" $OUT || cat $OUT
- size 3\t24000 (esc)
+ size 3\t8000 (esc)
$ STATS_RANDOM=$(grep '^ avg ' "$OUT")
# it is almost impossible to get the same stats with random and crush placement;
# if they do match, it most probably means something went wrong somewhere
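# (the comparison itself is elided; presumably something like:
#   test "$STATS_CRUSH" != "$STATS_RANDOM" || return 1)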
teardown $dir || return 1
}
+TEST_POOL=rbd
+
function TEST_osd_pool_get_set() {
local dir=$1
./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
- ./ceph osd pool set data hashpspool 0 || return 1
+ ./ceph osd pool set $TEST_POOL hashpspool 0 || return 1
! ./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
- ./ceph osd pool set data hashpspool 1 || return 1
+ ./ceph osd pool set $TEST_POOL hashpspool 1 || return 1
./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
- ./ceph osd pool set data hashpspool false || return 1
+ ./ceph osd pool set $TEST_POOL hashpspool false || return 1
! ./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
- ./ceph osd pool set data hashpspool false || return 1
+ ./ceph osd pool set $TEST_POOL hashpspool false || return 1
# check that setting false twice does not toggle to true (bug)
! ./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
- ./ceph osd pool set data hashpspool true || return 1
+ ./ceph osd pool set $TEST_POOL hashpspool true || return 1
./ceph osd dump | grep 'pool 0' | grep hashpspool || return 1
}
data_lock("OmapBench::data_lock"),
busythreads_count(0),
comp(NULL), safe(aio_is_safe),
- pool_name("data"),
+ pool_name("rbd"),
rados_id("admin"),
prefix(rados_id+".obj."),
threads(3), objects(100), entries_per_omap(10), key_size(10),
}
osdmap.apply_incremental(pending_inc);
- // kludge to get an erasure coding rule and pool
+ // Create an EC ruleset and a pool using it
int r = osdmap.crush->add_simple_ruleset("erasure", "default", "osd",
"indep", pg_pool_t::TYPE_ERASURE,
&cerr);
- pg_pool_t *p = (pg_pool_t *)osdmap.get_pg_pool(2);
+
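+ // build the new pool by hand: bump new_pool_max to allocate an id, register
+ // an empty pg_pool_t under it, then fill in the pool's basic parameters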
+ OSDMap::Incremental new_pool_inc(osdmap.get_epoch() + 1);
+ new_pool_inc.new_pool_max = osdmap.get_pool_max();
+ new_pool_inc.fsid = osdmap.get_fsid();
+ pg_pool_t empty;
+ uint64_t pool_id = ++new_pool_inc.new_pool_max;
+ pg_pool_t *p = new_pool_inc.get_new_pool(pool_id, &empty);
+ p->size = 3;
+ p->set_pg_num(64);
+ p->set_pgp_num(64);
p->type = pg_pool_t::TYPE_ERASURE;
p->crush_ruleset = r;
+ osdmap.apply_incremental(new_pool_inc);
}
unsigned int get_num_osds() { return num_osds; }
};
map<TestOpType, unsigned int> op_weights;
- string pool_name = "data";
+ string pool_name = "rbd";
bool ec_pool = false;
bool no_omap = false;
'--yes-i-really-mean-it']))
+class TestFS(TestArgparse):
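+ # argparse should accept the new fs new/rm/ls commands and reject extra args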
+ def test_fs_new(self):
+ self.assert_valid_command(['fs', 'new', 'default', 'metadata', 'data'])
+
+ def test_fs_rm(self):
+ self.assert_valid_command(['fs', 'rm', 'default'])
+ self.assert_valid_command(['fs', 'rm', 'default', '--yes-i-really-mean-it'])
+ assert_equal({}, validate_command(sigdict, ['fs', 'rm', 'default', '--yes-i-really-mean-it', 'toomany']))
+
+ def test_fs_ls(self):
+ self.assert_valid_command(['fs', 'ls'])
+ assert_equal({}, validate_command(sigdict, ['fs', 'ls', 'toomany']))
+
class TestMon(TestArgparse):
def test_dump(self):
def list_non_default_pools(self):
pools = self.rados.list_pools()
- pools.remove('data')
- pools.remove('metadata')
pools.remove('rbd')
return set(pools)
#!/bin/sh
+TEST_POOL=rbd
+
./stop.sh
CEPH_NUM_OSD=3 ./vstart.sh -d -n -x -o 'osd min pg log entries = 5'
-./rados -p data bench 15 write -b 4096
+./rados -p $TEST_POOL bench 15 write -b 4096
./ceph osd out 0
./init-ceph stop osd.0
./ceph osd down 0
-./rados -p data bench 600 write -b 4096
+./rados -p $TEST_POOL bench 600 write -b 4096
# Includes
source "`dirname $0`/test_common.sh"
+TEST_POOL=rbd
+
# Functions
setup() {
export CEPH_NUM_OSD=$1
recovery1_impl() {
# Write lots and lots of objects
- write_objects 1 1 200 4000 data
+ write_objects 1 1 200 4000 $TEST_POOL
# Take down osd1
stop_osd 1
# Continue writing a lot of objects
- write_objects 2 2 200 4000 data
+ write_objects 2 2 200 4000 $TEST_POOL
# Bring up osd1
restart_osd 1
try_to_fetch_unfound=$1
# Write lots and lots of objects
- write_objects 1 1 20 8000 data
+ write_objects 1 1 20 8000 $TEST_POOL
# Take down osd1
stop_osd 1
# Continue writing a lot of objects
- write_objects 2 2 20 8000 data
+ write_objects 2 2 20 8000 $TEST_POOL
# Bring up osd1
restart_osd 1
# verify we get woken to an error when it's declared lost.
echo "trying to get one of the unfound objects"
(
- ./rados -c ./ceph.conf -p data get obj02 $TEMPDIR/obj02 &&\
+ ./rados -c ./ceph.conf -p $TEST_POOL get obj02 $TEMPDIR/obj02 &&\
die "expected radostool error"
) &
fi
# Reading from a lost object gives back an error code.
# TODO: check error code
- ./rados -c ./ceph.conf -p data get obj01 $TEMPDIR/obj01 &&\
+ ./rados -c ./ceph.conf -p $TEST_POOL get obj01 $TEMPDIR/obj01 &&\
die "expected radostool error"
if [ "$try_to_fetch_unfound" -eq 1 ]; then
CEPH_NUM_OSD=3 ./vstart.sh -d -n -x -o 'osd recovery max active = 1'
-./ceph -c ./ceph.conf osd pool set data size 3
+TEST_POOL=rbd
+
+./ceph -c ./ceph.conf osd pool set $TEST_POOL size 3
sleep 20
for f in `seq 1 100`
do
- ./rados -c ./ceph.conf -p data put test_$f /etc/passwd
+ ./rados -c ./ceph.conf -p $TEST_POOL put test_$f /etc/passwd
done
# zap some objects on both replicas
# Includes
source "`dirname $0`/test_common.sh"
+TEST_POOL=rbd
+
# Functions
my_write_objects() {
- write_objects $1 $2 10 1000000 data
+ write_objects $1 $2 10 1000000 $TEST_POOL
}
setup() {
}
read_write_1_impl() {
- write_objects 1 2 100 8192 data
+ write_objects 1 2 100 8192 $TEST_POOL
read_objects 2 100 8192
- write_objects 3 3 10 81920 data
+ write_objects 3 3 10 81920 $TEST_POOL
read_objects 3 10 81920
- write_objects 4 4 100 4 data
+ write_objects 4 4 100 4 $TEST_POOL
read_objects 4 100 4
- write_objects 1 2 100 8192 data
+ write_objects 1 2 100 8192 $TEST_POOL
read_objects 2 100 8192
# success
# Includes
source "`dirname $0`/test_common.sh"
+TEST_POOL=rbd
+
# Constants
my_write_objects() {
- write_objects $1 $2 10 1000000 data
+ write_objects $1 $2 10 1000000 $TEST_POOL
}
setup() {
}
get_pgp_num() {
- ./ceph -c ./ceph.conf osd pool get data pgp_num > $TEMPDIR/pgp_num
+ ./ceph -c ./ceph.conf osd pool get $TEST_POOL pgp_num > $TEMPDIR/pgp_num
[ $? -eq 0 ] || die "failed to get pgp_num"
PGP_NUM=`grep PGP_NUM $TEMPDIR/pgp_num | sed 's/.*PGP_NUM:\([ 0123456789]*\).*$/\1/'`
}
# Double the number of PGs
PGP_NUM=$((PGP_NUM*2))
echo "doubling PGP_NUM to $PGP_NUM..."
- ./ceph -c ./ceph.conf osd pool set data pgp_num $PGP_NUM
+ ./ceph -c ./ceph.conf osd pool set $TEST_POOL pgp_num $PGP_NUM
sleep 30
# Includes
source "`dirname $0`/test_common.sh"
+TEST_POOL=rbd
+
# Functions
my_write_objects() {
- write_objects $1 $2 10 1000000 data
+ write_objects $1 $2 10 1000000 $TEST_POOL
}
setup() {
echo "Got unfound objects."
(
- ./rados -c ./ceph.conf -p data get obj01 $TEMPDIR/obj01 || die "radostool failed"
+ ./rados -c ./ceph.conf -p $TEST_POOL get obj01 $TEMPDIR/obj01 || die "radostool failed"
) &
sleep 5
[ -e $TEMPDIR/obj01 ] && die "unexpected error: fetched unfound object?"