(good) purpose: the OSD should always check heartbeats on both the
public and cluster networks.
+* The ``rados`` tool's ``mkpool`` and ``rmpool`` commands have been
+ removed because they are redundant; please use the ``ceph osd pool
+ create`` and ``ceph osd pool rm`` commands instead.
* During the upgrade from luminous to nautilus, it will not be possible to create
a new OSD using a luminous ceph-osd daemon after the monitors have been
upgraded to nautilus.
+
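For scripts that drove the removed ``rados mkpool``/``rmpool`` commands programmatically, the same pool lifecycle is also reachable through the ``python-rados`` bindings. This is only a minimal sketch, assuming a reachable cluster, a client keyring with pool create/delete caps, ``mon allow pool delete = true``, and an illustrative pool name ``scratch-pool``::

    import rados

    # Assumed config path; point this at your own ceph.conf / keyring.
    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster.connect()
    try:
        cluster.create_pool('scratch-pool')           # stands in for `rados mkpool`
        assert cluster.pool_exists('scratch-pool')    # confirm the pool is there
        cluster.delete_pool('scratch-pool')           # stands in for `rados rmpool`
    finally:
        cluster.shutdown()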
Now put something in using rados, check that it made it, get it back, and remove it::
- ./rados mkpool test-blkin
+ ./ceph osd pool create test-blkin 8
./rados put test-object-1 ./vstart.sh --pool=test-blkin
./rados -p test-blkin ls
./ceph osd map test-blkin test-object-1
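The same put / check / get it back / remove round trip can also be exercised from ``python-rados``. A rough sketch, assuming the ``test-blkin`` pool created above still exists and that a usable ``ceph.conf`` is found at the path given (both are assumptions, not part of the original walkthrough)::

    import rados

    cluster = rados.Rados(conffile='ceph.conf')   # assumed path in the dev checkout
    cluster.connect()
    try:
        with cluster.open_ioctx('test-blkin') as ioctx:
            ioctx.write_full('test-object-1', b'hello')    # put something in
            size, _mtime = ioctx.stat('test-object-1')     # check that it made it
            assert ioctx.read('test-object-1')[:size] == b'hello'   # get it back
            ioctx.remove_object('test-object-1')           # remove it
    finally:
        cluster.shutdown()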
.. code::
- $ bin/rados mkpool mypool
+ $ bin/ceph osd pool create mypool 8
$ bin/rados -p mypool bench 10 write -b 123
Place a file into the new pool:
Show utilization statistics, including disk usage (bytes) and object
counts, over the entire system and broken down by pool.
-:command:`mkpool` *foo*
- Create a pool with name foo.
-
-:command:`rmpool` *foo* [ *foo* --yes-i-really-really-mean-it ]
- Delete the pool foo (and all its data).
-
:command:`list-inconsistent-pg` *pool*
List inconsistent PGs in given pool.
::
- # rados rmpool default.rgw.control default.rgw.control --yes-i-really-really-mean-it
- # rados rmpool default.rgw.data.root default.rgw.data.root --yes-i-really-really-mean-it
- # rados rmpool default.rgw.gc default.rgw.gc --yes-i-really-really-mean-it
- # rados rmpool default.rgw.log default.rgw.log --yes-i-really-really-mean-it
- # rados rmpool default.rgw.users.uid default.rgw.users.uid --yes-i-really-really-mean-it
+ # ceph osd pool rm default.rgw.control default.rgw.control --yes-i-really-really-mean-it
+ # ceph osd pool rm default.rgw.data.root default.rgw.data.root --yes-i-really-really-mean-it
+ # ceph osd pool rm default.rgw.gc default.rgw.gc --yes-i-really-really-mean-it
+ # ceph osd pool rm default.rgw.log default.rgw.log --yes-i-really-really-mean-it
+ # ceph osd pool rm default.rgw.users.uid default.rgw.users.uid --yes-i-really-really-mean-it
Create a System User
--------------------
::
- # rados rmpool default.rgw.control default.rgw.control --yes-i-really-really-mean-it
- # rados rmpool default.rgw.data.root default.rgw.data.root --yes-i-really-really-mean-it
- # rados rmpool default.rgw.gc default.rgw.gc --yes-i-really-really-mean-it
- # rados rmpool default.rgw.log default.rgw.log --yes-i-really-really-mean-it
- # rados rmpool default.rgw.users.uid default.rgw.users.uid --yes-i-really-really-mean-it
+ # ceph osd pool rm default.rgw.control default.rgw.control --yes-i-really-really-mean-it
+ # ceph osd pool rm default.rgw.data.root default.rgw.data.root --yes-i-really-really-mean-it
+ # ceph osd pool rm default.rgw.gc default.rgw.gc --yes-i-really-really-mean-it
+ # ceph osd pool rm default.rgw.log default.rgw.log --yes-i-really-really-mean-it
+ # ceph osd pool rm default.rgw.users.uid default.rgw.users.uid --yes-i-really-really-mean-it
Update the Ceph Configuration File
----------------------------------
::
- # rados rmpool <del-zone>.rgw.control <del-zone>.rgw.control --yes-i-really-really-mean-it
- # rados rmpool <del-zone>.rgw.data.root <del-zone>.rgw.data.root --yes-i-really-really-mean-it
- # rados rmpool <del-zone>.rgw.gc <del-zone>.rgw.gc --yes-i-really-really-mean-it
- # rados rmpool <del-zone>.rgw.log <del-zone>.rgw.log --yes-i-really-really-mean-it
- # rados rmpool <del-zone>.rgw.users.uid <del-zone>.rgw.users.uid --yes-i-really-really-mean-it
+ # ceph osd pool rm <del-zone>.rgw.control <del-zone>.rgw.control --yes-i-really-really-mean-it
+ # ceph osd pool rm <del-zone>.rgw.data.root <del-zone>.rgw.data.root --yes-i-really-really-mean-it
+ # ceph osd pool rm <del-zone>.rgw.gc <del-zone>.rgw.gc --yes-i-really-really-mean-it
+ # ceph osd pool rm <del-zone>.rgw.log <del-zone>.rgw.log --yes-i-really-really-mean-it
+ # ceph osd pool rm <del-zone>.rgw.users.uid <del-zone>.rgw.users.uid --yes-i-really-really-mean-it
Modify a Zone
~~~~~~~~~~~~~
return 1
fi
- rados rmpool $poolname $poolname --yes-i-really-really-mean-it
+ ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
teardown $dir || return 1
}
jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1
fi
- rados rmpool $poolname $poolname --yes-i-really-really-mean-it
+ ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
teardown $dir || return 1
}
return 1
fi
- rados rmpool $poolname $poolname --yes-i-really-really-mean-it
+ ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
teardown $dir || return 1
}
if EXP_ERRORS == 0:
NEWPOOL = "rados-import-pool"
- cmd = "{path}/rados mkpool {pool}".format(pool=NEWPOOL, path=CEPH_BIN)
+ cmd = "{path}/ceph osd pool create {pool} 8".format(pool=NEWPOOL, path=CEPH_BIN)
logging.debug(cmd)
ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
assert pool_name in self.pools
self.log("removing pool_name %s" % (pool_name,))
del self.pools[pool_name]
- self.do_rados(self.controller,
- ['rmpool', pool_name, pool_name,
- "--yes-i-really-really-mean-it"])
+ self.raw_cluster_cmd('osd', 'pool', 'rm', pool_name, pool_name,
+ "--yes-i-really-really-mean-it")
def get_pool(self):
"""
log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
proc = remote.run(
args=[
- 'rados',
+ 'ceph',
'--name', role_,
- 'mkpool', 'pool{num}'.format(num=poolnum), '-1',
+ 'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8',
run.Raw('&&'),
'rados',
'--name', role_,
set -e
touch foo.$$
-rados mkpool foo.$$
+ceph osd pool create foo.$$ 8
ceph fs add_data_pool cephfs foo.$$
setfattr -n ceph.file.layout.pool -v foo.$$ foo.$$
# cleanup
rm foo.$$
ceph fs rm_data_pool cephfs foo.$$
-rados rmpool foo.$$ foo.$$ --yes-i-really-really-mean-it
+ceph osd pool rm foo.$$ foo.$$ --yes-i-really-really-mean-it
echo OK
# be an unmanaged snapshot from a not-unmanaged pool
ceph osd pool delete test-foo test-foo --yes-i-really-really-mean-it || true
-expect 'rados mkpool test-foo' 0
+expect 'ceph osd pool create test-foo 8' 0
expect 'rbd pool init test-foo'
expect 'rbd --pool test-foo create --size 1024 image' 0
expect 'rbd --pool test-foo snap create image@snapshot' 0
ceph osd pool delete test-bar test-bar --yes-i-really-really-mean-it || true
-expect 'rados mkpool test-bar' 0
+expect 'ceph osd pool create test-bar 8' 0
expect 'rbd pool init test-bar'
expect 'rados cppool test-foo test-bar --yes-i-really-mean-it' 0
expect 'rbd --pool test-bar snap rm image@snapshot' 95
# pool ops
delay_mon omap rados lspools
-delay_mon poolopreply rados mkpool test
+delay_mon poolopreply ceph osd pool create test 8
delay_mon poolopreply rados mksnap -p test snap
-delay_mon poolopreply rados rmpool test test --yes-i-really-really-mean-it
+delay_mon poolopreply ceph osd pool rm test test --yes-i-really-really-mean-it
# other mon ops
delay_mon getpoolstats rados df
run_expect_nosignal "$RADOS_TOOL" --object_locator "asdf" ls
run_expect_nosignal "$RADOS_TOOL" --namespace "asdf" ls
-run_expect_succ "$RADOS_TOOL" mkpool "$POOL"
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
run_expect_succ "$CEPH_TOOL" osd erasure-code-profile set myprofile k=2 m=1 stripe_unit=2K crush-failure-domain=osd --force
run_expect_succ "$CEPH_TOOL" osd pool create "$POOL_EC" 100 100 erasure myprofile
run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expc"
# make sure that --create works
-run "$RADOS_TOOL" rmpool "$POOL" "$POOL" --yes-i-really-really-mean-it
+run "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
run_expect_succ "$RADOS_TOOL" -p "$POOL" --create import "$TDIR/expa"
# make sure that lack of --create fails
-run_expect_succ "$RADOS_TOOL" rmpool "$POOL" "$POOL" --yes-i-really-really-mean-it
+run_expect_succ "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
run_expect_fail "$RADOS_TOOL" -p "$POOL" import "$TDIR/expa"
run_expect_succ "$RADOS_TOOL" -p "$POOL" --create import "$TDIR/expa"
[ "${VAL}" = "toothbrush" ] || die "Invalid attribute after second import"
# test copy pool
-run "$RADOS_TOOL" rmpool "$POOL" "$POOL" --yes-i-really-really-mean-it
-run "$RADOS_TOOL" rmpool "$POOL_CP_TARGET" "$POOL_CP_TARGET" --yes-i-really-really-mean-it
-run_expect_succ "$RADOS_TOOL" mkpool "$POOL"
-run_expect_succ "$RADOS_TOOL" mkpool "$POOL_CP_TARGET"
+run "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
+run "$CEPH_TOOL" osd pool rm "$POOL_CP_TARGET" "$POOL_CP_TARGET" --yes-i-really-really-mean-it
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL_CP_TARGET" 8
# create src files
mkdir -p "$TDIR/dir_cp_src"
run_expect_succ "$RADOS_TOOL" truncate f.1 0 --pool "$POOL"
run_expect_fail "$RADOS_TOOL" truncate f.1 0k --pool "$POOL"
-run "$RADOS_TOOL" rmpool delete_me_mkpool_test delete_me_mkpool_test --yes-i-really-really-mean-it
-run_expect_succ "$RADOS_TOOL" mkpool delete_me_mkpool_test 0 0
-run_expect_fail "$RADOS_TOOL" mkpool delete_me_mkpool_test2 0k 0
-run_expect_fail "$RADOS_TOOL" mkpool delete_me_mkpool_test3 0 0k
+run "$CEPH_TOOL" osd pool rm delete_me_mkpool_test delete_me_mkpool_test --yes-i-really-really-mean-it
+run_expect_succ "$CEPH_TOOL" osd pool create delete_me_mkpool_test 1
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 write
run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 1k write
sleep 2
done
$RADOS_TOOL -p $p rm $OBJ --force-full
- $RADOS_TOOL rmpool $p $p --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
rm $V1
}
die "Created $TOTAL objects but saw $CHECK"
fi
- $RADOS_TOOL rmpool $p $p --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
}
test_cleanup() {
run_expect_succ $RADOS_TOOL -p $p cleanup --prefix benchmark_data_otherhost
set -e
- $RADOS_TOOL rmpool $p $p --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
}
function test_append()
}
pool="pool-$$"
-rados mkpool $pool
+ceph osd pool create $pool 8
rados -p $pool tmap set foo key1 value1
rados -p $pool tmap set foo key2 value2
rados -p $pool getomapval foo key1 | grep value1
rados -p $pool getomapval foo key2 | grep value2
-rados rmpool $pool $pool --yes-i-really-really-mean-it
+ceph osd pool rm $pool $pool --yes-i-really-really-mean-it
echo OK
rbd rename bar bar2
rbd rename bar2 foo2 2>&1 | grep exists
- rados mkpool rbd2
+ ceph osd pool create rbd2 8
rbd pool init rbd2
rbd create -p rbd2 -s 1 foo
rbd rename rbd2/foo rbd2/bar
! rbd rename rbd2/bar --dest-pool rbd foo
rbd rename --pool rbd2 bar --dest-pool rbd2 foo
rbd -p rbd2 ls | grep foo
- rados rmpool rbd2 rbd2 --yes-i-really-really-mean-it
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
remove_images
}
rbd snap create test1@s1
rbd snap protect test1@s1
- rados mkpool rbd2
+ ceph osd pool create rbd2 8
rbd pool init rbd2
rbd clone test1@s1 rbd2/clone
rbd -p rbd2 ls | grep clone
rbd snap unprotect test1@s1
rbd snap rm test1@s1
rbd rm test1
- rados rmpool rbd2 rbd2 --yes-i-really-really-mean-it
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
}
test_trash() {
return 0
;;
rados)
- COMPREPLY=( $(compgen -W "lspools mkpool rmpool df ls chown get put create rm listxattr getxattr setxattr rmxattr stat stat2 mapext lssnap mksnap rmsnap rollback bench" -- ${cur}) )
+ COMPREPLY=( $(compgen -W "lspools df ls chown get put create rm listxattr getxattr setxattr rmxattr stat stat2 mapext lssnap mksnap rmsnap rollback bench" -- ${cur}) )
return 0
;;
esac
for j in `seq 0 9`; do
poolnum=$((i*10+j))
poolname="pool$poolnum"
- ./rados mkpool $poolname &
+ ./ceph osd pool create $poolname 8 &
done
wait
done
for j in `seq 0 9`; do
poolnum=$((i*10+j))
poolname="pool$poolnum"
- ./rados mkpool $poolname &
+ ./ceph osd pool create $poolname 8 &
done
wait
done
test629_impl() {
# create the pool
- ./rados -c ./ceph.conf mkpool foo || die "mkpool failed"
+ ./ceph -c ./ceph.conf osd pool create foo 8 || die "pool create failed"
# Write lots and lots of objects
write_objects 1 1 10 1000000 foo
poll_cmd "./ceph pg debug degraded_pgs_exist" TRUE 3 120
# delete the pool
- ./rados -c ./ceph.conf rmpool foo || die "rmpool failed"
+ ./ceph -c ./ceph.conf osd pool rm foo foo --yes-i-really-really-mean-it || die "pool rm failed"
# make sure the system is stable
sleep 10
many_pools() {
setup 3
for i in `seq 1 3000`; do
- ./rados -c ./ceph.conf mkpool "pool${i}" || die "mkpool failed"
+ ./ceph -c ./ceph.conf osd pool create "pool${i}" 8 || die "pool create failed"
done
my_write_objects 1 10
}
"usage: rados [options] [commands]\n"
"POOL COMMANDS\n"
" lspools list pools\n"
-" mkpool <pool-name> [123[ 4]] create pool <pool-name>'\n"
-" [with auid 123[and using crush rule 4]]\n"
" cppool <pool-name> <dest-pool> copy content of a pool\n"
-" rmpool <pool-name> [<pool-name> --yes-i-really-really-mean-it]\n"
-" remove pool <pool-name>'\n"
" purge <pool-name> --yes-i-really-really-mean-it\n"
" remove all objects from pool <pool-name> without removing it\n"
" df show per-pool and total usage\n"
ret = 0;
}
- else if (strcmp(nargs[0], "mkpool") == 0) {
- int auid = 0;
- __u8 crush_rule = 0;
- if (nargs.size() < 2)
- usage_exit();
- if (nargs.size() > 2) {
- char* endptr = NULL;
- auid = strtol(nargs[2], &endptr, 10);
- if (*endptr) {
- cerr << "Invalid value for auid: '" << nargs[2] << "'" << std::endl;
- ret = -EINVAL;
- goto out;
- }
- cerr << "setting auid:" << auid << std::endl;
- if (nargs.size() > 3) {
- crush_rule = (__u8)strtol(nargs[3], &endptr, 10);
- if (*endptr) {
- cerr << "Invalid value for crush-rule: '" << nargs[3] << "'" << std::endl;
- ret = -EINVAL;
- goto out;
- }
- cerr << "using crush rule " << (int)crush_rule << std::endl;
- }
- }
- ret = rados.pool_create_with_rule(nargs[1], crush_rule);
- if (ret < 0) {
- cerr << "error creating pool " << nargs[1] << ": "
- << cpp_strerror(ret) << std::endl;
- goto out;
- }
- cout << "successfully created pool " << nargs[1] << std::endl;
- }
else if (strcmp(nargs[0], "cppool") == 0) {
bool force = nargs.size() == 4 && !strcmp(nargs[3], "--yes-i-really-mean-it");
if (nargs.size() != 3 && !(nargs.size() == 4 && force))
}
cout << "successfully copied pool " << nargs[1] << std::endl;
}
- else if (strcmp(nargs[0], "rmpool") == 0) {
- if (nargs.size() < 2)
- usage_exit();
- if (nargs.size() < 4 ||
- strcmp(nargs[1], nargs[2]) != 0 ||
- strcmp(nargs[3], "--yes-i-really-really-mean-it") != 0) {
- cerr << "WARNING:\n"
- << " This will PERMANENTLY DESTROY an entire pool of objects with no way back.\n"
- << " To confirm, pass the pool to remove twice, followed by\n"
- << " --yes-i-really-really-mean-it" << std::endl;
- ret = -1;
- goto out;
- }
- ret = rados.pool_delete(nargs[1]);
- if (ret >= 0) {
- cout << "successfully deleted pool " << nargs[1] << std::endl;
- } else {
- cerr << "pool " << nargs[1] << " could not be removed" << std::endl;
- if (ret == -EPERM) {
- cerr << "Check your monitor configuration - `mon allow pool delete` is set to false by default,"
- << " change it to true to allow deletion of pools" << std::endl;
- }
- }
- }
else if (strcmp(nargs[0], "purge") == 0) {
if (nargs.size() < 2)
usage_exit();