From 052c3d3f681017d56b5b0ee5cf6f65bffc952a4c Mon Sep 17 00:00:00 2001
From: Michal Jarzabek
Date: Sat, 7 Jan 2017 15:41:46 +0000
Subject: [PATCH] mon/MDSMonitor.cc:refuse fs new on pools with obj

Fixes: http://tracker.ceph.com/issues/11124
Signed-off-by: Michal Jarzabek
---
 qa/tasks/cephfs/test_misc.py  | 58 +++++++++++++++++++++++++++++++++++
 qa/workunits/cephtool/test.sh | 12 ++++----
 src/mon/MDSMonitor.cc         | 10 ++++++
 src/mon/MonCommands.h         |  3 +-
 4 files changed, 76 insertions(+), 7 deletions(-)

diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
index bd8ba64478f48..3f98c3719b88c 100644
--- a/qa/tasks/cephfs/test_misc.py
+++ b/qa/tasks/cephfs/test_misc.py
@@ -2,6 +2,8 @@
 from unittest import SkipTest
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
+from teuthology.orchestra.run import CommandFailedError
+import errno
 
 class TestMisc(CephFSTestCase):
     CLIENTS_REQUIRED = 2
@@ -31,3 +33,59 @@ class TestMisc(CephFSTestCase):
         self.mount_b.run_shell(["cat", "testfile"])
 
         self.mount_a.kill_background(p)
+
+    def test_fs_new(self):
+        data_pool_name = self.fs.get_data_pool_name()
+
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
+                                            '--yes-i-really-mean-it')
+
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
+                                            self.fs.metadata_pool_name,
+                                            self.fs.metadata_pool_name,
+                                            '--yes-i-really-really-mean-it')
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
+                                            self.fs.metadata_pool_name,
+                                            self.fs.get_pgs_per_fs_pool().__str__())
+
+        dummyfile = '/etc/fstab'
+
+        self.fs.put_metadata_object_raw("key", dummyfile)
+
+        timeout = 10
+
+        get_pool_df = self.fs.get_pool_df
+        self.wait_until_true(
+            lambda: get_pool_df(self.fs.metadata_pool_name)['objects'] > 0,
+            timeout=timeout)
+
+        try:
+            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
+                                                self.fs.metadata_pool_name,
+                                                data_pool_name)
+        except CommandFailedError as e:
+            self.assertEqual(e.exitstatus, errno.EINVAL)
+        else:
+            raise AssertionError("Expected EINVAL")
+
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
+                                            self.fs.metadata_pool_name,
+                                            data_pool_name, "--force")
+
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
+                                            '--yes-i-really-mean-it')
+
+
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
+                                            self.fs.metadata_pool_name,
+                                            self.fs.metadata_pool_name,
+                                            '--yes-i-really-really-mean-it')
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
+                                            self.fs.metadata_pool_name,
+                                            self.fs.get_pgs_per_fs_pool().__str__())
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
+                                            self.fs.metadata_pool_name,
+                                            data_pool_name)
diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 3bd4a9fb4e7ff..e830f6d44495b 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -909,7 +909,7 @@ function test_mon_mds()
   ceph fs rm $FS_NAME --yes-i-really-mean-it
 
   set +e
-  ceph fs new $FS_NAME fs_metadata mds-ec-pool 2>$TMPFILE
+  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
   check_response 'erasure-code' $? 22
   ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
   check_response 'erasure-code' $? 22
@@ -926,13 +926,13 @@ function test_mon_mds()
   # Use of a readonly tier should be forbidden
   ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
   set +e
-  ceph fs new $FS_NAME fs_metadata mds-ec-pool 2>$TMPFILE
+  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
   check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
   set -e
 
   # Use of a writeback tier should enable FS creation
   ceph osd tier cache-mode mds-tier writeback
-  ceph fs new $FS_NAME fs_metadata mds-ec-pool
+  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
 
   # While a FS exists using the tiered pools, I should not be allowed
   # to remove the tier
@@ -948,7 +948,7 @@ function test_mon_mds()
 
   # ... but we should be forbidden from using the cache pool in the FS directly.
   set +e
-  ceph fs new $FS_NAME fs_metadata mds-tier 2>$TMPFILE
+  ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
   check_response 'in use as a cache tier' $? 22
   ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
   check_response 'in use as a cache tier' $? 22
@@ -961,7 +961,7 @@ function test_mon_mds()
   ceph osd tier remove mds-ec-pool mds-tier
 
   # Create a FS using the 'cache' pool now that it's no longer a tier
-  ceph fs new $FS_NAME fs_metadata mds-tier
+  ceph fs new $FS_NAME fs_metadata mds-tier --force
 
   # We should be forbidden from using this pool as a tier now that
   # it's in use for CephFS
@@ -975,7 +975,7 @@ function test_mon_mds()
   ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
 
   # Create a FS and check that we can subsequently add a cache tier to it
-  ceph fs new $FS_NAME fs_metadata fs_data
+  ceph fs new $FS_NAME fs_metadata fs_data --force
 
   # Adding overlay to FS pool should be permitted, RADOS clients handle this.
   ceph osd tier add fs_metadata mds-tier
diff --git a/src/mon/MDSMonitor.cc b/src/mon/MDSMonitor.cc
index c19d7a70d8b92..c26e1ce8c166a 100644
--- a/src/mon/MDSMonitor.cc
+++ b/src/mon/MDSMonitor.cc
@@ -19,6 +19,7 @@
 #include "Monitor.h"
 #include "MonitorDBStore.h"
 #include "OSDMonitor.h"
+#include "PGMonitor.h"
 
 #include "common/strtol.h"
 #include "common/perf_counters.h"
@@ -1581,6 +1582,15 @@ int MDSMonitor::management_command(
       return -ENOENT;
     }
 
+    string force;
+    cmd_getval(g_ceph_context, cmdmap, "force", force);
+    int64_t metadata_num_objects = mon->pgmon()->pg_map.pg_pool_sum[metadata].stats.sum.num_objects;
+    if (force != "--force" && metadata_num_objects > 0) {
+      ss << "pool '" << metadata_name
+         << "' already contains some objects. Use an empty pool instead.";
+      return -EINVAL;
+    }
+
     string data_name;
     cmd_getval(g_ceph_context, cmdmap, "data", data_name);
     int64_t data = mon->osdmon()->osdmap.lookup_pg_pool_name(data_name);
diff --git a/src/mon/MonCommands.h b/src/mon/MonCommands.h
index c6f731bab494c..a2962c45e6f38 100644
--- a/src/mon/MonCommands.h
+++ b/src/mon/MonCommands.h
@@ -376,7 +376,8 @@ COMMAND_WITH_FLAG("mds newfs " \
 COMMAND("fs new " \
 	"name=fs_name,type=CephString " \
 	"name=metadata,type=CephString " \
-	"name=data,type=CephString ", \
+	"name=data,type=CephString " \
+	"name=force,type=CephChoices,strings=--force,req=false", \
 	"make new filesystem using named pools <metadata> and <data>", \
 	"fs", "rw", "cli,rest")
 COMMAND("fs rm " \
-- 
2.39.5