From: Ramana Raja
Date: Sat, 11 Apr 2020 07:15:39 +0000 (+0530)
Subject: mon/FSCommands: Fix 'add_data_pool' command
X-Git-Tag: v16.1.0~2549^2~1
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=3c49092322720b7f1d612f45e73fb26820ff7885;p=ceph.git

mon/FSCommands: Fix 'add_data_pool' command

After making a RADOS pool a filesystem's data pool using the
'add_data_pool' command, the value of the 'data' key of the pool's
application metadata 'cephfs' should be the filesystem's name. This
didn't happen when the pool's application metadata 'cephfs' was
enabled before the pool was made the data pool. Fix this during the
handling of the 'add_data_pool' command by setting the value of the
'data' key of the pool's application metadata 'cephfs' to the
filesystem's name even when the application metadata 'cephfs' is
already enabled or set.

Fixes: https://tracker.ceph.com/issues/43061
Signed-off-by: Ramana Raja
---

diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index 374f75a72cf0..b8d82afb7410 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -1,3 +1,5 @@
+import json
+
 from teuthology.orchestra.run import CommandFailedError
 
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
@@ -30,6 +32,11 @@ class TestAdminCommands(CephFSTestCase):
         if overwrites:
             self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
 
+    def _check_pool_application_metadata_key_value(self, pool, app, key, value):
+        output = self.fs.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'application', 'get', pool, app, key)
+        self.assertEqual(str(output.strip()), value)
+
     def test_add_data_pool_root(self):
         """
         That a new data pool can be added and used for the root directory.
@@ -38,6 +45,19 @@ class TestAdminCommands(CephFSTestCase):
         p = self.fs.add_data_pool("foo")
         self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))
 
+    def test_add_data_pool_application_metadata(self):
+        """
+        That the application metadata set on a newly added data pool is as expected.
+        """
+        pool_name = "foo"
+        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+        mon_cmd('osd', 'pool', 'create', pool_name, str(self.fs.pgs_per_fs_pool))
+        # Check whether https://tracker.ceph.com/issues/43061 is fixed
+        mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
+        self.fs.add_data_pool(pool_name, create=False)
+        self._check_pool_application_metadata_key_value(
+            pool_name, 'cephfs', 'data', self.fs.name)
+
     def test_add_data_pool_subdir(self):
         """
         That a new data pool can be added and used for a sub-directory.
diff --git a/src/mon/FSCommands.cc b/src/mon/FSCommands.cc
index 34a786cf39ff..88d5329ac9a4 100644
--- a/src/mon/FSCommands.cc
+++ b/src/mon/FSCommands.cc
@@ -715,7 +715,7 @@ class AddDataPoolHandler : public FileSystemCommandHandler
     }
     mon->osdmon()->do_application_enable(poolid,
                                          pg_pool_t::APPLICATION_NAME_CEPHFS,
-                                         "data", fs_name);
+                                         "data", fs_name, true);
     mon->osdmon()->propose_pending();
 
     fsmap.modify_filesystem(
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index b2ac411f2dc1..14a595a297e1 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -4769,7 +4769,8 @@ void OSDMonitor::check_pg_creates_sub(Subscription *sub)
 void OSDMonitor::do_application_enable(int64_t pool_id,
                                        const std::string &app_name,
                                        const std::string &app_key,
-                                       const std::string &app_value)
+                                       const std::string &app_value,
+                                       bool force)
 {
   ceph_assert(paxos->is_plugged() && is_writeable());
 
@@ -4789,7 +4790,11 @@ void OSDMonitor::do_application_enable(int64_t pool_id,
   if (app_key.empty()) {
     p.application_metadata.insert({app_name, {}});
   } else {
-    p.application_metadata.insert({app_name, {{app_key, app_value}}});
+    if (force) {
+      p.application_metadata[app_name][app_key] = app_value;
+    } else {
+      p.application_metadata.insert({app_name, {{app_key, app_value}}});
+    }
   }
   p.last_change = pending_inc.epoch;
   pending_inc.new_pools[pool_id] = p;
diff --git a/src/mon/OSDMonitor.h b/src/mon/OSDMonitor.h
index 37082c8a2558..06ad9219946d 100644
--- a/src/mon/OSDMonitor.h
+++ b/src/mon/OSDMonitor.h
@@ -756,7 +756,8 @@ public:
   void do_application_enable(int64_t pool_id,
                              const std::string &app_name,
                              const std::string &app_key="",
-                             const std::string &app_value="");
+                             const std::string &app_value="",
+                             bool force=false);
   void do_set_pool_opt(int64_t pool_id, pool_opts_t::key_t opt,
                        pool_opts_t::value_t);
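
The scenario exercised by the new qa test can also be checked by hand against a
running cluster. The following is a minimal sketch, not part of the patch: it
assumes an existing filesystem named cephfs_a and an unused pool name foo; the
pool name and pg count are illustrative.

    # Enable the 'cephfs' application on the pool before attaching it;
    # this ordering previously left the 'data' key unset.
    ceph osd pool create foo 8
    ceph osd pool application enable foo cephfs

    # Attach the pool as a data pool; with force=true the handler now
    # overwrites the 'data' key even though 'cephfs' is already enabled.
    ceph fs add_data_pool cephfs_a foo

    # Expected to print the filesystem's name, i.e. cephfs_a.
    ceph osd pool application get foo cephfs data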