mon/FSCommands: Fix 'add_data_pool' command
author     Ramana Raja <rraja@redhat.com>
           Sat, 11 Apr 2020 07:15:39 +0000 (12:45 +0530)
committer  Ramana Raja <rraja@redhat.com>
           Wed, 15 Apr 2020 10:26:56 +0000 (15:56 +0530)
After a RADOS pool is made a filesystem's data pool with the
'add_data_pool' command, the 'data' key of the pool's 'cephfs'
application metadata should hold the filesystem's name. This didn't
happen when the pool's 'cephfs' application metadata was enabled
before the pool was made a data pool. Fix this in the handling of the
'add_data_pool' command by setting the 'data' key of the pool's
'cephfs' application metadata to the filesystem's name even when the
'cephfs' application metadata is already enabled or set.

Fixes: https://tracker.ceph.com/issues/43061
Signed-off-by: Ramana Raja <rraja@redhat.com>
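
The behaviour being fixed comes down to the difference between
std::map::insert() and operator[] on the pool's application_metadata
map, mirroring the change to OSDMonitor::do_application_enable() below.
A minimal standalone sketch of that difference (plain C++, not Ceph
code; the "cephfs"/"myfs" names are only placeholders):

// Sketch: why insert() left the 'data' key unset on an already-enabled
// application, and why the force path uses operator[] instead.
#include <iostream>
#include <map>
#include <string>

using app_metadata_t = std::map<std::string, std::map<std::string, std::string>>;

int main() {
  app_metadata_t application_metadata;

  // 'osd pool application enable <pool> cephfs' run before
  // add_data_pool leaves an entry with no key/value pairs.
  application_metadata.insert({"cephfs", {}});

  // Old behaviour: insert() is a no-op because the "cephfs" key already
  // exists, so the 'data' key is silently dropped.
  application_metadata.insert({"cephfs", {{"data", "myfs"}}});
  std::cout << "after insert():   'data' keys present = "
            << application_metadata["cephfs"].count("data") << "\n";

  // New behaviour with force=true: operator[] creates or overwrites the
  // nested entry, so 'data' always ends up set to the filesystem name.
  application_metadata["cephfs"]["data"] = "myfs";
  std::cout << "after operator[]: data = "
            << application_metadata["cephfs"]["data"] << "\n";
  return 0;
}

Defaulting the new 'force' parameter to false keeps the insert()
semantics for every existing caller of do_application_enable(); only
the add_data_pool path passes true and overwrites the existing entry.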
qa/tasks/cephfs/test_admin.py
src/mon/FSCommands.cc
src/mon/OSDMonitor.cc
src/mon/OSDMonitor.h

diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index 374f75a72cf0c3a9c57a05054ed134ea848056d9..b8d82afb7410dcd1e6118275b15e6ba298926a5e 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -1,3 +1,5 @@
+import json
+
 from teuthology.orchestra.run import CommandFailedError
 
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
@@ -30,6 +32,11 @@ class TestAdminCommands(CephFSTestCase):
         if overwrites:
             self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
 
+    def _check_pool_application_metadata_key_value(self, pool, app, key, value):
+        output = self.fs.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'application', 'get', pool, app, key)
+        self.assertEqual(str(output.strip()), value)
+
     def test_add_data_pool_root(self):
         """
         That a new data pool can be added and used for the root directory.
@@ -38,6 +45,19 @@ class TestAdminCommands(CephFSTestCase):
         p = self.fs.add_data_pool("foo")
         self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))
 
+    def test_add_data_pool_application_metadata(self):
+        """
+        That the application metadata set on a newly added data pool is as expected.
+        """
+        pool_name = "foo"
+        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+        mon_cmd('osd', 'pool', 'create', pool_name, str(self.fs.pgs_per_fs_pool))
+        # Check whether https://tracker.ceph.com/issues/43061 is fixed
+        mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
+        self.fs.add_data_pool(pool_name, create=False)
+        self._check_pool_application_metadata_key_value(
+            pool_name, 'cephfs', 'data', self.fs.name)
+
     def test_add_data_pool_subdir(self):
         """
         That a new data pool can be added and used for a sub-directory.
diff --git a/src/mon/FSCommands.cc b/src/mon/FSCommands.cc
index 34a786cf39ff5b8a693964131cbfe38d7272daa0..88d5329ac9a46c48c532f98204587decdc0c389a 100644
--- a/src/mon/FSCommands.cc
+++ b/src/mon/FSCommands.cc
@@ -715,7 +715,7 @@ class AddDataPoolHandler : public FileSystemCommandHandler
     }
     mon->osdmon()->do_application_enable(poolid,
                                         pg_pool_t::APPLICATION_NAME_CEPHFS,
-                                        "data", fs_name);
+                                        "data", fs_name, true);
     mon->osdmon()->propose_pending();
 
     fsmap.modify_filesystem(
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index b2ac411f2dc1ebbd792b8369792d04bd0c719711..14a595a297e16f66fbdf125abcaa2db4981c0032 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -4769,7 +4769,8 @@ void OSDMonitor::check_pg_creates_sub(Subscription *sub)
 void OSDMonitor::do_application_enable(int64_t pool_id,
                                        const std::string &app_name,
                                       const std::string &app_key,
-                                      const std::string &app_value)
+                                      const std::string &app_value,
+                                      bool force)
 {
   ceph_assert(paxos->is_plugged() && is_writeable());
 
@@ -4789,7 +4790,11 @@ void OSDMonitor::do_application_enable(int64_t pool_id,
   if (app_key.empty()) {
     p.application_metadata.insert({app_name, {}});
   } else {
-    p.application_metadata.insert({app_name, {{app_key, app_value}}});
+    if (force) {
+      p.application_metadata[app_name][app_key] = app_value;
+    } else {
+      p.application_metadata.insert({app_name, {{app_key, app_value}}});
+    }
   }
   p.last_change = pending_inc.epoch;
   pending_inc.new_pools[pool_id] = p;
diff --git a/src/mon/OSDMonitor.h b/src/mon/OSDMonitor.h
index 37082c8a25581d387b1f9f152389ed2908b67363..06ad9219946d3b18b3602b77763b1a0f832a6515 100644
--- a/src/mon/OSDMonitor.h
+++ b/src/mon/OSDMonitor.h
@@ -756,7 +756,8 @@ public:
 
   void do_application_enable(int64_t pool_id, const std::string &app_name,
                             const std::string &app_key="",
-                            const std::string &app_value="");
+                            const std::string &app_value="",
+                            bool force=false);
   void do_set_pool_opt(int64_t pool_id, pool_opts_t::key_t opt,
                       pool_opts_t::value_t);