mon/FSCommands: Fix 'add_data_pool' command
author     Ramana Raja <rraja@redhat.com>
           Sat, 11 Apr 2020 07:15:39 +0000 (12:45 +0530)
committer  Ramana Raja <rraja@redhat.com>
           Mon, 27 Apr 2020 16:05:55 +0000 (21:35 +0530)
After a RADOS pool is made a filesystem's data pool with the
'add_data_pool' command, the 'data' key of the pool's 'cephfs'
application metadata should hold the filesystem's name. This did not
happen when the pool's 'cephfs' application metadata was enabled
before the pool was made a data pool. Fix this in the handling of the
'add_data_pool' command by setting the 'data' key of the pool's
'cephfs' application metadata to the filesystem's name even when the
'cephfs' application metadata is already enabled or set.

Fixes: https://tracker.ceph.com/issues/43061
Signed-off-by: Ramana Raja <rraja@redhat.com>
(cherry picked from commit 3c49092322720b7f1d612f45e73fb26820ff7885)
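
To make the ordering issue concrete, here is a minimal standalone sketch (not Ceph code: it only models pg_pool_t's application_metadata as a nested std::map, and 'cephfs_a' is just a placeholder filesystem name). std::map::insert is a no-op when the key already exists, so enabling the 'cephfs' application first leaves the later 'data' insert without effect unless the entry is assigned outright:

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      // Stand-in for pg_pool_t::application_metadata: app name -> key/value pairs.
      std::map<std::string, std::map<std::string, std::string>> app_metadata;

      // 'osd pool application enable <pool> cephfs' runs first and creates the
      // "cephfs" entry with an empty key/value map.
      app_metadata.insert({"cephfs", {}});

      // The old 'add_data_pool' path then tried to record the filesystem name
      // with insert(); std::map::insert does nothing when the key already
      // exists, so the "data" key was never written (tracker issue 43061).
      app_metadata.insert({"cephfs", {{"data", "cephfs_a"}}});
      std::cout << "without force, 'data' entries: "
                << app_metadata.at("cephfs").count("data") << "\n";  // prints 0

      // The forced path assigns through operator[], creating or overwriting the
      // nested entry regardless of prior state.
      app_metadata["cephfs"]["data"] = "cephfs_a";
      std::cout << "with force, 'data' = "
                << app_metadata.at("cephfs").at("data") << "\n";     // "cephfs_a"
      return 0;
    }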

qa/tasks/cephfs/test_admin.py
src/mon/FSCommands.cc
src/mon/OSDMonitor.cc
src/mon/OSDMonitor.h

diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index 374f75a72cf0c3a9c57a05054ed134ea848056d9..b8d82afb7410dcd1e6118275b15e6ba298926a5e 100644
@@ -1,3 +1,5 @@
+import json
+
 from teuthology.orchestra.run import CommandFailedError
 
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
@@ -30,6 +32,11 @@ class TestAdminCommands(CephFSTestCase):
         if overwrites:
             self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
 
+    def _check_pool_application_metadata_key_value(self, pool, app, key, value):
+        output = self.fs.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'application', 'get', pool, app, key)
+        self.assertEqual(str(output.strip()), value)
+
     def test_add_data_pool_root(self):
         """
         That a new data pool can be added and used for the root directory.
@@ -38,6 +45,19 @@ class TestAdminCommands(CephFSTestCase):
         p = self.fs.add_data_pool("foo")
         self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))
 
+    def test_add_data_pool_application_metadata(self):
+        """
+        That the application metadata set on a newly added data pool is as expected.
+        """
+        pool_name = "foo"
+        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+        mon_cmd('osd', 'pool', 'create', pool_name, str(self.fs.pgs_per_fs_pool))
+        # Check whether https://tracker.ceph.com/issues/43061 is fixed
+        mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
+        self.fs.add_data_pool(pool_name, create=False)
+        self._check_pool_application_metadata_key_value(
+            pool_name, 'cephfs', 'data', self.fs.name)
+
     def test_add_data_pool_subdir(self):
         """
         That a new data pool can be added and used for a sub-directory.
diff --git a/src/mon/FSCommands.cc b/src/mon/FSCommands.cc
index 6581227b3b6720c1dda07017b5dd23c18c81a6c4..785d30f63316e6de833b73dcfeddd912b0e8a782 100644
@@ -690,7 +690,7 @@ class AddDataPoolHandler : public FileSystemCommandHandler
     }
     mon->osdmon()->do_application_enable(poolid,
                                         pg_pool_t::APPLICATION_NAME_CEPHFS,
-                                        "data", fs_name);
+                                        "data", fs_name, true);
     mon->osdmon()->propose_pending();
 
     fsmap.modify_filesystem(
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index b4cc322b9f0b5a350b544b3e33d161ac379a19fe..84e014f12421b91281a39a2ab1740eb2fd8f1a75 100644
@@ -4722,7 +4722,8 @@ void OSDMonitor::check_pg_creates_sub(Subscription *sub)
 void OSDMonitor::do_application_enable(int64_t pool_id,
                                        const std::string &app_name,
                                       const std::string &app_key,
-                                      const std::string &app_value)
+                                      const std::string &app_value,
+                                      bool force)
 {
   ceph_assert(paxos->is_plugged() && is_writeable());
 
@@ -4742,7 +4743,11 @@ void OSDMonitor::do_application_enable(int64_t pool_id,
   if (app_key.empty()) {
     p.application_metadata.insert({app_name, {}});
   } else {
-    p.application_metadata.insert({app_name, {{app_key, app_value}}});
+    if (force) {
+      p.application_metadata[app_name][app_key] = app_value;
+    } else {
+      p.application_metadata.insert({app_name, {{app_key, app_value}}});
+    }
   }
   p.last_change = pending_inc.epoch;
   pending_inc.new_pools[pool_id] = p;
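
A side note on the forced branch above, shown as a small standalone sketch (again not Ceph code, with 'myfs' as a placeholder value): chained operator[] default-constructs a missing outer entry, so the assignment works whether or not the application was enabled beforehand, while the non-force callers keep the old insert-only semantics:

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      using app_map = std::map<std::string, std::map<std::string, std::string>>;

      // Application not enabled yet: chained operator[] default-constructs the
      // outer "cephfs" entry and then writes the "data" key.
      app_map fresh;
      fresh["cephfs"]["data"] = "myfs";
      assert(fresh.at("cephfs").at("data") == "myfs");

      // Application already enabled (empty key/value map): the same assignment
      // overwrites in place instead of being silently dropped like insert().
      app_map enabled = {{"cephfs", {}}};
      enabled["cephfs"]["data"] = "myfs";
      assert(enabled.at("cephfs").at("data") == "myfs");

      return 0;
    }
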
diff --git a/src/mon/OSDMonitor.h b/src/mon/OSDMonitor.h
index 2b3a47bfc1b1866baaacea822fe4c32fb7e07631..5a83daa305b315719375da794ee7adcbfcf784d4 100644
@@ -758,7 +758,8 @@ public:
 
   void do_application_enable(int64_t pool_id, const std::string &app_name,
                             const std::string &app_key="",
-                            const std::string &app_value="");
+                            const std::string &app_value="",
+                            bool force=false);
   void do_set_pool_opt(int64_t pool_id, pool_opts_t::key_t opt,
                       pool_opts_t::value_t);
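
Because 'force' defaults to false, call sites that predate this change keep their previous behaviour without being edited; only the add_data_pool handler opts in. A minimal sketch of that default-argument pattern, using a simplified stand-in rather than the real OSDMonitor signature:

    #include <iostream>
    #include <string>

    // Simplified stand-in for do_application_enable: the trailing parameter
    // defaults to false, so older callers compile and behave unchanged.
    static void do_enable(const std::string &app_value, bool force = false) {
      std::cout << "value=" << app_value
                << " force=" << std::boolalpha << force << "\n";
    }

    int main() {
      do_enable("myfs");        // pre-existing call sites: force stays false
      do_enable("myfs", true);  // the updated add_data_pool call site opts in
      return 0;
    }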