mon/FSCommands: Fix 'add_data_pool' command
author Ramana Raja <rraja@redhat.com>
Sat, 11 Apr 2020 07:15:39 +0000 (12:45 +0530)
committer Ramana Raja <rraja@redhat.com>
Mon, 27 Apr 2020 13:24:52 +0000 (18:54 +0530)
After a RADOS pool is made a filesystem's data pool with the
'add_data_pool' command, the 'data' key of the pool's 'cephfs'
application metadata should hold the filesystem's name. This did not
happen when the 'cephfs' application had already been enabled on the
pool before it was added as a data pool. Fix this in the
'add_data_pool' handler by setting the 'data' key of the 'cephfs'
application metadata to the filesystem's name even when that
application metadata is already enabled or set.

Fixes: https://tracker.ceph.com/issues/43061
Signed-off-by: Ramana Raja <rraja@redhat.com>
(cherry picked from commit 3c49092322720b7f1d612f45e73fb26820ff7885)
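
The root cause is in how OSDMonitor::do_application_enable() stored the
key/value pair: pg_pool_t's application metadata is (roughly) a nested
std::map keyed by application name, and std::map::insert() does nothing
when the key is already present. Below is a minimal standalone sketch of
that behaviour, not Ceph code, with placeholder pool/filesystem names:

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      // app name -> { key -> value }, the rough shape of
      // pg_pool_t::application_metadata
      std::map<std::string, std::map<std::string, std::string>> app_md;

      // 'osd pool application enable foo cephfs' leaves an empty inner map.
      app_md.insert({"cephfs", {}});

      // Old add_data_pool path: insert() is a no-op because "cephfs" is
      // already present, so "data" is silently never set (tracker 43061).
      app_md.insert({"cephfs", {{"data", "myfs"}}});
      assert(app_md.at("cephfs").count("data") == 0);

      // force=true path: operator[] always writes into the nested map.
      app_md["cephfs"]["data"] = "myfs";
      assert(app_md.at("cephfs").at("data") == "myfs");
      return 0;
    }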

qa/tasks/cephfs/test_admin.py
src/mon/FSCommands.cc
src/mon/OSDMonitor.cc
src/mon/OSDMonitor.h

diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index ccc7847f7ac9d060d1450b5197203c4e5e0ee79b..ba1cd9ac731876ef5f527938f7177a0195435f68 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -1,3 +1,5 @@
+import json
+
 from teuthology.orchestra.run import CommandFailedError
 
 from unittest import case
@@ -31,6 +33,11 @@ class TestAdminCommands(CephFSTestCase):
         if overwrites:
             self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
 
+    def _check_pool_application_metadata_key_value(self, pool, app, key, value):
+        output = self.fs.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'application', 'get', pool, app, key)
+        self.assertEqual(str(output.strip()), value)
+
     def test_add_data_pool_root(self):
         """
         That a new data pool can be added and used for the root directory.
@@ -39,6 +46,19 @@ class TestAdminCommands(CephFSTestCase):
         p = self.fs.add_data_pool("foo")
         self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))
 
+    def test_add_data_pool_application_metadata(self):
+        """
+        That the application metadata set on a newly added data pool is as expected.
+        """
+        pool_name = "foo"
+        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+        mon_cmd('osd', 'pool', 'create', pool_name, str(self.fs.pgs_per_fs_pool))
+        # Check whether https://tracker.ceph.com/issues/43061 is fixed
+        mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs')
+        self.fs.add_data_pool(pool_name, create=False)
+        self._check_pool_application_metadata_key_value(
+            pool_name, 'cephfs', 'data', self.fs.name)
+
     def test_add_data_pool_subdir(self):
         """
         That a new data pool can be added and used for a sub-directory.
diff --git a/src/mon/FSCommands.cc b/src/mon/FSCommands.cc
index d45afb84072434145bd070a7ecf180bf51a24cb8..e2099bea81df8b83e80aa9b7ae43aab6ce3127b1 100644
--- a/src/mon/FSCommands.cc
+++ b/src/mon/FSCommands.cc
@@ -697,7 +697,7 @@ class AddDataPoolHandler : public FileSystemCommandHandler
     }
     mon->osdmon()->do_application_enable(poolid,
                                         pg_pool_t::APPLICATION_NAME_CEPHFS,
-                                        "data", fs_name);
+                                        "data", fs_name, true);
     mon->osdmon()->propose_pending();
 
     fsmap.modify_filesystem(
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 0cde22a38bd981d6ca730cdebe61d6f28cd4347c..004ba7d786e77cb36a9024185c65297b9648f4d4 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -4467,7 +4467,8 @@ void OSDMonitor::check_pg_creates_sub(Subscription *sub)
 void OSDMonitor::do_application_enable(int64_t pool_id,
                                        const std::string &app_name,
                                       const std::string &app_key,
-                                      const std::string &app_value)
+                                      const std::string &app_value,
+                                      bool force)
 {
   ceph_assert(paxos->is_plugged() && is_writeable());
 
@@ -4487,7 +4488,11 @@ void OSDMonitor::do_application_enable(int64_t pool_id,
   if (app_key.empty()) {
     p.application_metadata.insert({app_name, {}});
   } else {
-    p.application_metadata.insert({app_name, {{app_key, app_value}}});
+    if (force) {
+      p.application_metadata[app_name][app_key] = app_value;
+    } else {
+      p.application_metadata.insert({app_name, {{app_key, app_value}}});
+    }
   }
   p.last_change = pending_inc.epoch;
   pending_inc.new_pools[pool_id] = p;
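
With 'force' defaulting to false (see the OSDMonitor.h hunk below), existing
callers of do_application_enable() keep the old insert() semantics and never
overwrite a key that is already present; the AddDataPoolHandler change above
is the only call site in this patch that passes true, so the 'data' key is
written even when the 'cephfs' application metadata already existed.
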
diff --git a/src/mon/OSDMonitor.h b/src/mon/OSDMonitor.h
index 27f56129d944d5fac102c5401867c1ed03c391c5..f4ed142865f6902325678cb546408f151264167a 100644
--- a/src/mon/OSDMonitor.h
+++ b/src/mon/OSDMonitor.h
@@ -746,7 +746,8 @@ public:
 
   void do_application_enable(int64_t pool_id, const std::string &app_name,
                             const std::string &app_key="",
-                            const std::string &app_value="");
+                            const std::string &app_value="",
+                            bool force=false);
   void do_set_pool_opt(int64_t pool_id, pool_opts_t::key_t opt,
                       pool_opts_t::value_t);