mon/MDSMonitor.cc: refuse fs new on pools with objects
author     Michal Jarzabek <stiopa@gmail.com>
           Sat, 7 Jan 2017 15:41:46 +0000 (15:41 +0000)
committer  Michal Jarzabek <stiopa@gmail.com>
           Mon, 23 Jan 2017 19:48:53 +0000 (19:48 +0000)
Fixes: http://tracker.ceph.com/issues/11124
Signed-off-by: Michal Jarzabek <stiopa@gmail.com>
qa/tasks/cephfs/test_misc.py
qa/workunits/cephtool/test.sh
src/mon/MDSMonitor.cc
src/mon/MonCommands.h

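This change makes the monitor reject `ceph fs new` when the chosen metadata pool already contains objects, returning EINVAL unless the new optional --force flag is supplied. A minimal sketch of the resulting CLI behaviour, assuming hypothetical pool names fs_meta and fs_data (the error text is taken from the patch below):

  # fs_meta already holds objects, so creation is refused with EINVAL (exit code 22):
  #   Error EINVAL: pool 'fs_meta' already contains some objects. Use an empty pool instead.
  ceph fs new cephfs fs_meta fs_data

  # The override must be given explicitly to reuse a non-empty metadata pool:
  ceph fs new cephfs fs_meta fs_data --force
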
diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
index bd8ba64478f48ab3392de23809374b4eaf18c406..3f98c3719b88c6898ec26461942e3a96da319a98 100644
@@ -2,6 +2,8 @@
 from unittest import SkipTest
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
+from teuthology.orchestra.run import CommandFailedError
+import errno
 
 class TestMisc(CephFSTestCase):
     CLIENTS_REQUIRED = 2
@@ -31,3 +33,59 @@ class TestMisc(CephFSTestCase):
         self.mount_b.run_shell(["cat", "testfile"])
 
         self.mount_a.kill_background(p)
+
+    def test_fs_new(self):
+        data_pool_name = self.fs.get_data_pool_name()
+
+        self.fs.mds_stop()
+        self.fs.mds_fail()
+
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
+                                            '--yes-i-really-mean-it')
+
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
+                                            self.fs.metadata_pool_name,
+                                            self.fs.metadata_pool_name,
+                                            '--yes-i-really-really-mean-it')
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
+                                            self.fs.metadata_pool_name,
+                                            self.fs.get_pgs_per_fs_pool().__str__())
+
+        dummyfile = '/etc/fstab'
+
+        self.fs.put_metadata_object_raw("key", dummyfile)
+
+        timeout = 10
+
+        get_pool_df = self.fs.get_pool_df
+        self.wait_until_true(
+                lambda: get_pool_df(self.fs.metadata_pool_name)['objects'] > 0,
+                timeout=timeout)
+
+        try:
+            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
+                                                self.fs.metadata_pool_name,
+                                                data_pool_name)
+        except CommandFailedError as e:
+            self.assertEqual(e.exitstatus, errno.EINVAL)
+        else:
+            raise AssertionError("Expected EINVAL")
+
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
+                                            self.fs.metadata_pool_name,
+                                            data_pool_name, "--force")
+
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
+                                            '--yes-i-really-mean-it')
+
+
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
+                                            self.fs.metadata_pool_name,
+                                            self.fs.metadata_pool_name,
+                                            '--yes-i-really-really-mean-it')
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
+                                            self.fs.metadata_pool_name,
+                                            self.fs.get_pgs_per_fs_pool().__str__())
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
+                                            self.fs.metadata_pool_name,
+                                            data_pool_name)
diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 3bd4a9fb4e7ff1ce8554ef0fd59661c285496064..e830f6d44495becad686b9c188346a9059a94bb8 100755
@@ -909,7 +909,7 @@ function test_mon_mds()
   ceph fs rm $FS_NAME --yes-i-really-mean-it
 
   set +e
-  ceph fs new $FS_NAME fs_metadata mds-ec-pool 2>$TMPFILE
+  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
   check_response 'erasure-code' $? 22
   ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
   check_response 'erasure-code' $? 22
@@ -926,13 +926,13 @@ function test_mon_mds()
   # Use of a readonly tier should be forbidden
   ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
   set +e
-  ceph fs new $FS_NAME fs_metadata mds-ec-pool 2>$TMPFILE
+  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
   check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
   set -e
 
   # Use of a writeback tier should enable FS creation
   ceph osd tier cache-mode mds-tier writeback
-  ceph fs new $FS_NAME fs_metadata mds-ec-pool
+  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
 
   # While a FS exists using the tiered pools, I should not be allowed
   # to remove the tier
@@ -948,7 +948,7 @@ function test_mon_mds()
 
   # ... but we should be forbidden from using the cache pool in the FS directly.
   set +e
-  ceph fs new $FS_NAME fs_metadata mds-tier 2>$TMPFILE
+  ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
   check_response 'in use as a cache tier' $? 22
   ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
   check_response 'in use as a cache tier' $? 22
@@ -961,7 +961,7 @@ function test_mon_mds()
   ceph osd tier remove mds-ec-pool mds-tier
 
   # Create a FS using the 'cache' pool now that it's no longer a tier
-  ceph fs new $FS_NAME fs_metadata mds-tier
+  ceph fs new $FS_NAME fs_metadata mds-tier --force
 
   # We should be forbidden from using this pool as a tier now that
   # it's in use for CephFS
@@ -975,7 +975,7 @@ function test_mon_mds()
   ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
 
   # Create a FS and check that we can subsequently add a cache tier to it
-  ceph fs new $FS_NAME fs_metadata fs_data
+  ceph fs new $FS_NAME fs_metadata fs_data --force
 
   # Adding overlay to FS pool should be permitted, RADOS clients handle this.
   ceph osd tier add fs_metadata mds-tier
diff --git a/src/mon/MDSMonitor.cc b/src/mon/MDSMonitor.cc
index c19d7a70d8b928f7063e3e89beecd8277b6765c1..c26e1ce8c166a4f504b178dde672f164bd95c8f0 100644
@@ -19,6 +19,7 @@
 #include "Monitor.h"
 #include "MonitorDBStore.h"
 #include "OSDMonitor.h"
+#include "PGMonitor.h"
 
 #include "common/strtol.h"
 #include "common/perf_counters.h"
@@ -1581,6 +1582,15 @@ int MDSMonitor::management_command(
       return -ENOENT;
     }
 
+    string force;
+    cmd_getval(g_ceph_context,cmdmap, "force", force);
+    int64_t metadata_num_objects = mon->pgmon()->pg_map.pg_pool_sum[metadata].stats.sum.num_objects;
+    if (force != "--force" && metadata_num_objects > 0) {
+      ss << "pool '" << metadata_name
+        << "' already contains some objects. Use an empty pool instead.";
+      return -EINVAL;
+    }
+
     string data_name;
     cmd_getval(g_ceph_context, cmdmap, "data", data_name);
     int64_t data = mon->osdmon()->osdmap.lookup_pg_pool_name(data_name);
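The emptiness check above reads the metadata pool's object count from the PGMonitor's aggregated per-pool statistics (pg_map.pg_pool_sum), which is why PGMonitor.h is now included. As a rough way to confirm from the CLI that a candidate metadata pool is empty before running fs new (pool name illustrative):

  # Per-pool object counts; an empty pool should report 0 objects:
  rados df

  # Or list the contents of one pool directly:
  rados -p fs_meta ls | wc -l
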
diff --git a/src/mon/MonCommands.h b/src/mon/MonCommands.h
index c6f731bab494c57a85d301e1c0c54c600d4c6782..a2962c45e6f3843310552ba2db363551ec6231b6 100644
@@ -376,7 +376,8 @@ COMMAND_WITH_FLAG("mds newfs " \
 COMMAND("fs new " \
        "name=fs_name,type=CephString " \
        "name=metadata,type=CephString " \
-       "name=data,type=CephString ", \
+       "name=data,type=CephString " \
+       "name=force,type=CephChoices,strings=--force,req=false", \
        "make new filesystem using named pools <metadata> and <data>", \
        "fs", "rw", "cli,rest")
 COMMAND("fs rm " \