from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
+from teuthology.orchestra.run import CommandFailedError
+import errno
class TestMisc(CephFSTestCase):
CLIENTS_REQUIRED = 2
self.mount_b.run_shell(["cat", "testfile"])
self.mount_a.kill_background(p)
+
+ def test_fs_new(self):
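+ """
+ That 'fs new' refuses to use a metadata pool that already contains
+ objects unless --force is passed.
+ """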
+ data_pool_name = self.fs.get_data_pool_name()
+
+ self.fs.mds_stop()
+ self.fs.mds_fail()
+
+ self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
+ '--yes-i-really-mean-it')
+
+ self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
+ self.fs.metadata_pool_name,
+ self.fs.metadata_pool_name,
+ '--yes-i-really-really-mean-it')
+ self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
+ self.fs.metadata_pool_name,
+ str(self.fs.get_pgs_per_fs_pool()))
+
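+ # Make the new metadata pool non-empty by writing an arbitrary object into it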
+ dummyfile = '/etc/fstab'
+
+ self.fs.put_metadata_object_raw("key", dummyfile)
+
+ timeout = 10
+
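+ # Wait for pool stats to report the object before attempting 'fs new'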
+ get_pool_df = self.fs.get_pool_df
+ self.wait_until_true(
+ lambda: get_pool_df(self.fs.metadata_pool_name)['objects'] > 0,
+ timeout=timeout)
+
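+ # 'fs new' must now fail with EINVAL because the metadata pool is not empty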
+ try:
+ self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name)
+ except CommandFailedError as e:
+ self.assertEqual(e.exitstatus, errno.EINVAL)
+ else:
+ raise AssertionError("Expected EINVAL")
+
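+ # ... but should succeed when --force is supplied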
+ self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name, "--force")
+
+ self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
+ '--yes-i-really-mean-it')
+
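+ # Recreate an empty metadata pool; a plain 'fs new' should succeed again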
+ self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
+ self.fs.metadata_pool_name,
+ self.fs.metadata_pool_name,
+ '--yes-i-really-really-mean-it')
+ self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
+ self.fs.metadata_pool_name,
+ str(self.fs.get_pgs_per_fs_pool()))
+ self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name)
ceph fs rm $FS_NAME --yes-i-really-mean-it
set +e
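+# fs_metadata was used by a previous FS and may still contain objects, so
+# --force is needed to get past the non-empty-pool check and reach the
+# erasure-code check being tested here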
- ceph fs new $FS_NAME fs_metadata mds-ec-pool 2>$TMPFILE
+ ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
check_response 'erasure-code' $? 22
ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
check_response 'erasure-code' $? 22
# Use of a readonly tier should be forbidden
ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
set +e
- ceph fs new $FS_NAME fs_metadata mds-ec-pool 2>$TMPFILE
+ ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
set -e
# Use of a writeback tier should enable FS creation
ceph osd tier cache-mode mds-tier writeback
- ceph fs new $FS_NAME fs_metadata mds-ec-pool
+ ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
# While a FS exists using the tiered pools, I should not be allowed
# to remove the tier
# ... but we should be forbidden from using the cache pool in the FS directly.
set +e
- ceph fs new $FS_NAME fs_metadata mds-tier 2>$TMPFILE
+ ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
ceph osd tier remove mds-ec-pool mds-tier
# Create a FS using the 'cache' pool now that it's no longer a tier
- ceph fs new $FS_NAME fs_metadata mds-tier
+ ceph fs new $FS_NAME fs_metadata mds-tier --force
# We should be forbidden from using this pool as a tier now that
# it's in use for CephFS
ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
# Create a FS and check that we can subsequently add a cache tier to it
- ceph fs new $FS_NAME fs_metadata fs_data
+ ceph fs new $FS_NAME fs_metadata fs_data --force
# Adding overlay to FS pool should be permitted, RADOS clients handle this.
ceph osd tier add fs_metadata mds-tier
#include "Monitor.h"
#include "MonitorDBStore.h"
#include "OSDMonitor.h"
+#include "PGMonitor.h"
#include "common/strtol.h"
#include "common/perf_counters.h"
return -ENOENT;
}
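+ // Refuse to reuse a metadata pool that already contains objects unless
+ // the caller explicitly passed --force.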
+ string force;
+ cmd_getval(g_ceph_context, cmdmap, "force", force);
+ int64_t metadata_num_objects = mon->pgmon()->pg_map.pg_pool_sum[metadata].stats.sum.num_objects;
+ if (force != "--force" && metadata_num_objects > 0) {
+ ss << "pool '" << metadata_name
+ << "' already contains some objects. Use an empty pool instead.";
+ return -EINVAL;
+ }
+
string data_name;
cmd_getval(g_ceph_context, cmdmap, "data", data_name);
int64_t data = mon->osdmon()->osdmap.lookup_pg_pool_name(data_name);
COMMAND("fs new " \
"name=fs_name,type=CephString " \
"name=metadata,type=CephString " \
- "name=data,type=CephString ", \
+ "name=data,type=CephString " \
+ "name=force,type=CephChoices,strings=--force,req=false", \
"make new filesystem using named pools <metadata> and <data>", \
"fs", "rw", "cli,rest")
COMMAND("fs rm " \