set -e
ceph fs new cephfs fs_metadata mds-ec-pool
+
+ # While a FS exists using the tiered pools, we should not be allowed
+ # to remove the tier
+ set +e
+ ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
+ check_response 'in use by CephFS' $? 16
+ ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
+ check_response 'in use by CephFS' $? 16
+ set -e
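+ # (in the checks above, 16 is the expected EBUSY return code;
+ # check_response and $TMPFILE come from this script's test harness)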
+
ceph fs rm cephfs --yes-i-really-mean-it
# Clean up tier + EC pools
ceph osd tier remove-overlay mds-ec-pool
ceph osd tier remove mds-ec-pool mds-tier
+
+ # Create a FS using the 'cache' pool now that it's no longer a tier
+ ceph fs new cephfs fs_metadata mds-tier
+
+ # We should be forbidden from using this pool as a tier now that
+ # it's in use by CephFS
+ set +e
+ ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
+ check_response 'in use by CephFS' $? 16
+ set -e
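+ # (the refusal is on the prospective tier: mds-tier is now a CephFS data
+ # pool, so it must not be attached as a tier of another pool)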
+
+ ceph fs rm cephfs --yes-i-really-mean-it
+
ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
assert(p);
const pg_pool_t *tp = osdmap.get_pg_pool(tierpool_id);
assert(tp);
- if (p->tiers.count(tierpool_id)) {
- assert(tp->tier_of == pool_id);
- err = 0;
- ss << "pool '" << tierpoolstr << "' is now (or already was) a tier of '" << poolstr << "'";
- goto reply;
- }
- if (tp->is_tier()) {
- ss << "tier pool '" << tierpoolstr << "' is already a tier of '"
- << osdmap.get_pool_name(tp->tier_of) << "'";
- err = -EINVAL;
+
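+ // refuse if the new tier is in use by CephFS; treat an already-established
+ // tier relationship as an idempotent no-op (see _check_become_tier below)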
+ if (!_check_become_tier(tierpool_id, tp, pool_id, p, &err, &ss)) {
goto reply;
}
+
// make sure new tier is empty
string force_nonempty;
cmd_getval(g_ceph_context, cmdmap, "force_nonempty", force_nonempty);
assert(p);
const pg_pool_t *tp = osdmap.get_pg_pool(tierpool_id);
assert(tp);
+
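+ // refuse to detach the tier while the base pool is in use by CephFS
+ // (see _check_remove_tier below)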
+ if (!_check_remove_tier(pool_id, p, &err, &ss)) {
+ goto reply;
+ }
+
if (p->tiers.count(tierpool_id) == 0) {
ss << "pool '" << tierpoolstr << "' is now (or already was) not a tier of '" << poolstr << "'";
err = 0;
err = -EINVAL;
goto reply;
}
+
+ if (!_check_remove_tier(pool_id, p, &err, &ss)) {
+ goto reply;
+ }
+
// go
pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
np->read_tier = overlaypool_id;
ss << "there is now (or already was) no overlay for '" << poolstr << "'";
goto reply;
}
+
+ if (!_check_remove_tier(pool_id, p, &err, &ss)) {
+ goto reply;
+ }
+
// go
pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
np->clear_read_tier();
assert(p);
const pg_pool_t *tp = osdmap.get_pg_pool(tierpool_id);
assert(tp);
- if (p->tiers.count(tierpool_id)) {
- assert(tp->tier_of == pool_id);
- err = 0;
- ss << "pool '" << tierpoolstr << "' is now (or already was) a tier of '" << poolstr << "'";
- goto reply;
- }
- if (tp->is_tier()) {
- ss << "tier pool '" << tierpoolstr << "' is already a tier of '"
- << osdmap.get_pool_name(tp->tier_of) << "'";
- err = -EINVAL;
+
+ if (!_check_become_tier(tierpool_id, tp, pool_id, p, &err, &ss)) {
goto reply;
}
+
int64_t size = 0;
if (!cmd_getval(g_ceph_context, cmdmap, "size", size)) {
ss << "unable to parse 'size' value '"
// If the pool is in use by CephFS, refuse to delete it
MDSMap const &pending_mdsmap = mon->mdsmon()->pending_mdsmap;
- if (pending_mdsmap.get_enabled() && (pending_mdsmap.is_data_pool(pool) ||
- pending_mdsmap.get_metadata_pool() == pool)) {
+ if (pending_mdsmap.pool_in_use(pool)) {
*ss << "pool '" << poolstr << "' is in use by CephFS";
return -EBUSY;
}
return 0;
}
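+
+// (MDSMap::pool_in_use() is not shown in this diff; judging from the
+// condition it replaces above, it presumably looks something like:
+//
+//   bool MDSMap::pool_in_use(int64_t poolid) const {
+//     return get_enabled() &&
+//       (is_data_pool(poolid) || get_metadata_pool() == poolid);
+//   }
+//
+// i.e. a single helper covering both the data pools and the metadata pool.)
+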
+/**
+ * Check if it is safe to add a tier to a base pool
+ *
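+ * @param tier_pool_id  id of the pool that would become the tier
+ * @param tier_pool     the pool that would become the tier
+ * @param base_pool_id  id of the pool the tier would be attached to
+ * @param base_pool     the pool the tier would be attached to
+ * @param err           set to the error code to report back to the client
+ * @param ss            set to the message to report back to the client
+ *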
+ * @return
+ * True if the operation should proceed, false if we should abort here
+ * (aborting is not necessarily an error: the request may be an idempotent no-op)
+ */
+bool OSDMonitor::_check_become_tier(
+ const int64_t tier_pool_id, const pg_pool_t *tier_pool,
+ const int64_t base_pool_id, const pg_pool_t *base_pool,
+ int *err,
+ ostream *ss) const
+{
+ const std::string &tier_pool_name = osdmap.get_pool_name(tier_pool_id);
+ const std::string &base_pool_name = osdmap.get_pool_name(base_pool_id);
+
+ const MDSMap &pending_mdsmap = mon->mdsmon()->pending_mdsmap;
+ if (pending_mdsmap.pool_in_use(tier_pool_id)) {
+ *ss << "pool '" << tier_pool_name << "' is in use by CephFS";
+ *err = -EBUSY;
+ return false;
+ }
+
+ if (base_pool->tiers.count(tier_pool_id)) {
+ assert(tier_pool->tier_of == base_pool_id);
+ *err = 0;
+ *ss << "pool '" << tier_pool_name << "' is now (or already was) a tier of '"
+ << base_pool_name << "'";
+ return false;
+ }
+
+ if (tier_pool->is_tier()) {
+ *ss << "tier pool '" << tier_pool_name << "' is already a tier of '"
+ << osdmap.get_pool_name(tier_pool->tier_of) << "'";
+ *err = -EINVAL;
+ return false;
+ }
+
+ *err = 0;
+ return true;
+}
+
+
+/**
+ * Check if it is safe to remove a tier from a base pool
+ *
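+ * @param base_pool_id  id of the base pool whose tier would be removed
+ * @param base_pool     the base pool itself (not consulted by the current check)
+ * @param err           set to the error code to report back to the client
+ * @param ss            set to the message to report back to the client
+ *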
+ * @return
+ * True if the operation should proceed, false if we should abort here
+ * (aborting is not necessarily an error: the request may be an idempotent no-op)
+ */
+bool OSDMonitor::_check_remove_tier(
+ const int64_t base_pool_id, const pg_pool_t *base_pool,
+ int *err, ostream *ss) const
+{
+ const std::string &base_pool_name = osdmap.get_pool_name(base_pool_id);
+
+ // If the pool is in use by CephFS, refuse to remove its tier
+ const MDSMap &pending_mdsmap = mon->mdsmon()->pending_mdsmap;
+ if (pending_mdsmap.pool_in_use(base_pool_id)) {
+ *ss << "pool '" << base_pool_name << "' is in use by CephFS via its tier";
+ *err = -EBUSY;
+ return false;
+ }
+
+ *err = 0;
+ return true;
+}
+
int OSDMonitor::_prepare_remove_pool(int64_t pool, ostream *ss)
{
dout(10) << "_prepare_remove_pool " << pool << dendl;