From 8bcb281a7d91e6daa842aed4ffb416d764dc6bf1 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Sat, 13 Mar 2021 11:34:43 -0500
Subject: [PATCH] osd: propagate base pool application_metadata to tiers

If there is application metadata on the base pool, it should be
mirrored to any other tiers in the set.  This aligns with the fact
that the 'ceph osd pool application ...' commands refuse to operate
on a non-base pool.

This fixes problems with accessing tiers (e.g., cache tiers) when the
cephx cap is written in terms of application metadata.

Fixes: https://tracker.ceph.com/issues/49788
Signed-off-by: Sage Weil
---
 qa/workunits/cephtool/test.sh | 5 +++++
 src/mon/OSDMonitor.cc         | 2 +-
 src/osd/OSDMap.cc             | 6 ++++--
 src/osd/OSDMap.h              | 4 ++--
 4 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 6657eea30381b..3c79ea634ca09 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -345,6 +345,11 @@ function test_tiering_1()
   ceph osd tier add slow cache
   ceph osd tier add slow cache2
   expect_false ceph osd tier add slow2 cache
+  # application metadata should propagate to the tiers
+  ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow") | .application_metadata["rados"]' | grep '{}'
+  ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow2") | .application_metadata["rados"]' | grep '{}'
+  ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache") | .application_metadata["rados"]' | grep '{}'
+  ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache2") | .application_metadata["rados"]' | grep '{}'
   # forward and proxy are removed/deprecated
   expect_false ceph osd tier cache-mode cache forward
   expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 1a1bfb37f2715..d1c415ae24d5b 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -1564,7 +1564,7 @@ void OSDMonitor::encode_pending(MonitorDBStore::TransactionRef t)
   // finalize up pending_inc
   pending_inc.modified = ceph_clock_now();
 
-  int r = pending_inc.propagate_snaps_to_tiers(cct, osdmap);
+  int r = pending_inc.propagate_base_properties_to_tiers(cct, osdmap);
   ceph_assert(r == 0);
 
   if (mapping_job) {
diff --git a/src/osd/OSDMap.cc b/src/osd/OSDMap.cc
index a5af942c67043..e720a5ebd1b17 100644
--- a/src/osd/OSDMap.cc
+++ b/src/osd/OSDMap.cc
@@ -238,8 +238,8 @@ int OSDMap::Incremental::identify_osd(uuid_d u) const
   return -1;
 }
 
-int OSDMap::Incremental::propagate_snaps_to_tiers(CephContext *cct,
-                                                  const OSDMap& osdmap)
+int OSDMap::Incremental::propagate_base_properties_to_tiers(CephContext *cct,
+                                                            const OSDMap& osdmap)
 {
   ceph_assert(epoch == osdmap.get_epoch() + 1);
 
@@ -279,6 +279,8 @@ int OSDMap::Incremental::propagate_snaps_to_tiers(CephContext *cct,
       if (new_rem_it != new_removed_snaps.end()) {
         new_removed_snaps[tier_pool] = new_rem_it->second;
       }
+
+      tier->application_metadata = base.application_metadata;
     }
   }
 }
diff --git a/src/osd/OSDMap.h b/src/osd/OSDMap.h
index e0f496138c445..711badddec066 100644
--- a/src/osd/OSDMap.h
+++ b/src/osd/OSDMap.h
@@ -465,8 +465,8 @@ public:
     return new_erasure_code_profiles;
   }
 
-  /// propagate update pools' snap metadata to any of their tiers
-  int propagate_snaps_to_tiers(CephContext *cct, const OSDMap &base);
+  /// propagate update pools' (snap and other) metadata to any of their tiers
+  int propagate_base_properties_to_tiers(CephContext *cct, const OSDMap &base);
 
   /// filter out osds with any pending state changing
   size_t get_pending_state_osds(std::vector<int> *osds) {
-- 
2.39.5
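
A quick way to exercise the new behavior by hand, as a sketch only: it
assumes a test cluster running with this patch applied, and the pool
names 'base' and 'hot' are illustrative, not taken from the patch.

  ceph osd pool create base 8
  ceph osd pool create hot 8
  ceph osd pool application enable base rados
  ceph osd tier add base hot
  # once the next osdmap epoch is committed, the tier should carry the
  # same application metadata as its base pool:
  ceph osd pool ls detail -f json | \
    jq '.[] | select(.pool_name == "hot") | .application_metadata'
  # expected: {"rados": {}}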