From 846a342c0d473ff7d85ddf79c238ac714f23b945 Mon Sep 17 00:00:00 2001
From: Sridhar Seshasayee <sseshasa@redhat.com>
Date: Fri, 4 Aug 2023 02:02:06 +0530
Subject: [PATCH] osd/scheduler/mClockScheduler: Use same profile and client
 ids for all clients

Currently, without the distributed mClock (dmClock) feature, all
external clients classified under op_scheduler_class::client must
share the allocated reservation and limit as set within
default_external_client_info. This is realized only if all the
external clients are assigned the same id. This was not true because
the client_id field within client_profile_id_t was set to
item.get_owner(), which uniquely identified each client. As a result,
an increase in the client count resulted in a proportional increase
in the consumption of the allocated limit, which is not the desired
outcome in the current implementation.

The fix is to use the same client_id and profile_id within the
client_profile_id_t structure for all clients. These fields are set
to '0'. This ensures that all such clients get the same QoS controls,
which means the reservation and limit are shared between them. In
other words, regardless of the number of active clients, the
reservation and limit consumption remains constant as per the mClock
profile setting and is shared amongst them.

NOTE: When the dmClock feature (client vs. client QoS) is
implemented, the above fields will change, with client_id set to the
global id (client.####) and profile_id set to the id generated by the
client's QoS profile.

Fixes: https://tracker.ceph.com/issues/62293
Signed-off-by: Sridhar Seshasayee <sseshasa@redhat.com>
(cherry picked from commit 79a123c2b969e98c87bcadb38aee9562bc58472d)
---
 src/osd/scheduler/mClockScheduler.h | 33 +++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 9 deletions(-)

diff --git a/src/osd/scheduler/mClockScheduler.h b/src/osd/scheduler/mClockScheduler.h
index dd016645e998b..90eea1e7eff38 100644
--- a/src/osd/scheduler/mClockScheduler.h
+++ b/src/osd/scheduler/mClockScheduler.h
@@ -38,12 +38,30 @@ constexpr double default_max = std::numeric_limits<double>::is_iec559 ?
   std::numeric_limits<double>::infinity() :
   std::numeric_limits<double>::max();
 
-using client_id_t = uint64_t;
-using profile_id_t = uint64_t;
-
+/**
+ * client_profile_id_t
+ *
+ * client_id - global id (client.####) for client QoS
+ * profile_id - id generated by client's QoS profile
+ *
+ * Currently (Reef and below), both members are set to
+ * 0 which ensures that all external clients share the
+ * mClock profile allocated reservation and limit
+ * bandwidth.
+ *
+ * Note: Post Reef, both members will be set to non-zero
+ * values when the distributed feature of the mClock
+ * algorithm is utilized.
+ */
 struct client_profile_id_t {
-  client_id_t client_id;
-  profile_id_t profile_id;
+  uint64_t client_id = 0;
+  uint64_t profile_id = 0;
+
+  client_profile_id_t(uint64_t _client_id, uint64_t _profile_id) :
+    client_id(_client_id),
+    profile_id(_profile_id) {}
+
+  client_profile_id_t() = default;
 
   friend std::ostream& operator<<(std::ostream& out,
                                   const client_profile_id_t& client_profile) {
@@ -171,10 +189,7 @@ class mClockScheduler : public OpScheduler, md_config_obs_t {
   static scheduler_id_t get_scheduler_id(const OpSchedulerItem &item) {
     return scheduler_id_t{
       item.get_scheduler_class(),
-      client_profile_id_t{
-        item.get_owner(),
-        0
-      }
+      client_profile_id_t()
     };
   }
-- 
2.39.5
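
To illustrate why the change pools all external clients under one QoS
allocation, here is a minimal standalone sketch (C++20). It is not Ceph
source: the two structs only mirror the shapes in the patch, while the
op_scheduler_class enum, the owner values, and main() are hypothetical
stand-ins. It shows that keying by item.get_owner() yields one scheduler
id per client, whereas the default-constructed {0, 0} id yields a single
shared key.

// Illustrative sketch only, not Ceph source. Structs mirror the patch;
// the enum, owner values, and main() are hypothetical stand-ins.
#include <cstdint>
#include <initializer_list>
#include <iostream>
#include <map>

enum class op_scheduler_class : uint8_t { background, client };

struct client_profile_id_t {
  uint64_t client_id = 0;
  uint64_t profile_id = 0;
  auto operator<=>(const client_profile_id_t&) const = default;
};

struct scheduler_id_t {
  op_scheduler_class class_id;
  client_profile_id_t client_profile_id;
  auto operator<=>(const scheduler_id_t&) const = default;
};

// Pre-fix behavior: the owner id makes every client a distinct QoS key.
scheduler_id_t id_per_owner(uint64_t owner) {
  return {op_scheduler_class::client, {owner, 0}};
}

// Post-fix behavior: a default-constructed {0, 0} id is shared by all.
scheduler_id_t id_shared(uint64_t /*owner*/) {
  return {op_scheduler_class::client, {}};
}

int main() {
  std::map<scheduler_id_t, unsigned> before, after;
  for (uint64_t owner : {4151, 4157, 4163}) {  // three hypothetical clients
    ++before[id_per_owner(owner)];
    ++after[id_shared(owner)];
  }
  // Three distinct tags before the fix, one shared tag after it.
  std::cout << "QoS tags before fix: " << before.size() << '\n'   // prints 3
            << "QoS tags after fix:  " << after.size()  << '\n';  // prints 1
}

Since the scheduler tracks QoS state per scheduler_id_t, a single shared
key means the reservation and limit configured by the mClock profile are
enforced once across all external clients rather than scaling with the
client count, which is exactly the behavior the commit message describes.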