OPTION(osd_op_queue_mclock_client_op_res, OPT_DOUBLE)
OPTION(osd_op_queue_mclock_client_op_wgt, OPT_DOUBLE)
OPTION(osd_op_queue_mclock_client_op_lim, OPT_DOUBLE)
-OPTION(osd_op_queue_mclock_osd_subop_res, OPT_DOUBLE)
-OPTION(osd_op_queue_mclock_osd_subop_wgt, OPT_DOUBLE)
-OPTION(osd_op_queue_mclock_osd_subop_lim, OPT_DOUBLE)
+OPTION(osd_op_queue_mclock_osd_rep_op_res, OPT_DOUBLE)
+OPTION(osd_op_queue_mclock_osd_rep_op_wgt, OPT_DOUBLE)
+OPTION(osd_op_queue_mclock_osd_rep_op_lim, OPT_DOUBLE)
OPTION(osd_op_queue_mclock_snap_res, OPT_DOUBLE)
OPTION(osd_op_queue_mclock_snap_wgt, OPT_DOUBLE)
OPTION(osd_op_queue_mclock_snap_lim, OPT_DOUBLE)
.add_see_also("osd_op_queue")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_scrub_wgt")
.add_see_also("osd_op_queue_mclock_scrub_lim"),
- Option("osd_op_queue_mclock_osd_subop_res", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
+ Option("osd_op_queue_mclock_osd_rep_op_res", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
.set_default(1000.0)
- .set_description("mclock reservation of osd sub-operation requests")
- .set_long_description("mclock reservation of osd sub-operation requests when osd_op_queue is either 'mclock_opclass' or 'mclock_client'; higher values increase the reservation")
+ .set_description("mclock reservation of osd replication operation requests and replies")
+ .set_long_description("mclock reservation of osd replication operation requests and replies when osd_op_queue is either 'mclock_opclass' or 'mclock_client'; higher values increase the reservation")
.add_see_also("osd_op_queue")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_scrub_wgt")
.add_see_also("osd_op_queue_mclock_scrub_lim"),
- Option("osd_op_queue_mclock_osd_subop_wgt", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
+ Option("osd_op_queue_mclock_osd_rep_op_wgt", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
.set_default(500.0)
- .set_description("mclock weight of osd sub-operation requests")
- .set_long_description("mclock weight of osd sub-operation requests when osd_op_queue is either 'mclock_opclass' or 'mclock_client'; higher values increase the weight")
+ .set_description("mclock weight of osd replication operation requests and replies")
+ .set_long_description("mclock weight of osd replication operation requests and replies when osd_op_queue is either 'mclock_opclass' or 'mclock_client'; higher values increase the weight")
.add_see_also("osd_op_queue")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_scrub_wgt")
.add_see_also("osd_op_queue_mclock_scrub_lim"),
- Option("osd_op_queue_mclock_osd_subop_lim", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
+ Option("osd_op_queue_mclock_osd_rep_op_lim", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
.set_default(0.0)
- .set_description("mclock limit of osd sub-operation requests")
+ .set_description("mclock limit of osd replication operation requests and replies")
- .set_long_description("mclock limit of osd sub-operation requests when osd_op_queue is either 'mclock_opclass' or 'mclock_client'; higher values increase the limit")
+ .set_long_description("mclock limit of osd replication operation requests and replies when osd_op_queue is either 'mclock_opclass' or 'mclock_client'; higher values increase the limit")
.add_see_also("osd_op_queue")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_recov_res")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_recov_res")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_recov_res")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
.add_see_also("osd_op_queue_mclock_client_op_res")
.add_see_also("osd_op_queue_mclock_client_op_wgt")
.add_see_also("osd_op_queue_mclock_client_op_lim")
- .add_see_also("osd_op_queue_mclock_osd_subop_res")
- .add_see_also("osd_op_queue_mclock_osd_subop_wgt")
- .add_see_also("osd_op_queue_mclock_osd_subop_lim")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+ .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
.add_see_also("osd_op_queue_mclock_snap_res")
.add_see_also("osd_op_queue_mclock_snap_wgt")
.add_see_also("osd_op_queue_mclock_snap_lim")
osd_types.cc
ECUtil.cc
ExtentCache.cc
+ mClockOpClassSupport.cc
mClockOpClassQueue.cc
mClockClientQueue.cc
OpQueueItem.cc
public:
enum class op_type_t {
client_op,
- osd_subop,
bg_snaptrim,
bg_recovery,
bg_scrub
<< " cost " << item.get_cost()
<< " e" << item.get_map_epoch() << ")";
}
-};
+}; // class OpQueueItem
/// Implements boilerplate for operations queued for the pg lock
class PGOpQueueable : public OpQueueItem::OpQueueable {
#include "osd/mClockClientQueue.h"
#include "common/dout.h"
-
namespace dmc = crimson::dmclock;
-
+using namespace std::placeholders;
#define dout_context cct
#define dout_subsys ceph_subsys_osd
namespace ceph {
- mClockClientQueue::mclock_op_tags_t::mclock_op_tags_t(CephContext *cct) :
- client_op(cct->_conf->osd_op_queue_mclock_client_op_res,
- cct->_conf->osd_op_queue_mclock_client_op_wgt,
- cct->_conf->osd_op_queue_mclock_client_op_lim),
- osd_subop(cct->_conf->osd_op_queue_mclock_osd_subop_res,
- cct->_conf->osd_op_queue_mclock_osd_subop_wgt,
- cct->_conf->osd_op_queue_mclock_osd_subop_lim),
- snaptrim(cct->_conf->osd_op_queue_mclock_snap_res,
- cct->_conf->osd_op_queue_mclock_snap_wgt,
- cct->_conf->osd_op_queue_mclock_snap_lim),
- recov(cct->_conf->osd_op_queue_mclock_recov_res,
- cct->_conf->osd_op_queue_mclock_recov_wgt,
- cct->_conf->osd_op_queue_mclock_recov_lim),
- scrub(cct->_conf->osd_op_queue_mclock_scrub_res,
- cct->_conf->osd_op_queue_mclock_scrub_wgt,
- cct->_conf->osd_op_queue_mclock_scrub_lim)
- {
- dout(20) <<
- "mClockClientQueue settings:: " <<
- "client_op:" << client_op <<
- "; osd_subop:" << osd_subop <<
- "; snaptrim:" << snaptrim <<
- "; recov:" << recov <<
- "; scrub:" << scrub <<
- dendl;
- }
-
-
- const dmc::ClientInfo*
- mClockClientQueue::op_class_client_info_f(
- const mClockClientQueue::InnerClient& client)
- {
- switch(client.second) {
- case osd_op_type_t::client_op:
- return &mclock_op_tags->client_op;
- case osd_op_type_t::osd_subop:
- return &mclock_op_tags->osd_subop;
- case osd_op_type_t::bg_snaptrim:
- return &mclock_op_tags->snaptrim;
- case osd_op_type_t::bg_recovery:
- return &mclock_op_tags->recov;
- case osd_op_type_t::bg_scrub:
- return &mclock_op_tags->scrub;
- default:
- assert(0);
- return nullptr;
- }
- }
-
-
/*
* class mClockClientQueue
*/
- std::unique_ptr<mClockClientQueue::mclock_op_tags_t>
- mClockClientQueue::mclock_op_tags(nullptr);
-
- mClockClientQueue::pg_queueable_visitor_t
- mClockClientQueue::pg_queueable_visitor;
-
mClockClientQueue::mClockClientQueue(CephContext *cct) :
- queue(&mClockClientQueue::op_class_client_info_f)
+ queue(std::bind(&mClockClientQueue::op_class_client_info_f, this, _1)),
+ client_info_mgr(cct)
{
- // manage the singleton
- if (!mclock_op_tags) {
- mclock_op_tags.reset(new mclock_op_tags_t(cct));
- }
+ // empty
}
- mClockClientQueue::osd_op_type_t
- mClockClientQueue::get_osd_op_type(const Request& request) {
- switch (request.get_op_type()) {
- // if we got client_op back then we need to distinguish between
- // a client op and an osd subop.
- case OpQueueItem::op_type_t::client_op:
- return osd_op_type_t::client_op;
- case OpQueueItem::op_type_t::osd_subop:
- return osd_op_type_t::osd_subop;
- case OpQueueItem::op_type_t::bg_snaptrim:
- return osd_op_type_t::bg_snaptrim;
- case OpQueueItem::op_type_t::bg_recovery:
- return osd_op_type_t::bg_recovery;
- case OpQueueItem::op_type_t::bg_scrub:
- return osd_op_type_t::bg_scrub;
- default:
- assert(0);
- }
+ const dmc::ClientInfo* mClockClientQueue::op_class_client_info_f(
+ const mClockClientQueue::InnerClient& client)
+ {
+ return client_info_mgr.get_client_info(client.second);
}
mClockClientQueue::InnerClient
inline mClockClientQueue::get_inner_client(const Client& cl,
- const Request& request) {
- return InnerClient(cl, get_osd_op_type(request));
+ const Request& request) {
+ return InnerClient(cl, client_info_mgr.osd_op_type(request));
}
// Formatted output of the queue
#include "common/config.h"
#include "common/ceph_context.h"
-#include "osd/OpQueueItem.h"
-
#include "common/mClockPriorityQueue.h"
+#include "osd/OpQueueItem.h"
+#include "osd/mClockOpClassSupport.h"
namespace ceph {
// appropriately.
class mClockClientQueue : public OpQueue<Request, Client> {
- enum class osd_op_type_t {
- client_op, osd_subop, bg_snaptrim, bg_recovery, bg_scrub };
+ using osd_op_type_t = ceph::mclock::osd_op_type_t;
using InnerClient = std::pair<uint64_t,osd_op_type_t>;
queue_t queue;
- struct mclock_op_tags_t {
- crimson::dmclock::ClientInfo client_op;
- crimson::dmclock::ClientInfo osd_subop;
- crimson::dmclock::ClientInfo snaptrim;
- crimson::dmclock::ClientInfo recov;
- crimson::dmclock::ClientInfo scrub;
-
- mclock_op_tags_t(CephContext *cct);
- };
-
- static std::unique_ptr<mclock_op_tags_t> mclock_op_tags;
+ ceph::mclock::OpClassClientInfoMgr client_info_mgr;
public:
mClockClientQueue(CephContext *cct);
- static const crimson::dmclock::ClientInfo*
- op_class_client_info_f(const InnerClient& client);
+ const crimson::dmclock::ClientInfo* op_class_client_info_f(const InnerClient& client);
inline unsigned length() const override final {
return queue.length();
protected:
- struct pg_queueable_visitor_t : public boost::static_visitor<osd_op_type_t> {
- osd_op_type_t operator()(const OpRequestRef& o) const {
- // don't know if it's a client_op or a
- return osd_op_type_t::client_op;
- }
-
- osd_op_type_t operator()(const PGSnapTrim& o) const {
- return osd_op_type_t::bg_snaptrim;
- }
-
- osd_op_type_t operator()(const PGScrub& o) const {
- return osd_op_type_t::bg_scrub;
- }
-
- osd_op_type_t operator()(const PGRecovery& o) const {
- return osd_op_type_t::bg_recovery;
- }
- }; // class pg_queueable_visitor_t
-
- static pg_queueable_visitor_t pg_queueable_visitor;
-
- osd_op_type_t get_osd_op_type(const Request& request);
InnerClient get_inner_client(const Client& cl, const Request& request);
}; // class mClockClientAdapter
#include "osd/mClockOpClassQueue.h"
#include "common/dout.h"
-
namespace dmc = crimson::dmclock;
-
+using namespace std::placeholders;
#define dout_context cct
#define dout_subsys ceph_subsys_osd
namespace ceph {
- mClockOpClassQueue::mclock_op_tags_t::mclock_op_tags_t(CephContext *cct) :
- client_op(cct->_conf->osd_op_queue_mclock_client_op_res,
- cct->_conf->osd_op_queue_mclock_client_op_wgt,
- cct->_conf->osd_op_queue_mclock_client_op_lim),
- osd_subop(cct->_conf->osd_op_queue_mclock_osd_subop_res,
- cct->_conf->osd_op_queue_mclock_osd_subop_wgt,
- cct->_conf->osd_op_queue_mclock_osd_subop_lim),
- snaptrim(cct->_conf->osd_op_queue_mclock_snap_res,
- cct->_conf->osd_op_queue_mclock_snap_wgt,
- cct->_conf->osd_op_queue_mclock_snap_lim),
- recov(cct->_conf->osd_op_queue_mclock_recov_res,
- cct->_conf->osd_op_queue_mclock_recov_wgt,
- cct->_conf->osd_op_queue_mclock_recov_lim),
- scrub(cct->_conf->osd_op_queue_mclock_scrub_res,
- cct->_conf->osd_op_queue_mclock_scrub_wgt,
- cct->_conf->osd_op_queue_mclock_scrub_lim)
- {
- dout(20) <<
- "mClockOpClassQueue settings:: " <<
- "client_op:" << client_op <<
- "; osd_subop:" << osd_subop <<
- "; snaptrim:" << snaptrim <<
- "; recov:" << recov <<
- "; scrub:" << scrub <<
- dendl;
- }
-
-
- const dmc::ClientInfo*
- mClockOpClassQueue::op_class_client_info_f(const osd_op_type_t& op_type) {
- switch(op_type) {
- case osd_op_type_t::client_op:
- return &mclock_op_tags->client_op;
- case osd_op_type_t::osd_subop:
- return &mclock_op_tags->osd_subop;
- case osd_op_type_t::bg_snaptrim:
- return &mclock_op_tags->snaptrim;
- case osd_op_type_t::bg_recovery:
- return &mclock_op_tags->recov;
- case osd_op_type_t::bg_scrub:
- return &mclock_op_tags->scrub;
- default:
- assert(0);
- return nullptr;
- }
- }
-
/*
* class mClockOpClassQueue
*/
- std::unique_ptr<mClockOpClassQueue::mclock_op_tags_t>
- mClockOpClassQueue::mclock_op_tags(nullptr);
-
- mClockOpClassQueue::pg_queueable_visitor_t
- mClockOpClassQueue::pg_queueable_visitor;
-
mClockOpClassQueue::mClockOpClassQueue(CephContext *cct) :
- queue(&mClockOpClassQueue::op_class_client_info_f)
+ queue(std::bind(&mClockOpClassQueue::op_class_client_info_f, this, _1)),
+ client_info_mgr(cct)
{
- // manage the singleton
- if (!mclock_op_tags) {
- mclock_op_tags.reset(new mclock_op_tags_t(cct));
- }
+ // empty
}
- mClockOpClassQueue::osd_op_type_t
- mClockOpClassQueue::get_osd_op_type(const Request& request) {
- switch (request.get_op_type()) {
- // if we got client_op back then we need to distinguish between
- // a client op and an osd subop.
- case OpQueueItem::op_type_t::client_op:
- return osd_op_type_t::client_op;
- case OpQueueItem::op_type_t::osd_subop:
- return osd_op_type_t::osd_subop;
- case OpQueueItem::op_type_t::bg_snaptrim:
- return osd_op_type_t::bg_snaptrim;
- case OpQueueItem::op_type_t::bg_recovery:
- return osd_op_type_t::bg_recovery;
- case OpQueueItem::op_type_t::bg_scrub:
- return osd_op_type_t::bg_scrub;
- default:
- assert(0);
- }
+ const dmc::ClientInfo* mClockOpClassQueue::op_class_client_info_f(
+ const osd_op_type_t& op_type)
+ {
+ return client_info_mgr.get_client_info(op_type);
}
// Formatted output of the queue
void mClockOpClassQueue::dump(ceph::Formatter *f) const {
queue.dump(f);
}
-
} // namespace ceph
#include <ostream>
#include "boost/variant.hpp"
+#include "boost/container/flat_set.hpp"
#include "common/config.h"
#include "common/ceph_context.h"
-#include "osd/OpQueueItem.h"
-
#include "common/mClockPriorityQueue.h"
+#include "osd/OpQueueItem.h"
+#include "osd/mClockOpClassSupport.h"
namespace ceph {
// appropriately.
class mClockOpClassQueue : public OpQueue<Request, Client> {
- enum class osd_op_type_t {
- client_op, osd_subop, bg_snaptrim, bg_recovery, bg_scrub };
+ using osd_op_type_t = ceph::mclock::osd_op_type_t;
using queue_t = mClockQueue<Request, osd_op_type_t>;
-
queue_t queue;
- struct mclock_op_tags_t {
- crimson::dmclock::ClientInfo client_op;
- crimson::dmclock::ClientInfo osd_subop;
- crimson::dmclock::ClientInfo snaptrim;
- crimson::dmclock::ClientInfo recov;
- crimson::dmclock::ClientInfo scrub;
-
- mclock_op_tags_t(CephContext *cct);
- };
-
- static std::unique_ptr<mclock_op_tags_t> mclock_op_tags;
+ ceph::mclock::OpClassClientInfoMgr client_info_mgr;
public:
mClockOpClassQueue(CephContext *cct);
- static const crimson::dmclock::ClientInfo*
+ const crimson::dmclock::ClientInfo*
op_class_client_info_f(const osd_op_type_t& op_type);
inline unsigned length() const override final {
inline void enqueue_strict(Client cl,
unsigned priority,
Request&& item) override final {
- queue.enqueue_strict(get_osd_op_type(item), priority, std::move(item));
+ queue.enqueue_strict(client_info_mgr.osd_op_type(item),
+ priority,
+ std::move(item));
}
// Enqueue op in the front of the strict queue
inline void enqueue_strict_front(Client cl,
unsigned priority,
Request&& item) override final {
- queue.enqueue_strict_front(get_osd_op_type(item), priority, std::move(item));
+ queue.enqueue_strict_front(client_info_mgr.osd_op_type(item),
+ priority,
+ std::move(item));
}
// Enqueue op in the back of the regular queue
unsigned priority,
unsigned cost,
Request&& item) override final {
- queue.enqueue(get_osd_op_type(item), priority, cost, std::move(item));
+ queue.enqueue(client_info_mgr.osd_op_type(item),
+ priority,
+ cost,
+ std::move(item));
}
// Enqueue the op in the front of the regular queue
unsigned priority,
unsigned cost,
Request&& item) override final {
- queue.enqueue_front(get_osd_op_type(item), priority, cost, std::move(item));
+ queue.enqueue_front(client_info_mgr.osd_op_type(item),
+ priority,
+ cost,
+ std::move(item));
}
// Returns if the queue is empty
// Formatted output of the queue
void dump(ceph::Formatter *f) const override final;
-
- protected:
-
- struct pg_queueable_visitor_t : public boost::static_visitor<osd_op_type_t> {
- osd_op_type_t operator()(const OpRequestRef& o) const {
- // don't know if it's a client_op or a
- return osd_op_type_t::client_op;
- }
-
- osd_op_type_t operator()(const PGSnapTrim& o) const {
- return osd_op_type_t::bg_snaptrim;
- }
-
- osd_op_type_t operator()(const PGScrub& o) const {
- return osd_op_type_t::bg_scrub;
- }
-
- osd_op_type_t operator()(const PGRecovery& o) const {
- return osd_op_type_t::bg_recovery;
- }
- }; // class pg_queueable_visitor_t
-
- static pg_queueable_visitor_t pg_queueable_visitor;
-
- osd_op_type_t get_osd_op_type(const Request& request);
}; // class mClockOpClassAdapter
-
} // namespace ceph
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+
+#include "common/dout.h"
+#include "osd/mClockOpClassSupport.h"
+#include "osd/OpQueueItem.h"
+
+#include "include/assert.h"
+
+namespace ceph {
+
+ namespace mclock {
+
+ OpClassClientInfoMgr::OpClassClientInfoMgr(CephContext *cct) :
+ client_op(cct->_conf->osd_op_queue_mclock_client_op_res,
+ cct->_conf->osd_op_queue_mclock_client_op_wgt,
+ cct->_conf->osd_op_queue_mclock_client_op_lim),
+ osd_rep_op(cct->_conf->osd_op_queue_mclock_osd_rep_op_res,
+ cct->_conf->osd_op_queue_mclock_osd_rep_op_wgt,
+ cct->_conf->osd_op_queue_mclock_osd_rep_op_lim),
+ snaptrim(cct->_conf->osd_op_queue_mclock_snap_res,
+ cct->_conf->osd_op_queue_mclock_snap_wgt,
+ cct->_conf->osd_op_queue_mclock_snap_lim),
+ recov(cct->_conf->osd_op_queue_mclock_recov_res,
+ cct->_conf->osd_op_queue_mclock_recov_wgt,
+ cct->_conf->osd_op_queue_mclock_recov_lim),
+ scrub(cct->_conf->osd_op_queue_mclock_scrub_res,
+ cct->_conf->osd_op_queue_mclock_scrub_wgt,
+ cct->_conf->osd_op_queue_mclock_scrub_lim)
+ {
+ constexpr int rep_ops[] = {
+ MSG_OSD_REPOP,
+ MSG_OSD_REPOPREPLY,
+ MSG_OSD_PG_UPDATE_LOG_MISSING,
+ MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY,
+ MSG_OSD_EC_WRITE,
+ MSG_OSD_EC_WRITE_REPLY,
+ MSG_OSD_EC_READ,
+ MSG_OSD_EC_READ_REPLY
+ };
+ for (auto op : rep_ops) {
+ add_rep_op_msg(op);
+ }
+
+ lgeneric_subdout(cct, osd, 20) <<
+ "mClock OpClass settings:: " <<
+ "client_op:" << client_op <<
+ "; osd_rep_op:" << osd_rep_op <<
+ "; snaptrim:" << snaptrim <<
+ "; recov:" << recov <<
+ "; scrub:" << scrub <<
+ dendl;
+
+ lgeneric_subdout(cct, osd, 30) <<
+ "mClock OpClass message bit set:: " <<
+ rep_op_msg_bitset.to_string() << dendl;
+ }
+
+ void OpClassClientInfoMgr::add_rep_op_msg(int message_code) {
+ assert(message_code >= 0 && message_code < int(rep_op_msg_bitset_size));
+ rep_op_msg_bitset.set(message_code);
+ }
+
+ osd_op_type_t
+ OpClassClientInfoMgr::osd_op_type(const OpQueueItem& op) const {
+ osd_op_type_t type = convert_op_type(op.get_op_type());
+ if (osd_op_type_t::client_op != type) {
+ return type;
+ } else {
+ // get_header returns ceph_msg_header type, ceph_msg_header
+ // stores type as unsigned little endian, so be sure to
+ // convert to CPU byte ordering
+ boost::optional<OpRequestRef> op_ref_maybe = op.maybe_get_op();
+ assert(op_ref_maybe);
+ __le16 mtype_le = (*op_ref_maybe)->get_req()->get_header().type;
+ __u16 mtype = le16_to_cpu(mtype_le);
+ if (rep_op_msg_bitset.test(mtype)) {
+ return osd_op_type_t::osd_rep_op;
+ } else {
+ return osd_op_type_t::client_op;
+ }
+ }
+ }
+
+ // used for debugging since faster implementation can be done
+ // with rep_op_msg_bitset
+ bool OpClassClientInfoMgr::is_rep_op(uint16_t mtype) {
+ return
+ MSG_OSD_REPOP == mtype ||
+ MSG_OSD_REPOPREPLY == mtype ||
+ MSG_OSD_PG_UPDATE_LOG_MISSING == mtype ||
+ MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY == mtype ||
+ MSG_OSD_EC_WRITE == mtype ||
+ MSG_OSD_EC_WRITE_REPLY == mtype ||
+ MSG_OSD_EC_READ == mtype ||
+ MSG_OSD_EC_READ_REPLY == mtype;
+ }
+ } // namespace mclock
+} // namespace ceph
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+
+#pragma once
+
+#include <bitset>
+
+#include "dmclock/src/dmclock_server.h"
+#include "osd/OpRequest.h"
+#include "osd/OpQueueItem.h"
+
+
+namespace ceph {
+ namespace mclock {
+
+ using op_item_type_t = OpQueueItem::OpQueueable::op_type_t;
+
+ enum class osd_op_type_t {
+ client_op, osd_rep_op, bg_snaptrim, bg_recovery, bg_scrub
+ };
+
+ class OpClassClientInfoMgr {
+ crimson::dmclock::ClientInfo client_op;
+ crimson::dmclock::ClientInfo osd_rep_op;
+ crimson::dmclock::ClientInfo snaptrim;
+ crimson::dmclock::ClientInfo recov;
+ crimson::dmclock::ClientInfo scrub;
+
+ static constexpr std::size_t rep_op_msg_bitset_size = 128;
+ std::bitset<rep_op_msg_bitset_size> rep_op_msg_bitset;
+ void add_rep_op_msg(int message_code);
+
+ public:
+
+ OpClassClientInfoMgr(CephContext *cct);
+
+ inline const crimson::dmclock::ClientInfo*
+ get_client_info(osd_op_type_t type) {
+ switch(type) {
+ case osd_op_type_t::client_op:
+ return &client_op;
+ case osd_op_type_t::osd_rep_op:
+ return &osd_rep_op;
+ case osd_op_type_t::bg_snaptrim:
+ return &snaptrim;
+ case osd_op_type_t::bg_recovery:
+ return &recov;
+ case osd_op_type_t::bg_scrub:
+ return &scrub;
+ default:
+ ceph_abort();
+ return nullptr;
+ }
+ }
+
+ // converts operation type from op queue internal to mclock
+ // equivalent
+ inline static osd_op_type_t convert_op_type(op_item_type_t t) {
+ switch(t) {
+ case op_item_type_t::client_op:
+ return osd_op_type_t::client_op;
+ case op_item_type_t::bg_snaptrim:
+ return osd_op_type_t::bg_snaptrim;
+ case op_item_type_t::bg_recovery:
+ return osd_op_type_t::bg_recovery;
+ case op_item_type_t::bg_scrub:
+ return osd_op_type_t::bg_scrub;
+ default:
+ ceph_abort();
+ }
+ }
+
+ osd_op_type_t osd_op_type(const OpQueueItem&) const;
+
+ // used for debugging since faster implementation can be done
+ // with rep_op_msg_bitset
+ static bool is_rep_op(uint16_t);
+ }; // OpClassClientInfoMgr
+ } // namespace mclock
+} // namespace ceph