};
op_queue_type_t op_queue = get_op_queue_type();
+ // Determine the op queue cutoff priority once at startup; the value is
+ // passed down to the shards/schedulers instead of each component
+ // re-reading (and, for debug_random, re-randomizing) the config.
+ auto get_op_queue_cut_off = [&conf = cct->_conf]() {
+ // Read the option once — avoids a second config-map lookup.
+ const auto cut_off = conf.get_val<std::string>("osd_op_queue_cut_off");
+ if (cut_off == "debug_random") {
+ // Randomly pick HIGH or LOW so tests exercise both cutoff paths.
+ std::random_device rd;
+ std::mt19937 random_gen(rd());
+ return (random_gen() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
+ } else if (cut_off == "high") {
+ return CEPH_MSG_PRIO_HIGH;
+ } else {
+ // default / catch-all is 'low'
+ return CEPH_MSG_PRIO_LOW;
+ }
+ };
+ unsigned op_queue_cut_off = get_op_queue_cut_off();
+
// initialize shards
num_shards = get_num_op_shards();
for (uint32_t i = 0; i < num_shards; i++) {
i,
cct,
this,
- op_queue);
+ op_queue,
+ op_queue_cut_off);
shards.push_back(one_shard);
}
}
int id,
CephContext *cct,
OSD *osd,
- op_queue_type_t osd_op_queue)
+ op_queue_type_t osd_op_queue,
+ unsigned osd_op_queue_cut_off)
: shard_id(id),
cct(cct),
osd(osd),
shard_lock{make_mutex(shard_lock_name)},
scheduler(ceph::osd::scheduler::make_scheduler(
cct, osd->whoami, osd->num_shards, id, osd->store->is_rotational(),
- osd->store->get_type(), osd_op_queue, osd->monc)),
+ osd->store->get_type(), osd_op_queue, osd_op_queue_cut_off, osd->monc)),
context_queue(sdata_wait_lock, sdata_cond)
{
dout(0) << "using op scheduler " << *scheduler << dendl;
int id,
CephContext *cct,
OSD *osd,
- op_queue_type_t osd_op_queue);
+ op_queue_type_t osd_op_queue,
+ unsigned osd_op_queue_cut_off);
};
class OSD : public Dispatcher,
OpSchedulerRef make_scheduler(
CephContext *cct, int whoami, uint32_t num_shards, int shard_id,
bool is_rotational, std::string_view osd_objectstore,
- op_queue_type_t osd_scheduler, MonClient *monc)
+ op_queue_type_t osd_scheduler, unsigned op_queue_cut_off, MonClient *monc)
{
// Force the use of 'wpq' scheduler for filestore OSDs.
// The 'mclock_scheduler' is not supported for filestore OSDs.
return std::make_unique<
ClassedOpQueueScheduler<WeightedPriorityQueue<OpSchedulerItem, client>>>(
cct,
+ op_queue_cut_off,
cct->_conf->osd_op_pq_max_tokens_per_priority,
cct->_conf->osd_op_pq_min_cost
);
} else if (op_queue_type_t::mClockScheduler == osd_scheduler) {
// default is 'mclock_scheduler'
return std::make_unique<
- mClockScheduler>(cct, whoami, num_shards, shard_id, is_rotational, monc);
+ mClockScheduler>(cct, whoami, num_shards, shard_id, is_rotational,
+ op_queue_cut_off, monc);
} else {
ceph_assert("Invalid choice of wq" == 0);
}
OpSchedulerRef make_scheduler(
CephContext *cct, int whoami, uint32_t num_shards, int shard_id,
bool is_rotational, std::string_view osd_objectstore,
- op_queue_type_t osd_scheduler, MonClient *monc);
+ op_queue_type_t osd_scheduler, unsigned op_queue_cut_off, MonClient *monc);
/**
* Implements OpScheduler in terms of OpQueue
unsigned cutoff;
T queue;
- static unsigned int get_io_prio_cut(CephContext *cct) {
- if (cct->_conf->osd_op_queue_cut_off == "debug_random") {
- srand(time(NULL));
- return (rand() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
- } else if (cct->_conf->osd_op_queue_cut_off == "high") {
- return CEPH_MSG_PRIO_HIGH;
- } else {
- // default / catch-all is 'low'
- return CEPH_MSG_PRIO_LOW;
- }
- }
public:
template <typename... Args>
- ClassedOpQueueScheduler(CephContext *cct, Args&&... args) :
- cutoff(get_io_prio_cut(cct)),
+ ClassedOpQueueScheduler(CephContext *cct, unsigned prio_cut, Args&&... args) :
+ cutoff(prio_cut),
queue(std::forward<Args>(args)...)
{}
uint32_t num_shards,
int shard_id,
bool is_rotational,
+ unsigned cutoff_priority,
MonClient *monc)
: cct(cct),
whoami(whoami),
num_shards(num_shards),
shard_id(shard_id),
is_rotational(is_rotational),
+ cutoff_priority(cutoff_priority),
monc(monc),
scheduler(
std::bind(&mClockScheduler::ClientRegistry::get_info,
const uint32_t num_shards;
const int shard_id;
const bool is_rotational;
+ const unsigned cutoff_priority;
MonClient *monc;
/**
};
}
- static unsigned int get_io_prio_cut(CephContext *cct) {
- if (cct->_conf->osd_op_queue_cut_off == "debug_random") {
- std::random_device rd;
- std::mt19937 random_gen(rd());
- return (random_gen() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
- } else if (cct->_conf->osd_op_queue_cut_off == "high") {
- return CEPH_MSG_PRIO_HIGH;
- } else {
- // default / catch-all is 'low'
- return CEPH_MSG_PRIO_LOW;
- }
- }
-
- unsigned cutoff_priority = get_io_prio_cut(cct);
-
/**
* set_osd_capacity_params_from_config
*
public:
mClockScheduler(CephContext *cct, int whoami, uint32_t num_shards,
- int shard_id, bool is_rotational, MonClient *monc);
+ int shard_id, bool is_rotational, unsigned cutoff_priority,
+ MonClient *monc);
~mClockScheduler() override;
/// Calculate scaled cost per item
void print(std::ostream &ostream) const final {
ostream << get_op_queue_type_name(get_type());
+ ostream << ", cutoff=" << cutoff_priority;
}
// Update data associated with the modified mclock config key(s)
uint32_t num_shards;
int shard_id;
bool is_rotational;
+ unsigned cutoff_priority;
MonClient *monc;
mClockScheduler q;
num_shards(1),
shard_id(0),
is_rotational(false),
+ cutoff_priority(12),
monc(nullptr),
- q(g_ceph_context, whoami, num_shards, shard_id, is_rotational, monc),
+ q(g_ceph_context, whoami, num_shards, shard_id, is_rotational,
+ cutoff_priority, monc),
client1(1001),
client2(9999),
client3(100000001)