* RGW: `radosgw-admin realm delete` is now renamed to `radosgw-admin realm rm`. This
is consistent with the help message.
-* OSD: Ceph now uses mclock_scheduler as its default osd_op_queue to provide QoS.
+* OSD: Ceph now uses mclock_scheduler for bluestore OSDs as its default osd_op_queue
+ to provide QoS. The 'mclock_scheduler' is not supported for filestore OSDs.
+ Therefore, the default 'osd_op_queue' is set to 'wpq' for filestore OSDs
+ and is enforced even if the user attempts to change it.
* CephFS: Failure to replay the journal by a standby-replay daemon will now
cause the rank to be marked damaged.
ss << " [Deprecated]";
auto& d = checks->add("OSD_FILESTORE", HEALTH_WARN, ss.str(),
filestore_osds.size());
- deprecated_tip << ", which has been deprecated.";
+ deprecated_tip << ", which has been deprecated and"
+ << " not been optimized for QoS"
+ << " (Filestore OSDs will use 'osd_op_queue = wpq' strictly)";
detail.push_back(deprecated_tip.str());
d.detail.swap(detail);
}
// osd capacity with the value obtained from running the
// osd bench test. This is later used to setup mclock.
if ((cct->_conf.get_val<std::string>("osd_op_queue") == "mclock_scheduler") &&
- (cct->_conf.get_val<bool>("osd_mclock_skip_benchmark") == false)) {
+ (cct->_conf.get_val<bool>("osd_mclock_skip_benchmark") == false) &&
+ (!unsupported_objstore_for_qos())) {
std::string max_capacity_iops_config;
bool force_run_benchmark =
cct->_conf.get_val<bool>("osd_mclock_force_run_benchmark_on_init");
{
// If the scheduler enabled is mclock, override the recovery, backfill
// and sleep options so that mclock can meet the QoS goals.
- if (cct->_conf.get_val<std::string>("osd_op_queue") == "mclock_scheduler") {
+ if (cct->_conf.get_val<std::string>("osd_op_queue") == "mclock_scheduler" &&
+ !unsupported_objstore_for_qos()) {
dout(1) << __func__
<< ": Changing recovery/backfill/sleep settings for QoS" << dendl;
return 0;
}
+bool OSD::unsupported_objstore_for_qos()
+{
+ static const std::vector<std::string> unsupported_objstores = { "filestore" };
+ return std::find(unsupported_objstores.begin(),
+ unsupported_objstores.end(),
+ store->get_type()) != unsupported_objstores.end();
+}
+
void OSD::update_log_config()
{
auto parsed_options = clog->parse_client_options(cct);
shard_lock_name(shard_name + "::shard_lock"),
shard_lock{make_mutex(shard_lock_name)},
scheduler(ceph::osd::scheduler::make_scheduler(
- cct, osd->num_shards, osd->store->is_rotational())),
+ cct, osd->num_shards, osd->store->is_rotational(),
+ osd->store->get_type())),
context_queue(sdata_wait_lock, sdata_cond)
{
dout(0) << "using op scheduler " << *scheduler << dendl;
double *elapsed,
std::ostream& ss);
int mon_cmd_set_config(const std::string &key, const std::string &val);
+ bool unsupported_objstore_for_qos();
void scrub_purged_snaps();
void probe_smart(const std::string& devid, std::ostream& ss);
namespace ceph::osd::scheduler {
OpSchedulerRef make_scheduler(
- CephContext *cct, uint32_t num_shards, bool is_rotational)
+ CephContext *cct, uint32_t num_shards,
+ bool is_rotational, std::string_view osd_objectstore)
{
const std::string *type = &cct->_conf->osd_op_queue;
if (*type == "debug_random") {
type = &index_lookup[which];
}
- if (*type == "wpq" ) {
- // default is 'wpq'
+ // Force the use of 'wpq' scheduler for filestore OSDs.
+ // The 'mclock_scheduler' is not supported for filestore OSDs.
+ if (*type == "wpq" || osd_objectstore == "filestore") {
return std::make_unique<
ClassedOpQueueScheduler<WeightedPriorityQueue<OpSchedulerItem, client>>>(
cct,
cct->_conf->osd_op_pq_min_cost
);
} else if (*type == "mclock_scheduler") {
+ // default is 'mclock_scheduler'
return std::make_unique<mClockScheduler>(cct, num_shards, is_rotational);
} else {
ceph_assert("Invalid choice of wq" == 0);
using OpSchedulerRef = std::unique_ptr<OpScheduler>;
OpSchedulerRef make_scheduler(
- CephContext *cct, uint32_t num_shards, bool is_rotational);
+ CephContext *cct, uint32_t num_shards, bool is_rotational,
+ std::string_view osd_objectstore);
/**
* Implements OpScheduler in terms of OpQueue