ceph config set global bluestore_warn_on_legacy_statfs false
+BLUESTORE_NO_PER_POOL_OMAP
+__________________________
+
+Starting with the Octopus release, BlueStore tracks omap space utilization
+by pool, and one or more OSDs have volumes that were created prior to
+Octopus. If all OSDs are not running BlueStore with the new tracking
+enabled, the cluster will report an approximate value for per-pool omap usage
+based on the most recent deep-scrub.
+
+The old OSDs can be updated to track by pool by stopping each OSD,
+running a repair operation, and then restarting it. For example, if
+``osd.123`` needed to be updated::
+
+ systemctl stop ceph-osd@123
+ ceph-bluestore-tool repair --path /var/lib/ceph/osd/ceph-123
+ systemctl start ceph-osd@123
+
+This warning can be disabled with::
+
+ ceph config set global bluestore_warn_on_no_per_pool_omap false
+
BLUESTORE_DISK_SIZE_MISMATCH
____________________________
conf:
global:
bluestore warn on legacy statfs: false
+ bluestore warn on no per pool omap: false
mon:
mon warn on osd down out interval zero: false
conf:
global:
bluestore warn on legacy statfs: false
+ bluestore warn on no per pool omap: false
mon:
mon warn on osd down out interval zero: false
ms dump corrupt message level: 0
ms bind msgr2: false
bluestore warn on legacy statfs: false
+ bluestore warn on no per pool omap: false
mds:
debug ms: 1
debug mds: 20
global:
mon warn on pool no app: false
bluestore_warn_on_legacy_statfs: false
+ bluestore warn on no per pool omap: false
- exec:
osd.0:
- ceph osd require-osd-release mimic
conf:
global:
bluestore_warn_on_legacy_statfs: false
+ bluestore warn on no per pool omap: false
- exec:
osd.0:
- ceph osd require-osd-release mimic
global:
ms dump corrupt message level: 0
ms bind msgr2: false
+ bluestore warn on no per pool omap: false
mds:
debug ms: 1
debug mds: 20
global:
mon warn on pool no app: false
bluestore_warn_on_legacy_statfs: false
+ bluestore warn on no per pool omap: false
- exec:
osd.0:
- ceph osd set-require-min-compat-client nautilus
conf:
global:
bluestore_warn_on_legacy_statfs: false
+ bluestore warn on no per pool omap: false
- exec:
osd.0:
- ceph osd require-osd-release nautilus
OPTION(bluestore_warn_on_bluefs_spillover, OPT_BOOL)
OPTION(bluestore_warn_on_legacy_statfs, OPT_BOOL)
OPTION(bluestore_fsck_error_on_no_per_pool_omap, OPT_BOOL)
+OPTION(bluestore_warn_on_no_per_pool_omap, OPT_BOOL)
OPTION(bluestore_log_op_age, OPT_DOUBLE)
OPTION(bluestore_log_omap_iterator_age, OPT_DOUBLE)
OPTION(bluestore_log_collection_list_age, OPT_DOUBLE)
.set_default(false)
.set_description("Make fsck error (instead of warn) when objects without per-pool omap are found"),
+ Option("bluestore_warn_on_no_per_pool_omap", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+ .set_default(true)
+ .set_description("Enable health indication on lack of per-pool omap"),
+
Option("bluestore_log_op_age", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
.set_default(5)
.set_description("log operation if it's slower than this age (seconds)"),
summary = "Legacy BlueStore stats reporting detected";
} else if (asum.first == "BLUESTORE_DISK_SIZE_MISMATCH") {
summary = "BlueStore has dangerous mismatch between block device and free list sizes";
+ } else if (asum.first == "BLUESTORE_NO_PER_POOL_OMAP") {
+ summary = "Legacy BlueStore does not track omap usage by pool";
}
summary += " on ";
summary += stringify(asum.second.first);
"bluestore_cache_autotune_interval",
"bluestore_no_per_pool_stats_tolerance",
"bluestore_warn_on_legacy_statfs",
+ "bluestore_warn_on_no_per_pool_omap",
NULL
};
return KEYS;
changed.count("bluestore_warn_on_legacy_statfs")) {
_check_legacy_statfs_alert();
}
+ if (changed.count("bluestore_warn_on_no_per_pool_omap")) {
+ _check_no_per_pool_omap_alert();
+ }
if (changed.count("bluestore_csum_type")) {
_set_csum();
legacy_statfs_alert = s;
}
+// Refresh the BLUESTORE_NO_PER_POOL_OMAP health alert string.
+// Sets no_per_pool_omap_alert (under qlock) to a warning message when the
+// store lacks per-pool omap tracking (per_pool_omap is false) and the
+// bluestore_warn_on_no_per_pool_omap option is enabled; clears it otherwise.
+// NOTE(review): the message is composed before taking qlock so the lock is
+// held only for the assignment.
+void BlueStore::_check_no_per_pool_omap_alert()
+{
+ string s;
+ if (!per_pool_omap &&
+ cct->_conf->bluestore_warn_on_no_per_pool_omap) {
+ s = "legacy (not per-pool) omap detected, "
+ "suggest to run store repair to measure per-pool omap usage";
+ }
+ std::lock_guard l(qlock);
+ no_per_pool_omap_alert = s;
+}
+
// ---------------
// cache
} else {
dout(10) << __func__ << " per_pool_omap not present" << dendl;
}
+ _check_no_per_pool_omap_alert();
}
_open_statfs();
"BLUEFS_SPILLOVER",
spillover_alert);
}
+ if (!no_per_pool_omap_alert.empty()) {
+ alerts.emplace(
+ "BLUESTORE_NO_PER_POOL_OMAP",
+ no_per_pool_omap_alert);
+ }
string s0(failed_cmode);
if (!failed_compressors.empty()) {
set<string> failed_compressors;
string spillover_alert;
string legacy_statfs_alert;
+ string no_per_pool_omap_alert;
string disk_size_mismatch_alert;
void _log_alerts(osd_alert_list_t& alerts);
}
void _check_legacy_statfs_alert();
+ void _check_no_per_pool_omap_alert();
void _set_disk_size_mismatch_alert(const string& s) {
std::lock_guard l(qlock);
disk_size_mismatch_alert = s;