From 59ed16f8403a222e9c367794268b46f1dd9ea494 Mon Sep 17 00:00:00 2001 From: Ronen Friedman Date: Mon, 22 May 2023 18:09:28 +0300 Subject: [PATCH] osd/scrub: increasing max_osd_scrubs to 3 Bug reports seem to hint that the current default value of '1' is too low: the cluster is susceptible to scrub scheduling delays and issues stemming from local software/networking/hardware problems, even when they affect only a very small number of OSDs. Squid will include a major overhaul of the way scrubs are counted in the cluster, providing a better solution to the problem. For now - modifying the default is an effective stop-gap measure. Signed-off-by: Ronen Friedman (cherry picked from commit cc7b4afda972c144d7ebc679ff7f42d86f1dc493) --- src/common/options/osd.yaml.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/options/osd.yaml.in b/src/common/options/osd.yaml.in index 7291ce11dc1..88dd08a9f17 100644 --- a/src/common/options/osd.yaml.in +++ b/src/common/options/osd.yaml.in @@ -182,7 +182,7 @@ options: desc: Maximum concurrent scrubs on a single OSD fmt_desc: The maximum number of simultaneous scrub operations for a Ceph OSD Daemon. - default: 1 + default: 3 with_legacy: true - name: osd_scrub_during_recovery type: bool -- 2.39.5