#osd_mkfs_options_xfs: -f -i size=2048
#osd_mount_options_xfs: noatime
#osd_mon_heartbeat_interval: 30
+
# CRUSH
#pool_default_crush_rule: 0
#osd_crush_update_on_start: "true"
# Object backend
#osd_objectstore: filestore
+
# Performance tuning
#filestore_merge_threshold: 40
#filestore_split_multiple: 8
#filestore_op_threads: 8
#filestore_max_sync_interval: 5
#osd_max_scrubs: 1
+
# Recovery tuning
#osd_recovery_max_active: 5
#osd_max_backfills: 2
#osd_recovery_max_chunk: 1048576
#osd_recovery_threads: 1
+# Deep scrub
+#osd_scrub_sleep: .1
+#osd_disk_thread_ioprio_class: idle
+#osd_disk_thread_ioprio_priority: 0
+#osd_scrub_chunk_max: 5
+#osd_deep_scrub_stride: 1048576
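+# Note: osd_scrub_sleep throttles scrubbing by sleeping that many seconds
+# between scrub chunks, and osd_scrub_chunk_max caps the number of objects
+# scrubbed per chunk. The ioprio class/priority pair deprioritizes the OSD
+# disk thread (which runs scrubs) and only takes effect with the CFQ I/O
+# scheduler. osd_deep_scrub_stride is the deep-scrub read size in bytes
+# (1048576 = 1 MiB). Tune per cluster.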
+
## MDS options
#
#mds: false # disable mds configuration in ceph.conf
osd_recovery_max_chunk: 1048576
osd_recovery_threads: 1

+# Deep scrub
+osd_scrub_sleep: .1
+osd_disk_thread_ioprio_class: idle
+osd_disk_thread_ioprio_priority: 0
+osd_scrub_chunk_max: 5
+osd_deep_scrub_stride: 1048576
+
## MDS options
#
mds: false # disable mds configuration in ceph.conf
debug journal = {{ debug_osd_level }}
debug monc = {{ debug_osd_level }}
{% endif %}
+ # Deep scrub impact
+ osd scrub sleep = {{ osd_scrub_sleep }}
+ osd disk thread ioprio class = {{ osd_disk_thread_ioprio_class }}
+ osd disk thread ioprio priority = {{ osd_disk_thread_ioprio_priority }}
+ osd scrub chunk max = {{ osd_scrub_chunk_max }}
+ osd deep scrub stride = {{ osd_deep_scrub_stride }}
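+ {# With the sample defaults above, this section renders roughly as:
+      osd scrub sleep = 0.1
+      osd disk thread ioprio class = idle
+      osd disk thread ioprio priority = 0
+      osd scrub chunk max = 5
+      osd deep scrub stride = 1048576
+ #}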
{% if mds %}
[mds]