--- /dev/null
+# Enable mgr modules now before any CephFS mounts are created by the mgr. This
+# avoids the potential race of the mgr mounting CephFS and then getting failed
+# over by the monitors before the monitors have a chance to note the new client
+# session from the mgr beacon. In that case, the monitors will not blocklist
+# that client mount automatically, so the MDS will eventually do the eviction
+# (and create a cluster log warning, which we want to avoid).
+#
+# Note: ideally the mgr would gently stop mgr modules before respawning so
+# that the client mounts could be unmounted, but this historically caused
+# issues with modules like the dashboard, so an abrupt restart was chosen
+# instead.
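+#
+# The "mgrmodules" section below is referenced from the tasks list; other
+# suite fragments append their module-enabling steps to its sequence when
+# teuthology merges the YAML fragments into one job.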
+
+mgrmodules:
+  sequential:
+    - print: "Enabling mgr modules"
+    # other fragments append to this
+
+tasks:
+  - sequential:
+      - mgrmodules
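+# Append to the mgrmodules sequence defined in the base fragment so that
+# snap_schedule is enabled before the mgr creates any CephFS mounts.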
+mgrmodules:
+  sequential:
+    - exec:
+        mon.a:
+          - ceph mgr module enable snap_schedule
+          - ceph config set mgr mgr/snap_schedule/allow_m_granularity true
+          - ceph config set mgr mgr/snap_schedule/dump_on_update true
 overrides:
   ceph:
     conf:
 tasks:
 - exec:
     mon.a:
-      - ceph mgr module enable snap_schedule
-      - ceph config set mgr mgr/snap_schedule/allow_m_granularity true
-      - ceph config set mgr mgr/snap_schedule/dump_on_update true
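+      # --snap_schedule=1m requests a snapshot every minute (minute
+      # granularity is only honored because allow_m_granularity is set via
+      # the mgrmodules fragment); retention 6m3h keeps the 6 most recent
+      # minutely and the 3 most recent hourly snapshots.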
       - ceph fs snap-schedule add --fs=cephfs --path=/ --snap_schedule=1m
       - ceph fs snap-schedule retention add --fs=cephfs --path=/ --retention-spec-or-period=6m3h
       - ceph fs snap-schedule status --fs=cephfs --path=/