 upgrade-sequence:
   sequential:
   - ceph.restart:
-      daemons: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+      daemons: [osd.0, osd.1, osd.2, osd.3]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - ceph.restart:
+      daemons: [mon.a, mon.b, mon.c]
       wait-for-healthy: false
       wait-for-osds-up: true
   - print: "**** done ceph.restart do not wait for healthy"
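
Read as applied (assuming standard two-space teuthology YAML indentation and that the lines shown are the whole task list for this file), the hunk above splits the single all-daemon restart into two steps, OSDs first and monitors second:

upgrade-sequence:
  sequential:
  - ceph.restart:
      daemons: [osd.0, osd.1, osd.2, osd.3]
      wait-for-healthy: false
      wait-for-osds-up: true
  - ceph.restart:
      daemons: [mon.a, mon.b, mon.c]
      wait-for-healthy: false
      wait-for-osds-up: true
  - print: "**** done ceph.restart do not wait for healthy"

Note that, in the lines shown, mds.a no longer appears in either daemons list after the change.
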
+++ /dev/null
-upgrade-sequence:
-  sequential:
-  - ceph.restart:
-      daemons: [mon.a]
-      wait-for-healthy: true
-  - sleep:
-      duration: 60
-  - ceph.restart:
-      daemons: [osd.0, osd.1]
-      wait-for-healthy: true
-  - sleep:
-      duration: 60
-  - ceph.restart: [mds.a]
-  - sleep:
-      duration: 60
-  - print: "**** running mixed versions of osds and mons"
-#do we need to use "ceph osd crush tunables hammer" ?
-  - exec:
-      mon.b:
-        - sudo ceph osd crush tunables hammer
-  - print: "**** done ceph osd crush tunables hammer"
-  - ceph.restart:
-      daemons: [mon.b, mon.c]
-      wait-for-healthy: true
-  - sleep:
-      duration: 60
-  - ceph.restart:
-      daemons: [osd.2, osd.3]
-      wait-for-healthy: false
-  - exec:
-      osd.0:
-        - sleep 300 # http://tracker.ceph.com/issues/17808
-        - ceph osd set require_jewel_osds
-  - ceph.healthy:
-  - sleep:
-      duration: 60
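
During the "running mixed versions of osds and mons" window in the removed sequence above, mon.a, osd.0, osd.1, and mds.a have already been restarted onto the new release while osd.2, osd.3, mon.b, and mon.c are still on hammer. As a sketch only (not part of either YAML file; the mon.a target and the chosen OSD ids are arbitrary), the per-daemon versions could be spot-checked with an exec step such as:

- exec:
    mon.a:
      - ceph tell osd.0 version   # restarted earlier, so expected to report the new release
      - ceph tell osd.2 version   # not yet restarted, so expected to still report hammer
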
--- /dev/null
+upgrade-sequence:
+  sequential:
+  - ceph.restart:
+      daemons: [osd.0, osd.1]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [mon.a]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart: [mds.a]
+  - sleep:
+      duration: 60
+  - print: "**** running mixed versions of osds and mons"
+#do we need to use "ceph osd crush tunables hammer" ?
+  - exec:
+      mon.b:
+        - sudo ceph osd crush tunables hammer
+  - print: "**** done ceph osd crush tunables hammer"
+  - ceph.restart:
+      daemons: [osd.2, osd.3]
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [mon.b, mon.c]
+      wait-for-healthy: false
+  - exec:
+      osd.0:
+        - sleep 300 # http://tracker.ceph.com/issues/17808
+        - ceph osd set require_jewel_osds
+  - ceph.healthy:
+  - sleep:
+      duration: 60
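
After the final exec step sets require_jewel_osds (the preceding sleep 300 is there for http://tracker.ceph.com/issues/17808), the flag should be visible in the osdmap before ceph.healthy can pass. A possible spot check, again only an illustration and not part of the new file (the osd.0 target simply mirrors the exec target already used above):

- exec:
    osd.0:
      - ceph osd dump | grep require_jewel_osds   # the osdmap flags line should now include the flag
      - ceph health                               # expected to converge to HEALTH_OK, which ceph.healthy waits for
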