Added require_jewel_osds flag
author    Yuri Weinstein <yweinste@redhat.com>
Thu, 10 Nov 2016 17:28:18 +0000 (17:28 +0000)
committer Yuri Weinstein <yweinste@redhat.com>
Fri, 11 Nov 2016 17:56:36 +0000 (17:56 +0000)
Added to point-to-point as well

Signed-off-by: Yuri Weinstein <yweinste@redhat.com>
suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-all.yaml
suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
suites/upgrade/infernalis-x/point-to-point-x/point-to-point.yaml
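
All three suites gain the same sequence: restart the upgraded daemons without waiting for HEALTH_OK, set the require_jewel_osds osdmap flag from a single node, then gate on ceph.healthy (once every OSD runs jewel the cluster reports HEALTH_WARN until the flag is set, which is why wait-for-healthy is turned off first). A minimal sketch of that pattern, using the same daemon names, sleep, and tracker reference as the hunks below; it is an illustration, not a verbatim copy of any one file:

    upgrade-sequence:
      sequential:
        - ceph.restart:
            daemons: [osd.0, osd.1]       # restart the upgraded daemons
            wait-for-healthy: false       # HEALTH_WARN expected until the flag is set
        - exec:
            mon.a:
              - sleep 300                 # http://tracker.ceph.com/issues/17808
              - ceph osd set require_jewel_osds
        - ceph.healthy:                   # now expect HEALTH_OK
        - print: "**** done ceph.healthy"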

diff --git a/suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-all.yaml
index 2e41f7354c1de7dafdb56cd87d46cdbafb2a9a7e..f56effdde45504a3699be32dee0dcb5d1478dd60 100644
@@ -3,5 +3,14 @@ meta:
    upgrade the ceph cluster
 upgrade-sequence:
    sequential:
-   - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
-   - print: "**** done ceph.restart all"
+   - ceph.restart:
+       daemons: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+       wait-for-healthy: false
+       wait-for-osds-up: true
+   - print: "**** done ceph.restart do not wait for healthy"
+   - exec:
+       mon.a:
+         - sleep 300 # http://tracker.ceph.com/issues/17808
+         - ceph osd set require_jewel_osds
+   - ceph.healthy:
+   - print: "**** done ceph.healthy"
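
Once set, the flag appears in the osdmap flags line, so it can be confirmed from any node with the ceph CLI; this is a generic check, not part of the suite:

    ceph osd dump | grep require_jewel_osds
    # e.g. a flags line such as: flags sortbitwise,require_jewel_osds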
diff --git a/suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/infernalis-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
index 70d57dc4054c011b829e812ef94840c6b53383c8..eaefce7d473cf88cc5a4be74d410b00acc8825db 100644
@@ -33,6 +33,11 @@ upgrade-sequence:
        duration: 60
    - ceph.restart:
        daemons: [osd.2, osd.3]
-       wait-for-healthy: true
+       wait-for-healthy: false
+   - exec:
+       osd.0:
+         - sleep 300 # http://tracker.ceph.com/issues/17808
+         - ceph osd set require_jewel_osds
+   - ceph.healthy:
    - sleep:
        duration: 60
diff --git a/suites/upgrade/infernalis-x/point-to-point-x/point-to-point.yaml b/suites/upgrade/infernalis-x/point-to-point-x/point-to-point.yaml
index cc49096b4c116718b1cbb1fc800dd9f1e5e6c307..c553c022dcde45948c2a38fbee20210cd6f75881 100644
@@ -171,22 +171,25 @@ upgrade-sequence_x:
    - ceph.restart: [mds.a]
    - sleep:
        duration: 60
-   - ceph.restart: [osd.0]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.1]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.2]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.3]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.4]
+
+   - ceph.restart:
+       daemons: [osd.0, osd.1, osd.2]
+       wait-for-healthy: false
+   - exec:
+       osd.0:
+         - sleep 300 # http://tracker.ceph.com/issues/17808
+         - ceph osd set require_jewel_osds
+   - ceph.healthy:
    - sleep:
-       duration: 30
-   - ceph.restart: [osd.5]
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.3, osd.4, osd.5]
+       wait-for-healthy: false
+   - exec:
+       osd.0:
+         - sleep 300 # http://tracker.ceph.com/issues/17808
+         - ceph osd set require_jewel_osds
+   - ceph.healthy:
    - sleep:
        duration: 60
    - ceph.restart: [mon.a]
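
Read straight through, the OSD portion of upgrade-sequence_x in point-to-point.yaml ends up as below (reconstructed from the hunk above; surrounding steps unchanged):

    - ceph.restart: [mds.a]
    - sleep:
        duration: 60

    - ceph.restart:
        daemons: [osd.0, osd.1, osd.2]
        wait-for-healthy: false
    - exec:
        osd.0:
          - sleep 300 # http://tracker.ceph.com/issues/17808
          - ceph osd set require_jewel_osds
    - ceph.healthy:
    - sleep:
        duration: 60
    - ceph.restart:
        daemons: [osd.3, osd.4, osd.5]
        wait-for-healthy: false
    - exec:
        osd.0:
          - sleep 300 # http://tracker.ceph.com/issues/17808
          - ceph osd set require_jewel_osds
    - ceph.healthy:
    - sleep:
        duration: 60
    - ceph.restart: [mon.a]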