From: Nathan Cutler
Date: Fri, 27 Jan 2017 21:27:18 +0000 (+0100)
Subject: tests: add require_jewel_osds before upgrading last hammer node
X-Git-Tag: v10.2.6~95^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=75d05809a66bee219031a7ccb64d414a2d6c8775;p=ceph.git

tests: add require_jewel_osds before upgrading last hammer node

Note: this commit was inspired by
http://github.com/ceph/ceph-qa-suite/commit/50758a4810794d265c5d36a71d1e16799251a00d

As of 10.2.4, when upgrading a cluster from hammer to jewel, after the last
node is upgraded the MON will put the cluster into HEALTH_WARN and say:
"all OSDs are running jewel or later but the 'require_jewel_osds' osdmap
flag is not set". The release notes say:

    This is a signal for the admin to do "ceph osd set require_jewel_osds" –
    by doing this, the upgrade path is complete and no more pre-Jewel OSDs
    may be added to the cluster.

Fixes: http://tracker.ceph.com/issues/18719
Signed-off-by: Nathan Cutler
---

diff --git a/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-hammer.yaml b/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-hammer.yaml
index 26fc51128c4..b9c3146b12b 100644
--- a/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-hammer.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-hammer.yaml
@@ -84,7 +84,14 @@ tasks:
     - 'namespace'
     num_objects: 5
     name_length: [400, 800, 1600]
-- ceph.restart: [osd.2]
+- ceph.restart:
+    daemons: [osd.2]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- exec:
+    mon.a:
+      - sleep 60
+      - ceph osd set require_jewel_osds
 - ceph_manager.wait_for_clean: null
 - ceph_manager.do_pg_scrub:
     args: ['test', 0, 'scrub']
diff --git a/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-infernalis.yaml b/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-infernalis.yaml
index 4c072c12192..d7de28e154b 100644
--- a/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-infernalis.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/lfn-upgrade-infernalis.yaml
@@ -84,7 +84,14 @@ tasks:
     - 'namespace'
     num_objects: 5
     name_length: [400, 800, 1600]
-- ceph.restart: [osd.2]
+- ceph.restart:
+    daemons: [osd.2]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- exec:
+    mon.a:
+      - sleep 60
+      - ceph osd set require_jewel_osds
 - ceph_manager.wait_for_clean: null
 - ceph_manager.do_pg_scrub:
     args: ['test', 0, 'scrub']
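
For reference, with this hunk applied the tail of the task list in both suites
reads roughly as follows (a sketch; the exact indentation is assumed to match
the surrounding file). The osd.2 restart no longer waits for the cluster to
report healthy, only for the OSDs to come up, and mon.a then sets the
require_jewel_osds flag to clear the HEALTH_WARN described above before the
wait_for_clean and scrub steps run:

- ceph.restart:
    daemons: [osd.2]
    wait-for-healthy: false
    wait-for-osds-up: true
- exec:
    mon.a:
      - sleep 60
      - ceph osd set require_jewel_osds
- ceph_manager.wait_for_clean: null
- ceph_manager.do_pg_scrub:
    args: ['test', 0, 'scrub']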