From d3b9e5b7e35be280a8b824e110459440de90dc0f Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Fri, 14 Oct 2016 13:35:30 -0400
Subject: [PATCH] rados/singleton-nomsgr: drop 16113 and 13234

A stop at jewel is required, and these are pre-jewel bugs.

Signed-off-by: Sage Weil
---
 suites/rados/singleton-nomsgr/all/13234.yaml | 250 -------------------
 suites/rados/singleton-nomsgr/all/16113.yaml |  97 -------
 2 files changed, 347 deletions(-)
 delete mode 100644 suites/rados/singleton-nomsgr/all/13234.yaml
 delete mode 100644 suites/rados/singleton-nomsgr/all/16113.yaml

diff --git a/suites/rados/singleton-nomsgr/all/13234.yaml b/suites/rados/singleton-nomsgr/all/13234.yaml
deleted file mode 100644
index f5fd00f4ca307..0000000000000
--- a/suites/rados/singleton-nomsgr/all/13234.yaml
+++ /dev/null
@@ -1,250 +0,0 @@
-# we don't have el7 packages for old releases
-# http://tracker.ceph.com/issues/15139
-os_type: ubuntu
-overrides:
-  ceph:
-    conf:
-      mon:
-        debug mon: 20
-        debug ms: 1
-        debug paxos: 20
-        mon warn on legacy crush tunables: false
-        mon min osdmap epochs: 99999
-      osd:
-        osd map cache size: 2
-        osd map max advance: 1
-        debug filestore: 20
-        debug journal: 20
-        debug ms: 1
-        debug osd: 20
-    log-whitelist:
-    - osd_map_cache_size
-    - slow request
-    - scrub mismatch
-    - ScrubResult
-    - failed to encode
-roles:
-- - mon.a
-  - osd.0
-  - osd.1
-  - mon.b
-  - mon.c
-  - osd.2
-  - client.0
-openstack:
-  - volumes: # attached to each instance
-      count: 3
-      size: 10 # GB
-tasks:
-- install:
-    tag: v0.67.10
-- print: '**** done installing dumpling'
-- ceph:
-    fs: xfs
-- print: '**** done ceph'
-- full_sequential:
-  - ceph_manager.create_pool:
-      args:
-      - newpool
-      kwargs:
-        pg_num: 32
-  - sleep:
-      duration: 30
-  - ceph_manager.wait_for_clean: null
-  - ceph_manager.kill_osd:
-      kwargs:
-        osd: 0
-  - ceph_manager.kill_osd:
-      kwargs:
-        osd: 1
-  - ceph_manager.kill_osd:
-      kwargs:
-        osd: 2
-  - print: '**** done killing osds'
-  - loop:
-      body:
-      - ceph_manager.set_pool_property:
-          args:
-          - newpool
-          - min_size
-          - 2
-      - ceph_manager.set_pool_property:
-          args:
-          - newpool
-          - min_size
-          - 1
-      count: 10
-  - install.upgrade:
-      mon.a:
-        branch: firefly
-  - print: '**** killing mons'
-  - sleep:
-      duration: 10
-  - ceph_manager.kill_mon:
-      kwargs:
-        mon: a
-  - ceph_manager.kill_mon:
-      kwargs:
-        mon: b
-  - ceph_manager.kill_mon:
-      kwargs:
-        mon: c
-  - print: '**** reviving osds'
-  - ceph_manager.revive_osd:
-      kwargs:
-        skip_admin_check: True
-        osd: 0
-  - ceph_manager.revive_osd:
-      kwargs:
-        skip_admin_check: True
-        osd: 1
-  - ceph_manager.revive_osd:
-      kwargs:
-        skip_admin_check: True
-        osd: 2
-  - sleep:
-      duration: 60
-  - print: '**** killing osds'
-  - ceph_manager.kill_osd:
-      kwargs:
-        osd: 0
-  - ceph_manager.kill_osd:
-      kwargs:
-        osd: 1
-  - ceph_manager.kill_osd:
-      kwargs:
-        osd: 2
-  - print: '**** reviving mons'
-  - ceph_manager.revive_mon:
-      kwargs:
-        mon: a
-  - ceph_manager.revive_mon:
-      kwargs:
-        mon: b
-  - ceph_manager.revive_mon:
-      kwargs:
-        mon: c
-  - sleep:
-      duration: 10
-  - print: '**** done upgrading restarting osds and reviving mons'
-  - loop:
-      body:
-      - ceph_manager.set_pool_property:
-          args:
-          - newpool
-          - min_size
-          - 2
-      - ceph_manager.set_pool_property:
-          args:
-          - newpool
-          - min_size
-          - 1
-      count: 10
-  - sleep:
-      duration: 10
-  - install.upgrade:
-      mon.a:
-        branch: hammer
-  - print: '**** done upgrading to hammer'
-  - print: '**** killing mons'
-  - ceph_manager.kill_mon:
-      kwargs:
-        mon: a
-  - ceph_manager.kill_mon:
-      kwargs:
-        mon: b
-  - ceph_manager.kill_mon:
-      kwargs:
-        mon: c
-  - print: '**** reviving osds'
-  - ceph_manager.revive_osd:
-      kwargs:
-        skip_admin_check: True
-        osd: 0
-  - ceph_manager.revive_osd:
-      kwargs:
-        skip_admin_check: True
-        osd: 1
-  - ceph_manager.revive_osd:
-      kwargs:
-        skip_admin_check: True
-        osd: 2
-  - sleep:
-      duration: 60
-  - print: '**** killing osds'
-  - ceph_manager.kill_osd:
-      kwargs:
-        osd: 0
-  - ceph_manager.kill_osd:
-      kwargs:
-        osd: 1
-  - ceph_manager.kill_osd:
-      kwargs:
-        osd: 2
-  - print: '**** reviving mons'
-  - ceph_manager.revive_mon:
-      kwargs:
-        mon: a
-  - ceph_manager.revive_mon:
-      kwargs:
-        mon: b
-  - ceph_manager.revive_mon:
-      kwargs:
-        mon: c
-  - sleep:
-      duration: 10
-  - print: '**** done upgrading restarting osds and reviving mons'
-  - loop:
-      body:
-      - ceph_manager.set_pool_property:
-          args:
-          - newpool
-          - min_size
-          - 2
-      - ceph_manager.set_pool_property:
-          args:
-          - newpool
-          - min_size
-          - 1
-      count: 10
-  - sleep:
-      duration: 10
-  - install.upgrade:
-      mon.a: null
-  - print: '**** done upgrading to branch'
-  - ceph.restart:
-    - mon.a
-    - mon.b
-    - mon.c
-  - exec:
-      mon.a:
-      - ceph osd set require_kraken_osds
-  - loop:
-      body:
-      - ceph_manager.set_pool_property:
-          args:
-          - newpool
-          - min_size
-          - 2
-      - ceph_manager.set_pool_property:
-          args:
-          - newpool
-          - min_size
-          - 1
-      count: 10
-  - sleep:
-      duration: 10
-  - print: '**** about to start osds'
-  - ceph_manager.revive_osd:
-      kwargs:
-        osd: 0
-  - ceph_manager.revive_osd:
-      kwargs:
-        osd: 1
-  - ceph_manager.revive_osd:
-      kwargs:
-        osd: 2
-  - sleep:
-      duration: 30
-  - ceph_manager.wait_for_clean: null
-  - print: '**** done!'
diff --git a/suites/rados/singleton-nomsgr/all/16113.yaml b/suites/rados/singleton-nomsgr/all/16113.yaml
deleted file mode 100644
index 16fdcb70af595..0000000000000
--- a/suites/rados/singleton-nomsgr/all/16113.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-os_type: ubuntu
-overrides:
-  ceph:
-    conf:
-      mon:
-        debug mon: 20
-        debug ms: 1
-        debug paxos: 20
-        mon warn on legacy crush tunables: false
-        mon min osdmap epochs: 3
-        osd pool default size: 2
-        osd pool default min size: 1
-      osd:
-        osd map cache size: 2
-        osd map max advance: 1
-        debug filestore: 20
-        debug journal: 20
-        debug ms: 1
-        debug osd: 20
-    log-whitelist:
-    - osd_map_cache_size
-    - slow request
-    - scrub mismatch
-    - ScrubResult
-    - failed to encode
-    - wrongly marked me down
-roles:
-- - mon.a
-  - osd.0
-  - osd.1
-  - mon.b
-  - mon.c
-  - osd.2
-  - client.0
-openstack:
-  - volumes: # attached to each instance
-      count: 3
-      size: 10 # GB
-tasks:
-- install:
-    branch: hammer
-- print: '**** done installing hammer'
-- ceph:
-    fs: xfs
-- print: '**** done ceph'
-- ceph_manager.create_pool:
-    args: ['test']
-    kwargs:
-      pg_num: 1024
-- sleep:
-    duration: 10
-- ceph_manager.wait_for_clean: null
-- sequential:
-  - radosbench:
-      pool: test
-      size: 1
-      time: 100
-      cleanup: false
-      create_pool: false
-- install.upgrade:
-    mon.a: null
-- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
-- sleep:
-    duration: 10
-- exec:
-    mon.a:
-    - ceph osd set require_kraken_osds
-- ceph_manager.wait_for_clean: null
-- exec:
-    mon.a:
-    - ceph osd set sortbitwise
-- sleep:
-    duration: 10
-- ceph_manager.wait_for_clean: null
-- sequential:
-  - radosbench:
-      pool: test
-      size: 1
-      time: 400
-      cleanup: false
-      create_pool: false
-  - sleep:
-      duration: 30
-  - ceph_manager.kill_osd:
-      kwargs:
-        osd: 0
-  - sleep:
-      duration: 30
-  - ceph_manager.revive_osd:
-      kwargs:
-        osd: 0
-  - sleep:
-      duration: 30
-  - ceph_manager.wait_for_clean: null
-- sleep:
-    duration: 30
-- ceph_manager.wait_for_clean: null
-- 
2.39.5