From d6c66f3fa6141bbb50a782c15196bdeb0e80e21d Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Fri, 30 Apr 2021 19:31:35 -0700
Subject: [PATCH] qa,pybind/mgr: allow disabling .mgr pool

This is mostly for testing: a lot of tests assume that there are no
existing pools. Those tests relied on a config option to turn off
creation of the "device_health_metrics" pool, which generally exists for
any new Ceph cluster. It would be better to make the tests tolerant of
the new .mgr pool, but there are clearly a lot of them. So just convert
the config into a new mgr_pool option that disables use/creation of the
.mgr pool.

Signed-off-by: Patrick Donnelly
---
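Note: a minimal usage sketch (not part of the change itself) of how the new
mgr_pool option is expected to be used in a test environment. The option name,
value, and --force usage come from this patch; the surrounding commands and the
fresh-cluster assumption are illustrative only.

    # mgr_pool is a startup flag, so it must be set before ceph-mgr first
    # starts (the QA suites below do this via pre-mgr-commands).
    sudo ceph config set mgr mgr_pool false --force
    # A freshly bootstrapped test cluster should then report no pools at all.
    ceph osd lspools

While mgr_pool is false, mgr modules that try to use the module DB get
MgrDBNotReady instead of implicitly creating the .mgr pool (see the
src/pybind/mgr/mgr_module.py hunk at the end of this patch).
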
 qa/standalone/ceph-helpers.sh | 2 +-
 qa/suites/rados/multimon/no_pools.yaml | 2 +-
 .../rados/singleton-nomsgr/all/admin_socket_output.yaml | 2 +-
 qa/suites/rados/singleton-nomsgr/all/balancer.yaml | 2 +-
 qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml | 2 +-
 .../rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml | 2 +-
 .../rados/singleton-nomsgr/all/export-after-evict.yaml | 2 +-
 qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml | 2 +-
 .../rados/singleton-nomsgr/all/health-warnings.yaml | 2 +-
 .../singleton-nomsgr/all/large-omap-object-warnings.yaml | 2 +-
 .../singleton-nomsgr/all/lazy_omap_stats_output.yaml | 2 +-
 .../rados/singleton-nomsgr/all/librados_hello_world.yaml | 2 +-
 qa/suites/rados/singleton-nomsgr/all/msgr.yaml | 2 +-
 .../singleton-nomsgr/all/multi-backfill-reject.yaml | 2 +-
 .../rados/singleton-nomsgr/all/osd_stale_reads.yaml | 2 +-
 qa/suites/rados/singleton-nomsgr/all/pool-access.yaml | 2 +-
 .../singleton-nomsgr/all/recovery-unfound-found.yaml | 2 +-
 .../singleton-nomsgr/all/version-number-sanity.yaml | 2 +-
 qa/suites/rados/singleton/all/deduptool.yaml | 2 +-
 qa/suites/rados/singleton/all/divergent_priors.yaml | 2 +-
 qa/suites/rados/singleton/all/divergent_priors2.yaml | 2 +-
 qa/suites/rados/singleton/all/dump-stuck.yaml | 2 +-
 qa/suites/rados/singleton/all/ec-lost-unfound.yaml | 2 +-
 qa/suites/rados/singleton/all/lost-unfound-delete.yaml | 2 +-
 qa/suites/rados/singleton/all/lost-unfound.yaml | 2 +-
 .../rados/singleton/all/max-pg-per-osd.from-mon.yaml | 2 +-
 .../rados/singleton/all/max-pg-per-osd.from-primary.yaml | 2 +-
 .../rados/singleton/all/max-pg-per-osd.from-replica.yaml | 2 +-
 qa/suites/rados/singleton/all/mon-auth-caps.yaml | 2 +-
 qa/suites/rados/singleton/all/mon-config-key-caps.yaml | 2 +-
 qa/suites/rados/singleton/all/mon-config-keys.yaml | 2 +-
 qa/suites/rados/singleton/all/mon-config.yaml | 2 +-
 .../all/mon-memory-target-compliance.yaml.disabled | 2 +-
 qa/suites/rados/singleton/all/osd-backfill.yaml | 2 +-
 .../rados/singleton/all/osd-recovery-incomplete.yaml | 2 +-
 qa/suites/rados/singleton/all/osd-recovery.yaml | 2 +-
 qa/suites/rados/singleton/all/peer.yaml | 2 +-
 .../rados/singleton/all/pg-autoscaler-progress-off.yaml | 2 +-
 qa/suites/rados/singleton/all/pg-autoscaler.yaml | 2 +-
 .../rados/singleton/all/pg-removal-interruption.yaml | 2 +-
 qa/suites/rados/singleton/all/radostool.yaml | 2 +-
 qa/suites/rados/singleton/all/random-eio.yaml | 2 +-
 qa/suites/rados/singleton/all/rebuild-mondb.yaml | 2 +-
 qa/suites/rados/singleton/all/recovery-preemption.yaml | 2 +-
 qa/suites/rados/singleton/all/resolve_stuck_peering.yaml | 2 +-
 qa/suites/rados/singleton/all/test-crash.yaml | 2 +-
 .../singleton/all/test_envlibrados_for_rocksdb.yaml | 2 +-
 qa/suites/rados/singleton/all/thrash-backfill-full.yaml | 2 +-
 qa/suites/rados/singleton/all/thrash-eio.yaml | 2 +-
 .../singleton/all/thrash_cache_writeback_proxy_none.yaml | 2 +-
 .../rados/singleton/all/watch-notify-same-primary.yaml | 2 +-
 .../thrash-erasure-code/thrashers/minsize_recovery.yaml | 2 +-
 src/common/options/mgr.yaml.in | 9 +++++++++
 src/pybind/mgr/mgr_module.py | 3 +++
 54 files changed, 64 insertions(+), 52 deletions(-)

diff --git a/qa/standalone/ceph-helpers.sh b/qa/standalone/ceph-helpers.sh
index f0ba10f76a4b6..71201b3ee984a 100755
--- a/qa/standalone/ceph-helpers.sh
+++ b/qa/standalone/ceph-helpers.sh
@@ -556,7 +556,7 @@ function run_mgr() {
     shift
     local data=$dir/$id

-    ceph config set mgr mgr/devicehealth/enable_monitoring off --force
+    ceph config set mgr mgr_pool false --force
     ceph-mgr \
         --id $id \
         $EXTRA_OPTS \
diff --git a/qa/suites/rados/multimon/no_pools.yaml b/qa/suites/rados/multimon/no_pools.yaml
index dc6c769ca0768..32ef2439f4ba6 100644
--- a/qa/suites/rados/multimon/no_pools.yaml
+++ b/qa/suites/rados/multimon/no_pools.yaml
@@ -2,4 +2,4 @@ overrides:
   ceph:
     create_rbd_pool: false
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
diff --git a/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
index 04c40197a8562..a9f8316172d6e 100644
--- a/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
@@ -19,7 +19,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
 - rgw:
   - client.0
 - exec:
diff --git a/qa/suites/rados/singleton-nomsgr/all/balancer.yaml b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
index d4c6e3ca5dac0..eb30c663a72ac 100644
--- a/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
@@ -4,7 +4,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     fs: xfs
     log-ignorelist:
       - \(PG_AVAILABILITY\)
diff --git a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
index f998c51c9ecaf..d7699d042c101 100644
--- a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
@@ -8,7 +8,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
index e116b5ae0e884..a3085ad3d9c0c 100644
--- a/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
@@ -8,7 +8,7 @@ roles:
 overrides:
   ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - overall HEALTH_
diff --git a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
index ee800e5a72c10..7785f2453e6b6 100644
--- a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -13,7 +13,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - overall HEALTH_
       - \(CACHE_POOL_NO_HIT_SET\)
diff --git a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
index 8d26cd32384aa..a3704069f9d25 100644
--- a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -18,7 +18,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     conf:
       global:
         osd max object name len: 460
diff --git a/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
index bc57a9cd926e2..7b7bf592ff26c 100644
--- a/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
@@ -4,7 +4,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     conf:
       osd:
        # we may land on ext4
diff --git a/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
index b08ab343f812f..e1e9d34ef9f6d 100644
--- a/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
@@ -7,7 +7,7 @@ roles:
 overrides:
   ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - \(OSDMAP_FLAGS\)
       - \(OSD_FULL\)
diff --git a/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
index 7228522be05b2..61c2fa66333a5 100644
--- a/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
@@ -12,7 +12,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
 - exec:
     client.0:
       - ceph_test_lazy_omap_stats
diff --git a/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
index f670a0849e0ba..0c0a071e9af7e 100644
--- a/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
@@ -17,7 +17,7 @@ tasks:
       - libradospp-devel
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
 - workunit:
     clients:
       all:
diff --git a/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
index d1852ae2bd3cc..4eb376fcf6254 100644
--- a/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
@@ -17,7 +17,7 @@ openstack:
 overrides:
   ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     conf:
       client:
         debug ms: 20
diff --git a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
index a3ce46e6acab8..8b95603d16669 100644
--- a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -16,7 +16,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - overall HEALTH_
       - \(PG_
diff --git a/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml b/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml
index 408268a0962e8..5beb2015f800c 100644
--- a/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml
@@ -23,7 +23,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
 - exec:
     client.0:
       - ceph_test_osd_stale_read
diff --git a/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
index 6485a68717913..e79e1aaf5700d 100644
--- a/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
@@ -8,7 +8,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
 - workunit:
     clients:
       all:
diff --git a/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
index 9cf4eec89a885..d949a5005d34c 100644
--- a/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
@@ -13,7 +13,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     fs: xfs
     conf:
       osd:
diff --git a/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml b/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml
index 6d48796f03a28..daeeeef4eba2c 100644
--- a/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml
@@ -8,7 +8,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
 - workunit:
     clients:
       all:
diff --git a/qa/suites/rados/singleton/all/deduptool.yaml b/qa/suites/rados/singleton/all/deduptool.yaml
index 616a0b33cad87..3a34cb309a670 100644
--- a/qa/suites/rados/singleton/all/deduptool.yaml
+++ b/qa/suites/rados/singleton/all/deduptool.yaml
@@ -13,7 +13,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - had wrong client addr
diff --git a/qa/suites/rados/singleton/all/divergent_priors.yaml b/qa/suites/rados/singleton/all/divergent_priors.yaml
index 24b42557f9831..81d68654a9549 100644
--- a/qa/suites/rados/singleton/all/divergent_priors.yaml
+++ b/qa/suites/rados/singleton/all/divergent_priors.yaml
@@ -24,5 +24,5 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
 - divergent_priors:
diff --git a/qa/suites/rados/singleton/all/divergent_priors2.yaml b/qa/suites/rados/singleton/all/divergent_priors2.yaml
index 6bef639582fd4..baac3110cf1be 100644
--- a/qa/suites/rados/singleton/all/divergent_priors2.yaml
+++ b/qa/suites/rados/singleton/all/divergent_priors2.yaml
@@ -24,5 +24,5 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
 - divergent_priors2:
diff --git a/qa/suites/rados/singleton/all/dump-stuck.yaml b/qa/suites/rados/singleton/all/dump-stuck.yaml
index c1d28ee8e7254..eb70b70fff40e 100644
--- a/qa/suites/rados/singleton/all/dump-stuck.yaml
+++ b/qa/suites/rados/singleton/all/dump-stuck.yaml
@@ -11,7 +11,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - overall HEALTH_
diff --git a/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
index 9c423c8d879c7..050365ff4df0c 100644
--- a/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
+++ b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
@@ -16,7 +16,7 @@ tasks:
 - ceph:
     create_rbd_pool: false
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - objects unfound and apparently lost
       - overall HEALTH_
diff --git a/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
index bb170b5069a8a..e4b7b11e2e1b4 100644
--- a/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
+++ b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
@@ -14,7 +14,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - objects unfound and apparently lost
       - overall HEALTH_
diff --git a/qa/suites/rados/singleton/all/lost-unfound.yaml b/qa/suites/rados/singleton/all/lost-unfound.yaml
index fceee20c04e98..280dac87b5204 100644
--- a/qa/suites/rados/singleton/all/lost-unfound.yaml
+++ b/qa/suites/rados/singleton/all/lost-unfound.yaml
@@ -14,7 +14,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - objects unfound and apparently lost
       - overall HEALTH_
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
index e5999bc9b9f03..7ab94589893ca 100644
--- a/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
@@ -11,7 +11,7 @@ overrides:
   ceph:
     create_rbd_pool: False
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     conf:
       mon:
         osd pool default size: 2
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
index 075d6be1f7a5d..a51e8921f9268 100644
--- a/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
@@ -13,7 +13,7 @@ overrides:
   ceph:
     create_rbd_pool: False
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     conf:
       mon:
         osd pool default size: 2
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
index db2856484ef72..e3658ef77b6e8 100644
--- a/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
@@ -13,7 +13,7 @@ overrides:
   ceph:
     create_rbd_pool: False
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     conf:
       mon:
         osd pool default size: 2
diff --git a/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/qa/suites/rados/singleton/all/mon-auth-caps.yaml
index 8c23c0bc9dcd7..f7c45d43fd42a 100644
--- a/qa/suites/rados/singleton/all/mon-auth-caps.yaml
+++ b/qa/suites/rados/singleton/all/mon-auth-caps.yaml
@@ -9,7 +9,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - overall HEALTH_
       - \(AUTH_BAD_CAPS\)
diff --git a/qa/suites/rados/singleton/all/mon-config-key-caps.yaml b/qa/suites/rados/singleton/all/mon-config-key-caps.yaml
index f987f3c9898e9..f254754e270c2 100644
--- a/qa/suites/rados/singleton/all/mon-config-key-caps.yaml
+++ b/qa/suites/rados/singleton/all/mon-config-key-caps.yaml
@@ -9,7 +9,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - overall HEALTH_
       - \(AUTH_BAD_CAPS\)
diff --git a/qa/suites/rados/singleton/all/mon-config-keys.yaml b/qa/suites/rados/singleton/all/mon-config-keys.yaml
index 7d8b920cb41d0..117b6d0554968 100644
--- a/qa/suites/rados/singleton/all/mon-config-keys.yaml
+++ b/qa/suites/rados/singleton/all/mon-config-keys.yaml
@@ -15,7 +15,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
 - workunit:
     clients:
       all:
diff --git a/qa/suites/rados/singleton/all/mon-config.yaml b/qa/suites/rados/singleton/all/mon-config.yaml
index 3627e17dfc08a..15d48f2380f4b 100644
--- a/qa/suites/rados/singleton/all/mon-config.yaml
+++ b/qa/suites/rados/singleton/all/mon-config.yaml
@@ -15,7 +15,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
 - workunit:
     clients:
       all:
diff --git a/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled b/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled
index 120e073a72306..e1f79c16811ae 100644
--- a/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled
+++ b/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled
@@ -43,7 +43,7 @@ tasks:
 - ceph:
     create_rbd_pool: false
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - overall HEALTH_
       - \(OSDMAP_FLAGS\)
diff --git a/qa/suites/rados/singleton/all/osd-backfill.yaml b/qa/suites/rados/singleton/all/osd-backfill.yaml
index bbbd9b4b3574a..1a24f4a2938a0 100644
--- a/qa/suites/rados/singleton/all/osd-backfill.yaml
+++ b/qa/suites/rados/singleton/all/osd-backfill.yaml
@@ -14,7 +14,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - overall HEALTH_
diff --git a/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
index 15a0ea3424648..7ade95c9ece0f 100644
--- a/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
+++ b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
@@ -15,7 +15,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - overall HEALTH_
diff --git a/qa/suites/rados/singleton/all/osd-recovery.yaml b/qa/suites/rados/singleton/all/osd-recovery.yaml
index 75cea6a94b981..94ab85a778d2c 100644
--- a/qa/suites/rados/singleton/all/osd-recovery.yaml
+++ b/qa/suites/rados/singleton/all/osd-recovery.yaml
@@ -14,7 +14,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - overall HEALTH_
diff --git a/qa/suites/rados/singleton/all/peer.yaml b/qa/suites/rados/singleton/all/peer.yaml
index 24fd74b82ed34..99183c460f1d9 100644
--- a/qa/suites/rados/singleton/all/peer.yaml
+++ b/qa/suites/rados/singleton/all/peer.yaml
@@ -14,7 +14,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     config:
       global:
         osd pool default min size : 1
diff --git a/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml b/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml
index 042c3d78e7d7d..90fb128f0b9cd 100644
--- a/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml
+++ b/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml
@@ -21,7 +21,7 @@ tasks:
 - ceph:
     create_rbd_pool: false
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - overall HEALTH_
       - \(OSDMAP_FLAGS\)
diff --git a/qa/suites/rados/singleton/all/pg-autoscaler.yaml b/qa/suites/rados/singleton/all/pg-autoscaler.yaml
index abc6f7bd1ab17..c7c7e68e35501 100644
--- a/qa/suites/rados/singleton/all/pg-autoscaler.yaml
+++ b/qa/suites/rados/singleton/all/pg-autoscaler.yaml
@@ -21,7 +21,7 @@ tasks:
 - ceph:
     create_rbd_pool: false
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - overall HEALTH_
       - \(OSDMAP_FLAGS\)
diff --git a/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
index b3f11264fe79d..2c00192f137a8 100644
--- a/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
+++ b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
@@ -13,7 +13,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - slow request
diff --git a/qa/suites/rados/singleton/all/radostool.yaml b/qa/suites/rados/singleton/all/radostool.yaml
index fa3a1b0f7e3dc..6a3998ed26e0e 100644
--- a/qa/suites/rados/singleton/all/radostool.yaml
+++ b/qa/suites/rados/singleton/all/radostool.yaml
@@ -13,7 +13,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - had wrong client addr
diff --git a/qa/suites/rados/singleton/all/random-eio.yaml b/qa/suites/rados/singleton/all/random-eio.yaml
index 782b906d62a7c..258ae90edbf81 100644
--- a/qa/suites/rados/singleton/all/random-eio.yaml
+++ b/qa/suites/rados/singleton/all/random-eio.yaml
@@ -16,7 +16,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - missing primary copy of
       - objects unfound and apparently lost
diff --git a/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/qa/suites/rados/singleton/all/rebuild-mondb.yaml
index f678d08ce98c1..a6c0b7839a1ab 100644
--- a/qa/suites/rados/singleton/all/rebuild-mondb.yaml
+++ b/qa/suites/rados/singleton/all/rebuild-mondb.yaml
@@ -15,7 +15,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - no reply from
       - overall HEALTH_
diff --git a/qa/suites/rados/singleton/all/recovery-preemption.yaml b/qa/suites/rados/singleton/all/recovery-preemption.yaml
index 7438f9e775cfa..ce51688e50a2b 100644
--- a/qa/suites/rados/singleton/all/recovery-preemption.yaml
+++ b/qa/suites/rados/singleton/all/recovery-preemption.yaml
@@ -15,7 +15,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     conf:
       osd:
         osd recovery sleep: .1
diff --git a/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml b/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
index 2756ebe8269fc..41a011bd468a2 100644
--- a/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
+++ b/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
@@ -6,7 +6,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     fs: xfs
     log-ignorelist:
       - overall HEALTH_
diff --git a/qa/suites/rados/singleton/all/test-crash.yaml b/qa/suites/rados/singleton/all/test-crash.yaml
index beb83f0bb0bd4..deab84a5a3206 100644
--- a/qa/suites/rados/singleton/all/test-crash.yaml
+++ b/qa/suites/rados/singleton/all/test-crash.yaml
@@ -5,7 +5,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - Reduced data availability
       - OSD_.*DOWN
diff --git a/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml b/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
index a76f6a8f03e42..fcdd2f7f9f8f5 100644
--- a/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
+++ b/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
@@ -12,7 +12,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - overall HEALTH_
       - \(POOL_APP_NOT_ENABLED\)
diff --git a/qa/suites/rados/singleton/all/thrash-backfill-full.yaml b/qa/suites/rados/singleton/all/thrash-backfill-full.yaml
index 0f2924db3f808..eb1d5eb83ba05 100644
--- a/qa/suites/rados/singleton/all/thrash-backfill-full.yaml
+++ b/qa/suites/rados/singleton/all/thrash-backfill-full.yaml
@@ -23,7 +23,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - missing primary copy of
diff --git a/qa/suites/rados/singleton/all/thrash-eio.yaml b/qa/suites/rados/singleton/all/thrash-eio.yaml
index 5d9770061d1fd..5ae8e69635cb0 100644
--- a/qa/suites/rados/singleton/all/thrash-eio.yaml
+++ b/qa/suites/rados/singleton/all/thrash-eio.yaml
@@ -21,7 +21,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - missing primary copy of
diff --git a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
index ab210abd7773e..d61ad0c421b3d 100644
--- a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
+++ b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
@@ -16,7 +16,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - but it is still running
       - slow request
diff --git a/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
index eeb585c88fc3a..1ff69c300cc9f 100644
--- a/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
+++ b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
@@ -15,7 +15,7 @@ tasks:
 - install:
 - ceph:
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     config:
       global:
         osd pool default min size : 1
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml
index 8362b6b1d5a28..2e375c7aa954c 100644
--- a/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml
@@ -5,7 +5,7 @@ overrides:
       - objects unfound and apparently lost
     create_rbd_pool: False
     pre-mgr-commands:
-      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+      - sudo ceph config set mgr mgr_pool false --force
     conf:
       osd:
         osd debug reject backfill probability: .3
diff --git a/src/common/options/mgr.yaml.in b/src/common/options/mgr.yaml.in
index ff120736db5a6..6fc8cb2e1c69a 100644
--- a/src/common/options/mgr.yaml.in
+++ b/src/common/options/mgr.yaml.in
@@ -12,6 +12,15 @@ options:
   - mgr
   flags:
   - no_mon_update
+- name: mgr_pool
+  type: bool
+  level: dev
+  desc: Allow use/creation of .mgr pool.
+  default: true
+  services:
+  - mgr
+  flags:
+  - startup
 - name: mgr_stats_period
   type: int
   level: basic
diff --git a/src/pybind/mgr/mgr_module.py b/src/pybind/mgr/mgr_module.py
index 79f5aa8e66b38..610fb4eb911d6 100644
--- a/src/pybind/mgr/mgr_module.py
+++ b/src/pybind/mgr/mgr_module.py
@@ -1127,6 +1127,9 @@ class MgrModule(ceph_module.BaseMgrModule, MgrModuleLoggingMixin):
         assert self._db_lock.locked()
         if self._db is not None:
             return self._db
+        db_allowed = self.get_ceph_option("mgr_pool")
+        if not db_allowed:
+            raise MgrDBNotReady();
         self._db = self.open_db()
         if self._db is None:
             raise MgrDBNotReady();
-- 
2.39.5