From e5127b914cbabbd8b46a803c099ccd575d6df6c0 Mon Sep 17 00:00:00 2001
From: Nitzan Mordechai
Date: Thu, 20 Feb 2025 07:59:39 +0000
Subject: [PATCH] suites/rados: cache tier deprecated, no need to keep the
 tests for it

Fixes: https://tracker.ceph.com/issues/68628
Signed-off-by: Nitzan Mordechai
---
 .../singleton-nomsgr/all/cache-fs-trunc.yaml  | 55 --------------
 .../all/export-after-evict.yaml               | 41 -----------
 .../singleton-nomsgr/all/full-tiering.yaml    | 41 -----------
 .../thrash_cache_writeback_proxy_none.yaml    | 71 -------------------
 .../workloads/cache-snaps.yaml                | 34 ---------
 .../thrash/workloads/cache-agent-big.yaml     | 37 ----------
 .../thrash/workloads/cache-agent-small.yaml   | 35 ---------
 .../workloads/cache-pool-snaps-readproxy.yaml | 40 -----------
 .../thrash/workloads/cache-pool-snaps.yaml    | 45 ------------
 .../rados/thrash/workloads/cache-snaps.yaml   | 40 -----------
 qa/suites/rados/thrash/workloads/cache.yaml   | 37 ----------
 .../basic/tasks/test/rados_cache_snaps.yaml   | 50 -------------
 12 files changed, 526 deletions(-)
 delete mode 100644 qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
 delete mode 100644 qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
 delete mode 100644 qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
 delete mode 100644 qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
 delete mode 100644 qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml
 delete mode 100644 qa/suites/rados/thrash/workloads/cache-agent-big.yaml
 delete mode 100644 qa/suites/rados/thrash/workloads/cache-agent-small.yaml
 delete mode 100644 qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
 delete mode 100644 qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
 delete mode 100644 qa/suites/rados/thrash/workloads/cache-snaps.yaml
 delete mode 100644 qa/suites/rados/thrash/workloads/cache.yaml
 delete mode 100644 qa/suites/smoke/basic/tasks/test/rados_cache_snaps.yaml

diff --git a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
deleted file mode 100644
index fddbd072306..00000000000
--- a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-openstack:
-  - volumes: # attached to each instance
-      count: 3
-      size: 10 # GB
-roles:
-- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
-tasks:
-- install:
-- ceph:
-    pre-mgr-commands:
-      - sudo ceph config set mgr mgr_pool false --force
-    log-ignorelist:
-      - overall HEALTH_
-      - \(CACHE_POOL_NO_HIT_SET\)
-      - \(POOL_APP_NOT_ENABLED\)
-    conf:
-      global:
-        osd max object name len: 460
-        osd max object namespace len: 64
-        debug client: 20
-        debug mds: 20
-        debug ms: 1
-- exec:
-    client.0:
-      - ceph osd pool create data_cache 4
-      - ceph osd tier add cephfs_data data_cache
-      - ceph osd tier cache-mode data_cache writeback
-      - ceph osd tier set-overlay cephfs_data data_cache
-      - ceph osd pool set data_cache hit_set_type bloom
-      - ceph osd pool set data_cache hit_set_count 8
-      - ceph osd pool set data_cache hit_set_period 3600
-      - ceph osd pool set data_cache min_read_recency_for_promote 0
-- ceph-fuse:
-- exec:
-    client.0:
-      - sudo chmod 777 $TESTDIR/mnt.0/
-      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
-      - ls -al $TESTDIR/mnt.0/foo
-      - truncate --size 0 $TESTDIR/mnt.0/foo
-      - ls -al $TESTDIR/mnt.0/foo
-      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
-      - ls -al $TESTDIR/mnt.0/foo
-      - cp $TESTDIR/mnt.0/foo /tmp/foo
-      - sync
-      - rados -p data_cache ls -
-      - sleep 10
-      - rados -p data_cache ls -
-      - rados -p data_cache cache-flush-evict-all
-      - rados -p data_cache ls -
-      - sleep 1
-- exec:
-    client.1:
-      - hexdump -C /tmp/foo | head
-      - hexdump -C $TESTDIR/mnt.1/foo | head
-      - cmp $TESTDIR/mnt.1/foo /tmp/foo
diff --git a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
deleted file mode 100644
index b4ce5468a0b..00000000000
--- a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-openstack:
-  - volumes: # attached to each instance
-      count: 3
-      size: 10 # GB
-roles:
-- - mon.a
-  - mgr.x
-  - osd.0
-  - osd.1
-  - osd.2
-  - client.0
-tasks:
-- install:
-- ceph:
-    pre-mgr-commands:
-      - sudo ceph config set mgr mgr_pool false --force
-    log-ignorelist:
-      - overall HEALTH_
-      - \(CACHE_POOL_NO_HIT_SET\)
-      - \(POOL_APP_NOT_ENABLED\)
-    conf:
-      global:
-        osd max object name len: 460
-        osd max object namespace len: 64
-- exec:
-    client.0:
-      - ceph osd pool create base-pool 4
-      - ceph osd pool application enable base-pool rados
-      - ceph osd pool create cache-pool 4
-      - ceph osd tier add base-pool cache-pool
-      - ceph osd tier cache-mode cache-pool writeback
-      - ceph osd tier set-overlay base-pool cache-pool
-      - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
-      - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
-      - rbd snap create base-pool/bar@snap
-      - rados -p base-pool cache-flush-evict-all
-      - rbd export base-pool/bar $TESTDIR/bar
-      - rbd export base-pool/bar@snap $TESTDIR/snap
-      - cmp $TESTDIR/foo $TESTDIR/bar
-      - cmp $TESTDIR/foo $TESTDIR/snap
-      - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
diff --git a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
deleted file mode 100644
index a06221449ff..00000000000
--- a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-# verify #13098 fix
-openstack:
-  - volumes: # attached to each instance
-      count: 3
-      size: 10 # GB
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
-overrides:
-  ceph:
-    log-ignorelist:
-      - is full
-      - overall HEALTH_
-      - \(POOL_FULL\)
-      - \(POOL_NEAR_FULL\)
-      - \(CACHE_POOL_NO_HIT_SET\)
-      - \(CACHE_POOL_NEAR_FULL\)
-      - \(POOL_APP_NOT_ENABLED\)
-tasks:
-- install:
-- ceph:
-    pre-mgr-commands:
-      - sudo ceph config set mgr mgr_pool false --force
-    conf:
-      global:
-        osd max object name len: 460
-        osd max object namespace len: 64
-- exec:
-    client.0:
-      - ceph osd pool create ec-ca 1 1
-      - ceph osd pool create ec 1 1 erasure default
-      - ceph osd pool application enable ec rados
-      - ceph osd tier add ec ec-ca
-      - ceph osd tier cache-mode ec-ca readproxy
-      - ceph osd tier set-overlay ec ec-ca
-      - ceph osd pool set ec-ca hit_set_type bloom
-      - ceph osd pool set-quota ec-ca max_bytes 20480000
-      - ceph osd pool set-quota ec max_bytes 20480000
-      - ceph osd pool set ec-ca target_max_bytes 20480000
-      - timeout 30 rados -p ec-ca bench 30 write || true
-      - ceph osd pool set-quota ec-ca max_bytes 0
-      - ceph osd pool set-quota ec max_bytes 0
diff --git a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
deleted file mode 100644
index e58fb4ef4ba..00000000000
--- a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-roles:
-- - mon.a
-  - mgr.x
-  - osd.0
-  - osd.1
-  - osd.2
-- - osd.3
-  - osd.4
-  - osd.5
-  - client.0
-openstack:
-  - volumes: # attached to each instance
-      count: 3
-      size: 30 # GB
-tasks:
-- install:
-- ceph:
-    pre-mgr-commands:
-      - sudo ceph config set mgr mgr_pool false --force
-    log-ignorelist:
-      - but it is still running
-      - slow request
-      - overall HEALTH_
-      - \(CACHE_POOL_
-      - \(POOL_APP_NOT_ENABLED\)
-- exec:
-    client.0:
-      - sudo ceph osd pool create base 4
-      - sudo ceph osd pool application enable base rados
-      - sudo ceph osd pool create cache 4
-      - sudo ceph osd tier add base cache
-      - sudo ceph osd tier cache-mode cache writeback
-      - sudo ceph osd tier set-overlay base cache
-      - sudo ceph osd pool set cache hit_set_type bloom
-      - sudo ceph osd pool set cache hit_set_count 8
-      - sudo ceph osd pool set cache hit_set_period 60
-      - sudo ceph osd pool set cache target_max_objects 500
-- background_exec:
-    mon.a:
-      - while true
-      - do sleep 30
-      - sudo ceph osd pool set cache cache_target_full_ratio .001
-      - echo cache-try-flush-evict-all
-      - rados -p cache cache-try-flush-evict-all
-      - sleep 5
-      - echo cache-flush-evict-all
-      - rados -p cache cache-flush-evict-all
-      - sleep 5
-      - echo remove overlay
-      - sudo ceph osd tier remove-overlay base
-      - sleep 20
-      # Disabled due to https://tracker.ceph.com/issues/46323
-      #- echo add writeback overlay
-      #- sudo ceph osd tier cache-mode cache writeback
-      #- sudo ceph osd pool set cache cache_target_full_ratio .8
-      #- sudo ceph osd tier set-overlay base cache
-      #- sleep 30
-      #- sudo ceph osd tier cache-mode cache readproxy
-      - done
-- rados:
-    clients: [client.0]
-    pools: [base]
-    max_seconds: 600
-    ops: 400000
-    objects: 10000
-    size: 1024
-    op_weights:
-      read: 100
-      write: 100
-      delete: 50
-      copy_from: 50
diff --git a/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml b/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml
deleted file mode 100644
index 33f667ffd93..00000000000
--- a/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - must scrub before tier agent can activate
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd pool create base 4
-      - sudo ceph osd pool application enable base rados
-      - sudo ceph osd pool create cache 4
-      - sudo ceph osd tier add base cache
-      - sudo ceph osd tier cache-mode cache writeback
-      - sudo ceph osd tier set-overlay base cache
-      - sudo ceph osd pool set cache hit_set_type bloom
-      - sudo ceph osd pool set cache hit_set_count 8
-      - sudo ceph osd pool set cache hit_set_period 3600
-      - sudo ceph osd pool set cache target_max_objects 250
-      - sudo ceph osd pool set cache min_read_recency_for_promote 2
-- rados:
-    clients: [client.2]
-    pools: [base]
-    ops: 4000
-    objects: 500
-    op_weights:
-      read: 100
-      write: 100
-      delete: 50
-      copy_from: 50
-      cache_flush: 50
-      cache_try_flush: 50
-      cache_evict: 50
-      snap_create: 50
-      snap_remove: 50
-      rollback: 50
diff --git a/qa/suites/rados/thrash/workloads/cache-agent-big.yaml b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
deleted file mode 100644
index 9ca2576d469..00000000000
--- a/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - must scrub before tier agent can activate
-      - \(POOL_APP_NOT_ENABLED\)
-    conf:
-      osd:
-        # override short_pg_log_entries.yaml (which sets these under [global])
-        osd_min_pg_log_entries: 3000
-        osd_max_pg_log_entries: 3000
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd erasure-code-profile set myprofile crush-failure-domain=osd m=2 k=2
-      - sudo ceph osd pool create base 4 4 erasure myprofile
-      - sudo ceph osd pool application enable base rados
-      - sudo ceph osd pool set base min_size 2
-      - sudo ceph osd pool create cache 4
-      - sudo ceph osd tier add base cache
-      - sudo ceph osd tier cache-mode cache writeback
-      - sudo ceph osd tier set-overlay base cache
-      - sudo ceph osd pool set cache hit_set_type bloom
-      - sudo ceph osd pool set cache hit_set_count 8
-      - sudo ceph osd pool set cache hit_set_period 60
-      - sudo ceph osd pool set cache target_max_objects 5000
-- rados:
-    clients: [client.0]
-    pools: [base]
-    ops: 10000
-    objects: 6600
-    max_seconds: 1200
-    size: 1024
-    op_weights:
-      read: 100
-      write: 100
-      delete: 50
-      copy_from: 50
diff --git a/qa/suites/rados/thrash/workloads/cache-agent-small.yaml b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
deleted file mode 100644
index 108009e3b0b..00000000000
--- a/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - must scrub before tier agent can activate
-      - \(POOL_APP_NOT_ENABLED\)
-    conf:
-      osd:
-        # override short_pg_log_entries.yaml (which sets these under [global])
-        osd_min_pg_log_entries: 3000
-        osd_max_pg_log_entries: 3000
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd pool create base 4
-      - sudo ceph osd pool application enable base rados
-      - sudo ceph osd pool create cache 4
-      - sudo ceph osd tier add base cache
-      - sudo ceph osd tier cache-mode cache writeback
-      - sudo ceph osd tier set-overlay base cache
-      - sudo ceph osd pool set cache hit_set_type bloom
-      - sudo ceph osd pool set cache hit_set_count 8
-      - sudo ceph osd pool set cache hit_set_period 60
-      - sudo ceph osd pool set cache target_max_objects 250
-      - sudo ceph osd pool set cache min_read_recency_for_promote 2
-      - sudo ceph osd pool set cache min_write_recency_for_promote 2
-- rados:
-    clients: [client.0]
-    pools: [base]
-    ops: 4000
-    objects: 500
-    op_weights:
-      read: 100
-      write: 100
-      delete: 50
-      copy_from: 50
diff --git a/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml b/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
deleted file mode 100644
index f864e117056..00000000000
--- a/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - must scrub before tier agent can activate
-      - \(POOL_APP_NOT_ENABLED\)
-    conf:
-      osd:
-        # override short_pg_log_entries.yaml (which sets these under [global])
-        osd_min_pg_log_entries: 3000
-        osd_max_pg_log_entries: 3000
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd pool create base 4
-      - sudo ceph osd pool application enable base rados
-      - sudo ceph osd pool create cache 4
-      - sudo ceph osd tier add base cache
-      - sudo ceph osd tier cache-mode cache readproxy
-      - sudo ceph osd tier set-overlay base cache
-      - sudo ceph osd pool set cache hit_set_type bloom
-      - sudo ceph osd pool set cache hit_set_count 8
-      - sudo ceph osd pool set cache hit_set_period 3600
-      - sudo ceph osd pool set cache target_max_objects 250
-- rados:
-    clients: [client.0]
-    pools: [base]
-    ops: 4000
-    objects: 500
-    pool_snaps: true
-    op_weights:
-      read: 100
-      write: 100
-      delete: 50
-      copy_from: 50
-      cache_flush: 50
-      cache_try_flush: 50
-      cache_evict: 50
-      snap_create: 50
-      snap_remove: 50
-      rollback: 50
diff --git a/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
deleted file mode 100644
index 6bf97c6920d..00000000000
--- a/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - must scrub before tier agent can activate
-      - \(POOL_APP_NOT_ENABLED\)
-    conf:
-      osd:
-        # override short_pg_log_entries.yaml (which sets these under [global])
-        osd_min_pg_log_entries: 3000
-        osd_max_pg_log_entries: 3000
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd pool create base 4
-      - sudo ceph osd pool application enable base rados
-      - sudo ceph osd pool create cache 4
-      - sudo ceph osd tier add base cache
-      - sudo ceph osd tier cache-mode cache writeback
-      - sudo ceph osd tier set-overlay base cache
-      - sudo ceph osd pool set cache hit_set_type bloom
-      - sudo ceph osd pool set cache hit_set_count 8
-      - sudo ceph osd pool set cache hit_set_period 3600
-      - sudo ceph osd pool set cache target_max_objects 250
-      - sudo ceph osd pool set cache min_read_recency_for_promote 0
-      - sudo ceph osd pool set cache min_write_recency_for_promote 0
-- rados:
-    clients: [client.0]
-    pools: [base]
-    ops: 4000
-    objects: 500
-    pool_snaps: true
-    op_weights:
-      read: 100
-      write: 100
-      delete: 50
-      copy_from: 50
-      cache_flush: 50
-      cache_try_flush: 50
-      cache_evict: 50
-      snap_create: 50
-      snap_remove: 50
-      rollback: 50
-openstack:
-  - machine:
-      ram: 15000 # MB
diff --git a/qa/suites/rados/thrash/workloads/cache-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-snaps.yaml
deleted file mode 100644
index 6d11f4cf12c..00000000000
--- a/qa/suites/rados/thrash/workloads/cache-snaps.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - must scrub before tier agent can activate
-      - \(POOL_APP_NOT_ENABLED\)
-    conf:
-      osd:
-        # override short_pg_log_entries.yaml (which sets these under [global])
-        osd_min_pg_log_entries: 3000
-        osd_max_pg_log_entries: 3000
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd pool create base 4
-      - sudo ceph osd pool application enable base rados
-      - sudo ceph osd pool create cache 4
-      - sudo ceph osd tier add base cache
-      - sudo ceph osd tier cache-mode cache writeback
-      - sudo ceph osd tier set-overlay base cache
-      - sudo ceph osd pool set cache hit_set_type bloom
-      - sudo ceph osd pool set cache hit_set_count 8
-      - sudo ceph osd pool set cache hit_set_period 3600
-      - sudo ceph osd pool set cache target_max_objects 250
-      - sudo ceph osd pool set cache min_read_recency_for_promote 2
-- rados:
-    clients: [client.0]
-    pools: [base]
-    ops: 4000
-    objects: 500
-    op_weights:
-      read: 100
-      write: 100
-      delete: 50
-      copy_from: 50
-      cache_flush: 50
-      cache_try_flush: 50
-      cache_evict: 50
-      snap_create: 50
-      snap_remove: 50
-      rollback: 50
diff --git a/qa/suites/rados/thrash/workloads/cache.yaml b/qa/suites/rados/thrash/workloads/cache.yaml
deleted file mode 100644
index bd9daac7af3..00000000000
--- a/qa/suites/rados/thrash/workloads/cache.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - must scrub before tier agent can activate
-      - \(POOL_APP_NOT_ENABLED\)
-    conf:
-      osd:
-        # override short_pg_log_entries.yaml (which sets these under [global])
-        osd_min_pg_log_entries: 3000
-        osd_max_pg_log_entries: 3000
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd pool create base 4
-      - sudo ceph osd pool application enable base rados
-      - sudo ceph osd pool create cache 4
-      - sudo ceph osd tier add base cache
-      - sudo ceph osd tier cache-mode cache writeback
-      - sudo ceph osd tier set-overlay base cache
-      - sudo ceph osd pool set cache hit_set_type bloom
-      - sudo ceph osd pool set cache hit_set_count 8
-      - sudo ceph osd pool set cache hit_set_period 3600
-      - sudo ceph osd pool set cache min_read_recency_for_promote 0
-      - sudo ceph osd pool set cache min_write_recency_for_promote 0
-- rados:
-    clients: [client.0]
-    pools: [base]
-    ops: 4000
-    objects: 500
-    op_weights:
-      read: 100
-      write: 100
-      delete: 50
-      copy_from: 50
-      cache_flush: 50
-      cache_try_flush: 50
-      cache_evict: 50
diff --git a/qa/suites/smoke/basic/tasks/test/rados_cache_snaps.yaml b/qa/suites/smoke/basic/tasks/test/rados_cache_snaps.yaml
deleted file mode 100644
index 7178f68245b..00000000000
--- a/qa/suites/smoke/basic/tasks/test/rados_cache_snaps.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-tasks:
-- ceph:
-    log-ignorelist:
-      - overall HEALTH_
-      - \(OSDMAP_FLAGS\)
-      - \(OSD_
-      - \(PG_
-      - \(POOL_
-      - \(CACHE_POOL_
-      - \(SMALLER_PGP_NUM\)
-      - \(OBJECT_
-      - \(SLOW_OPS\)
-      - \(TOO_FEW_PGS\)
-      - slow request
-- thrashosds:
-    chance_pgnum_grow: 2
-    chance_pgnum_shrink: 2
-    chance_pgpnum_fix: 1
-    timeout: 1200
-- exec:
-    client.0:
-      - sudo ceph osd pool create base 4
-      - sudo ceph osd pool application enable base rados
-      - sudo ceph osd pool create cache 4
-      - sudo ceph osd tier add base cache
-      - sudo ceph osd tier cache-mode cache writeback
-      - sudo ceph osd tier set-overlay base cache
-      - sudo ceph osd pool set cache hit_set_type bloom
-      - sudo ceph osd pool set cache hit_set_count 8
-      - sudo ceph osd pool set cache hit_set_period 3600
-      - sudo ceph osd pool set cache target_max_objects 250
-- rados:
-    clients:
-      - client.0
-    objects: 500
-    op_weights:
-      copy_from: 50
-      delete: 50
-      cache_evict: 50
-      cache_flush: 50
-      read: 100
-      rollback: 50
-      snap_create: 50
-      snap_remove: 50
-      cache_try_flush: 50
-      write: 100
-    ops: 4000
-    pool_snaps: true
-    pools:
-      - base
-- 
2.39.5