+++ /dev/null
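-# CephFS with a writeback cache tier over cephfs_data: client.0 writes, truncates,
-# and rewrites a file through ceph-fuse, then flushes and evicts the cache pool;
-# client.1 verifies the file contents still match the saved copy.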
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-roles:
-- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
-tasks:
-- install:
-- ceph:
- pre-mgr-commands:
- - sudo ceph config set mgr mgr_pool false --force
- log-ignorelist:
- - overall HEALTH_
- - \(CACHE_POOL_NO_HIT_SET\)
- - \(POOL_APP_NOT_ENABLED\)
- conf:
- global:
- osd max object name len: 460
- osd max object namespace len: 64
- debug client: 20
- debug mds: 20
- debug ms: 1
-- exec:
- client.0:
- - ceph osd pool create data_cache 4
- - ceph osd tier add cephfs_data data_cache
- - ceph osd tier cache-mode data_cache writeback
- - ceph osd tier set-overlay cephfs_data data_cache
- - ceph osd pool set data_cache hit_set_type bloom
- - ceph osd pool set data_cache hit_set_count 8
- - ceph osd pool set data_cache hit_set_period 3600
- - ceph osd pool set data_cache min_read_recency_for_promote 0
-- ceph-fuse:
-- exec:
- client.0:
- - sudo chmod 777 $TESTDIR/mnt.0/
- - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
- - ls -al $TESTDIR/mnt.0/foo
- - truncate --size 0 $TESTDIR/mnt.0/foo
- - ls -al $TESTDIR/mnt.0/foo
- - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
- - ls -al $TESTDIR/mnt.0/foo
- - cp $TESTDIR/mnt.0/foo /tmp/foo
- - sync
- - rados -p data_cache ls -
- - sleep 10
- - rados -p data_cache ls -
- - rados -p data_cache cache-flush-evict-all
- - rados -p data_cache ls -
- - sleep 1
-- exec:
- client.1:
- - hexdump -C /tmp/foo | head
- - hexdump -C $TESTDIR/mnt.1/foo | head
- - cmp $TESTDIR/mnt.1/foo /tmp/foo
+++ /dev/null
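-# RBD over a writeback cache tier: import an image and take a snapshot in base-pool,
-# flush/evict the cache pool, then export the image and the snapshot and compare
-# both against the original source file.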
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-tasks:
-- install:
-- ceph:
- pre-mgr-commands:
- - sudo ceph config set mgr mgr_pool false --force
- log-ignorelist:
- - overall HEALTH_
- - \(CACHE_POOL_NO_HIT_SET\)
- - \(POOL_APP_NOT_ENABLED\)
- conf:
- global:
- osd max object name len: 460
- osd max object namespace len: 64
-- exec:
- client.0:
- - ceph osd pool create base-pool 4
- - ceph osd pool application enable base-pool rados
- - ceph osd pool create cache-pool 4
- - ceph osd tier add base-pool cache-pool
- - ceph osd tier cache-mode cache-pool writeback
- - ceph osd tier set-overlay base-pool cache-pool
- - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
- - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
- - rbd snap create base-pool/bar@snap
- - rados -p base-pool cache-flush-evict-all
- - rbd export base-pool/bar $TESTDIR/bar
- - rbd export base-pool/bar@snap $TESTDIR/snap
- - cmp $TESTDIR/foo $TESTDIR/bar
- - cmp $TESTDIR/foo $TESTDIR/snap
- - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
+++ /dev/null
-# verify #13098 fix
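-# readproxy cache tier over an erasure-coded pool, with byte quotas set on both
-# tiers; a timed rados bench write runs against the quota-limited tiers, then the
-# quotas are cleared.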
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
-overrides:
- ceph:
- log-ignorelist:
- - is full
- - overall HEALTH_
- - \(POOL_FULL\)
- - \(POOL_NEAR_FULL\)
- - \(CACHE_POOL_NO_HIT_SET\)
- - \(CACHE_POOL_NEAR_FULL\)
- - \(POOL_APP_NOT_ENABLED\)
-tasks:
-- install:
-- ceph:
- pre-mgr-commands:
- - sudo ceph config set mgr mgr_pool false --force
- conf:
- global:
- osd max object name len: 460
- osd max object namespace len: 64
-- exec:
- client.0:
- - ceph osd pool create ec-ca 1 1
- - ceph osd pool create ec 1 1 erasure default
- - ceph osd pool application enable ec rados
- - ceph osd tier add ec ec-ca
- - ceph osd tier cache-mode ec-ca readproxy
- - ceph osd tier set-overlay ec ec-ca
- - ceph osd pool set ec-ca hit_set_type bloom
- - ceph osd pool set-quota ec-ca max_bytes 20480000
- - ceph osd pool set-quota ec max_bytes 20480000
- - ceph osd pool set ec-ca target_max_bytes 20480000
- - timeout 30 rados -p ec-ca bench 30 write || true
- - ceph osd pool set-quota ec-ca max_bytes 0
- - ceph osd pool set-quota ec max_bytes 0
+++ /dev/null
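-# While a mixed rados workload runs against the base pool, a background loop on
-# mon.a repeatedly shrinks cache_target_full_ratio, flushes/evicts the cache pool,
-# and removes the tier overlay.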
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
-tasks:
-- install:
-- ceph:
- pre-mgr-commands:
- - sudo ceph config set mgr mgr_pool false --force
- log-ignorelist:
- - but it is still running
- - slow request
- - overall HEALTH_
- - \(CACHE_POOL_
- - \(POOL_APP_NOT_ENABLED\)
-- exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 60
- - sudo ceph osd pool set cache target_max_objects 500
-- background_exec:
- mon.a:
- - while true
- - do sleep 30
- - sudo ceph osd pool set cache cache_target_full_ratio .001
- - echo cache-try-flush-evict-all
- - rados -p cache cache-try-flush-evict-all
- - sleep 5
- - echo cache-flush-evict-all
- - rados -p cache cache-flush-evict-all
- - sleep 5
- - echo remove overlay
- - sudo ceph osd tier remove-overlay base
- - sleep 20
- # Disabled due to https://tracker.ceph.com/issues/46323
- #- echo add writeback overlay
- #- sudo ceph osd tier cache-mode cache writeback
- #- sudo ceph osd pool set cache cache_target_full_ratio .8
- #- sudo ceph osd tier set-overlay base cache
- #- sleep 30
- #- sudo ceph osd tier cache-mode cache readproxy
- - done
-- rados:
- clients: [client.0]
- pools: [base]
- max_seconds: 600
- ops: 400000
- objects: 10000
- size: 1024
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
+++ /dev/null
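-# Writeback cache tier over a replicated base pool; client.2 runs a mixed rados
-# workload that includes cache flush/evict and snapshot create/remove/rollback ops.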
-overrides:
- ceph:
- log-ignorelist:
- - must scrub before tier agent can activate
-tasks:
-- exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 3600
- - sudo ceph osd pool set cache target_max_objects 250
- - sudo ceph osd pool set cache min_read_recency_for_promote 2
-- rados:
- clients: [client.2]
- pools: [base]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- cache_flush: 50
- cache_try_flush: 50
- cache_evict: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
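-# Writeback cache tier over an erasure-coded base pool; target_max_objects 5000
-# keeps the tier agent flushing and evicting during a long mixed rados workload.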
-overrides:
- ceph:
- log-ignorelist:
- - must scrub before tier agent can activate
- - \(POOL_APP_NOT_ENABLED\)
- conf:
- osd:
- # override short_pg_log_entries.yaml (which sets these under [global])
- osd_min_pg_log_entries: 3000
- osd_max_pg_log_entries: 3000
-tasks:
-- exec:
- client.0:
- - sudo ceph osd erasure-code-profile set myprofile crush-failure-domain=osd m=2 k=2
- - sudo ceph osd pool create base 4 4 erasure myprofile
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool set base min_size 2
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 60
- - sudo ceph osd pool set cache target_max_objects 5000
-- rados:
- clients: [client.0]
- pools: [base]
- ops: 10000
- objects: 6600
- max_seconds: 1200
- size: 1024
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
+++ /dev/null
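-# Small writeback cache tier (target_max_objects 250) with read and write recency
-# promotion thresholds of 2, under a basic mixed rados workload.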
-overrides:
- ceph:
- log-ignorelist:
- - must scrub before tier agent can activate
- - \(POOL_APP_NOT_ENABLED\)
- conf:
- osd:
- # override short_pg_log_entries.yaml (which sets these under [global])
- osd_min_pg_log_entries: 3000
- osd_max_pg_log_entries: 3000
-tasks:
-- exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 60
- - sudo ceph osd pool set cache target_max_objects 250
- - sudo ceph osd pool set cache min_read_recency_for_promote 2
- - sudo ceph osd pool set cache min_write_recency_for_promote 2
-- rados:
- clients: [client.0]
- pools: [base]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
+++ /dev/null
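-# readproxy cache tier with pool snapshots enabled; the workload mixes reads,
-# writes, cache flush/evict, and snapshot create/remove/rollback operations.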
-overrides:
- ceph:
- log-ignorelist:
- - must scrub before tier agent can activate
- - \(POOL_APP_NOT_ENABLED\)
- conf:
- osd:
- # override short_pg_log_entries.yaml (which sets these under [global])
- osd_min_pg_log_entries: 3000
- osd_max_pg_log_entries: 3000
-tasks:
-- exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache readproxy
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 3600
- - sudo ceph osd pool set cache target_max_objects 250
-- rados:
- clients: [client.0]
- pools: [base]
- ops: 4000
- objects: 500
- pool_snaps: true
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- cache_flush: 50
- cache_try_flush: 50
- cache_evict: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
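-# Writeback cache tier with both promotion recency thresholds set to 0 (promote on
-# first access) and pool snapshots enabled; the workload mixes cache flush/evict
-# with snapshot operations.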
-overrides:
- ceph:
- log-ignorelist:
- - must scrub before tier agent can activate
- - \(POOL_APP_NOT_ENABLED\)
- conf:
- osd:
- # override short_pg_log_entries.yaml (which sets these under [global])
- osd_min_pg_log_entries: 3000
- osd_max_pg_log_entries: 3000
-tasks:
-- exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 3600
- - sudo ceph osd pool set cache target_max_objects 250
- - sudo ceph osd pool set cache min_read_recency_for_promote 0
- - sudo ceph osd pool set cache min_write_recency_for_promote 0
-- rados:
- clients: [client.0]
- pools: [base]
- ops: 4000
- objects: 500
- pool_snaps: true
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- cache_flush: 50
- cache_try_flush: 50
- cache_evict: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-openstack:
- - machine:
- ram: 15000 # MB
+++ /dev/null
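-# Writeback cache tier with min_read_recency_for_promote 2; the workload mixes
-# snapshot create/remove/rollback with cache flush/evict operations.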
-overrides:
- ceph:
- log-ignorelist:
- - must scrub before tier agent can activate
- - \(POOL_APP_NOT_ENABLED\)
- conf:
- osd:
- # override short_pg_log_entries.yaml (which sets these under [global])
- osd_min_pg_log_entries: 3000
- osd_max_pg_log_entries: 3000
-tasks:
-- exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 3600
- - sudo ceph osd pool set cache target_max_objects 250
- - sudo ceph osd pool set cache min_read_recency_for_promote 2
-- rados:
- clients: [client.0]
- pools: [base]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- cache_flush: 50
- cache_try_flush: 50
- cache_evict: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
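-# Writeback cache tier with no target_max_objects and both promotion recency
-# thresholds set to 0, so flushing and eviction come from the workload's explicit
-# cache_flush/cache_try_flush/cache_evict ops rather than from the tier agent.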
-overrides:
- ceph:
- log-ignorelist:
- - must scrub before tier agent can activate
- - \(POOL_APP_NOT_ENABLED\)
- conf:
- osd:
- # override short_pg_log_entries.yaml (which sets these under [global])
- osd_min_pg_log_entries: 3000
- osd_max_pg_log_entries: 3000
-tasks:
-- exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 3600
- - sudo ceph osd pool set cache min_read_recency_for_promote 0
- - sudo ceph osd pool set cache min_write_recency_for_promote 0
-- rados:
- clients: [client.0]
- pools: [base]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- cache_flush: 50
- cache_try_flush: 50
- cache_evict: 50
+++ /dev/null
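-# OSD thrashing (PG counts grown and shrunk) runs concurrently with a pool-snapshot
-# rados workload against a writeback cache tier (target_max_objects 250).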
-tasks:
-- ceph:
- log-ignorelist:
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(POOL_
- - \(CACHE_POOL_
- - \(SMALLER_PGP_NUM\)
- - \(OBJECT_
- - \(SLOW_OPS\)
- - \(TOO_FEW_PGS\)
- - slow request
-- thrashosds:
- chance_pgnum_grow: 2
- chance_pgnum_shrink: 2
- chance_pgpnum_fix: 1
- timeout: 1200
-- exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 3600
- - sudo ceph osd pool set cache target_max_objects 250
-- rados:
- clients:
- - client.0
- objects: 500
- op_weights:
- copy_from: 50
- delete: 50
- cache_evict: 50
- cache_flush: 50
- read: 100
- rollback: 50
- snap_create: 50
- snap_remove: 50
- cache_try_flush: 50
- write: 100
- ops: 4000
- pool_snaps: true
- pools:
- - base