mon osdmap full prune txsize: 2
tasks:
- thrashosds:
+ timeout: 2400
dump_ops_enable: false
sighup_delay: 0
min_in: 3
+ noscrub_toggle_delay: 0
+ chance_down: 0
+ chance_thrash_pg_upmap: 0
+ reweight_osd: 0
+ thrash_primary_affinity: false
+ ceph_objectstore_tool: false
+ chance_inject_pause_short: 0
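+    # Note: the zeroed chances and disabled toggles above turn off most optional
+    # thrash behaviors, presumably isolating plain OSD out/in thrashing.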
--- /dev/null
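+# Run a short radosbench write load, then verify the objecter_requests
+# admin-socket command on client.0 using the helper script fetched below.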
+overrides:
+ ceph:
+ conf:
+ client.0:
+ admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 150
+- admin_socket:
+ client.0:
+ objecter_requests:
+ test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
--- /dev/null
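+# Cache tiering over an erasure-coded (k=2, m=2) base pool in writeback mode.
+# target_max_objects 5000 keeps the tiering agent flushing and evicting while
+# the workload runs; the ignorelisted scrub warning is expected during warm-up.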
+overrides:
+ ceph:
+ log-ignorelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set myprofile crush-failure-domain=osd m=2 k=2
+ - sudo ceph osd pool create base 4 4 erasure myprofile
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool set base min_size 2
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 5000
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 10000
+ objects: 6600
+ max_seconds: 1200
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
--- /dev/null
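+# Writeback cache tier over a replicated base pool; recency 2 means an object
+# must appear in two recent HitSets before reads or writes promote it.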
+overrides:
+ ceph:
+ log-ignorelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+ - sudo ceph osd pool set cache min_write_recency_for_promote 2
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
--- /dev/null
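+# readproxy cache mode: read misses are proxied through to the base pool rather
+# than forcing promotion; the workload also exercises pool snapshots.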
+overrides:
+ ceph:
+ log-ignorelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache readproxy
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ # TODO: CEPH_OSD_OP_CACHE_FLUSH
+ cache_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_TRY_FLUSH
+ cache_try_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_EVICT
+ cache_evict: 0
+ snap_create: 50
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
--- /dev/null
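+# Writeback tier with both promotion recency thresholds at 0, so every access
+# promotes, combined with pool snapshots and a small cache (250 objects).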
+overrides:
+ ceph:
+ log-ignorelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ # TODO: CEPH_OSD_OP_CACHE_FLUSH
+ cache_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_TRY_FLUSH
+ cache_try_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_EVICT
+ cache_evict: 0
+ snap_create: 50
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
+openstack:
+ - machine:
+ ram: 15000 # MB
--- /dev/null
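+# Writeback tier with read recency 2; balance_reads spreads reads across
+# replicas instead of always reading from the primary.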
+overrides:
+ ceph:
+ log-ignorelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ balance_reads: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ # TODO: CEPH_OSD_OP_CACHE_FLUSH
+ cache_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_TRY_FLUSH
+ cache_try_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_EVICT
+ cache_evict: 0
+ snap_create: 50
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
+
--- /dev/null
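+# Same writeback/recency-2 workload as the balanced-reads variant, but with
+# default primary-only reads.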
+overrides:
+ ceph:
+ log-ignorelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ # TODO: CEPH_OSD_OP_CACHE_FLUSH
+ cache_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_TRY_FLUSH
+ cache_try_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_EVICT
+ cache_evict: 0
+ snap_create: 50
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
+
--- /dev/null
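+# No target_max_objects is set here, so the tiering agent should never flush
+# or evict; with recency 0 every access still promotes into the cache tier.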
+overrides:
+ ceph:
+ log-ignorelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ # TODO: CEPH_OSD_OP_CACHE_FLUSH
+ cache_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_TRY_FLUSH
+ cache_try_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_EVICT
+ cache_evict: 0
+
--- /dev/null
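+# Dedup workload: set_chunk enables manifest chunking, using fastcdc
+# content-defined chunking at a 128 KiB target chunk size into low_tier.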
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 1500
+ objects: 50
+ set_chunk: true
+ enable_dedup: true
+ dedup_chunk_size: '131072'
+ dedup_chunk_algo: 'fastcdc'
+ op_weights:
+ read: 100
+ write: 50
+ # TODO: CEPH_OSD_OP_SET_CHUNK
+ set_chunk: 0
+ # TODO: CEPH_OSD_OP_TIER_PROMOTE
+ tier_promote: 0
+ # TODO: CEPH_OSD_OP_TIER_FLUSH
+ tier_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_EVICT
+ cache_evict: 0
--- /dev/null
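+# Same dedup workload with snapshot create/remove ops mixed in.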
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 1500
+ objects: 50
+ set_chunk: true
+ enable_dedup: true
+ dedup_chunk_size: '131072'
+ dedup_chunk_algo: 'fastcdc'
+ op_weights:
+ read: 100
+ write: 50
+ # TODO: CEPH_OSD_OP_SET_CHUNK
+ set_chunk: 0
+ # TODO: CEPH_OSD_OP_TIER_PROMOTE
+ tier_promote: 0
+ # TODO: CEPH_OSD_OP_TIER_FLUSH
+ tier_flush: 0
+ # TODO: CEPH_OSD_OP_CACHE_EVICT
+ cache_evict: 0
+ snap_create: 10
+ snap_remove: 10
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
--- /dev/null
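+# Pool-snapshot workload over few objects; a digest min age of 0 lets deep
+# scrub record whole-object digests immediately instead of waiting for age.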
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd deep scrub update digest min age: 0
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+
--- /dev/null
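+# Run the full rados/test.sh API workunit under jewel CRUSH tunables; the
+# quota and pool-application warnings it provokes are ignorelisted below.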
+overrides:
+ ceph:
+ log-ignorelist:
+ - reached quota
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(PG_AVAILABILITY\)
+ crush_tunables: jewel
+ conf:
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ mon:
+ mon warn on pool no app: false
+ debug mgrc: 20
+ osd:
+ osd class load list: "*"
+ osd class default list: "*"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
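+# Eight back-to-back 90-second radosbench runs with 128 concurrent ops and an
+# 8 KiB write size, favoring a high operation rate over raw throughput.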
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
--- /dev/null
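+# Five back-to-back 90-second radosbench runs at the default (4 MiB) op size.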
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
--- /dev/null
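+# Mixed read/write/delete workload with redirect support plumbed in but
+# disabled (set_redirect: false) pending CEPH_OSD_OP_SET_REDIRECT coverage.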
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 4000
+ objects: 500
+ # TODO: CEPH_OSD_OP_SET_REDIRECT
+ set_redirect: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
--- /dev/null
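+# Read-only variant of the redirect workload, again with redirects disabled
+# and the redirect/promote op weights held at 0.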
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 4000
+ objects: 500
+ # TODO: CEPH_OSD_OP_SET_REDIRECT
+ set_redirect: false
+ op_weights:
+ # TODO: CEPH_OSD_OP_SET_REDIRECT
+ set_redirect: 0
+ read: 50
+ # TODO: CEPH_OSD_OP_TIER_PROMOTE
+ tier_promote: 0
--- /dev/null
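+# set_redirect: true enables redirect handling in the test itself, while the
+# explicit set_redirect/tier_promote op weights stay at 0 (see TODOs).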
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 4000
+ objects: 500
+ set_redirect: true
+ op_weights:
+ # TODO: CEPH_OSD_OP_SET_REDIRECT
+ set_redirect: 0
+ read: 50
+ # TODO: CEPH_OSD_OP_TIER_PROMOTE
+ tier_promote: 0
--- /dev/null
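+# Redirect-enabled variant; the explicit set_redirect and copy_from op weights
+# remain 0 pending the TODOs below.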
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 4000
+ objects: 500
+ set_redirect: true
+ op_weights:
+ # TODO: CEPH_OSD_OP_SET_REDIRECT
+ set_redirect: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
--- /dev/null
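+# Chunked-manifest workload: set_chunk enables chunking, while the chunk_read
+# and tier_promote op weights are held at 0 for now.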
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 4000
+ objects: 300
+ set_chunk: true
+ op_weights:
+ chunk_read: 0
+ # TODO: CEPH_OSD_OP_TIER_PROMOTE
+ tier_promote: 0
--- /dev/null
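+# High-op-count small-object (16 KiB) workload under jewel CRUSH tunables,
+# with balanced reads and xattr set/remove ops in the mix.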
+overrides:
+ ceph:
+ crush_tunables: jewel
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ balance_reads: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ setattr: 25
+ rmattr: 25
--- /dev/null
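+# Same workload with localized reads, which prefer a replica close to the
+# client in the CRUSH hierarchy.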
+overrides:
+ ceph:
+ crush_tunables: jewel
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ localize_reads: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ setattr: 25
+ rmattr: 25
op_weights:
read: 100
write: 100
+ delete: 50
+ snap_create: 0
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ setattr: 25
+ rmattr: 25
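+    # Snapshot ops stay at weight 0 in this variant; setattr/rmattr exercise
+    # the xattr paths.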
--- /dev/null
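+# Snapshot workload over few objects with balanced reads across replicas.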
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ balance_reads: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
--- /dev/null
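+# Snapshot workload over few objects with localized reads.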
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ localize_reads: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
--- /dev/null
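+# Baseline snapshot workload over few objects with default primary reads.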
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 0
+ # TODO: CEPH_OSD_OP_ROLLBACK
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
--- /dev/null
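+# Every write carries the FADVISE_DONTNEED hint, telling the OSD not to
+# retain the written data in its cache.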
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_fadvise_dontneed: true
+ op_weights:
+ write: 100