- ceph:
- exec:
client.0:
- - ceph mds set inline_data true --yes-i-really-mean-it
+ - sudo ceph mds set inline_data true --yes-i-really-mean-it
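# inline_data keeps small file contents directly in the CephFS inode; it is an experimental feature, hence --yes-i-really-mean-it.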
- ceph:
- exec:
client.0:
- - ceph mds set inline_data true --yes-i-really-mean-it
+ - sudo ceph mds set inline_data true --yes-i-really-mean-it
tasks:
- exec:
client.0:
- - ceph mds set inline_data true --yes-i-really-mean-it
+ - sudo ceph mds set inline_data true --yes-i-really-mean-it
tasks:
- exec:
client.0:
- - ceph mds set inline_data true --yes-i-really-mean-it
+ - sudo ceph mds set inline_data true --yes-i-really-mean-it
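# With the pg log capped at 100 entries below, the osd out/in cycle has to recover by backfill rather than log replay; rados bench pre-populates the pool with 1KB objects first.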
osd max pg log entries: 100
- exec:
client.0:
- - ceph osd pool create foo 64
+ - sudo ceph osd pool create foo 64
- rados -p foo bench 60 write -b 1024 --no-cleanup
- - ceph osd pool set foo size 3
- - ceph osd out 0 1
+ - sudo ceph osd pool set foo size 3
+ - sudo ceph osd out 0 1
- sleep:
duration: 60
- exec:
client.0:
- - ceph osd in 0 1
+ - sudo ceph osd in 0 1
- sleep:
duration: 60
log-whitelist:
- slow request
- exec:
client.0:
- - ceph osd pool create foo 128 128
+ - sudo ceph osd pool create foo 128 128
- sleep 5
- - ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
- - ceph osd pool delete foo foo --yes-i-really-really-mean-it
+ - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
+ - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
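# osd.0 is expected to crash on the injected failure while removing the deleted pool's PGs; the next tasks wait for that failure and restart the daemon.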
- ceph.wait_for_failure: [osd.0]
- ceph.restart: [osd.0]
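# Flushing osd.0's pg stats to the mons lets ceph.healthy observe the recovered state without waiting for the next periodic report.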
- exec:
client.0:
- - ceph tell osd.0 flush_pg_stats
+ - sudo ceph tell osd.0 flush_pg_stats
- ceph.healthy:
log-whitelist:
- slow request
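# Standard writeback cache tier: bloom-filter HitSets track object access, and target_max_objects 500 keeps the cache agent flushing and evicting throughout the run.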
- exec:
client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 500
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 500
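# The background loop below cycles the cache through forward, writeback, and readforward modes, flushing/evicting and removing/re-adding the overlay, to race tier changes against client I/O.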
- background_exec:
mon.a:
- while true
- do sleep 30
- echo forward
- - ceph osd tier cache-mode cache forward
+ - sudo ceph osd tier cache-mode cache forward
- sleep 10
- - ceph osd pool set cache cache_target_full_ratio .001
+ - sudo ceph osd pool set cache cache_target_full_ratio .001
- echo cache-try-flush-evict-all
- rados -p cache cache-try-flush-evict-all
- sleep 5
- rados -p cache cache-flush-evict-all
- sleep 5
- echo remove overlay
- - ceph osd tier remove-overlay base
+ - sudo ceph osd tier remove-overlay base
- sleep 20
- echo add writeback overlay
- - ceph osd tier cache-mode cache writeback
- - ceph osd pool set cache cache_target_full_ratio .8
- - ceph osd tier set-overlay base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd pool set cache cache_target_full_ratio .8
+ - sudo ceph osd tier set-overlay base cache
- sleep 30
- - ceph osd tier cache-mode cache readforward
+ - sudo ceph osd tier cache-mode cache readforward
- done
- rados:
clients: [client.0]
tasks:
- exec:
client.0:
- - ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd
- m=1 k=2
- - ceph osd pool create base 4 4 erasure teuthologyprofile
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 5000
+ - sudo ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd m=1 k=2
+ - sudo ceph osd pool create base 4 4 erasure teuthologyprofile
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 5000
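# With an erasure-coded base pool (k=2, m=1), the rados workload below reaches the base tier only through the replicated cache.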
- rados:
clients: [client.0]
pools: [base]
tasks:
- exec:
client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
- rados:
clients: [client.0]
pools: [base]
tasks:
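# Variant using readproxy mode: reads are proxied through the cache to the base pool without promoting objects into the cache.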
- exec:
client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache readproxy
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 3600
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache readproxy
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
- rados:
clients: [client.0]
pools: [base]
tasks:
- exec:
client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 3600
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
- rados:
clients: [client.0]
pools: [base]
tasks:
- exec:
client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 3600
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
- rados:
clients: [client.0]
pools: [base]
tasks:
- exec:
client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
- rados:
clients: [client.0]
pools: [base]
sequential:
- exec:
client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 3600
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
- rados:
clients: [client.0]
pools: [base]
tasks:
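# The following variants attach the cache tier to the default rbd pool rather than a dedicated base pool.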
- exec:
client.0:
- - ceph osd pool create cache 4
- - ceph osd tier add rbd cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay rbd cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
tasks:
- exec:
client.0:
- - ceph osd pool create cache 4
- - ceph osd tier add rbd cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay rbd cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
tasks:
- exec:
client.0:
- - ceph osd pool create cache 4
- - ceph osd tier add rbd cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay rbd cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
tasks:
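# This variant deletes the default rbd pool and recreates it erasure-coded before layering the cache tier on top.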
- exec:
client.0:
- - ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd m=1 k=2
- - ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
- - ceph osd pool create rbd 4 4 erasure teuthologyprofile
- - ceph osd pool create cache 4
- - ceph osd tier add rbd cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay rbd cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd m=1 k=2
+ - sudo ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
+ - sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
tasks:
- exec:
client.0:
- - ceph osd pool create cache 4
- - ceph osd tier add rbd cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay rbd cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
tasks:
- exec:
client.0:
- - ceph osd pool create cache 4
- - ceph osd tier add rbd cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay rbd cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
- thrashosds:
timeout: 1200
- exec:
client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 3600
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
- rados:
clients:
- client.0
- print: "**** running mixed versions of osds and mons"
- exec:
mon.b:
- - ceph osd crush tunables firefly
+ - sudo ceph osd crush tunables firefly
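# Pinning the crush tunables to the older profile keeps PG mappings computable by the daemons that have not been upgraded yet.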
- install.upgrade:
mon.b:
- print: "**** done install.upgrade mon.b to the version from teuthology-suite arg"
- exec:
mon.b:
# is this command valid?
- - ceph osd crush tunables firefly
+ - sudo ceph osd crush tunables firefly
- install.upgrade:
mon.b:
branch: hammer
- print: "**** running mixed versions of osds and mons"
- exec:
mon.b:
- - ceph osd crush tunables hammer
+ - sudo ceph osd crush tunables hammer
- install.upgrade:
mon.b:
- print: "**** done install.upgrade mon.b to the version from teuthology-suite arg"
- print: "**** running mixed versions of osds and mons"
- exec:
mon.b:
- - ceph osd crush tunables firefly
+ - sudo ceph osd crush tunables firefly
- install.upgrade:
mon.b:
- print: "**** done install.upgrade mon.b to the version from teuthology-suite arg"
tasks:
- exec:
client.0:
- - ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd k=2 m=1
- - ceph osd pool create ecbase 4 4 erasure teuthologyprofile
- - ceph osd pool create eccache 4
- - ceph osd tier add ecbase eccache
- - ceph osd tier cache-mode eccache writeback
- - ceph osd tier set-overlay ecbase eccache
- - ceph osd pool set eccache hit_set_type bloom
- - ceph osd pool set eccache hit_set_count 8
- - ceph osd pool set eccache hit_set_period 60
- - ceph osd pool set eccache target_max_objects 250
+ - sudo ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd k=2 m=1
+ - sudo ceph osd pool create ecbase 4 4 erasure teuthologyprofile
+ - sudo ceph osd pool create eccache 4
+ - sudo ceph osd tier add ecbase eccache
+ - sudo ceph osd tier cache-mode eccache writeback
+ - sudo ceph osd tier set-overlay ecbase eccache
+ - sudo ceph osd pool set eccache hit_set_type bloom
+ - sudo ceph osd pool set eccache hit_set_count 8
+ - sudo ceph osd pool set eccache hit_set_period 60
+ - sudo ceph osd pool set eccache target_max_objects 250
- exec:
client.0:
- - ceph osd deep-scrub '*'
+ - sudo ceph osd deep-scrub '*'
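# Deep-scrubbing '*' kicks off a full data scrub on every osd; it proceeds in the background while the rados workload below runs.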
- sequential:
- rados:
clients: [client.0]
tasks:
- exec:
client.0:
- - ceph osd deep-scrub '*'
+ - sudo ceph osd deep-scrub '*'
- sequential:
- rados:
clients: [client.0]
- exec:
mon.a:
- |-
- ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by:"
+ sudo ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by:"
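# On a cluster whose osds are too old for the lrc plugin, profile creation should fail; the grep asserts that the "unsupported by:" error is actually raised.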
- sequential:
- exec:
client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 3600
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
- rados:
clients: [client.0]
pools: [base]
sequential:
- exec:
client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 3600
- - ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
- rados:
clients: [client.0]
pools: [base]
# do we need to use "ceph osd crush tunables giant"?
- exec:
mon.b:
- - ceph osd crush tunables firefly
+ - sudo ceph osd crush tunables firefly
- print: "**** done ceph osd crush tunables firefly"
- install.upgrade:
mon.b:
# do we need to use "ceph osd crush tunables hammer"?
- exec:
mon.b:
- - ceph osd crush tunables hammer
+ - sudo ceph osd crush tunables hammer
- print: "**** done ceph osd crush tunables hammer"
- install.upgrade:
mon.b:
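# Same negative check for the shec plugin: while pre-shec daemons are still in the map, creating the profile should be rejected with "unsupported by".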
- exec:
mon.a:
- |-
- ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by"
+ sudo ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by"