--- /dev/null
+../../../../overrides/2-size-2-min-size.yaml
\ No newline at end of file
--- /dev/null
+../../../../overrides/3-size-2-min-size.yaml
\ No newline at end of file
--- /dev/null
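+# Start from a jewel install; ceph-mgr and the libcephfs2 packages are not
+# shipped by jewel, so they are excluded. install.upgrade then upgrades the
+# mon.a and mon.b hosts to the branch under test.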
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- install.upgrade:
+ mon.a:
+ mon.b:
--- /dev/null
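+# Same flow starting from luminous, which already ships all of the packages
+# excluded in the jewel variant above.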
+tasks:
+- install:
+ branch: luminous
+- install.upgrade:
+ mon.a:
+ mon.b:
--- /dev/null
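+# Have OSDs send backoffs to clients instead of queueing their requests while
+# a PG is peering.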
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
--- /dev/null
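+# As above, but also back off client I/O on degraded objects.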
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
+ osd backoff on degraded: true
--- /dev/null
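+# Bring the cluster up with defaults; behaviour comes from whichever override
+# fragments this is combined with.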
+tasks:
+- ceph:
--- /dev/null
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
--- /dev/null
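+# Two mixed mon/mgr/osd nodes plus a client-only node (client.2) that runs the
+# workloads. 'osd shutdown pgref assert' makes an OSD assert if PG references
+# are still held at shutdown, so refcount leaks fail the run.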
+roles:
+- [mon.a, mon.c, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0]
+- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1]
+- [client.2]
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd shutdown pgref assert: true
--- /dev/null
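+# Poll until the mgr balancer module answers, then enable it in crush-compat
+# mode so it rebalances via CRUSH compat weight-sets.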
+tasks:
+- exec:
+ mon.a:
+ - while ! ceph balancer status ; do sleep 1 ; done
+ - ceph balancer mode crush-compat
+ - ceph balancer on
--- /dev/null
+../basic/msgr
\ No newline at end of file
--- /dev/null
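+# "fastclose"-style failures: inject a socket failure roughly once per 2500
+# messages and drop the TCP read timeout from its 900s default so dead
+# connections are detected quickly.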
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms tcp read timeout: 5
--- /dev/null
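+# Fewer injected failures; heartbeats use the min-delay socket so OSD
+# liveness checks stay timely despite the injection.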
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ osd:
+ osd heartbeat use min delay socket: true
--- /dev/null
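+# On top of injected socket failures, randomly delay messages bound for OSDs
+# (probability .005, up to 1s) and add small internal dispatch delays.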
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms inject delay type: osd
+ ms inject delay probability: .005
+ ms inject delay max: 1
+ ms inject internal delays: .002
--- /dev/null
+../../../config/rados.yaml
\ No newline at end of file
--- /dev/null
+../../../mon_kv_backend/rocksdb.yaml
\ No newline at end of file
--- /dev/null
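+# Default thrasher: randomly reject 30% of backfill reservations, scrub often,
+# slow snap trimming, and prune full osdmaps regularly while thrashosds
+# restarts OSDs and occasionally grows pg_num/pgp_num.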
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
--- /dev/null
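+# Map-gap thrasher: an osdmap cache of one entry forces OSDs to fetch maps
+# from disk and fall behind easily, and chance_test_map_discontinuity makes
+# the thrasher create osdmap gaps the OSDs must recover across.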
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - osd_map_cache_size
+ conf:
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+ osd:
+ osd map cache size: 1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd scrub during recovery: false
+ osd max backfills: 6
+tasks:
+- thrashosds:
+ timeout: 1800
+ chance_pgnum_grow: 0.25
+ chance_pgpnum_fix: 0.25
+ chance_test_map_discontinuity: 2
--- /dev/null
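+# More PG growth: widen the journal and filestore queue throttles, allow up
+# to 9 concurrent backfills, and bias thrashing towards growing pg_num; the
+# larger volumes absorb the extra data movement.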
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ journal throttle high multiple: 2
+ journal throttle max multiple: 10
+ filestore queue throttle high multiple: 2
+ filestore queue throttle max multiple: 10
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+openstack:
+- volumes:
+    size: 50 # GB
--- /dev/null
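+# PG-growth thrashing over filestore with O_DSYNC writes; backfill stays slow
+# (max 2 concurrent, .5s snap-trim sleep) and full osdmaps are pruned
+# regularly.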
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ filestore odsync write: true
+ osd max backfills: 2
+ osd snap trim sleep: .5
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
--- /dev/null
+../../../tasks/thrashosds-health.yaml
\ No newline at end of file
--- /dev/null
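+# Build a writeback cache tier (bloom hit set, 250-object target) over a small
+# base pool, then drive a mixed read/write/snap workload with explicit
+# flush/evict ops from the client-only node.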
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+- rados:
+ clients: [client.2]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
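+# Eight back-to-back 90s radosbench write runs from client.2; full_sequential
+# runs its subtasks one after another rather than in parallel.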
+overrides:
+ ceph:
+ conf:
+ client.2:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
--- /dev/null
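+# Snap workload over only 50 objects so that snaps, rollbacks, and copy_from
+# ops repeatedly hit the same objects.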
+tasks:
+- rados:
+ clients: [client.2]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50