]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
rados singleton reorg
author Shylesh Kumar <shylesh.mohan@gmail.com>
Mon, 26 Mar 2018 17:34:45 +0000 (23:04 +0530)
committer Shylesh Kumar <shylesh.mohan@gmail.com>
Wed, 9 May 2018 03:31:45 +0000 (09:01 +0530)
Signed-off-by: Shylesh Kumar <shylesh.mohan@gmail.com>
36 files changed:
qa/downstream-config/clusters/fixed-3.yaml
qa/downstream-config/clusters/fixed-6.yaml
qa/suites/rados/downstream/singleton/% [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/admin-socket.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/divergent_priors.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/divergent_priors2.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/dump-stuck.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/ec-lost-unfound.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/lost-unfound-delete.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/lost-unfound.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/max-pg-per-osd.from-mon.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/max-pg-per-osd.from-primary.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/max-pg-per-osd.from-replica.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/mon-auth-caps.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/mon-config-keys.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/mon-seesaw.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/osd-backfill.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/osd-recovery-incomplete.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/osd-recovery.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/peer.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/pg-removal-interruption.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/radostool.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/random-eio.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/rebuild-mondb.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/reg11184.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/resolve_stuck_peering.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/rest-api.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/thrash-eio.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/thrash-rados/+ [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/thrash-rados/thrash-rados.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/all/thrash-rados/thrashosds-health.yaml [new symlink]
qa/suites/rados/downstream/singleton/all/watch-notify-same-primary.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/msgr [new symlink]
qa/suites/rados/downstream/singleton/msgr-failures/many.yaml [new file with mode: 0644]
qa/suites/rados/downstream/singleton/objectstore [new symlink]
qa/suites/rados/downstream/singleton/rados.yaml [new symlink]

index ddc79a84b60c64ec7bcfd8227fd6953d571e04e2..8dcb9ac3a61537fcd2fb1ca4208b5f4e1e3bd4e1 100644 (file)
@@ -1,7 +1,7 @@
 roles:
-- [mon.a, mon.c, mgr.x, osd.0, osd.1, osd.2, osd.3]
-- [mon.b, mgr.y, osd.4, osd.5, osd.6, osd.7]
-- [client.0]
+- [mon.a, mon.c, mgr.x, osd.0, osd.1, osd.2]
+- [mon.b, mgr.y, osd.3, osd.4, osd.5]
+- [osd.6, osd.7, osd.8, client.0]
 openstack:
 - volumes: # attached to each instance
     count: 4
index a2669b7508148e52a93185cb86b59411e5658ae8..db8f36ca3507694e6a471bc9b34c7cb5fe1e58ab 100644 (file)
@@ -1,6 +1,8 @@
 roles:
 - [mon.a, mon.c, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0]
-- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1]
+- [mon.b, mgr.x, client.1]
+- [osd.4, osd.5]
+- [osd.6, osd.7]
 - [client.2, client.3]
 - [client.4, client.5]
 openstack:
diff --git a/qa/suites/rados/downstream/singleton/% b/qa/suites/rados/downstream/singleton/%
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/rados/downstream/singleton/all/admin-socket.yaml b/qa/suites/rados/downstream/singleton/all/admin-socket.yaml
new file mode 100644 (file)
index 0000000..13af813
--- /dev/null
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - client.a
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+- admin_socket:
+    osd.0:
+      version:
+      git_version:
+      help:
+      config show:
+      config help:
+      config set filestore_dump_file /tmp/foo:
+      perf dump:
+      perf schema:
+      get_heap_property tcmalloc.max_total_thread_cache_bytes:
+      set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864:
+      set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432:
diff --git a/qa/suites/rados/downstream/singleton/all/divergent_priors.yaml b/qa/suites/rados/downstream/singleton/all/divergent_priors.yaml
new file mode 100644 (file)
index 0000000..bdb1b51
--- /dev/null
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_
+      - \(POOL_APP_NOT_ENABLED\)
+    conf:
+      osd:
+        debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- divergent_priors:
diff --git a/qa/suites/rados/downstream/singleton/all/divergent_priors2.yaml b/qa/suites/rados/downstream/singleton/all/divergent_priors2.yaml
new file mode 100644 (file)
index 0000000..e2f0245
--- /dev/null
@@ -0,0 +1,29 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_DEGRADED\)
+      - \(POOL_APP_NOT_ENABLED\)
+    conf:
+      osd:
+        debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- divergent_priors2:
diff --git a/qa/suites/rados/downstream/singleton/all/dump-stuck.yaml b/qa/suites/rados/downstream/singleton/all/dump-stuck.yaml
new file mode 100644 (file)
index 0000000..59085ff
--- /dev/null
@@ -0,0 +1,19 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+- dump_stuck:
diff --git a/qa/suites/rados/downstream/singleton/all/ec-lost-unfound.yaml b/qa/suites/rados/downstream/singleton/all/ec-lost-unfound.yaml
new file mode 100644 (file)
index 0000000..2f4e7d2
--- /dev/null
@@ -0,0 +1,24 @@
+roles:
+- - mon.a
+  - mgr.x
+- - mon.b
+  - osd.0
+  - osd.1
+- - osd.2
+  - osd.3
+  - mon.c
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - objects unfound and apparently lost
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_
+- ec_lost_unfound:
diff --git a/qa/suites/rados/downstream/singleton/all/lost-unfound-delete.yaml b/qa/suites/rados/downstream/singleton/all/lost-unfound-delete.yaml
new file mode 100644 (file)
index 0000000..bcaef78
--- /dev/null
@@ -0,0 +1,23 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - objects unfound and apparently lost
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_
+- rep_lost_unfound_delete:
diff --git a/qa/suites/rados/downstream/singleton/all/lost-unfound.yaml b/qa/suites/rados/downstream/singleton/all/lost-unfound.yaml
new file mode 100644 (file)
index 0000000..a4a309d
--- /dev/null
@@ -0,0 +1,23 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - objects unfound and apparently lost
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_
+- lost_unfound:
diff --git a/qa/suites/rados/downstream/singleton/all/max-pg-per-osd.from-mon.yaml b/qa/suites/rados/downstream/singleton/all/max-pg-per-osd.from-mon.yaml
new file mode 100644 (file)
index 0000000..accdd96
--- /dev/null
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+overrides:
+  ceph:
+    create_rbd_pool: False
+    conf:
+      mon:
+        osd pool default size: 2
+      osd:
+        mon max pg per osd : 2
+        osd max pg per osd hard ratio : 1
+    log-whitelist:
+      - \(TOO_FEW_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+    test_create_from_mon: True
+    pg_num: 2
diff --git a/qa/suites/rados/downstream/singleton/all/max-pg-per-osd.from-primary.yaml b/qa/suites/rados/downstream/singleton/all/max-pg-per-osd.from-primary.yaml
new file mode 100644 (file)
index 0000000..a926fe5
--- /dev/null
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+- - osd.2
+  - osd.3
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+overrides:
+  ceph:
+    create_rbd_pool: False
+    conf:
+      mon:
+        osd pool default size: 2
+      osd:
+        mon max pg per osd : 1
+        osd max pg per osd hard ratio : 1
+    log-whitelist:
+      - \(TOO_FEW_PGS\)
+      - \(PG_
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+    test_create_from_mon: False
+    pg_num: 1
+    pool_size: 2
+    from_primary: True
diff --git a/qa/suites/rados/downstream/singleton/all/max-pg-per-osd.from-replica.yaml b/qa/suites/rados/downstream/singleton/all/max-pg-per-osd.from-replica.yaml
new file mode 100644 (file)
index 0000000..9718ad1
--- /dev/null
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+- - osd.2
+  - osd.3
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+overrides:
+  ceph:
+    create_rbd_pool: False
+    conf:
+      mon:
+        osd pool default size: 2
+      osd:
+        mon max pg per osd : 1
+        osd max pg per osd hard ratio : 1
+    log-whitelist:
+      - \(TOO_FEW_PGS\)
+      - \(PG_
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+    test_create_from_mon: False
+    pg_num: 1
+    pool_size: 2
+    from_primary: False
diff --git a/qa/suites/rados/downstream/singleton/all/mon-auth-caps.yaml b/qa/suites/rados/downstream/singleton/all/mon-auth-caps.yaml
new file mode 100644 (file)
index 0000000..318af5e
--- /dev/null
@@ -0,0 +1,14 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - mon/auth_caps.sh
diff --git a/qa/suites/rados/downstream/singleton/all/mon-config-keys.yaml b/qa/suites/rados/downstream/singleton/all/mon-config-keys.yaml
new file mode 100644 (file)
index 0000000..7bb4f65
--- /dev/null
@@ -0,0 +1,20 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - mon/test_mon_config_key.py
diff --git a/qa/suites/rados/downstream/singleton/all/mon-seesaw.yaml b/qa/suites/rados/downstream/singleton/all/mon-seesaw.yaml
new file mode 100644 (file)
index 0000000..ccd980f
--- /dev/null
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    config:
+      global:
+        osd pool default min size : 1
+      osd:
+        debug monc: 1
+        debug ms: 1
+- mon_seesaw:
+- ceph_manager.create_pool:
+    kwargs:
+      pool_name: test
+      pg_num: 1
+- ceph_manager.wait_for_clean:
+    kwargs:
+      timeout: 60
diff --git a/qa/suites/rados/downstream/singleton/all/osd-backfill.yaml b/qa/suites/rados/downstream/singleton/all/osd-backfill.yaml
new file mode 100644 (file)
index 0000000..5b37407
--- /dev/null
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_
+    conf:
+      osd:
+        osd min pg log entries: 5
+- osd_backfill:
diff --git a/qa/suites/rados/downstream/singleton/all/osd-recovery-incomplete.yaml b/qa/suites/rados/downstream/singleton/all/osd-recovery-incomplete.yaml
new file mode 100644 (file)
index 0000000..fe1a4b7
--- /dev/null
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+- - mgr.x
+  - osd.0
+  - osd.1
+  - mon.b
+- - osd.2
+  - osd.3
+  - mon.c
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_
+    conf:
+      osd:
+        osd min pg log entries: 5
+        osd_fast_fail_on_connection_refused: false
+- osd_recovery.test_incomplete_pgs:
diff --git a/qa/suites/rados/downstream/singleton/all/osd-recovery.yaml b/qa/suites/rados/downstream/singleton/all/osd-recovery.yaml
new file mode 100644 (file)
index 0000000..a887ac5
--- /dev/null
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_DEGRADED\)
+    conf:
+      osd:
+        osd min pg log entries: 5
+        osd_fast_fail_on_connection_refused: false
+- osd_recovery:
diff --git a/qa/suites/rados/downstream/singleton/all/peer.yaml b/qa/suites/rados/downstream/singleton/all/peer.yaml
new file mode 100644 (file)
index 0000000..645034a
--- /dev/null
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    config:
+      global:
+        osd pool default min size : 1
+    log-whitelist:
+      - objects unfound and apparently lost
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+- peer:
diff --git a/qa/suites/rados/downstream/singleton/all/pg-removal-interruption.yaml b/qa/suites/rados/downstream/singleton/all/pg-removal-interruption.yaml
new file mode 100644 (file)
index 0000000..10f18e2
--- /dev/null
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - but it is still running
+      - slow request
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+- exec:
+    client.0:
+      - sudo ceph osd pool create foo 128 128
+      - sudo ceph osd pool application enable foo rados
+      - sleep 5
+      - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
+      - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
+- ceph.wait_for_failure: [osd.0]
+- exec:
+    client.0:
+      - sudo ceph osd down 0
+- ceph.restart: [osd.0]
+- ceph.healthy:
diff --git a/qa/suites/rados/downstream/singleton/all/radostool.yaml b/qa/suites/rados/downstream/singleton/all/radostool.yaml
new file mode 100644 (file)
index 0000000..a9d4b2b
--- /dev/null
@@ -0,0 +1,33 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+- - osd.6
+  - osd.7
+  - osd.8
+
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - had wrong client addr
+    - had wrong cluster addr
+    - reached quota
+    - overall HEALTH_
+    - \(POOL_FULL\)
+    - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+    clients:
+      all:
+        - rados/test_rados_tool.sh
diff --git a/qa/suites/rados/downstream/singleton/all/random-eio.yaml b/qa/suites/rados/downstream/singleton/all/random-eio.yaml
new file mode 100644 (file)
index 0000000..fc5ab10
--- /dev/null
@@ -0,0 +1,43 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - missing primary copy of
+    - objects unfound and apparently lost
+    - overall HEALTH_
+    - \(POOL_APP_NOT_ENABLED\)
+    - \(PG_DEGRADED\)
+- full_sequential:
+  - exec:
+      client.0:
+        - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.33
+        - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.33
+        - sudo ceph osd pool create test 16 16
+        - sudo ceph osd pool set test size 3
+        - sudo ceph pg  dump pgs --format=json-pretty
+  - radosbench:
+      clients: [client.0]
+      time: 360
+      type: rand
+      objectsize: 1048576
+      pool: test
+      create_pool: false
+  - exec:
+      client.0:
+        - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.0
+        - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.0
diff --git a/qa/suites/rados/downstream/singleton/all/rebuild-mondb.yaml b/qa/suites/rados/downstream/singleton/all/rebuild-mondb.yaml
new file mode 100644 (file)
index 0000000..78d77c8
--- /dev/null
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - no reply from
+      - overall HEALTH_
+      - \(MON_DOWN\)
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 30
+  - rebuild_mondb:
+  - radosbench:
+      clients: [client.0]
+      time: 30
diff --git a/qa/suites/rados/downstream/singleton/all/reg11184.yaml b/qa/suites/rados/downstream/singleton/all/reg11184.yaml
new file mode 100644 (file)
index 0000000..f3c8575
--- /dev/null
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    conf:
+      osd:
+        debug osd: 5
+    log-whitelist:
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(SMALLER_PGP_NUM\)
+      - \(OBJECT_
+tasks:
+- install:
+- ceph:
+- reg11184:
diff --git a/qa/suites/rados/downstream/singleton/all/resolve_stuck_peering.yaml b/qa/suites/rados/downstream/singleton/all/resolve_stuck_peering.yaml
new file mode 100644 (file)
index 0000000..3eddce8
--- /dev/null
@@ -0,0 +1,17 @@
+roles:
+- [mon.a, mgr.x]
+- [osd.0, osd.1, osd.2, client.0]
+
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    log-whitelist:
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_DEGRADED\)
+      - \(POOL_APP_NOT_ENABLED\)
+- resolve_stuck_peering:
+
diff --git a/qa/suites/rados/downstream/singleton/all/rest-api.yaml b/qa/suites/rados/downstream/singleton/all/rest-api.yaml
new file mode 100644 (file)
index 0000000..d988d1a
--- /dev/null
@@ -0,0 +1,35 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - mds.a
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - but it is still running
+      - had wrong client addr
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_DEGRADED\)
+    conf:
+      client.rest0:
+        debug ms: 1
+        debug objecter: 20
+        debug rados: 20
+- rest-api: [client.0]
+- workunit:
+    clients:
+      all:
+        - rest/test.py
diff --git a/qa/suites/rados/downstream/singleton/all/thrash-eio.yaml b/qa/suites/rados/downstream/singleton/all/thrash-eio.yaml
new file mode 100644 (file)
index 0000000..fdb8dcd
--- /dev/null
@@ -0,0 +1,43 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+overrides:
+  ceph:
+    conf:
+      mon:
+        osd pool default size: 3
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - missing primary copy of
+    - objects unfound and apparently lost
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(REQUEST_SLOW\)
+    - \(PG_
+    - \(OSD_
+- thrashosds:
+    op_delay: 30
+    clean_interval: 120
+    chance_down: .5
+    random_eio: .33
+    min_live: 5
+    min_in: 5
+- radosbench:
+    clients: [client.0]
+    time: 720
+    type: rand
+    objectsize: 1048576
diff --git a/qa/suites/rados/downstream/singleton/all/thrash-rados/+ b/qa/suites/rados/downstream/singleton/all/thrash-rados/+
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/rados/downstream/singleton/all/thrash-rados/thrash-rados.yaml b/qa/suites/rados/downstream/singleton/all/thrash-rados/thrash-rados.yaml
new file mode 100644 (file)
index 0000000..37be8df
--- /dev/null
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - but it is still running
+- thrashosds:
+    op_delay: 30
+    clean_interval: 120
+    chance_down: .5
+- workunit:
+    clients:
+      all:
+      - rados/load-gen-mix-small.sh
diff --git a/qa/suites/rados/downstream/singleton/all/thrash-rados/thrashosds-health.yaml b/qa/suites/rados/downstream/singleton/all/thrash-rados/thrashosds-health.yaml
new file mode 120000 (symlink)
index 0000000..1387298
--- /dev/null
@@ -0,0 +1 @@
+../../../../../../tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/downstream/singleton/all/watch-notify-same-primary.yaml b/qa/suites/rados/downstream/singleton/all/watch-notify-same-primary.yaml
new file mode 100644 (file)
index 0000000..48ef78f
--- /dev/null
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    config:
+      global:
+        osd pool default min size : 1
+      client:
+        debug ms: 1
+        debug objecter: 20
+        debug rados: 20
+    log-whitelist:
+      - objects unfound and apparently lost
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_DEGRADED\)
+- watch_notify_same_primary:
+    clients: [client.0]
diff --git a/qa/suites/rados/downstream/singleton/msgr b/qa/suites/rados/downstream/singleton/msgr
new file mode 120000 (symlink)
index 0000000..b29ecdd
--- /dev/null
@@ -0,0 +1 @@
+../basic/msgr
\ No newline at end of file
diff --git a/qa/suites/rados/downstream/singleton/msgr-failures/many.yaml b/qa/suites/rados/downstream/singleton/msgr-failures/many.yaml
new file mode 100644 (file)
index 0000000..3b495f9
--- /dev/null
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
+      mgr:
+        debug monc: 10
diff --git a/qa/suites/rados/downstream/singleton/objectstore b/qa/suites/rados/downstream/singleton/objectstore
new file mode 120000 (symlink)
index 0000000..071b204
--- /dev/null
@@ -0,0 +1 @@
+../../../../downstream-config/objectstore/
\ No newline at end of file
diff --git a/qa/suites/rados/downstream/singleton/rados.yaml b/qa/suites/rados/downstream/singleton/rados.yaml
new file mode 120000 (symlink)
index 0000000..b81af13
--- /dev/null
@@ -0,0 +1 @@
+../../../../downstream-config/config/rados.yaml
\ No newline at end of file