qa: add script to test how libceph handles huge osdmaps 30363/head
author    Ilya Dryomov <idryomov@gmail.com>
          Wed, 11 Sep 2019 11:15:59 +0000 (13:15 +0200)
committer Ilya Dryomov <idryomov@gmail.com>
          Fri, 13 Sep 2019 17:21:54 +0000 (19:21 +0200)
That code will also handle moderately-sized osdmaps when the memory is
fragmented.

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
qa/suites/krbd/rbd-nomount/tasks/krbd_huge_osdmap.yaml [new file with mode: 0644]
qa/workunits/rbd/krbd_huge_osdmap.sh [new file with mode: 0755]
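
For reviewers who want to exercise the new workunit outside teuthology, a rough
sketch (not part of the commit): it assumes a throwaway cluster whose mon already
has mon_max_osd = 60000 as in the yaml below, passwordless sudo, and a kernel with
the rbd module available.

    # Hypothetical manual invocation; teuthology normally drives this via the yaml.
    ceph osd pool create rbd 32                  # skip if the pool already exists
    rbd pool init rbd
    sudo modprobe rbd                            # kernel rbd client must be loaded
    bash qa/workunits/rbd/krbd_huge_osdmap.sh    # prints OK on success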

diff --git a/qa/suites/krbd/rbd-nomount/tasks/krbd_huge_osdmap.yaml b/qa/suites/krbd/rbd-nomount/tasks/krbd_huge_osdmap.yaml
new file mode 100644 (file)
index 0000000..3148b32
--- /dev/null
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon max osd: 60000
+tasks:
+- workunit:
+    clients:
+      all:
+        - rbd/krbd_huge_osdmap.sh
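
The override above ends up in the mon section of the test cluster's ceph.conf.
A hedged way to confirm it took effect before the workunit runs (the daemon name
mon.a is an assumption; adjust to the local mon id):

    sudo ceph daemon mon.a config get mon_max_osd   # expect something like {"mon_max_osd": "60000"}
    ceph osd setmaxosd 60000                        # now permitted, up to the raised cap
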
diff --git a/qa/workunits/rbd/krbd_huge_osdmap.sh b/qa/workunits/rbd/krbd_huge_osdmap.sh
new file mode 100755 (executable)
index 0000000..0a550d6
--- /dev/null
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# This is a test for https://tracker.ceph.com/issues/40481.
+#
+# An osdmap with 60000 slots encodes to ~16M, of which the ignored portion
+# is ~13M.  However, the in-memory osdmap is larger than the remaining ~3M:
+# the osd_addr array for 60000 OSDs alone is ~8M because of sockaddr_storage.
+#
+# mon_max_osd = 60000 must be in the mon's ceph.conf; the accompanying job yaml sets it.
+
+set -ex
+
+function expect_false() {
+    if "$@"; then return 1; else return 0; fi
+}
+
+function run_test() {
+    local dev
+
+    # initially tiny, grow via incrementals
+    dev=$(sudo rbd map img)
+    for max in 8 60 600 6000 60000; do
+        ceph osd setmaxosd $max
+        expect_false sudo rbd map wait_for/latest_osdmap
+        xfs_io -c 'pwrite -w 0 12M' $dev
+    done
+    ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
+    expect_false sudo rbd map wait_for/latest_osdmap
+    xfs_io -c 'pwrite -w 0 12M' $dev
+    sudo rbd unmap $dev
+
+    # initially huge, shrink via incrementals
+    dev=$(sudo rbd map img)
+    for max in 60000 6000 600 60 8; do
+        ceph osd setmaxosd $max
+        expect_false sudo rbd map wait_for/latest_osdmap
+        xfs_io -c 'pwrite -w 0 12M' $dev
+    done
+    ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
+    expect_false sudo rbd map wait_for/latest_osdmap
+    xfs_io -c 'pwrite -w 0 12M' $dev
+    sudo rbd unmap $dev
+}
+
+rbd create --size 12M img
+run_test
+# repeat with primary affinity (adds an extra array)
+ceph osd primary-affinity osd.0 0.5
+run_test
+
+echo OK
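
The size figures in the script header can be sanity-checked by hand. A rough
sketch, assuming a disposable cluster and that sizeof(struct sockaddr_storage)
is 128 bytes on the target kernel:

    ceph osd setmaxosd 60000
    ceph osd getmap -o /tmp/osdmap && ls -lh /tmp/osdmap   # encoded 60000-slot map, roughly 16M
    echo $((60000 * 128)) bytes                            # osd_addr array alone: ~7.3M, plus small
                                                           # per-entry overhead, i.e. the "~8M" above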