From: Loic Dachary
Date: Wed, 16 Nov 2016 09:18:02 +0000 (+0100)
Subject: tests: osd-crush.sh must retry crush dump
X-Git-Tag: v11.1.0~270^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=refs%2Fpull%2F12016%2Fhead;p=ceph.git

tests: osd-crush.sh must retry crush dump

After ceph osd setcrushmap, the script must retry ceph osd crush dump
because it races against the propagation of the updated crushmap.

Fixes: http://tracker.ceph.com/issues/17919

Signed-off-by: Loic Dachary
---

diff --git a/src/test/mon/osd-crush.sh b/src/test/mon/osd-crush.sh
index 4fecd4a4d516..982ec904b25f 100755
--- a/src/test/mon/osd-crush.sh
+++ b/src/test/mon/osd-crush.sh
@@ -271,8 +271,19 @@ function TEST_crush_repair_faulty_crushmap() {
     ceph osd setcrushmap -i $empty_map.map || return 1
 
     # should be an empty crush map without any buckets
-    ! test $(ceph osd crush dump --format=xml | \
-        $XMLSTARLET sel -t -m "//buckets/bucket" -v .) || return 1
+    success=false
+    for delay in 1 2 4 8 16 32 64 128 256 ; do
+        if ! test $(ceph osd crush dump --format=xml | \
+            $XMLSTARLET sel -t -m "//buckets/bucket" -v .) ; then
+            success=true
+            break
+        fi
+        sleep $delay
+    done
+    if ! $success ; then
+        ceph osd crush dump --format=xml
+        return 1
+    fi
     # bring them down, the "ceph" commands will try to hunt for other monitor in
     # vain, after mon.a is offline
     kill_daemons $dir || return 1
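
The loop added above is an instance of a general poll-with-exponential-backoff
pattern: keep re-running the check, doubling the delay between attempts, until
it succeeds or roughly 511 seconds have elapsed. As a sketch only (the helper
names retry_with_backoff and crush_dump_has_no_buckets are hypothetical and not
part of this patch), the same logic could be factored out and reused:

    #!/bin/bash
    # Hypothetical helper, not in this patch: run a command repeatedly,
    # doubling the sleep between attempts, until it succeeds or the list
    # of delays is exhausted (~511 seconds total, as in the loop above).
    function retry_with_backoff() {
        local delay
        for delay in 1 2 4 8 16 32 64 128 256 ; do
            "$@" && return 0
            sleep $delay
        done
        return 1
    }

    # Hypothetical predicate mirroring the test: true once the crush map
    # dump contains no buckets.
    function crush_dump_has_no_buckets() {
        ! test "$(ceph osd crush dump --format=xml | \
            $XMLSTARLET sel -t -m '//buckets/bucket' -v .)"
    }

    # Usage, inside a test function as in osd-crush.sh:
    retry_with_backoff crush_dump_has_no_buckets || return 1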