From: Josh Durgin
Date: Fri, 20 Apr 2018 22:42:56 +0000 (-0400)
Subject: osd/ECBackend: preserve requests for other objects when sending extra reads
X-Git-Tag: v12.2.6~140^2~1
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=41f22cf3fc0c45675e6ffb75978490deb960f8b5;p=ceph.git

osd/ECBackend: preserve requests for other objects when sending extra reads

When multiple objects are in flight for the same ReadOp, swap() on the
map would remove requests for all objects. We just want to replace the
requests for the single object we're dealing with in
send_all_remaining_reads().

This prevents crashing trying to look up rop.to_read[hoid] when another
object in the same ReadOp gets an EIO and tries to send more requests.

Test this by using osd-recovery-max-single-start to bundle multiple
reads into one ReadOp. Save and restore CEPH_ARGS so custom settings
are reset for each test.

Fixes: http://tracker.ceph.com/issues/23195 (the 2nd crash there)
Signed-off-by: Josh Durgin
(cherry picked from commit d4808256d25851f85ce5e86704cc4e867e2fc905)
---

diff --git a/qa/standalone/erasure-code/test-erasure-eio.sh b/qa/standalone/erasure-code/test-erasure-eio.sh
index a4abad1c9e9a..32bef54ef45f 100755
--- a/qa/standalone/erasure-code/test-erasure-eio.sh
+++ b/qa/standalone/erasure-code/test-erasure-eio.sh
@@ -419,9 +419,10 @@ function TEST_ec_recovery_multiple_objects() {
     local dir=$1
     local objname=myobject
 
-    export CEPH_ARGS
+    ORIG_ARGS=$CEPH_ARGS
     CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 '
     setup_osds 7 || return 1
+    CEPH_ARGS=$ORIG_ARGS
 
     local poolname=pool-jerasure
     create_erasure_coded_pool $poolname 3 2 || return 1
@@ -447,9 +448,10 @@ function TEST_ec_recovery_multiple_objects_eio() {
     local dir=$1
     local objname=myobject
 
-    export CEPH_ARGS
+    ORIG_ARGS=$CEPH_ARGS
     CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 '
     setup_osds 7 || return 1
+    CEPH_ARGS=$ORIG_ARGS
 
     local poolname=pool-jerasure
     create_erasure_coded_pool $poolname 3 2 || return 1
@@ -480,9 +482,10 @@ function TEST_ec_backfill_unfound() {
     # Must be between 1 and $lastobj
     local testobj=obj250
 
-    export CEPH_ARGS
+    ORIG_ARGS=$CEPH_ARGS
     CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10'
     setup_osds 5 || return 1
+    CEPH_ARGS=$ORIG_ARGS
 
     local poolname=pool-jerasure
     create_erasure_coded_pool $poolname 3 2 || return 1
@@ -558,7 +561,11 @@ function TEST_ec_recovery_unfound() {
     # Must be between 1 and $lastobj
     local testobj=obj75
 
+    ORIG_ARGS=$CEPH_ARGS
+    CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 '
+    CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10'
     setup_osds 5 || return 1
+    CEPH_ARGS=$ORIG_ARGS
 
     local poolname=pool-jerasure
     create_erasure_coded_pool $poolname 3 2 || return 1
diff --git a/src/osd/ECBackend.cc b/src/osd/ECBackend.cc
index 9e29ecc02834..7941add96c99 100644
--- a/src/osd/ECBackend.cc
+++ b/src/osd/ECBackend.cc
@@ -2370,17 +2370,14 @@ int ECBackend::send_all_remaining_reads(
   GenContext<pair<RecoveryMessages *, read_result_t& > &> *c =
     rop.to_read.find(hoid)->second.cb;
 
-  map<hobject_t, read_request_t> for_read_op;
-  for_read_op.insert(
-    make_pair(
+  rop.to_read.erase(hoid);
+  rop.to_read.insert(make_pair(
       hoid,
       read_request_t(
         offsets,
         shards,
         false,
         c)));
-
-  rop.to_read.swap(for_read_op);
   do_read_op(rop);
   return 0;
 }
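
Note (not part of the patch): a minimal standalone sketch of the map behaviour the fix relies on. A plain std::map with string keys stands in for rop.to_read, and "obj1"/"obj2" are made-up object names; this is only an illustration, not Ceph code. std::map::swap exchanges the entire contents of the two maps, so swapping in a single-entry map drops the requests for every other object in the ReadOp, while erase + insert only replaces the one key.

// swap_vs_erase_insert.cc -- illustration only, hypothetical names
#include <cassert>
#include <map>
#include <string>

int main() {
  // Stand-in for rop.to_read with two objects in flight.
  std::map<std::string, int> to_read = {{"obj1", 1}, {"obj2", 2}};

  // Old approach: build a single-entry map and swap() it in.
  // After the swap, the request for "obj2" is gone as well.
  std::map<std::string, int> for_read_op = {{"obj1", 10}};
  std::map<std::string, int> swapped = to_read;  // copy to show both behaviours
  swapped.swap(for_read_op);
  assert(swapped.count("obj2") == 0);

  // New approach: erase + insert touches only the one key,
  // so the other object's request survives.
  to_read.erase("obj1");
  to_read.insert(std::make_pair("obj1", 10));
  assert(to_read.count("obj2") == 1);

  return 0;
}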