From: Radosław Zarzyński Date: Wed, 13 Dec 2023 14:08:08 +0000 (+0100) Subject: osd: move templated ReadPipeline::check_recovery_sources() to .h X-Git-Tag: v19.3.0~13^2~6 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=e46590f76236cd1428e947fcbd14aacf58f238b0;p=ceph.git osd: move templated ReadPipeline::check_recovery_sources() to .h Besides shuffling, this commit also includes adding `std` namespaces and dissecting the `schedule_recovery_work()` to keep it in .cc. Signed-off-by: Radosław Zarzyński --- diff --git a/src/osd/ECCommon.cc b/src/osd/ECCommon.cc index 2c6534d081f1..5d7c7a48cb20 100644 --- a/src/osd/ECCommon.cc +++ b/src/osd/ECCommon.cc @@ -194,111 +194,17 @@ struct FinishReadOp : public GenContext { } }; -template -void ECCommon::ReadPipeline::filter_read_op( - const OSDMapRef& osdmap, - ReadOp &op, - F&& on_erase) +void ECCommon::ReadPipeline::schedule_recovery_work() { - set to_cancel; - for (map >::iterator i = op.source_to_obj.begin(); - i != op.source_to_obj.end(); - ++i) { - if (osdmap->is_down(i->first.osd)) { - to_cancel.insert(i->second.begin(), i->second.end()); - op.in_progress.erase(i->first); - continue; - } - } - - if (to_cancel.empty()) - return; - - for (map >::iterator i = op.source_to_obj.begin(); - i != op.source_to_obj.end(); - ) { - for (set::iterator j = i->second.begin(); - j != i->second.end(); - ) { - if (to_cancel.count(*j)) - i->second.erase(j++); - else - ++j; - } - if (i->second.empty()) { - op.source_to_obj.erase(i++); - } else { - ceph_assert(!osdmap->is_down(i->first.osd)); - ++i; - } - } - - for (set::iterator i = to_cancel.begin(); - i != to_cancel.end(); - ++i) { - get_parent()->cancel_pull(*i); - - ceph_assert(op.to_read.count(*i)); - read_request_t &req = op.to_read.find(*i)->second; - dout(10) << __func__ << ": canceling " << req - << " for obj " << *i << dendl; - op.to_read.erase(*i); - op.complete.erase(*i); - on_erase(*i); - } - - if (op.in_progress.empty()) { - /* This case is odd. 
filter_read_op gets called while processing - * an OSDMap. Normal, non-recovery reads only happen from acting - * set osds. For this op to have had a read source go down and - * there not be an interval change, it must be part of a pull during - * log-based recovery. - * - * This callback delays calling complete_read_op until later to avoid - * dealing with recovery while handling an OSDMap. We assign a - * cost here of 1 because: - * 1) This should be very rare, and the operation itself was already - * throttled. - * 2) It shouldn't result in IO, rather it should result in restarting - * the pull on the affected objects and pushes from in-memory buffers - * on any now complete unaffected objects. - */ #ifndef WITH_SEASTAR get_parent()->schedule_recovery_work( get_parent()->bless_unlocked_gencontext( - new FinishReadOp(*this, op.tid)), + nullptr), //new struct FinishReadOp(*this, op.tid)), 1); #else // TODO ceph_abort_msg("not yet implemented"); #endif - } -} - -template -void ECCommon::ReadPipeline::check_recovery_sources( - const OSDMapRef& osdmap, - F&& on_erase) -{ - set tids_to_filter; - for (map >::iterator - i = shard_to_read_map.begin(); - i != shard_to_read_map.end(); - ) { - if (osdmap->is_down(i->first.osd)) { - tids_to_filter.insert(i->second.begin(), i->second.end()); - shard_to_read_map.erase(i++); - } else { - ++i; - } - } - for (set::iterator i = tids_to_filter.begin(); - i != tids_to_filter.end(); - ++i) { - map::iterator j = tid_to_read_map.find(*i); - ceph_assert(j != tid_to_read_map.end()); - filter_read_op(osdmap, j->second, on_erase); - } } void ECCommon::ReadPipeline::on_change() diff --git a/src/osd/ECCommon.h b/src/osd/ECCommon.h index f074a79abaf8..1e30b6d80ff9 100644 --- a/src/osd/ECCommon.h +++ b/src/osd/ECCommon.h @@ -453,6 +453,8 @@ struct ECCommon { bool do_redundant_reads, ///< [in] true if we want to issue redundant reads to reduce latency std::map>> *to_read ///< [out] shards, corresponding subchunks to read ); ///< @return error 
code, 0 on success + + void schedule_recovery_work(); }; /** @@ -695,3 +697,99 @@ template <> struct fmt::formatter : fmt::ostream_forma template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; + +template +void ECCommon::ReadPipeline::check_recovery_sources( + const OSDMapRef& osdmap, + F&& on_erase) +{ + std::set tids_to_filter; + for (std::map >::iterator + i = shard_to_read_map.begin(); + i != shard_to_read_map.end(); + ) { + if (osdmap->is_down(i->first.osd)) { + tids_to_filter.insert(i->second.begin(), i->second.end()); + shard_to_read_map.erase(i++); + } else { + ++i; + } + } + for (std::set::iterator i = tids_to_filter.begin(); + i != tids_to_filter.end(); + ++i) { + std::map::iterator j = tid_to_read_map.find(*i); + ceph_assert(j != tid_to_read_map.end()); + filter_read_op(osdmap, j->second, on_erase); + } +} + +template +void ECCommon::ReadPipeline::filter_read_op( + const OSDMapRef& osdmap, + ReadOp &op, + F&& on_erase) +{ + std::set to_cancel; + for (std::map >::iterator i = op.source_to_obj.begin(); + i != op.source_to_obj.end(); + ++i) { + if (osdmap->is_down(i->first.osd)) { + to_cancel.insert(i->second.begin(), i->second.end()); + op.in_progress.erase(i->first); + continue; + } + } + + if (to_cancel.empty()) + return; + + for (std::map >::iterator i = op.source_to_obj.begin(); + i != op.source_to_obj.end(); + ) { + for (std::set::iterator j = i->second.begin(); + j != i->second.end(); + ) { + if (to_cancel.count(*j)) + i->second.erase(j++); + else + ++j; + } + if (i->second.empty()) { + op.source_to_obj.erase(i++); + } else { + ceph_assert(!osdmap->is_down(i->first.osd)); + ++i; + } + } + + for (std::set::iterator i = to_cancel.begin(); + i != to_cancel.end(); + ++i) { + get_parent()->cancel_pull(*i); + + ceph_assert(op.to_read.count(*i)); + op.to_read.erase(*i); + op.complete.erase(*i); + on_erase(*i); + } + + if 
(op.in_progress.empty()) { + /* This case is odd. filter_read_op gets called while processing + * an OSDMap. Normal, non-recovery reads only happen from acting + * set osds. For this op to have had a read source go down and + * there not be an interval change, it must be part of a pull during + * log-based recovery. + * + * This callback delays calling complete_read_op until later to avoid + * dealing with recovery while handling an OSDMap. We assign a + * cost here of 1 because: + * 1) This should be very rare, and the operation itself was already + * throttled. + * 2) It shouldn't result in IO, rather it should result in restarting + * the pull on the affected objects and pushes from in-memory buffers + * on any now complete unaffected objects. + */ + schedule_recovery_work(); + } +}