From 0ee409b609e06ddb493c2938868d196a6d69e4bb Mon Sep 17 00:00:00 2001
From: David Zafman
Date: Tue, 6 May 2014 13:56:21 -0700
Subject: [PATCH] osd: Remove classic scrub code since Argonaut osd can't join

Fixes: #7553

Signed-off-by: David Zafman
---
 src/osd/PG.cc           | 270 ++++------------------------------------
 src/osd/PG.h            |  13 +-
 src/osd/ReplicatedPG.cc |  19 +--
 3 files changed, 31 insertions(+), 271 deletions(-)

diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index dc9d4715eb932..8f8187af82063 100644
--- a/src/osd/PG.cc
+++ b/src/osd/PG.cc
@@ -3036,43 +3036,16 @@ void PG::sub_op_scrub_map(OpRequestRef op)
   dout(10) << " got " << m->from << " scrub map" << dendl;
   bufferlist::iterator p = m->get_data().begin();
 
-  if (scrubber.is_chunky) { // chunky scrub
-    scrubber.received_maps[m->from].decode(p, info.pgid.pool());
-    dout(10) << "map version is "
+  scrubber.received_maps[m->from].decode(p, info.pgid.pool());
+  dout(10) << "map version is "
            << scrubber.received_maps[m->from].valid_through
            << dendl;
-  } else { // classic scrub
-    if (scrubber.received_maps.count(m->from)) {
-      ScrubMap incoming;
-      incoming.decode(p, info.pgid.pool());
-      dout(10) << "from replica " << m->from << dendl;
-      dout(10) << "map version is " << incoming.valid_through << dendl;
-      scrubber.received_maps[m->from].merge_incr(incoming);
-    } else {
-      scrubber.received_maps[m->from].decode(p, info.pgid.pool());
-    }
-  }
 
   --scrubber.waiting_on;
   scrubber.waiting_on_whom.erase(m->from);
 
   if (scrubber.waiting_on == 0) {
-    if (scrubber.is_chunky) { // chunky scrub
-      osd->scrub_wq.queue(this);
-    } else { // classic scrub
-      if (scrubber.finalizing) { // incremental lists received
-        osd->scrub_finalize_wq.queue(this);
-      } else { // initial lists received
-        scrubber.block_writes = true;
-        if (last_update_applied == info.last_update) {
-          scrubber.finalizing = true;
-          scrub_gather_replica_maps();
-          ++scrubber.waiting_on;
-          scrubber.waiting_on_whom.insert(pg_whoami);
-          osd->scrub_wq.queue(this);
-        }
-      }
-    }
+    osd->scrub_wq.queue(this);
   }
 }
@@ -3465,19 +3438,6 @@ void PG::repair_object(
 }
 
 /* replica_scrub
- *
- * Classic behavior:
- *
- * If msg->scrub_from is not set, replica_scrub calls build_scrubmap to
- * build a complete map (with the pg lock dropped).
- *
- * If msg->scrub_from is set, replica_scrub sets scrubber.finalizing.
- * Similarly to scrub, if last_update_applied is behind info.last_update
- * replica_scrub returns to be requeued by sub_op_modify_applied.
- * replica_scrub then builds an incremental scrub map with the
- * pg lock held.
- *
- * Chunky behavior:
  *
  * Wait for last_update_applied to match msg->scrub_to as above. Wait
  * for pushes to complete in case of recent recovery. Build a single
@@ -3491,63 +3451,33 @@ void PG::replica_scrub(
   dout(7) << "replica_scrub" << dendl;
 
   if (msg->map_epoch < info.history.same_interval_since) {
-    if (scrubber.finalizing) {
-      dout(10) << "scrub pg changed, aborting" << dendl;
-      scrubber.finalizing = 0;
-    } else {
-      dout(10) << "replica_scrub discarding old replica_scrub from "
-               << msg->map_epoch << " < " << info.history.same_interval_since
-               << dendl;
-    }
+    dout(10) << "replica_scrub discarding old replica_scrub from "
+             << msg->map_epoch << " < " << info.history.same_interval_since
+             << dendl;
     return;
   }
 
   ScrubMap map;
 
-  if (msg->chunky) { // chunky scrub
-    if (last_update_applied < msg->scrub_to) {
-      dout(10) << "waiting for last_update_applied to catch up" << dendl;
-      scrubber.active_rep_scrub = msg;
-      msg->get();
-      return;
-    }
-
-    if (active_pushes > 0) {
-      dout(10) << "waiting for active pushes to finish" << dendl;
-      scrubber.active_rep_scrub = msg;
-      msg->get();
-      return;
-    }
-
-    build_scrub_map_chunk(
-      map, msg->start, msg->end, msg->deep,
-      handle);
-
-  } else {
-    if (msg->scrub_from > eversion_t()) {
-      if (scrubber.finalizing) {
-        assert(last_update_applied == info.last_update);
-        assert(last_update_applied == msg->scrub_to);
-      } else {
-        scrubber.finalizing = 1;
-        if (last_update_applied != msg->scrub_to) {
-          scrubber.active_rep_scrub = msg;
-          msg->get();
-          return;
-        }
-      }
-      build_inc_scrub_map(map, msg->scrub_from, handle);
-      scrubber.finalizing = 0;
-    } else {
-      build_scrub_map(map, handle);
-    }
+  assert(msg->chunky);
+  if (last_update_applied < msg->scrub_to) {
+    dout(10) << "waiting for last_update_applied to catch up" << dendl;
+    scrubber.active_rep_scrub = msg;
+    msg->get();
+    return;
+  }
 
-    if (msg->map_epoch < info.history.same_interval_since) {
-      dout(10) << "scrub pg changed, aborting" << dendl;
-      return;
-    }
+  if (active_pushes > 0) {
+    dout(10) << "waiting for active pushes to finish" << dendl;
+    scrubber.active_rep_scrub = msg;
+    msg->get();
+    return;
   }
 
+  build_scrub_map_chunk(
+    map, msg->start, msg->end, msg->deep,
+    handle);
+
   vector<OSDOp> scrub(1);
   scrub[0].op.op = CEPH_OSD_OP_SCRUB_MAP;
   hobject_t poid;
@@ -3596,7 +3526,6 @@ void PG::scrub(ThreadPool::TPHandle &handle)
   // when we're starting a scrub, we need to determine which type of scrub to do
   if (!scrubber.active) {
     OSDMapRef curmap = osd->get_osdmap();
-    scrubber.is_chunky = true;
     assert(backfill_targets.empty());
     for (unsigned i=0; i<acting.size(); i++) {
-    osd->inc_scrubs_active(scrubber.reserved);
-    if (scrubber.reserved) {
-      scrubber.reserved = false;
-      scrubber.reserved_peers.clear();
-    }
-
-    /* scrubber.waiting_on == 0 iff all replicas have sent the requested maps and
-     * the primary has done a final scrub (which in turn can only happen if
-     * last_update_applied == info.last_update)
-     */
-    scrubber.waiting_on = acting.size();
-    scrubber.waiting_on_whom.insert(
-      actingbackfill.begin(), actingbackfill.end());
-    scrubber.waiting_on_whom.erase(pg_whoami);
-
-    // request maps from replicas
-    for (set<pg_shard_t>::iterator i = actingbackfill.begin();
-         i != actingbackfill.end();
-         ++i) {
-      if (*i == pg_whoami) continue;
-      _request_scrub_map_classic(*i, eversion_t());
-    }
-
-    // Unlocks and relocks...
-    scrubber.primary_scrubmap = ScrubMap();
-    build_scrub_map(scrubber.primary_scrubmap, handle);
-
-    if (scrubber.epoch_start != info.history.same_interval_since) {
-      dout(10) << "scrub pg changed, aborting" << dendl;
-      scrub_clear_state();
-      scrub_unreserve_replicas();
-      return;
-    }
-
-    --scrubber.waiting_on;
-    scrubber.waiting_on_whom.erase(pg_whoami);
-
-    if (scrubber.waiting_on == 0) {
-      // the replicas have completed their scrub map, so lock out writes
-      scrubber.block_writes = true;
-    } else {
-      dout(10) << "wait for replicas to build initial scrub map" << dendl;
-      return;
-    }
-
-    if (last_update_applied != info.last_update) {
-      dout(10) << "wait for cleanup" << dendl;
-      return;
-    }
-
-    // fall through if last_update_applied == info.last_update and scrubber.waiting_on == 0
-
-    // request incrementals from replicas
-    scrub_gather_replica_maps();
-    ++scrubber.waiting_on;
-    scrubber.waiting_on_whom.insert(pg_whoami);
-  }
-
-  dout(10) << "clean up scrub" << dendl;
-  assert(last_update_applied == info.last_update);
-
-  scrubber.finalizing = true;
-
-  if (scrubber.epoch_start != info.history.same_interval_since) {
-    dout(10) << "scrub pg changed, aborting" << dendl;
-    scrub_clear_state();
-    scrub_unreserve_replicas();
-    return;
-  }
-
-  if (scrubber.primary_scrubmap.valid_through != pg_log.get_head()) {
-    ScrubMap incr;
-    build_inc_scrub_map(incr, scrubber.primary_scrubmap.valid_through, handle);
-    scrubber.primary_scrubmap.merge_incr(incr);
-  }
-
-  --scrubber.waiting_on;
-  scrubber.waiting_on_whom.erase(pg_whoami);
-  if (scrubber.waiting_on == 0) {
-    assert(last_update_applied == info.last_update);
-    osd->scrub_finalize_wq.queue(this);
-  }
-}
-
 /*
  * Chunky scrub scrubs objects one chunk at a time with writes blocked for that
  * chunk.
@@ -6398,7 +6184,6 @@ boost::statechart::result PG::RecoveryState::Active::react(const QueryState& q)
     q.f->dump_stream("scrubber.epoch_start") << pg->scrubber.epoch_start;
     q.f->dump_int("scrubber.active", pg->scrubber.active);
     q.f->dump_int("scrubber.block_writes", pg->scrubber.block_writes);
-    q.f->dump_int("scrubber.finalizing", pg->scrubber.finalizing);
     q.f->dump_int("scrubber.waiting_on", pg->scrubber.waiting_on);
     {
       q.f->open_array_section("scrubber.waiting_on_whom");
@@ -6529,12 +6314,9 @@ boost::statechart::result PG::RecoveryState::ReplicaActive::react(const MQuery&
 
 boost::statechart::result PG::RecoveryState::ReplicaActive::react(const QueryState& q)
 {
-  PG *pg = context< RecoveryMachine >().pg;
-
   q.f->open_object_section("state");
   q.f->dump_string("name", state_name);
   q.f->dump_stream("enter_time") << enter_time;
-  q.f->dump_int("scrubber.finalizing", pg->scrubber.finalizing);
   q.f->close_section();
   return forward_event();
 }
diff --git a/src/osd/PG.h b/src/osd/PG.h
index 6a15f2b366a91..f7b719a2871e9 100644
--- a/src/osd/PG.h
+++ b/src/osd/PG.h
@@ -947,8 +947,7 @@ public:
       waiting_on(0), shallow_errors(0), deep_errors(0), fixed(0),
       active_rep_scrub(0),
       must_scrub(false), must_deep_scrub(false), must_repair(false),
-      classic(false),
-      finalizing(false), is_chunky(false), state(INACTIVE),
+      state(INACTIVE),
       deep(false)
     {
     }
@@ -983,12 +982,7 @@ public:
     // Map from object with errors to good peer
     map<hobject_t, list<pg_shard_t> > authoritative;
 
-    // classic scrub
-    bool classic;
-    bool finalizing;
-
-    // chunky scrub
-    bool is_chunky;
     hobject_t start, end;
     eversion_t subset_last_update;
@@ -1045,9 +1039,6 @@ public:
       if (!block_writes)
        return false;
 
-      if (!is_chunky)
-       return true;
-
       if (soid >= start && soid < end)
        return true;
 
@@ -1056,8 +1047,6 @@
 
     // clear all state
     void reset() {
-      classic = false;
-      finalizing = false;
       block_writes = false;
       active = false;
       queue_snap_trim = false;
diff --git a/src/osd/ReplicatedPG.cc b/src/osd/ReplicatedPG.cc
index 94eec05d44458..a34249eb66e30 100644
--- a/src/osd/ReplicatedPG.cc
+++ b/src/osd/ReplicatedPG.cc
@@ -1015,12 +1015,6 @@ void ReplicatedPG::do_pg_op(OpRequestRef op)
 
 void ReplicatedPG::calc_trim_to()
 {
-  if (is_scrubbing() && scrubber.classic) {
-    dout(10) << "calc_trim_to no trim during classic scrub" << dendl;
-    pg_trim_to = eversion_t();
-    return;
-  }
-
   size_t target = cct->_conf->osd_min_pg_log_entries;
   if (is_degraded() ||
       state_test(PG_STATE_RECOVERING |
@@ -5292,7 +5286,7 @@ void ReplicatedPG::finish_ctx(OpContext *ctx, int log_op_type, bool maintain_ssc
                   ctx->obs->oi.category);
   }
 
-  if (scrubber.active && scrubber.is_chunky) {
+  if (scrubber.active) {
     assert(soid < scrubber.start || soid >= scrubber.end);
     if (soid < scrubber.start)
       scrub_cstat.add(ctx->delta_stats, ctx->obs->oi.category);
@@ -6505,17 +6499,12 @@ void ReplicatedPG::op_applied(const eversion_t &applied_version)
   assert(applied_version <= info.last_update);
   last_update_applied = applied_version;
   if (is_primary()) {
-    if (scrubber.active && scrubber.is_chunky) {
+    if (scrubber.active) {
       if (last_update_applied == scrubber.subset_last_update) {
        osd->scrub_wq.queue(this);
      }
-    } else if (last_update_applied == info.last_update && scrubber.block_writes) {
-      dout(10) << "requeueing scrub for cleanup" << dendl;
-      scrubber.finalizing = true;
-      scrub_gather_replica_maps();
-      ++scrubber.waiting_on;
-      scrubber.waiting_on_whom.insert(pg_whoami);
-      osd->scrub_wq.queue(this);
+    } else {
+      assert(!scrubber.block_writes);
     }
   } else {
    dout(10) << "op_applied on replica on version " << applied_version << dendl;
-- 
2.39.5
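
Note on the PG.h change above: with the classic path gone, the scrubber's write-blocking test reduces to a pure per-chunk range check. The following is a minimal standalone C++ sketch of that logic, not the Ceph implementation; the SimpleScrubber and ObjectKey names and the string-keyed ordering are invented here purely for illustration.

// Standalone sketch (hypothetical names, not Ceph code): the per-chunk
// write-blocking test that remains once the classic "block all writes"
// fallback is removed.
#include <cassert>
#include <string>

struct ObjectKey {
  std::string name;  // stand-in for hobject_t ordering
  bool operator<(const ObjectKey &rhs) const { return name < rhs.name; }
  bool operator>=(const ObjectKey &rhs) const { return !(*this < rhs); }
};

struct SimpleScrubber {
  bool block_writes = false;  // set only while a chunk is being scrubbed
  ObjectKey start, end;       // current chunk covers [start, end)

  // Chunky scrub blocks a write only if it targets an object inside the
  // active chunk; objects outside the chunk proceed normally.
  bool write_blocked_by_scrub(const ObjectKey &oid) const {
    if (!block_writes)
      return false;
    return oid >= start && oid < end;
  }
};

int main() {
  SimpleScrubber s;
  s.block_writes = true;
  s.start = {"obj_0100"};
  s.end   = {"obj_0200"};
  assert(s.write_blocked_by_scrub({"obj_0150"}));   // inside the chunk: blocked
  assert(!s.write_blocked_by_scrub({"obj_0900"}));  // outside the chunk: allowed
  return 0;
}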