From: Sage Weil
Date: Fri, 2 May 2014 00:24:48 +0000 (-0700)
Subject: osd: prevent pgs from getting too far ahead of the min pg epoch
X-Git-Tag: v0.80.6~33^2~6
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=289360c5dc57a4788289472f90d63781143539be;p=ceph.git

osd: prevent pgs from getting too far ahead of the min pg epoch

Bound the range of PG epochs between the slowest and fastest pg
(epoch-wise) with 'osd map max advance'.  This value should be set to
something less than 'osd map cache size' so that the maps we are
processing will be in memory as many PGs advance forward in time in
loose synchrony.

This is part of the solution to #7576.

Signed-off-by: Sage Weil
(cherry picked from commit cf25bdf6b0090379903981fe8cee5ea75efd7ba0)
---

diff --git a/src/common/config_opts.h b/src/common/config_opts.h
index fdd35575dfe..0d328e07583 100644
--- a/src/common/config_opts.h
+++ b/src/common/config_opts.h
@@ -451,6 +451,7 @@
 OPTION(osd_tier_default_cache_hit_set_period, OPT_INT, 1200)
 OPTION(osd_tier_default_cache_hit_set_type, OPT_STR, "bloom")
 OPTION(osd_map_dedup, OPT_BOOL, true)
+OPTION(osd_map_max_advance, OPT_INT, 200) // make this < cache_size!
 OPTION(osd_map_cache_size, OPT_INT, 500)
 OPTION(osd_map_message_max, OPT_INT, 100)  // max maps per MOSDMap message
 OPTION(osd_map_share_max_epochs, OPT_INT, 100)  // cap on # of inc maps we send to peers, clients
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index 747894acf6d..9bcee749ed5 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -5752,7 +5752,7 @@ void OSD::check_osdmap_features(ObjectStore *fs)
   }
 }
 
-void OSD::advance_pg(
+bool OSD::advance_pg(
   epoch_t osd_epoch, PG *pg,
   ThreadPool::TPHandle &handle,
   PG::RecoveryCtx *rctx,
@@ -5763,11 +5763,19 @@
   OSDMapRef lastmap = pg->get_osdmap();
 
   if (lastmap->get_epoch() == osd_epoch)
-    return;
+    return true;
   assert(lastmap->get_epoch() < osd_epoch);
 
+  epoch_t min_epoch = service.get_min_pg_epoch();
+  epoch_t max;
+  if (min_epoch) {
+    max = min_epoch + g_conf->osd_map_max_advance;
+  } else {
+    max = next_epoch + g_conf->osd_map_max_advance;
+  }
+
   for (;
-       next_epoch <= osd_epoch;
+       next_epoch <= osd_epoch && next_epoch <= max;
        ++next_epoch) {
     OSDMapRef nextmap = service.try_get_map(next_epoch);
     if (!nextmap)
@@ -5801,6 +5809,13 @@
   }
   service.pg_update_epoch(pg->info.pgid, lastmap->get_epoch());
   pg->handle_activate_map(rctx);
+  if (next_epoch <= osd_epoch) {
+    dout(10) << __func__ << " advanced by max " << g_conf->osd_map_max_advance
+             << " past min epoch " << min_epoch
+             << " ... will requeue " << *pg << dendl;
+    return false;
+  }
+  return true;
 }
 
 /**
@@ -7813,8 +7828,9 @@ void OSD::process_peering_events(
       pg->unlock();
       continue;
     }
-    advance_pg(curmap->get_epoch(), pg, handle, &rctx, &split_pgs);
-    if (!pg->peering_queue.empty()) {
+    if (!advance_pg(curmap->get_epoch(), pg, handle, &rctx, &split_pgs)) {
+      pg->queue_null(curmap->get_epoch(), curmap->get_epoch());
+    } else if (!pg->peering_queue.empty()) {
       PG::CephPeeringEvtRef evt = pg->peering_queue.front();
       pg->peering_queue.pop_front();
       pg->handle_peering_event(evt, &rctx);
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index bc3eb4eaa74..bd994df7cba 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -1291,7 +1291,7 @@ private:
   void note_down_osd(int osd);
   void note_up_osd(int osd);
 
-  void advance_pg(
+  bool advance_pg(
     epoch_t advance_to, PG *pg,
     ThreadPool::TPHandle &handle,
     PG::RecoveryCtx *rctx,
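
Illustrative note (not part of the patch): the bound introduced above can be read as "a PG may advance at most osd_map_max_advance epochs past the slowest PG on the OSD, falling back to its own next epoch when no PG epochs are registered yet; if that stops short of the OSD's epoch, the PG is requeued for another pass."  The following minimal standalone C++ sketch shows that rule in isolation; the helper advance_limit() and the sample epoch numbers are hypothetical and exist only for illustration.

// Standalone sketch (not Ceph code) of the clamping rule in advance_pg():
// a PG only advances up to osd_map_max_advance past the slowest PG, so the
// range of map epochs being replayed stays within the OSD's map cache.
#include <algorithm>
#include <cstdint>
#include <iostream>

using epoch_t = uint32_t;

// Highest epoch a PG may reach in one pass.  A min_epoch of 0 stands for
// "no PG epochs registered yet", mirroring the min_epoch/next_epoch
// fallback in the diff.
epoch_t advance_limit(epoch_t min_epoch, epoch_t next_epoch,
                      epoch_t max_advance) {
  epoch_t base = min_epoch ? min_epoch : next_epoch;
  return base + max_advance;
}

int main() {
  epoch_t osd_epoch   = 1000;  // epoch the OSD itself has reached
  epoch_t pg_epoch    = 400;   // epoch this PG has reached so far
  epoch_t min_epoch   = 380;   // slowest PG on this OSD
  epoch_t max_advance = 200;   // osd_map_max_advance (default 200)

  epoch_t next_epoch = pg_epoch + 1;
  epoch_t limit = advance_limit(min_epoch, next_epoch, max_advance);
  epoch_t stop = std::min(osd_epoch, limit);  // 580, not 1000
  bool caught_up = (stop == osd_epoch);

  std::cout << "advance " << next_epoch << ".." << stop
            << (caught_up ? " (caught up)\n" : " (requeue and retry later)\n");
}

With the sample values the PG stops at epoch 580 instead of jumping straight to 1000, keeping the window of maps being replayed well inside the 500-entry osd_map_cache_size default shown in the diff.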