map_cache.cached_lb() provides us with a lower bound across
all pgs for in-use osdmaps. We cannot trim past this since
those maps are still in use.
backport: bobtail
Fixes: #3770
Signed-off-by: Samuel Just <sam.just@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
Reviewed-by: Greg Farnum <greg@inktank.com>
(cherry picked from commit 66eb93b83648b4561b77ee6aab5b484e6dba4771)
}
}
+ // Returns K key s.t. key <= k for all currently cached k,v
+ K cached_key_lower_bound() {
+ Mutex::Locker l(lock);
+ // NOTE(review): std::map::begin() on an empty weak_refs is undefined
+ // behavior — presumably callers guarantee at least one cached entry
+ // (the OSD always holds its current map); confirm before backporting.
+ return weak_refs.begin()->first;
+ }
+
VPtr lower_bound(K key) {
VPtr val;
list<VPtr> to_release;
if (superblock.oldest_map) {
int num = 0;
- for (epoch_t e = superblock.oldest_map; e < m->oldest_map; ++e) {
+ epoch_t min(
+ MIN(m->oldest_map,
+ service.map_cache.cached_key_lower_bound()));
+ for (epoch_t e = superblock.oldest_map; e < min; ++e) {
dout(20) << " removing old osdmap epoch " << e << dendl;
t.remove(coll_t::META_COLL, get_osdmap_pobject_name(e));
t.remove(coll_t::META_COLL, get_inc_osdmap_pobject_name(e));