--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ void OSDMonitor::create_pending() @@
pending_inc.fsid = mon.monmap->fsid;
pending_metadata.clear();
pending_metadata_rm.clear();
- pending_pseudo_purged_snaps.clear();
dout(10) << "create_pending e " << pending_inc.epoch << dendl;

@@ void OSDMonitor::encode_pending(MonitorDBStore::TransactionRef t) @@
for (auto& [pool, snaps] : pending_inc.new_purged_snaps) {
  for (auto snap : snaps) {
    insert_purged_snap_update(pool, snap, snap + 1,
                              pending_inc.epoch,
                              t);
  }
}
- for (auto& [pool, snaps] : pending_pseudo_purged_snaps) {
-   for (auto snap : snaps) {
-     insert_purged_snap_update(pool, snap, snap + 1,
-                               pending_inc.epoch,
-                               t);
-   }
- }
// health
health_check_map_t next;
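
For context: insert_purged_snap_update() records each snap as the half-open
range [snap, snap + 1), so adjacent entries can coalesce into one range in
the mon's purged-snaps keyspace. Below is a minimal standalone sketch of
that interval convention; merge_purged_snap() is a hypothetical stand-in,
not the Ceph function, which also splits ranges and persists them through
the transaction t.

#include <cstdint>
#include <iostream>
#include <map>

using snapid = std::uint64_t;

// begin -> end; each entry is a half-open purged range [begin, end)
using purged_map = std::map<snapid, snapid>;

// Hypothetical helper: record [snap, snap + 1) and coalesce with any
// neighbouring ranges. Assumes each snap is inserted at most once.
void merge_purged_snap(purged_map& purged, snapid snap) {
  snapid begin = snap, end = snap + 1;
  auto next = purged.find(end);         // a range starting exactly at our end?
  if (next != purged.end()) {
    end = next->second;
    purged.erase(next);
  }
  auto it = purged.lower_bound(begin);  // a range ending exactly at our begin?
  if (it != purged.begin()) {
    --it;
    if (it->second == begin) {
      it->second = end;                 // extend the earlier range
      return;
    }
  }
  purged[begin] = end;
}

int main() {
  purged_map purged;
  for (snapid s : {4, 2, 3, 9})
    merge_purged_snap(purged, s);
  for (auto& [b, e] : purged)
    std::cout << '[' << b << ',' << e << ")\n";  // prints [2,5) then [9,10)
}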

@@ bool OSDMonitor::prepare_pool_op(MonOpRequestRef op) @@
pp.remove_unmanaged_snap(
    m->snapid,
    osdmap.require_osd_release < ceph_release_t::octopus);
pending_inc.new_removed_snaps[m->pool].insert(m->snapid);
- // also record the new seq as purged: this avoids a discontinuity
- // after all of the snaps have been purged, since the seq assigned
- // during removal lives in the same namespace as the actual snaps.
- pending_pseudo_purged_snaps[m->pool].insert(pp.get_snap_seq());
changed = true;
}
break;
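
The removed comment carries the rationale: a pool's snapshot ids and its
snap_seq are drawn from one counter, and removing an unmanaged snap bumps
snap_seq as well, so every removal consumes an id of its own. Marking that
consumed id (pp.get_snap_seq()) as purged kept the purged set contiguous
once all real snaps were gone. A toy model of the discontinuity, with
illustrative names rather than Ceph's:

#include <cstdint>
#include <iostream>
#include <set>

// Toy pool: snap ids and snap_seq share a single counter, loosely
// modelled on pg_pool_t's unmanaged-snap behaviour.
struct ToyPool {
  std::uint64_t snap_seq = 0;      // last id handed out
  std::set<std::uint64_t> snaps;   // live unmanaged snaps

  std::uint64_t create_snap() {
    snaps.insert(++snap_seq);      // new snap id == new seq value
    return snap_seq;
  }
  void remove_snap(std::uint64_t id) {
    snaps.erase(id);
    ++snap_seq;                    // removal also consumes a seq value
  }
};

int main() {
  ToyPool p;
  std::set<std::uint64_t> purged;
  auto a = p.create_snap();        // id 1
  auto b = p.create_snap();        // id 2
  p.remove_snap(a);                // snap_seq becomes 3
  purged.insert(a);
  p.remove_snap(b);                // snap_seq becomes 4
  purged.insert(b);
  // purged == {1,2}; ids 3 and 4 were consumed by the removals yet can
  // never be purged, leaving permanent holes -- the discontinuity the
  // removed lines papered over by also inserting pp.get_snap_seq().
  std::cout << "snap_seq=" << p.snap_seq << ", purged={1,2}\n";
}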

--- a/src/mon/OSDMonitor.h
+++ b/src/mon/OSDMonitor.h
@@ class OSDMonitor @@
std::map<int, failure_info_t> failure_info;
std::map<int,utime_t> down_pending_out; // osd down -> out
bool priority_convert = false;
- std::map<int64_t,std::set<snapid_t>> pending_pseudo_purged_snaps;
std::shared_ptr<PriorityCache::PriCache> rocksdb_binned_kv_cache = nullptr;
std::shared_ptr<PriorityCache::Manager> pcm = nullptr;
ceph::mutex balancer_lock = ceph::make_mutex("OSDMonitor::balancer_lock");
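
Taken together, the removed member followed a simple accumulate-and-flush
lifecycle: cleared in create_pending(), filled per pool in prepare_pool_op(),
and flushed as single-snap ranges in encode_pending(). A self-contained
sketch of that pattern; record_update() is a hypothetical stand-in for
insert_purged_snap_update() and just prints instead of writing a monitor
transaction:

#include <cstdint>
#include <iostream>
#include <map>
#include <set>

using snapid = std::uint64_t;

// Hypothetical stand-in for insert_purged_snap_update().
static void record_update(std::int64_t pool, snapid begin, snapid end,
                          std::uint64_t epoch) {
  std::cout << "pool " << pool << ": purged [" << begin << ',' << end
            << ") at e" << epoch << '\n';
}

int main() {
  std::map<std::int64_t, std::set<snapid>> pending_pseudo_purged_snaps;
  std::uint64_t pending_epoch = 43;

  // prepare phase: each removal records the pool's post-bump snap_seq
  pending_pseudo_purged_snaps[1].insert(12);
  pending_pseudo_purged_snaps[1].insert(13);
  pending_pseudo_purged_snaps[2].insert(7);

  // encode phase: one half-open, single-snap range per recorded id
  for (auto& [pool, snaps] : pending_pseudo_purged_snaps) {
    for (auto snap : snaps)
      record_update(pool, snap, snap + 1, pending_epoch);
  }

  // the next create_pending starts the new epoch from a clean slate
  pending_pseudo_purged_snaps.clear();
}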