ThreadPool::TPHandle& handle)
{
PG::RecoveryCtx rctx = create_context();
- auto curmap = sdata->osdmap;
+ auto curmap = sdata->get_osdmap();
epoch_t need_up_thru = 0, same_interval_since = 0;
if (!pg) {
if (const MQuery *q = dynamic_cast<const MQuery*>(evt->evt.get())) {

set<spg_t> *new_children)
{
Mutex::Locker l(sdata_op_ordering_lock);
- OSDMapRef old_osdmap = std::move(osdmap);
- osdmap = new_osdmap;
+ OSDMapRef old_osdmap;
+ {
+ Mutex::Locker l2(osdmap_lock);
+ old_osdmap = std::move(osdmap);
+ osdmap = new_osdmap;
+ }
dout(10) << new_osdmap->get_epoch()
<< " (was " << (old_osdmap ? old_osdmap->get_epoch() : 0) << ")"
<< dendl;
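
The hunk above shows the core pattern of this change: the shard's osdmap
reference is swapped under a small, dedicated osdmap_lock, and the old
reference is moved out of the critical section so that its release (and the
epoch logging) happens after the lock is dropped. Below is a minimal,
self-contained sketch of the same pattern using plain std::mutex and
std::shared_ptr; Map, MapRef, Shard, and consume_map here are illustrative
stand-ins, not Ceph's types:

    #include <memory>
    #include <mutex>
    #include <utility>

    struct Map { int epoch = 0; };
    using MapRef = std::shared_ptr<const Map>;

    class Shard {
      std::mutex map_lock;  // protects only `map`, nothing else
      MapRef map;           // the currently published map

    public:
      // Writer side: publish a new map. The critical section is just two
      // pointer moves; the old ref is handed back to the caller so the
      // refcount drop (and any destructor work) happens outside the lock.
      MapRef consume_map(MapRef new_map) {
        MapRef old_map;
        {
          std::lock_guard<std::mutex> l(map_lock);
          old_map = std::move(map);
          map = std::move(new_map);
        }
        return old_map;
      }
    };

This is why old_osdmap is declared outside the inner scope in the hunk
above: it must outlive the locked region, both for the "(was ...)" log
line and so that the old map's final reference is dropped without
osdmap_lock held.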
string sdata_op_ordering_lock_name;
Mutex sdata_op_ordering_lock; ///< protects all members below
+ string osdmap_lock_name;
+ Mutex osdmap_lock;  ///< protects osdmap updates vs readers that do not
+                     ///< hold sdata_op_ordering_lock
OSDMapRef osdmap;
+ /// take a consistent, ref-counted snapshot of the shard's current map
+ OSDMapRef get_osdmap() {
+ Mutex::Locker l(osdmap_lock);
+ return osdmap;
+ }
+
/// map of slots for each spg_t. maintains ordering of items dequeued
/// from pqueue while _process thread drops shard lock to acquire the
/// pg lock. stale slots are removed by consume_map.
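
On the reader side, get_osdmap() returns the OSDMapRef by value: copying
the ref under osdmap_lock takes a reference on the map, so the caller
holds a consistent snapshot with no lock held. Continuing the sketch
above (get_map is a hypothetical addition to the sketch's Shard class):

    // Reader side: copy the ref under the lock, use the snapshot after.
    MapRef get_map() {
      std::lock_guard<std::mutex> l(map_lock);
      return map;  // by-value return takes a reference on the snapshot
    }

This is what makes `auto curmap = sdata->get_osdmap()` in the first hunk
safe: even if another thread installs a newer map via consume_map()
immediately afterwards, curmap keeps the epoch it observed alive until
the caller drops it.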
sdata_op_ordering_lock_name(shard_name + "::sdata_op_ordering_lock"),
- sdata_op_ordering_lock(sdata_op_ordering_lock_name.c_str(), false, true,
-                        false, cct) {
+ sdata_op_ordering_lock(sdata_op_ordering_lock_name.c_str(), false, true,
+                        false, cct),
+ osdmap_lock_name(shard_name + "::osdmap_lock"),
+ osdmap_lock(osdmap_lock_name.c_str(), false, false) {  // recursive=false, lockdep=false
if (opqueue == io_queue::weightedpriority) {
pqueue = std::make_unique<
WeightedPriorityQueue<OpQueueItem,uint64_t>>(