}
}
-PG *OSD::_open_lock_pg(pg_t pgid, bool no_lockdep_check, bool hold_map_lock)
+PG *OSD::_open_lock_pg(
+ OSDMapRef createmap,
+ pg_t pgid, bool no_lockdep_check, bool hold_map_lock)
{
assert(osd_lock.is_locked());
hobject_t logoid = make_pg_log_oid(pgid);
hobject_t infooid = make_pg_biginfo_oid(pgid);
if (osdmap->get_pg_type(pgid) == pg_pool_t::TYPE_REP)
- pg = new ReplicatedPG(&service, osdmap, pool, pgid, logoid, infooid);
+ pg = new ReplicatedPG(&service, createmap, pool, pgid, logoid, infooid);
else
assert(0);
return pg;
}
-PG *OSD::_create_lock_pg(pg_t pgid, bool newly_created, bool hold_map_lock,
- int role, vector<int>& up, vector<int>& acting, pg_history_t history,
- pg_interval_map_t& pi,
- ObjectStore::Transaction& t)
+PG *OSD::_create_lock_pg(
+ OSDMapRef createmap,
+ pg_t pgid, bool newly_created, bool hold_map_lock,
+ int role, vector<int>& up, vector<int>& acting, pg_history_t history,
+ pg_interval_map_t& pi,
+ ObjectStore::Transaction& t)
{
assert(osd_lock.is_locked());
dout(20) << "_create_lock_pg pgid " << pgid << dendl;
- PG *pg = _open_lock_pg(pgid, true, hold_map_lock);
+ PG *pg = _open_lock_pg(createmap, pgid, true, hold_map_lock);
t.create_collection(coll_t(pgid));
if (it->is_removal(&seq, &pgid)) {
if (seq >= next_removal_seq)
next_removal_seq = seq + 1;
+ // Log which collection is queued for removal; ", " separates the seq
+ // value from the pgid so the message doesn't read "seq is 12pgid is ...".
+ dout(10) << "queueing coll " << *it << " for removal, seq is "
+ << seq << ", pgid is " << pgid << dendl;
boost::tuple<coll_t, SequencerRef, DeletingStateRef> *to_queue =
new boost::tuple<coll_t, SequencerRef, DeletingStateRef>;
to_queue->get<0>() = *it;
continue;
}
- PG *pg = _open_lock_pg(pgid);
+ PG *pg = _open_lock_pg(osdmap, pgid);
// read pg state, log
pg->read_state(store);
// ok, create PG locally using provided Info and History
PG::RecoveryCtx rctx = create_context();
- pg = _create_lock_pg(info.pgid, create, false, role, up, acting, history, pi,
- *rctx.transaction);
+ pg = _create_lock_pg(
+ get_map(epoch),
+ info.pgid, create, false, role, up, acting, history, pi,
+ *rctx.transaction);
pg->handle_create(&rctx);
dispatch_context(rctx, pg);
history.same_interval_since = history.same_primary_since =
osdmap->get_epoch();
pg_interval_map_t pi;
- PG *pg = _create_lock_pg(*q, true, true,
+ PG *pg = _create_lock_pg(service.get_osdmap(), *q, true, true,
parent->get_role(), parent->up, parent->acting, history, pi, t);
children[*q] = pg;
dout(10) << " child " << *pg << dendl;
if (can_create_pg(pgid)) {
pg_interval_map_t pi;
PG::RecoveryCtx rctx = create_context();
- PG *pg = _create_lock_pg(pgid, true, false,
- 0, creating_pgs[pgid].acting, creating_pgs[pgid].acting,
- history, pi,
- *rctx.transaction);
+ PG *pg = _create_lock_pg(
+ osdmap, pgid, true, false,
+ 0, creating_pgs[pgid].acting, creating_pgs[pgid].acting,
+ history, pi,
+ *rctx.transaction);
creating_pgs.erase(pgid);
wake_pg_waiters(pg->info.pgid);
pg->handle_create(&rctx);
Mutex pg_temp_lock;
map<pg_t, vector<int> > pg_temp_wanted;
void queue_want_pg_temp(pg_t pgid, vector<int>& want);
+ // Drop any pending pg_temp request for pgid. Takes pg_temp_lock itself,
+ // so callers (e.g. PG code clearing requests on a new interval) need not
+ // hold it.
+ void remove_want_pg_temp(pg_t pgid) {
+ Mutex::Locker l(pg_temp_lock);
+ pg_temp_wanted.erase(pgid);
+ }
void send_pg_temp();
void queue_for_peering(PG *pg);
bool _have_pg(pg_t pgid);
PG *_lookup_lock_pg(pg_t pgid);
PG *_lookup_lock_pg_with_map_lock_held(pg_t pgid);
- PG *_open_lock_pg(pg_t pg, bool no_lockdep_check=false, bool hold_map_lock=false);
- PG *_create_lock_pg(pg_t pgid, bool newly_created, bool hold_map_lock,
- int role, vector<int>& up, vector<int>& acting,
+ PG *_open_lock_pg(OSDMapRef createmap,
+ pg_t pg, bool no_lockdep_check=false,
+ bool hold_map_lock=false);
+ PG *_create_lock_pg(OSDMapRef createmap,
+ pg_t pgid, bool newly_created,
+ bool hold_map_lock, int role,
+ vector<int>& up, vector<int>& acting,
pg_history_t history,
- pg_interval_map_t& pi, ObjectStore::Transaction& t);
+ pg_interval_map_t& pi,
+ ObjectStore::Transaction& t);
PG *lookup_lock_raw_pg(pg_t pgid);
assert(!is_active());
// -- crash recovery?
- if (pool->info.crash_replay_interval > 0 &&
+ if (is_primary() &&
+ pool->info.crash_replay_interval > 0 &&
may_need_replay(get_osdmap())) {
replay_until = ceph_clock_now(g_ceph_context);
replay_until += pool->info.crash_replay_interval;
}
}
// make sure we clear out any pg_temp change requests
- osd->pg_temp_wanted.erase(info.pgid);
+ osd->remove_want_pg_temp(info.pgid);
cancel_recovery();
if (acting.empty() && up.size() && up[0] == osd->whoami) {
PG *pg = context< RecoveryMachine >().pg;
pg->proc_replica_info(notify.from, notify.notify.info);
pg->update_heartbeat_peers();
+ pg->set_last_peering_reset();
return transit< Primary >();
}
void PG::RecoveryState::Initial::exit()
{
- PG *pg = context< RecoveryMachine >().pg;
- pg->set_last_peering_reset();
+ // NOTE(review): set_last_peering_reset() is no longer called on every
+ // Initial exit; it moved to the reaction above that transits to Primary.
context< RecoveryMachine >().log_exit(state_name, enter_time);
}