#ifdef PG_DEBUG_REFS
// PG_DEBUG_REFS leak tracking: remember a live PG and take one tracked ref.
void OSDService::add_pgid(spg_t pgid, PG *pg)
{
  std::lock_guard l(pgid_lock);
  if (!pgid_tracker.count(pgid)) {
    live_pgs[pgid] = pg;
  }
  // Balance remove_pgid(): it asserts pgid_tracker[pgid] > 0 and
  // decrements, so every add must bump the per-pgid refcount.
  pgid_tracker[pgid]++;
}
// PG_DEBUG_REFS leak tracking: drop one tracked reference for a PG that
// must already be registered via add_pgid().
void OSDService::remove_pgid(spg_t pgid, PG *pg)
{
  std::lock_guard l(pgid_lock);
  ceph_assert(pgid_tracker.count(pgid));
  ceph_assert(pgid_tracker[pgid] > 0);
  pgid_tracker[pgid]--;
}
void OSDService::dump_live_pgids()
{
std::lock_guard l(pgid_lock);
derr << "live pgids:" << dendl;
for (map<spg_t, int>::const_iterator i = pgid_tracker.cbegin();
i != pgid_tracker.cend();
void OSDService::start_shutdown()
{
{
- Mutex::Locker l(agent_timer_lock);
+ std::lock_guard l(agent_timer_lock);
agent_timer.shutdown();
}
{
- Mutex::Locker l(sleep_lock);
+ std::lock_guard l(sleep_lock);
sleep_timer.shutdown();
}
}
void OSDService::shutdown()
{
{
std::lock_guard l(watch_lock);
watch_timer.shutdown();
}
}
{
std::lock_guard l(recovery_request_lock);
recovery_request_timer.shutdown();
}
void OSDService::agent_stop()
{
{
std::lock_guard l(agent_lock);
// By this time all ops should be cancelled
ceph_assert(agent_ops == 0);
void OSDService::check_full_status(float ratio)
{
std::lock_guard l(full_status_lock);
cur_ratio = ratio;
bool OSDService::_check_full(DoutPrefixProvider *dpp, s_names type) const
{
std::lock_guard l(full_status_lock);
if (injectfull && injectfull_state >= type) {
// injectfull is either a count of the number of times to return failsafe full
bool OSDService::is_failsafe_full() const
{
- Mutex::Locker l(full_status_lock);
+ std::lock_guard l(full_status_lock);
return cur_state == FAILSAFE;
}
bool OSDService::is_full() const
{
- Mutex::Locker l(full_status_lock);
+ std::lock_guard l(full_status_lock);
return cur_state >= FULL;
}
bool OSDService::is_backfillfull() const
{
- Mutex::Locker l(full_status_lock);
+ std::lock_guard l(full_status_lock);
return cur_state >= BACKFILLFULL;
}
bool OSDService::is_nearfull() const
{
- Mutex::Locker l(full_status_lock);
+ std::lock_guard l(full_status_lock);
return cur_state >= NEARFULL;
}
// Inject an artificial fullness state for testing; `count` is consumed
// by _check_full() (a count of times to report the injected state).
void OSDService::set_injectfull(s_names type, int64_t count)
{
  std::lock_guard l(full_status_lock);
  injectfull_state = type;
  injectfull = count;
}
osd->logger->set(l_osd_stat_bytes_used, used);
osd->logger->set(l_osd_stat_bytes_avail, avail);
std::lock_guard l(stat_lock);
osd_stat.kb = bytes >> 10;
osd_stat.kb_used = used >> 10;
osd_stat.kb_avail = avail >> 10;
osd_stat_t OSDService::set_osd_stat(vector<int>& hb_peers,
int num_pgs)
{
std::lock_guard l(stat_lock);
osd_stat.hb_peers.swap(hb_peers);
osd->op_tracker.get_age_ms_histogram(&osd_stat.op_queue_age_hist);
osd_stat.num_pgs = num_pgs;
const vector<int>& want,
bool forced)
{
std::lock_guard l(pg_temp_lock);
auto p = pg_temp_pending.find(pgid);
if (p == pg_temp_pending.end() ||
p->second.acting != want ||
// Forget any wanted or still-pending pg_temp mapping for this pg.
void OSDService::remove_want_pg_temp(pg_t pgid)
{
  std::lock_guard l(pg_temp_lock);
  pg_temp_wanted.erase(pgid);
  pg_temp_pending.erase(pgid);
}
void OSDService::requeue_pg_temp()
{
std::lock_guard l(pg_temp_lock);
// wanted overrides pending. note that remove_want_pg_temp
// clears the item out of both.
unsigned old_wanted = pg_temp_wanted.size();
void OSDService::send_pg_temp()
{
std::lock_guard l(pg_temp_lock);
if (pg_temp_wanted.empty())
return;
dout(10) << "send_pg_temp " << pg_temp_wanted << dendl;
epoch_t OSDService::get_peer_epoch(int peer)
{
std::lock_guard l(peer_map_epoch_lock);
map<int,epoch_t>::iterator p = peer_map_epoch.find(peer);
if (p == peer_map_epoch.end())
return 0;
epoch_t OSDService::note_peer_epoch(int peer, epoch_t e)
{
std::lock_guard l(peer_map_epoch_lock);
map<int,epoch_t>::iterator p = peer_map_epoch.find(peer);
if (p != peer_map_epoch.end()) {
if (p->second < e) {
void OSDService::forget_peer_epoch(int peer, epoch_t as_of)
{
std::lock_guard l(peer_map_epoch_lock);
map<int,epoch_t>::iterator p = peer_map_epoch.find(peer);
if (p != peer_map_epoch.end()) {
if (p->second <= as_of) {
bool OSDService::can_inc_scrubs_pending()
{
bool can_inc = false;
std::lock_guard l(sched_scrub_lock);
if (scrubs_pending + scrubs_active < cct->_conf->osd_max_scrubs) {
dout(20) << __func__ << " " << scrubs_pending << " -> " << (scrubs_pending+1)
void OSDService::retrieve_epochs(epoch_t *_boot_epoch, epoch_t *_up_epoch,
epoch_t *_bind_epoch) const
{
std::lock_guard l(epoch_lock);
if (_boot_epoch)
*_boot_epoch = boot_epoch;
if (_up_epoch)
void OSDService::set_epochs(const epoch_t *_boot_epoch, const epoch_t *_up_epoch,
const epoch_t *_bind_epoch)
{
std::lock_guard l(epoch_lock);
if (_boot_epoch) {
ceph_assert(*_boot_epoch == 0 || *_boot_epoch >= boot_epoch);
boot_epoch = *_boot_epoch;
bool OSDService::prepare_to_stop()
{
std::lock_guard l(is_stopping_lock);
if (get_state() != NOT_STOPPING)
return false;
void OSDService::got_stop_ack()
{
std::lock_guard l(is_stopping_lock);
if (get_state() == PREPARING_TO_STOP) {
dout(0) << __func__ << " starting shutdown" << dendl;
set_state(STOPPING);
bool OSDService::get_inc_map_bl(epoch_t e, bufferlist& bl)
{
std::lock_guard l(map_cache_lock);
bool found = map_bl_inc_cache.lookup(e, &bl);
if (found) {
if (logger)
int OSDService::get_deleted_pool_pg_num(int64_t pool)
{
std::lock_guard l(map_cache_lock);
auto p = deleted_pool_pg_nums.find(pool);
if (p != deleted_pool_pg_nums.end()) {
return p->second;
OSDMapRef OSDService::try_get_map(epoch_t epoch)
{
std::lock_guard l(map_cache_lock);
OSDMapRef retval = map_cache.lookup(epoch);
if (retval) {
dout(30) << "get_map " << epoch << " -cached" << dendl;
void OSDService::set_ready_to_merge_source(PG *pg)
{
std::lock_guard l(merge_lock);
dout(10) << __func__ << " " << pg->pg_id << dendl;
ready_to_merge_source.insert(pg->pg_id.pgid);
assert(not_ready_to_merge_source.count(pg->pg_id.pgid) == 0);
void OSDService::set_ready_to_merge_target(PG *pg, epoch_t last_epoch_clean)
{
std::lock_guard l(merge_lock);
dout(10) << __func__ << " " << pg->pg_id << dendl;
ready_to_merge_target.insert(make_pair(pg->pg_id.pgid, last_epoch_clean));
assert(not_ready_to_merge_target.count(pg->pg_id.pgid) == 0);
void OSDService::set_not_ready_to_merge_source(pg_t source)
{
std::lock_guard l(merge_lock);
dout(10) << __func__ << " " << source << dendl;
not_ready_to_merge_source.insert(source);
assert(ready_to_merge_source.count(source) == 0);
void OSDService::set_not_ready_to_merge_target(pg_t target, pg_t source)
{
std::lock_guard l(merge_lock);
dout(10) << __func__ << " " << target << " source " << source << dendl;
not_ready_to_merge_target[target] = source;
assert(ready_to_merge_target.count(target) == 0);
void OSDService::send_ready_to_merge()
{
- Mutex::Locker l(merge_lock);
+ std::lock_guard l(merge_lock);
_send_ready_to_merge();
}
void OSDService::clear_ready_to_merge(PG *pg)
{
std::lock_guard l(merge_lock);
dout(10) << __func__ << " " << pg->pg_id << dendl;
ready_to_merge_source.erase(pg->pg_id.pgid);
ready_to_merge_target.erase(pg->pg_id.pgid);
void OSDService::clear_sent_ready_to_merge()
{
- Mutex::Locker l(merge_lock);
+ std::lock_guard l(merge_lock);
sent_ready_to_merge_source.clear();
}
void OSDService::prune_sent_ready_to_merge(OSDMapRef& osdmap)
{
std::lock_guard l(merge_lock);
auto i = sent_ready_to_merge_source.begin();
while (i != sent_ready_to_merge_source.end()) {
if (!osdmap->pg_exists(*i)) {
int OSD::pre_init()
{
std::lock_guard lock(osd_lock);
if (is_stopping())
return 0;
int OSD::init()
{
CompatSet initial, diff;
std::lock_guard lock(osd_lock);
if (is_stopping())
return 0;
// initialize osdmap references in sharded wq
for (auto& shard : shards) {
std::lock_guard l(shard->osdmap_lock);
shard->shard_osdmap = osdmap;
}
tick_timer.add_event_after(get_tick_interval(),
new C_Tick(this));
{
std::lock_guard l(tick_timer_lock);
tick_timer_without_osd_lock.add_event_after(get_tick_interval(),
new C_Tick_WithoutOSDLock(this));
}
tick_timer.shutdown();
{
std::lock_guard l(tick_timer_lock);
tick_timer_without_osd_lock.shutdown();
}
osdmap = OSDMapRef();
for (auto s : shards) {
std::lock_guard l(s->osdmap_lock);
s->shard_osdmap = OSDMapRef();
}
service.shutdown();
{
v->clear();
for (auto& s : shards) {
std::lock_guard l(s->shard_lock);
for (auto& j : s->pg_slots) {
if (j.second->pg &&
!j.second->pg->is_deleted()) {
{
v->clear();
for (auto& s : shards) {
std::lock_guard l(s->shard_lock);
for (auto& j : s->pg_slots) {
if (j.second->pg &&
!j.second->pg->is_deleted()) {
spg_t pgid = pg->get_pgid();
uint32_t shard_index = pgid.hash_to_shard(num_shards);
auto sdata = shards[shard_index];
std::lock_guard l(sdata->shard_lock);
auto r = sdata->pg_slots.emplace(pgid, make_unique<OSDShardPGSlot>());
ceph_assert(r.second);
auto *slot = r.first->second.get();
auto sdata = pg->osd_shard;
ceph_assert(sdata);
{
std::lock_guard l(sdata->shard_lock);
auto p = sdata->pg_slots.find(pg->pg_id);
if (p == sdata->pg_slots.end() ||
!p->second->pg) {
{
uint32_t shard_index = pgid.hash_to_shard(num_shards);
auto sdata = shards[shard_index];
std::lock_guard l(sdata->shard_lock);
auto p = sdata->pg_slots.find(pgid);
if (p == sdata->pg_slots.end()) {
return nullptr;
return;
heartbeat_clear_peers_need_update();
std::lock_guard l(heartbeat_lock);
dout(10) << "maybe_update_heartbeat_peers updating" << dendl;
{
ceph_assert(osd_lock.is_locked());
dout(10) << "reset_heartbeat_peers" << dendl;
std::lock_guard l(heartbeat_lock);
while (!heartbeat_peers.empty()) {
HeartbeatInfo& hi = heartbeat_peers.begin()->second;
hi.con_back->mark_down();
void OSD::heartbeat_entry()
{
std::lock_guard l(heartbeat_lock);
if (is_stopping())
return;
while (!heartbeat_stop) {
start_boot();
if (is_waiting_for_healthy()) {
// failed to boot
std::lock_guard l(heartbeat_lock);
utime_t now = ceph_clock_now();
if (now - last_mon_heartbeat > cct->_conf->osd_mon_heartbeat_interval) {
last_mon_heartbeat = now;
heartbeat_lock.Unlock();
map_lock.get_read();
std::lock_guard l(mon_report_lock);
// mon report?
utime_t now = ceph_clock_now();
const auto now = ceph::coarse_mono_clock::now();
{
// borrow lec lock to pretect last_sent_beacon from changing
std::lock_guard l{min_last_epoch_clean_lock};
const auto elapsed = now - last_sent_beacon;
if (chrono::duration_cast<chrono::seconds>(elapsed).count() >
cct->_conf->osd_beacon_report_interval) {
{
dout(10) << __func__ << " con " << con << dendl;
if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON) {
std::lock_guard l(osd_lock);
if (is_stopping())
return;
dout(10) << __func__ << " on mon" << dendl;
_send_boot(); // resend boot message
} else {
map_lock.get_read();
std::lock_guard l2(mon_report_lock);
utime_t now = ceph_clock_now();
last_mon_report = now;
// Handle the oldest/newest map epochs reported by the monitor; during
// preboot this drives the next preboot step.
void OSD::_got_mon_epochs(epoch_t oldest, epoch_t newest)
{
  std::lock_guard l(osd_lock);
  if (is_preboot()) {
    _preboot(oldest, newest);
  }
}
boot_finisher.queue(
new FunctionContext(
[this](int r) {
std::lock_guard l(osd_lock);
if (is_preboot()) {
dout(10) << __func__ << " waiting for peering work to drain"
<< dendl;
}
if (is_waiting_for_healthy()) {
std::lock_guard l(heartbeat_lock);
utime_t now = ceph_clock_now();
int num = 0, up = 0;
for (map<int,HeartbeatInfo>::iterator p = heartbeat_peers.begin();
{
map_lock.get_read();
epoch_t cur = osdmap->get_up_thru(whoami);
std::lock_guard l(mon_report_lock);
if (want > up_thru_wanted) {
dout(10) << "queue_want_up_thru now " << want << " (was " << up_thru_wanted << ")"
<< ", currently " << cur
void OSD::requeue_failures()
{
std::lock_guard l(heartbeat_lock);
unsigned old_queue = failure_queue.size();
unsigned old_pending = failure_pending.size();
for (auto p = failure_pending.begin(); p != failure_pending.end(); ) {
{
ceph_assert(map_lock.is_locked());
ceph_assert(mon_report_lock.is_locked());
std::lock_guard l(heartbeat_lock);
utime_t now = ceph_clock_now();
while (!failure_queue.empty()) {
int osd = failure_queue.begin()->first;
dout(20) << __func__ << " sending" << dendl;
MOSDBeacon* beacon = nullptr;
{
std::lock_guard l{min_last_epoch_clean_lock};
beacon = new MOSDBeacon(osdmap->get_epoch(), min_last_epoch_clean);
std::swap(beacon->pgs, min_last_epoch_clean_pgs);
last_sent_beacon = now;
// them to an spg_t while preserving delivery order.
auto priv = m->get_connection()->get_priv();
if (auto session = static_cast<Session*>(priv.get()); session) {
std::lock_guard l{session->session_dispatch_lock};
op->get();
session->waiting_on_map.push_back(*op);
OSDMapRef nextmap = service.get_nextmap_reserved();
auto m = new MPGStats(monc->get_fsid(), osdmap->get_epoch(), had_for);
m->osd_stat = cur_stat;
std::lock_guard lec{min_last_epoch_clean_lock};
min_last_epoch_clean = osdmap->get_epoch();
min_last_epoch_clean_pgs.clear();
vector<PGRef> pgs;
void OSD::osdmap_subscribe(version_t epoch, bool force_request)
{
std::lock_guard l(osdmap_subscribe_lock);
if (latest_subscribed_epoch >= epoch && !force_request)
return;
dout(10) << __func__ << " bailing, we are shutting down" << dendl;
return;
}
std::lock_guard l(osd_lock);
if (is_stopping()) {
dout(10) << __func__ << " bailing, we are shutting down" << dendl;
return;
if (do_shutdown) {
if (network_error) {
std::lock_guard l(heartbeat_lock);
auto it = failure_pending.begin();
while (it != failure_pending.end()) {
dout(10) << "handle_osd_ping canceling in-flight failure report for osd."
bool OSD::add_merge_waiter(OSDMapRef nextmap, spg_t target, PGRef src,
unsigned need)
{
std::lock_guard l(merge_lock);
auto& p = merge_waiters[nextmap->get_epoch()][target];
p[src->pg_id] = src;
dout(10) << __func__ << " added merge_waiter " << src->pg_id
pg->on_shutdown();
OSDShard *sdata = pg->osd_shard;
{
std::lock_guard l(sdata->shard_lock);
if (pg->pg_slot) {
sdata->_detach_pg(pg->pg_slot);
// update pg count now since we might not get an osdmap
<< dendl;
map<spg_t,PGRef> sources;
{
std::lock_guard l(merge_lock);
auto& s = merge_waiters[nextmap->get_epoch()][pg->pg_id];
unsigned need = children.size();
dout(20) << __func__ << " have " << s.size() << "/"
*/
float recovery_sleep = get_osd_recovery_sleep();
{
std::lock_guard l(service.sleep_lock);
if (recovery_sleep > 0 && service.recovery_needs_sleep) {
PGRef pgref(pg);
auto recovery_requeue_callback = new FunctionContext([this, pgref, queued, reserved_pushes](int r) {
dout(20) << "do_recovery wake up at "
<< ceph_clock_now()
<< ", re-queuing recovery" << dendl;
std::lock_guard l(service.sleep_lock);
service.recovery_needs_sleep = false;
service.queue_recovery_after_sleep(pgref.get(), queued, reserved_pushes);
});
{
{
std::lock_guard l(service.sleep_lock);
service.recovery_needs_sleep = true;
}
void OSDService::start_recovery_op(PG *pg, const hobject_t& soid)
{
std::lock_guard l(recovery_lock);
dout(10) << "start_recovery_op " << *pg << " " << soid
<< " (" << recovery_ops_active << "/"
<< cct->_conf->osd_recovery_max_active << " rops)"
void OSDService::finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue)
{
std::lock_guard l(recovery_lock);
dout(10) << "finish_recovery_op " << *pg << " " << soid
<< " dequeue=" << dequeue
<< " (" << recovery_ops_active << "/" << cct->_conf->osd_recovery_max_active << " rops)"
void OSDService::release_reserved_pushes(uint64_t pushes)
{
std::lock_guard l(recovery_lock);
dout(10) << __func__ << "(" << pushes << "), recovery_ops_reserved "
<< recovery_ops_reserved << " -> " << (recovery_ops_reserved-pushes)
<< dendl;
void OSDShard::update_pg_epoch(OSDShardPGSlot *slot, epoch_t e)
{
std::lock_guard l(shard_lock);
dout(30) << "min was " << pg_slots_by_epoch.begin()->epoch
<< " on " << pg_slots_by_epoch.begin()->pg->pg_id << dendl;
pg_slots_by_epoch.erase(pg_slots_by_epoch.iterator_to(*slot));
epoch_t OSDShard::get_min_pg_epoch()
{
std::lock_guard l(shard_lock);
auto p = pg_slots_by_epoch.begin();
if (p == pg_slots_by_epoch.end()) {
return 0;
void OSDShard::wait_min_pg_epoch(epoch_t need)
{
std::lock_guard l(shard_lock);
++waiting_for_min_pg_epoch;
while (!pg_slots_by_epoch.empty() &&
pg_slots_by_epoch.begin()->epoch < need) {
epoch_t OSDShard::get_max_waiting_epoch()
{
std::lock_guard l(shard_lock);
epoch_t r = 0;
for (auto& i : pg_slots) {
if (!i.second->waiting_peering.empty()) {
OSDMapRef& new_osdmap,
unsigned *pushes_to_free)
{
std::lock_guard l(shard_lock);
OSDMapRef old_osdmap;
{
std::lock_guard l(osdmap_lock);
old_osdmap = std::move(shard_osdmap);
shard_osdmap = new_osdmap;
}
set<pair<spg_t,epoch_t>> *split_pgs,
set<pair<spg_t,epoch_t>> *merge_pgs)
{
std::lock_guard l(shard_lock);
if (shard_osdmap) {
for (auto& i : pg_slots) {
const spg_t& pgid = i.first;
void OSDShard::prime_splits(const OSDMapRef& as_of_osdmap,
set<pair<spg_t,epoch_t>> *pgids)
{
std::lock_guard l(shard_lock);
_prime_splits(pgids);
if (shard_osdmap->get_epoch() > as_of_osdmap->get_epoch()) {
set<pair<spg_t,epoch_t>> newer_children;
void OSDShard::prime_merges(const OSDMapRef& as_of_osdmap,
set<pair<spg_t,epoch_t>> *merge_pgs)
{
std::lock_guard l(shard_lock);
dout(20) << __func__ << " checking shard " << shard_id
<< " for remaining merge pgs " << merge_pgs << dendl;
auto p = merge_pgs->begin();
{
epoch_t epoch;
{
std::lock_guard l(shard_lock);
dout(10) << pg->pg_id << " " << pg << dendl;
auto p = pg_slots.find(pg->pg_id);
ceph_assert(p != pg_slots.end());
void OSDShard::unprime_split_children(spg_t parent, unsigned old_pg_num)
{
std::lock_guard l(shard_lock);
vector<spg_t> to_delete;
for (auto& i : pg_slots) {
if (i.first != parent &&
public:
// Return a copy of the current superblock; publish_lock serializes
// against publish_superblock().
OSDSuperblock get_superblock() {
  std::lock_guard l(publish_lock);
  return superblock;
}
void publish_superblock(const OSDSuperblock &block) {
- Mutex::Locker l(publish_lock);
+ std::lock_guard l(publish_lock);
superblock = block;
}
public:
// Return a ref to the currently published osdmap.
OSDMapRef get_osdmap() {
  std::lock_guard l(publish_lock);
  return osdmap;
}
// Epoch of the published osdmap, or 0 if none has been published yet.
epoch_t get_osdmap_epoch() {
  std::lock_guard l(publish_lock);
  return osdmap ? osdmap->get_epoch() : 0;
}
// Atomically publish a new osdmap ref.
void publish_map(OSDMapRef map) {
  std::lock_guard l(publish_lock);
  osdmap = map;
}
public:
// Stage the next osdmap (consumed by get_nextmap_reserved()).
void pre_publish_map(OSDMapRef map) {
  std::lock_guard l(pre_publish_lock);
  next_osdmap = std::move(map);
}
/// gets ref to next_osdmap and registers the epoch as reserved
OSDMapRef get_nextmap_reserved() {
std::lock_guard l(pre_publish_lock);
if (!next_osdmap)
return OSDMapRef();
epoch_t e = next_osdmap->get_epoch();
}
/// releases reservation on map
void release_map(OSDMapRef osdmap) {
std::lock_guard l(pre_publish_lock);
map<epoch_t, unsigned>::iterator i =
map_reservations.find(osdmap->get_epoch());
ceph_assert(i != map_reservations.end());
}
/// blocks until there are no reserved maps prior to next_osdmap
void await_reserved_maps() {
std::lock_guard l(pre_publish_lock);
ceph_assert(next_osdmap);
while (true) {
map<epoch_t, unsigned>::const_iterator i = map_reservations.cbegin();
}
}
OSDMapRef get_next_osdmap() {
std::lock_guard l(pre_publish_lock);
if (!next_osdmap)
return OSDMapRef();
return next_osdmap;
double pool_scrub_max_interval, bool must) {
ScrubJob scrub(cct, pgid, t, pool_scrub_min_interval, pool_scrub_max_interval,
must);
std::lock_guard l(sched_scrub_lock);
sched_scrub_pg.insert(scrub);
return scrub.sched_time;
}
// Remove a previously registered scrub job; it must exist.
void unreg_pg_scrub(spg_t pgid, utime_t t) {
  std::lock_guard l(sched_scrub_lock);
  size_t removed = sched_scrub_pg.erase(ScrubJob(cct, pgid, t));
  ceph_assert(removed);
}
bool first_scrub_stamp(ScrubJob *out) {
std::lock_guard l(sched_scrub_lock);
if (sched_scrub_pg.empty())
return false;
set<ScrubJob>::iterator iter = sched_scrub_pg.begin();
}
bool next_scrub_stamp(const ScrubJob& next,
ScrubJob *out) {
std::lock_guard l(sched_scrub_lock);
if (sched_scrub_pg.empty())
return false;
set<ScrubJob>::const_iterator iter = sched_scrub_pg.lower_bound(next);
void dumps_scrub(Formatter *f) {
ceph_assert(f != nullptr);
std::lock_guard l(sched_scrub_lock);
f->open_array_section("scrubs");
for (const auto &i: sched_scrub_pg) {
/// enable agent for a pg
/// enable agent for a pg
void agent_enable_pg(PG *pg, uint64_t priority) {
  std::lock_guard l(agent_lock);
  _enqueue(pg, priority);
}
/// adjust priority for an enagled pg
void agent_adjust_pg(PG *pg, uint64_t old_priority, uint64_t new_priority) {
std::lock_guard l(agent_lock);
ceph_assert(new_priority != old_priority);
_enqueue(pg, new_priority);
_dequeue(pg, old_priority);
/// disable agent for a pg
/// disable agent for a pg
void agent_disable_pg(PG *pg, uint64_t old_priority) {
  std::lock_guard l(agent_lock);
  _dequeue(pg, old_priority);
}
/// note start of an async (evict) op
void agent_start_evict_op() {
- Mutex::Locker l(agent_lock);
+ std::lock_guard l(agent_lock);
++agent_ops;
}
/// note finish or cancellation of an async (evict) op
void agent_finish_evict_op() {
std::lock_guard l(agent_lock);
ceph_assert(agent_ops > 0);
--agent_ops;
agent_cond.Signal();
/// note start of an async (flush) op
void agent_start_op(const hobject_t& oid) {
std::lock_guard l(agent_lock);
++agent_ops;
ceph_assert(agent_oids.count(oid) == 0);
agent_oids.insert(oid);
/// note finish or cancellation of an async (flush) op
void agent_finish_op(const hobject_t& oid) {
std::lock_guard l(agent_lock);
ceph_assert(agent_ops > 0);
--agent_ops;
ceph_assert(agent_oids.count(oid) == 1);
/// check if we are operating on an object
bool agent_is_active_oid(const hobject_t& oid) {
- Mutex::Locker l(agent_lock);
+ std::lock_guard l(agent_lock);
return agent_oids.count(oid);
}
/// get count of active agent ops
int agent_get_num_ops() {
- Mutex::Locker l(agent_lock);
+ std::lock_guard l(agent_lock);
return agent_ops;
}
void agent_inc_high_count() {
- Mutex::Locker l(agent_lock);
+ std::lock_guard l(agent_lock);
flush_mode_high_count ++;
}
void agent_dec_high_count() {
- Mutex::Locker l(agent_lock);
+ std::lock_guard l(agent_lock);
flush_mode_high_count --;
}
SafeTimer watch_timer;
uint64_t next_notif_id;
// Build a unique notify id: epoch in the high 32 bits, monotonically
// increasing counter (protected by watch_lock) in the low 32.
uint64_t get_next_id(epoch_t cur_epoch) {
  std::lock_guard l(watch_lock);
  return (((uint64_t)cur_epoch) << 32) | ((uint64_t)(next_notif_id++));
}
defer_recovery_until += defer_for;
}
void pause_recovery() {
- Mutex::Locker l(recovery_lock);
+ std::lock_guard l(recovery_lock);
recovery_paused = true;
}
bool recovery_is_paused() {
- Mutex::Locker l(recovery_lock);
+ std::lock_guard l(recovery_lock);
return recovery_paused;
}
void unpause_recovery() {
- Mutex::Locker l(recovery_lock);
+ std::lock_guard l(recovery_lock);
recovery_paused = false;
_maybe_queue_recovery();
}
void kick_recovery_queue() {
- Mutex::Locker l(recovery_lock);
+ std::lock_guard l(recovery_lock);
_maybe_queue_recovery();
}
void clear_queued_recovery(PG *pg) {
std::lock_guard l(recovery_lock);
awaiting_throttle.remove_if(
[pg](decltype(awaiting_throttle)::const_reference awaiting ) {
return awaiting.second.get() == pg;
}
// delayed pg activation
void queue_for_recovery(PG *pg) {
std::lock_guard l(recovery_lock);
if (pg->is_forced_recovery_or_backfill()) {
awaiting_throttle.push_front(make_pair(pg->get_osdmap()->get_epoch(), pg));
_maybe_queue_recovery();
}
// Requeue recovery for a pg after its sleep interval has elapsed.
void queue_recovery_after_sleep(PG *pg, epoch_t queued, uint64_t reserved_pushes) {
  std::lock_guard l(recovery_lock);
  _queue_for_recovery(make_pair(queued, pg), reserved_pushes);
}
return ret;
}
// Locked wrapper around _add_map().
OSDMapRef add_map(OSDMap *o) {
  std::lock_guard l(map_cache_lock);
  return _add_map(o);
}
OSDMapRef _add_map(OSDMap *o);
// Locked wrapper around _add_map_bl().
void add_map_bl(epoch_t e, bufferlist& bl) {
  std::lock_guard l(map_cache_lock);
  return _add_map_bl(e, bl);
}
void _add_map_bl(epoch_t e, bufferlist& bl);
// Locked wrapper around _get_map_bl().
bool get_map_bl(epoch_t e, bufferlist& bl) {
  std::lock_guard l(map_cache_lock);
  return _get_map_bl(e, bl);
}
bool _get_map_bl(epoch_t e, bufferlist& bl);
// Locked wrapper around _add_map_inc_bl().
void add_map_inc_bl(epoch_t e, bufferlist& bl) {
  std::lock_guard l(map_cache_lock);
  return _add_map_inc_bl(e, bl);
}
void _add_map_inc_bl(epoch_t e, bufferlist& bl);
int get_deleted_pool_pg_num(int64_t pool);
void store_deleted_pool_pg_num(int64_t pool, int pg_num) {
- Mutex::Locker l(map_cache_lock);
+ std::lock_guard l(map_cache_lock);
deleted_pool_pg_nums[pool] = pg_num;
}
void set_statfs(const struct store_statfs_t &stbuf);
osd_stat_t set_osd_stat(vector<int>& hb_peers, int num_pgs);
// Return a snapshot of osd_stat with a fresh sequence number
// (up_from epoch in the high 32 bits, per-boot counter in the low 32).
osd_stat_t get_osd_stat() {
  std::lock_guard l(stat_lock);
  ++seq;
  osd_stat.up_from = up_epoch;
  osd_stat.seq = ((uint64_t)osd_stat.up_from << 32) + seq;
  return osd_stat;
}
uint64_t get_osd_stat_seq() {
- Mutex::Locker l(stat_lock);
+ std::lock_guard l(stat_lock);
return osd_stat.seq;
}
OSDMapRef shard_osdmap;
// Return this shard's current osdmap ref.
OSDMapRef get_osdmap() {
  std::lock_guard l(osdmap_lock);
  return shard_osdmap;
}
/// Caller assumes refs for included Sessions
/// Caller assumes refs for included Sessions
void get_sessions_waiting_for_map(set<SessionRef> *out) {
  std::lock_guard l(session_waiting_lock);
  out->swap(session_waiting_for_map);
}
// Remember a session that must be re-dispatched once a newer map arrives.
void register_session_waiting_on_map(SessionRef session) {
  std::lock_guard l(session_waiting_lock);
  session_waiting_for_map.insert(session);
}
// Forget a session previously registered as waiting on a map.
void clear_session_waiting_on_map(SessionRef session) {
  std::lock_guard l(session_waiting_lock);
  session_waiting_for_map.erase(session);
}
void dispatch_sessions_waiting_on_map() {
for (auto i = sessions_to_check.begin();
i != sessions_to_check.end();
sessions_to_check.erase(i++)) {
std::lock_guard l{(*i)->session_dispatch_lock};
SessionRef session = *i;
dispatch_session_waiting(session, osdmap);
}
}
void session_handle_reset(SessionRef session) {
std::lock_guard l(session->session_dispatch_lock);
clear_session_waiting_on_map(session);
session->clear_backoffs();
void need_heartbeat_peer_update();
void heartbeat_kick() {
- Mutex::Locker l(heartbeat_lock);
+ std::lock_guard l(heartbeat_lock);
heartbeat_cond.Signal();
}
uint32_t shard_index = thread_index % osd->num_shards;
auto &&sdata = osd->shards[shard_index];
ceph_assert(sdata);
std::lock_guard l(sdata->shard_lock);
if (thread_index < osd->num_shards) {
return sdata->pqueue->empty() && sdata->context_queue.empty();
} else {