From: Sage Weil <sage@redhat.com>
Date: Wed, 10 Oct 2018 16:04:09 +0000 (-0500)
Subject: os/bluestore: use deduction for lock_guard<>, unique_lock<>
X-Git-Tag: v14.0.1~18^2~7
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=c0bc4f1033a0faeda6530943a727042fe88306e7;p=ceph-ci.git

os/bluestore: use deduction for lock_guard<>, unique_lock<>

Signed-off-by: Sage Weil <sage@redhat.com>
---

diff --git a/src/os/bluestore/BitmapFreelistManager.cc b/src/os/bluestore/BitmapFreelistManager.cc
index 87066f0942d..fac0e44d856 100644
--- a/src/os/bluestore/BitmapFreelistManager.cc
+++ b/src/os/bluestore/BitmapFreelistManager.cc
@@ -180,7 +180,7 @@ void BitmapFreelistManager::shutdown()
 
 void BitmapFreelistManager::enumerate_reset()
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   enumerate_offset = 0;
   enumerate_bl_pos = 0;
   enumerate_bl.clear();
@@ -221,7 +221,7 @@ int get_next_set_bit(bufferlist& bl, int start)
 
 bool BitmapFreelistManager::enumerate_next(uint64_t *offset, uint64_t *length)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
 
   // initial base case is a bit awkward
   if (enumerate_offset == 0 && enumerate_bl_pos == 0) {
diff --git a/src/os/bluestore/BlockDevice.cc b/src/os/bluestore/BlockDevice.cc
index ca19e54a867..ac2df83b735 100644
--- a/src/os/bluestore/BlockDevice.cc
+++ b/src/os/bluestore/BlockDevice.cc
@@ -44,7 +44,7 @@
 
 void IOContext::aio_wait()
 {
-  std::unique_lock<std::mutex> l(lock);
+  std::unique_lock l(lock);
   // see _aio_thread for waker logic
   while (num_running.load() > 0) {
     dout(10) << __func__ << " " << this
@@ -139,7 +139,7 @@ BlockDevice *BlockDevice::create(CephContext* cct, const string& path,
 
 void BlockDevice::queue_reap_ioc(IOContext *ioc)
 {
-  std::lock_guard<std::mutex> l(ioc_reap_lock);
+  std::lock_guard l(ioc_reap_lock);
   if (ioc_reap_count.load() == 0)
     ++ioc_reap_count;
   ioc_reap_queue.push_back(ioc);
@@ -148,7 +148,7 @@ void BlockDevice::queue_reap_ioc(IOContext *ioc)
 void BlockDevice::reap_ioc()
 {
   if (ioc_reap_count.load()) {
-    std::lock_guard<std::mutex> l(ioc_reap_lock);
+    std::lock_guard l(ioc_reap_lock);
     for (auto p : ioc_reap_queue) {
       dout(20) << __func__ << " reap ioc " << p << dendl;
       delete p;
diff --git a/src/os/bluestore/BlockDevice.h b/src/os/bluestore/BlockDevice.h
index 462891ca700..185abbf35b9 100644
--- a/src/os/bluestore/BlockDevice.h
+++ b/src/os/bluestore/BlockDevice.h
@@ -84,7 +84,7 @@ public:
       // as there is no lock protection for aio_submit.
       // Hence we might have false conditional trigger.
       // aio_wait has to handle that hence do not care here.
-      std::lock_guard<std::mutex> l(lock);
+      std::lock_guard l(lock);
       cond.notify_all();
     }
   }
diff --git a/src/os/bluestore/BlueFS.cc b/src/os/bluestore/BlueFS.cc
index 058541a30ed..af9a516391e 100644
--- a/src/os/bluestore/BlueFS.cc
+++ b/src/os/bluestore/BlueFS.cc
@@ -189,7 +189,7 @@ uint64_t BlueFS::get_block_device_size(unsigned id)
 
 void BlueFS::add_block_extent(unsigned id, uint64_t offset, uint64_t length)
 {
-  std::unique_lock<std::mutex> l(lock);
+  std::unique_lock l(lock);
   dout(1) << __func__ << " bdev " << id
           << " 0x" << std::hex << offset << "~" << length << std::dec
           << dendl;
@@ -213,7 +213,7 @@ void BlueFS::add_block_extent(unsigned id, uint64_t offset, uint64_t length)
 
 int BlueFS::reclaim_blocks(unsigned id, uint64_t want, PExtentVector *extents)
 {
-  std::unique_lock<std::mutex> l(lock);
+  std::unique_lock l(lock);
   dout(1) << __func__ << " bdev " << id
           << " want 0x" << std::hex << want << std::dec << dendl;
   ceph_assert(id < alloc.size());
@@ -253,7 +253,7 @@ void BlueFS::handle_discard(unsigned id, interval_set<uint64_t>& to_release)
 
 uint64_t BlueFS::get_used()
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   uint64_t used = 0;
   for (unsigned id = 0; id < MAX_BDEV; ++id) {
     if (alloc[id]) {
@@ -265,14 +265,14 @@ uint64_t BlueFS::get_used()
 
 uint64_t BlueFS::get_total(unsigned id)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   ceph_assert(id < block_all.size());
   return block_all[id].size();
 }
 
 uint64_t BlueFS::get_free(unsigned id)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   ceph_assert(id < alloc.size());
   return alloc[id]->get_free();
 }
@@ -302,7 +302,7 @@ void BlueFS::dump_block_extents(ostream& out)
 
 void BlueFS::get_usage(vector<pair<uint64_t,uint64_t>> *usage)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   usage->resize(bdev.size());
   for (unsigned id = 0; id < bdev.size(); ++id) {
     if (!bdev[id]) {
@@ -325,7 +325,7 @@ void BlueFS::get_usage(vector<pair<uint64_t,uint64_t>> *usage)
 
 int BlueFS::get_block_extents(unsigned id, interval_set<uint64_t> *extents)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " bdev " << id << dendl;
   if (id >= block_all.size())
     return -EINVAL;
@@ -335,7 +335,7 @@ int BlueFS::get_block_extents(unsigned id, interval_set<uint64_t> *extents)
 
 int BlueFS::mkfs(uuid_d osd_uuid)
 {
-  std::unique_lock<std::mutex> l(lock);
+  std::unique_lock l(lock);
   dout(1) << __func__
           << " osd_uuid " << osd_uuid
           << dendl;
@@ -510,7 +510,7 @@ void BlueFS::get_devices(set<string> *ls)
 
 int BlueFS::fsck()
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(1) << __func__ << dendl;
   // hrm, i think we check everything on mount...
   return 0;
@@ -1178,7 +1178,7 @@ uint64_t BlueFS::_estimate_log_size()
 
 void BlueFS::compact_log()
 {
-  std::unique_lock<std::mutex> l(lock);
+  std::unique_lock l(lock);
   if (cct->_conf->bluefs_compact_log_sync) {
     _compact_log_sync();
   } else {
@@ -1471,7 +1471,7 @@ void BlueFS::_pad_bl(bufferlist& bl)
 
 void BlueFS::flush_log()
 {
-  std::unique_lock<std::mutex> l(lock);
+  std::unique_lock l(lock);
   flush_bdev();
   _flush_and_sync_log(l);
 }
@@ -2057,7 +2057,7 @@ int BlueFS::_preallocate(FileRef f, uint64_t off, uint64_t len)
 
 void BlueFS::sync_metadata()
 {
-  std::unique_lock<std::mutex> l(lock);
+  std::unique_lock l(lock);
   if (log_t.empty()) {
     dout(10) << __func__ << " - no pending log events" << dendl;
   } else {
@@ -2083,7 +2083,7 @@ int BlueFS::open_for_write(
   FileWriter **h,
   bool overwrite)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " " << dirname << "/" << filename << dendl;
   map<string,DirRef>::iterator p = dir_map.find(dirname);
   DirRef dir;
@@ -2201,7 +2201,7 @@ int BlueFS::open_for_read(
   FileReader **h,
   bool random)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " " << dirname << "/" << filename
           << (random ? " (random)":" (sequential)") << dendl;
   map<string,DirRef>::iterator p = dir_map.find(dirname);
@@ -2230,7 +2230,7 @@ int BlueFS::rename(
   const string& old_dirname, const string& old_filename,
   const string& new_dirname, const string& new_filename)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " " << old_dirname << "/" << old_filename
           << " -> " << new_dirname << "/" << new_filename << dendl;
   map<string,DirRef>::iterator p = dir_map.find(old_dirname);
@@ -2277,7 +2277,7 @@ int BlueFS::rename(
 
 int BlueFS::mkdir(const string& dirname)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " " << dirname << dendl;
   map<string,DirRef>::iterator p = dir_map.find(dirname);
   if (p != dir_map.end()) {
@@ -2291,7 +2291,7 @@ int BlueFS::mkdir(const string& dirname)
 
 int BlueFS::rmdir(const string& dirname)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " " << dirname << dendl;
   map<string,DirRef>::iterator p = dir_map.find(dirname);
   if (p == dir_map.end()) {
@@ -2310,7 +2310,7 @@ int BlueFS::rmdir(const string& dirname)
 
 bool BlueFS::dir_exists(const string& dirname)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   map<string,DirRef>::iterator p = dir_map.find(dirname);
   bool exists = p != dir_map.end();
   dout(10) << __func__ << " " << dirname << " = " << (int)exists << dendl;
@@ -2320,7 +2320,7 @@ bool BlueFS::dir_exists(const string& dirname)
 int BlueFS::stat(const string& dirname, const string& filename,
                  uint64_t *size, utime_t *mtime)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " " << dirname << "/" << filename << dendl;
   map<string,DirRef>::iterator p = dir_map.find(dirname);
   if (p == dir_map.end()) {
@@ -2348,7 +2348,7 @@ int BlueFS::stat(const string& dirname, const string& filename,
 int BlueFS::lock_file(const string& dirname, const string& filename,
                       FileLock **plock)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " " << dirname << "/" << filename << dendl;
   map<string,DirRef>::iterator p = dir_map.find(dirname);
   if (p == dir_map.end()) {
@@ -2386,7 +2386,7 @@ int BlueFS::lock_file(const string& dirname, const string& filename,
 
 int BlueFS::unlock_file(FileLock *fl)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " " << fl << " on " << fl->file->fnode << dendl;
   ceph_assert(fl->file->locked);
   fl->file->locked = false;
@@ -2396,7 +2396,7 @@ int
 BlueFS::readdir(const string& dirname, vector<string> *ls)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " " << dirname << dendl;
   if (dirname.empty()) {
     // list dirs
@@ -2424,7 +2424,7 @@ int BlueFS::readdir(const string& dirname, vector<string> *ls)
 
 int BlueFS::unlink(const string& dirname, const string& filename)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   dout(10) << __func__ << " " << dirname << "/" << filename << dendl;
   map<string,DirRef>::iterator p = dir_map.find(dirname);
   if (p == dir_map.end()) {
diff --git a/src/os/bluestore/BlueFS.h b/src/os/bluestore/BlueFS.h
index 333c16b54bb..a761f8150cb 100644
--- a/src/os/bluestore/BlueFS.h
+++ b/src/os/bluestore/BlueFS.h
@@ -370,7 +370,7 @@ public:
                    bool random = false);
 
   void close_writer(FileWriter *h) {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     _close_writer(h);
   }
 
@@ -412,15 +412,15 @@ public:
   void handle_discard(unsigned dev, interval_set<uint64_t>& to_release);
 
   void flush(FileWriter *h) {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     _flush(h, false);
   }
   void flush_range(FileWriter *h, uint64_t offset, uint64_t length) {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     _flush_range(h, offset, length);
   }
   int fsync(FileWriter *h) {
-    std::unique_lock<std::mutex> l(lock);
+    std::unique_lock l(lock);
     return _fsync(h, l);
   }
   int read(FileReader *h, FileReaderBuffer *buf, uint64_t offset, size_t len,
@@ -438,15 +438,15 @@ public:
     return _read_random(h, offset, len, out);
   }
   void invalidate_cache(FileRef f, uint64_t offset, uint64_t len) {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     _invalidate_cache(f, offset, len);
   }
   int preallocate(FileRef f, uint64_t offset, uint64_t len) {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     return _preallocate(f, offset, len);
   }
   int truncate(FileWriter *h, uint64_t offset) {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     return _truncate(h, offset);
   }
 
diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc
index 4dd49cf2d92..3d0774419f8 100644
--- a/src/os/bluestore/BlueStore.cc
+++ b/src/os/bluestore/BlueStore.cc
@@ -785,13 +785,13 @@ BlueStore::Cache *BlueStore::Cache::create(CephContext* cct, string type,
 
 void BlueStore::Cache::trim(uint64_t onode_max, uint64_t buffer_max)
 {
-  std::lock_guard<std::recursive_mutex> l(lock);
+  std::lock_guard l(lock);
   _trim(onode_max, buffer_max);
 }
 
 void BlueStore::Cache::trim_all()
 {
-  std::lock_guard<std::recursive_mutex> l(lock);
+  std::lock_guard l(lock);
   _trim(0, 0);
 }
 
@@ -1315,7 +1315,7 @@ void BlueStore::BufferSpace::read(
   uint32_t end = offset + length;
 
   {
-    std::lock_guard<std::recursive_mutex> l(cache->lock);
+    std::lock_guard l(cache->lock);
     for (auto i = _data_lower_bound(offset);
          i != buffer_map.end() && offset < end && i->first < end;
          ++i) {
@@ -1408,7 +1408,7 @@ void BlueStore::BufferSpace::_finish_write(Cache* cache, uint64_t seq)
 
 void BlueStore::BufferSpace::split(Cache* cache, size_t pos, BlueStore::BufferSpace &r)
 {
-  std::lock_guard<std::recursive_mutex> lk(cache->lock);
+  std::lock_guard lk(cache->lock);
   if (buffer_map.empty())
     return;
 
@@ -1463,7 +1463,7 @@ void BlueStore::BufferSpace::split(Cache* cache, size_t pos, BlueStore::BufferSp
 
 BlueStore::OnodeRef BlueStore::OnodeSpace::add(const ghobject_t& oid, OnodeRef o)
 {
-  std::lock_guard<std::recursive_mutex> l(cache->lock);
+  std::lock_guard l(cache->lock);
   auto p = onode_map.find(oid);
   if (p != onode_map.end()) {
     ldout(cache->cct, 30) << __func__ << " " << oid << " " << o
@@ -1484,7 +1484,7 @@ BlueStore::OnodeRef BlueStore::OnodeSpace::lookup(const ghobject_t& oid)
   bool hit = false;
 
   {
-    std::lock_guard<std::recursive_mutex> l(cache->lock);
+    std::lock_guard l(cache->lock);
     ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.find(oid);
     if (p == onode_map.end()) {
       ldout(cache->cct, 30) << __func__ << " " << oid << " miss" << dendl;
@@ -1507,7 +1507,7 @@ BlueStore::OnodeRef BlueStore::OnodeSpace::lookup(const ghobject_t& oid)
 
 void BlueStore::OnodeSpace::clear()
 {
-  std::lock_guard<std::recursive_mutex> l(cache->lock);
+  std::lock_guard l(cache->lock);
   ldout(cache->cct, 10) << __func__ << dendl;
   for (auto &p : onode_map) {
     cache->_rm_onode(p.second);
@@ -1517,7 +1517,7 @@ void BlueStore::OnodeSpace::clear()
 
 bool BlueStore::OnodeSpace::empty()
 {
-  std::lock_guard<std::recursive_mutex> l(cache->lock);
+  std::lock_guard l(cache->lock);
   return onode_map.empty();
 }
 
@@ -1527,7 +1527,7 @@ void BlueStore::OnodeSpace::rename(
   const ghobject_t& new_oid,
   const mempool::bluestore_cache_other::string& new_okey)
 {
-  std::lock_guard<std::recursive_mutex> l(cache->lock);
+  std::lock_guard l(cache->lock);
   ldout(cache->cct, 30) << __func__ << " " << old_oid << " -> " << new_oid
                         << dendl;
   ceph::unordered_map<ghobject_t,OnodeRef>::iterator po, pn;
@@ -1558,7 +1558,7 @@ void BlueStore::OnodeSpace::rename(
 
 bool BlueStore::OnodeSpace::map_any(std::function<bool(OnodeRef)> f)
 {
-  std::lock_guard<std::recursive_mutex> l(cache->lock);
+  std::lock_guard l(cache->lock);
   ldout(cache->cct, 20) << __func__ << dendl;
   for (auto& i : onode_map) {
     if (f(i.second)) {
@@ -1618,7 +1618,7 @@ void BlueStore::SharedBlob::put()
  again:
   auto coll_snap = coll;
   if (coll_snap) {
-    std::lock_guard<std::recursive_mutex> l(coll_snap->cache->lock);
+    std::lock_guard l(coll_snap->cache->lock);
     if (coll_snap != coll) {
       goto again;
     }
@@ -1650,7 +1650,7 @@ void BlueStore::SharedBlob::finish_write(uint64_t seq)
 {
   while (true) {
     Cache *cache = coll->cache;
-    std::lock_guard<std::recursive_mutex> l(cache->lock);
+    std::lock_guard l(cache->lock);
     if (coll->cache != cache) {
       ldout(coll->store->cct, 20) << __func__
                                   << " raced with sb cache update, was " << cache
@@ -1671,7 +1671,7 @@ void BlueStore::SharedBlob::finish_write(uint64_t seq)
 template <int LogLevelV>
 void BlueStore::SharedBlobSet::dump(CephContext *cct)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   for (auto& i : sb_map) {
     ldout(cct, LogLevelV) << i.first << " : " << *i.second << dendl;
   }
@@ -3037,7 +3037,7 @@ void BlueStore::Onode::flush()
 {
   if (flushing_count.load()) {
     ldout(c->store->cct, 20) << __func__ << " cnt:" << flushing_count << dendl;
-    std::unique_lock<std::mutex> l(flush_lock);
+    std::unique_lock l(flush_lock);
     while (flushing_count.load()) {
       flush_cond.wait(l);
     }
@@ -3365,8 +3365,8 @@ void BlueStore::Collection::split_cache(
 
   // lock (one or both) cache shards
   std::lock(cache->lock, dest->cache->lock);
-  std::lock_guard<std::recursive_mutex> l(cache->lock, std::adopt_lock);
-  std::lock_guard<std::recursive_mutex> l2(dest->cache->lock, std::adopt_lock);
+  std::lock_guard l(cache->lock, std::adopt_lock);
+  std::lock_guard l2(dest->cache->lock, std::adopt_lock);
 
   int destbits = dest->cnode.bits;
   spg_t destpg;
@@ -7100,7 +7100,7 @@ int BlueStore::statfs(struct store_statfs_t *buf)
   }
 
   {
-    std::lock_guard<std::mutex> l(vstatfs_lock);
+    std::lock_guard l(vstatfs_lock);
     buf->allocated = vstatfs.allocated();
     buf->data_stored = vstatfs.stored();
     buf->data_compressed = vstatfs.compressed();
@@ -8660,7 +8660,7 @@ void BlueStore::_txc_update_store_statfs(TransContext *txc)
   logger->inc(l_bluestore_compressed_original,
               txc->statfs_delta.compressed_original());
 
   {
-    std::lock_guard<std::mutex> l(vstatfs_lock);
+    std::lock_guard l(vstatfs_lock);
     vstatfs += txc->statfs_delta;
   }
 
@@ -8728,7 +8728,7 @@ void BlueStore::_txc_state_proc(TransContext *txc)
       }
     }
     {
-      std::lock_guard<std::mutex> l(kv_lock);
+      std::lock_guard l(kv_lock);
       kv_queue.push_back(txc);
       kv_cond.notify_one();
       if (txc->state != TransContext::STATE_KV_SUBMITTED) {
@@ -8783,7 +8783,7 @@ void BlueStore::_txc_finish_io(TransContext *txc)
    */
 
   OpSequencer *osr = txc->osr.get();
-  std::lock_guard<std::mutex> l(osr->qlock);
+  std::lock_guard l(osr->qlock);
   txc->state = TransContext::STATE_IO_DONE;
   txc->ioc.release_running_aios();
   OpSequencer::q_list_t::iterator p = osr->q.iterator_to(*txc);
@@ -8921,7 +8921,7 @@ void BlueStore::_txc_applied_kv(TransContext *txc)
       dout(20) << __func__ << " onode " << o << " had " << o->flushing_count
                << dendl;
       if (--o->flushing_count == 0) {
-        std::lock_guard<std::mutex> l(o->flush_lock);
+        std::lock_guard l(o->flush_lock);
        o->flush_cond.notify_all();
       }
     }
@@ -8932,7 +8932,7 @@ void BlueStore::_txc_committed_kv(TransContext *txc)
 {
   dout(20) << __func__ << " txc " << txc << dendl;
   {
-    std::lock_guard<std::mutex> l(txc->osr->qlock);
+    std::lock_guard l(txc->osr->qlock);
     txc->state = TransContext::STATE_KV_DONE;
     if (txc->ch->commit_queue) {
       txc->ch->commit_queue->queue(txc->oncommits);
@@ -8964,7 +8964,7 @@ void BlueStore::_txc_finish(TransContext *txc)
   bool submit_deferred = false;
   OpSequencer::q_list_t releasing_txc;
   {
-    std::lock_guard<std::mutex> l(osr->qlock);
+    std::lock_guard l(osr->qlock);
     txc->state = TransContext::STATE_DONE;
     bool notify = false;
     while (!osr->q.empty()) {
@@ -9014,7 +9014,7 @@ void BlueStore::_txc_finish(TransContext *txc)
   }
 
   if (empty && osr->zombie) {
-    std::lock_guard<std::mutex> l(zombie_osr_lock);
+    std::lock_guard l(zombie_osr_lock);
     if (zombie_osr_set.erase(osr->cid)) {
       dout(10) << __func__ << " reaping empty zombie osr " << osr << dendl;
     } else {
@@ -9061,7 +9061,7 @@ void BlueStore::_osr_attach(Collection *c)
              << " reusing osr " << c->osr << " from existing coll "
              << q->second << dendl;
   } else {
-    std::lock_guard<std::mutex> l(zombie_osr_lock);
+    std::lock_guard l(zombie_osr_lock);
     auto p = zombie_osr_set.find(c->cid);
     if (p == zombie_osr_set.end()) {
       c->osr = new OpSequencer(this, c->cid);
@@ -9079,7 +9079,7 @@ void BlueStore::_osr_attach(Collection *c)
 
 void BlueStore::_osr_register_zombie(OpSequencer *osr)
 {
-  std::lock_guard<std::mutex> l(zombie_osr_lock);
+  std::lock_guard l(zombie_osr_lock);
   dout(10) << __func__ << " " << osr << " " << osr->cid << dendl;
   osr->zombie = true;
   auto i = zombie_osr_set.emplace(osr->cid, osr);
@@ -9103,7 +9103,7 @@ void BlueStore::_osr_drain_preceding(TransContext *txc)
   }
   {
     // wake up any previously finished deferred events
-    std::lock_guard<std::mutex> l(kv_lock);
+    std::lock_guard l(kv_lock);
     kv_cond.notify_one();
   }
   osr->drain_preceding(txc);
@@ -9126,7 +9126,7 @@ void BlueStore::_osr_drain(OpSequencer *osr)
   }
   {
     // wake up any previously finished deferred events
-    std::lock_guard<std::mutex> l(kv_lock);
+    std::lock_guard l(kv_lock);
     kv_cond.notify_one();
   }
   osr->drain();
@@ -9147,7 +9147,7 @@ void BlueStore::_osr_drain_all()
     }
   }
   {
-    std::lock_guard<std::mutex> l(zombie_osr_lock);
+    std::lock_guard l(zombie_osr_lock);
     for (auto& i : zombie_osr_set) {
       s.insert(i.second);
       zombies.push_back(i.second);
@@ -9162,11 +9162,11 @@ void BlueStore::_osr_drain_all()
   }
   {
     // wake up any previously finished deferred events
-    std::lock_guard<std::mutex> l(kv_lock);
+    std::lock_guard l(kv_lock);
     kv_cond.notify_one();
   }
   {
-    std::lock_guard<std::mutex> l(kv_finalize_lock);
+    std::lock_guard l(kv_finalize_lock);
     kv_finalize_cond.notify_one();
   }
   for (auto osr : s) {
@@ -9176,7 +9176,7 @@ void BlueStore::_osr_drain_all()
   --deferred_aggressive;
 
   {
-    std::lock_guard<std::mutex> l(zombie_osr_lock);
+    std::lock_guard l(zombie_osr_lock);
     for (auto& osr : zombies) {
       if (zombie_osr_set.erase(osr->cid)) {
         dout(10) << __func__ << " reaping empty zombie osr " << osr << dendl;
@@ -9210,7 +9210,7 @@ void BlueStore::_kv_stop()
 {
   dout(10) << __func__ << dendl;
   {
-    std::unique_lock<std::mutex> l(kv_lock);
+    std::unique_lock l(kv_lock);
     while (!kv_sync_started) {
       kv_cond.wait(l);
     }
@@ -9218,7 +9218,7 @@ void BlueStore::_kv_stop()
     kv_cond.notify_all();
   }
   {
-    std::unique_lock<std::mutex> l(kv_finalize_lock);
+    std::unique_lock l(kv_finalize_lock);
     while (!kv_finalize_started) {
       kv_finalize_cond.wait(l);
     }
@@ -9229,11 +9229,11 @@ void BlueStore::_kv_stop()
   kv_finalize_thread.join();
   ceph_assert(removed_collections.empty());
   {
-    std::lock_guard<std::mutex> l(kv_lock);
+    std::lock_guard l(kv_lock);
     kv_stop = false;
   }
   {
-    std::lock_guard<std::mutex> l(kv_finalize_lock);
+    std::lock_guard l(kv_finalize_lock);
     kv_finalize_stop = false;
   }
   dout(10) << __func__ << " stopping finishers" << dendl;
@@ -9248,7 +9248,7 @@ void BlueStore::_kv_sync_thread()
 {
   dout(10) << __func__ << " start" << dendl;
   deque<DeferredBatch*> deferred_stable_queue; ///< deferred ios done + stable
-  std::unique_lock<std::mutex> l(kv_lock);
+  std::unique_lock l(kv_lock);
   ceph_assert(!kv_sync_started);
   kv_sync_started = true;
   kv_cond.notify_all();
@@ -9360,7 +9360,7 @@ void BlueStore::_kv_sync_thread()
         --txc->osr->kv_committing_serially;
         txc->state = TransContext::STATE_KV_SUBMITTED;
         if (txc->osr->kv_submitted_waiters) {
-          std::lock_guard<std::mutex> l(txc->osr->qlock);
+          std::lock_guard l(txc->osr->qlock);
          txc->osr->qcond.notify_all();
        }
 
@@ -9416,7 +9416,7 @@ void BlueStore::_kv_sync_thread()
       ceph_assert(r == 0);
 
       {
-        std::unique_lock<std::mutex> m(kv_finalize_lock);
+        std::unique_lock m(kv_finalize_lock);
        if (kv_committing_to_finalize.empty()) {
          kv_committing_to_finalize.swap(kv_committing);
        } else {
@@ -9489,7 +9489,7 @@ void BlueStore::_kv_finalize_thread()
   deque<TransContext*> kv_committed;
   deque<DeferredBatch*> deferred_stable;
   dout(10) << __func__ << " start" << dendl;
-  std::unique_lock<std::mutex> l(kv_finalize_lock);
+  std::unique_lock l(kv_finalize_lock);
   ceph_assert(!kv_finalize_started);
   kv_finalize_started = true;
   kv_finalize_cond.notify_all();
@@ -9597,7 +9597,7 @@ void BlueStore::deferred_try_submit()
 {
   dout(20) << __func__ << " " << deferred_queue.size() << " osrs, "
            << deferred_queue_size << " txcs" << dendl;
-  std::lock_guard<std::mutex> l(deferred_lock);
+  std::lock_guard l(deferred_lock);
   vector<OpSequencerRef> osrs;
   osrs.reserve(deferred_queue.size());
   for (auto& osr : deferred_queue) {
@@ -9690,7 +9690,7 @@ void BlueStore::_deferred_aio_finish(OpSequencer *osr)
   DeferredBatch *b = osr->deferred_running;
 
   {
-    std::lock_guard<std::mutex> l(deferred_lock);
+    std::lock_guard l(deferred_lock);
     ceph_assert(osr->deferred_running == b);
     osr->deferred_running = nullptr;
     if (!osr->deferred_pending) {
@@ -9708,7 +9708,7 @@ void BlueStore::_deferred_aio_finish(OpSequencer *osr)
   {
     uint64_t costs = 0;
     {
-      std::lock_guard<std::mutex> l2(osr->qlock);
+      std::lock_guard l2(osr->qlock);
       for (auto& i : b->txcs) {
        TransContext *txc = &i;
        txc->log_state_latency(logger, l_bluestore_state_deferred_aio_wait_lat);
@@ -9717,14 +9717,14 @@ void BlueStore::_deferred_aio_finish(OpSequencer *osr)
       }
     }
     throttle_deferred_bytes.put(costs);
-    std::lock_guard<std::mutex> l(kv_lock);
+    std::lock_guard l(kv_lock);
     deferred_done_queue.emplace_back(b);
   }
 
   // in the normal case, do not bother waking up the kv thread; it will
   // catch us on the next commit anyway.
   if (deferred_aggressive) {
-    std::lock_guard<std::mutex> l(kv_lock);
+    std::lock_guard l(kv_lock);
     kv_cond.notify_one();
   }
 }
@@ -9842,7 +9842,7 @@ int BlueStore::queue_transactions(
       deferred_try_submit();
       {
        // wake up any previously finished deferred events
-       std::lock_guard<std::mutex> l(kv_lock);
+       std::lock_guard l(kv_lock);
        kv_cond.notify_one();
       }
       throttle_deferred_bytes.get(txc->cost);
@@ -10302,7 +10302,7 @@ void BlueStore::_dump_extent_map(ExtentMap &em)
       dout(LogLevelV) << __func__ << "      csum: " << std::hex << v << std::dec
                       << dendl;
     }
-    std::lock_guard<std::recursive_mutex> l(e.blob->shared_blob->get_cache()->lock);
+    std::lock_guard l(e.blob->shared_blob->get_cache()->lock);
     for (auto& i : e.blob->shared_blob->bc.buffer_map) {
       dout(LogLevelV) << __func__ << "        0x" << std::hex << i.first
                       << "~" << i.second->length << std::dec
diff --git a/src/os/bluestore/BlueStore.h b/src/os/bluestore/BlueStore.h
index c1dee1e5662..52fedd6ccee 100644
--- a/src/os/bluestore/BlueStore.h
+++ b/src/os/bluestore/BlueStore.h
@@ -328,14 +328,14 @@ public:
     // return value is the highest cache_private of a trimmed buffer, or 0.
     int discard(Cache* cache, uint32_t offset, uint32_t length) {
-      std::lock_guard<std::recursive_mutex> l(cache->lock);
+      std::lock_guard l(cache->lock);
       return _discard(cache, offset, length);
     }
     int _discard(Cache* cache, uint32_t offset, uint32_t length);
 
     void write(Cache* cache, uint64_t seq, uint32_t offset, bufferlist& bl,
               unsigned flags) {
-      std::lock_guard<std::recursive_mutex> l(cache->lock);
+      std::lock_guard l(cache->lock);
       Buffer *b = new Buffer(this, Buffer::STATE_WRITING, seq, offset, bl,
                             flags);
       b->cache_private = _discard(cache, offset, bl.length());
@@ -343,7 +343,7 @@ public:
     }
     void _finish_write(Cache* cache, uint64_t seq);
     void did_read(Cache* cache, uint32_t offset, bufferlist& bl) {
-      std::lock_guard<std::recursive_mutex> l(cache->lock);
+      std::lock_guard l(cache->lock);
       Buffer *b = new Buffer(this, Buffer::STATE_CLEAN, 0, offset, bl);
       b->cache_private = _discard(cache, offset, bl.length());
       _add_buffer(cache, b, 1, nullptr);
@@ -361,7 +361,7 @@ public:
     void split(Cache* cache, size_t pos, BufferSpace &r);
 
     void dump(Cache* cache, Formatter *f) const {
-      std::lock_guard<std::recursive_mutex> l(cache->lock);
+      std::lock_guard l(cache->lock);
       f->open_array_section("buffers");
       for (auto& i : buffer_map) {
        f->open_object_section("buffer");
@@ -445,7 +445,7 @@ public:
     mempool::bluestore_cache_other::unordered_map<uint64_t,SharedBlob*> sb_map;
 
     SharedBlobRef lookup(uint64_t sbid) {
-      std::lock_guard<std::mutex> l(lock);
+      std::lock_guard l(lock);
       auto p = sb_map.find(sbid);
       if (p == sb_map.end() ||
          p->second->nref == 0) {
@@ -455,13 +455,13 @@ public:
     }
 
     void add(Collection* coll, SharedBlob *sb) {
-      std::lock_guard<std::mutex> l(lock);
+      std::lock_guard l(lock);
       sb_map[sb->get_sbid()] = sb;
       sb->coll = coll;
     }
 
     void remove(SharedBlob *sb) {
-      std::lock_guard<std::mutex> l(lock);
+      std::lock_guard l(lock);
       ceph_assert(sb->get_parent() == this);
       // only remove if it still points to us
       auto p = sb_map.find(sb->get_sbid());
@@ -472,7 +472,7 @@ public:
     }
 
     bool empty() {
-      std::lock_guard<std::mutex> l(lock);
+      std::lock_guard l(lock);
       return sb_map.empty();
     }
 
@@ -521,7 +521,7 @@ public:
     }
 
     bool can_split() const {
-      std::lock_guard<std::recursive_mutex> l(shared_blob->get_cache()->lock);
+      std::lock_guard l(shared_blob->get_cache()->lock);
       // splitting a BufferSpace writing list is too hard; don't try.
       return shared_blob->bc.writing.empty() &&
              used_in_blob.can_split() &&
@@ -1117,7 +1117,7 @@ public:
                           uint64_t *bytes) = 0;
 
     bool empty() {
-      std::lock_guard<std::recursive_mutex> l(lock);
+      std::lock_guard l(lock);
       return _get_num_onodes() == 0 && _get_buffer_bytes() == 0;
     }
 
@@ -1207,7 +1207,7 @@ public:
                  uint64_t *blobs,
                  uint64_t *buffers,
                  uint64_t *bytes) override {
-    std::lock_guard<std::recursive_mutex> l(lock);
+    std::lock_guard l(lock);
     *onodes += onode_lru.size();
     *extents += num_extents;
     *blobs += num_blobs;
@@ -1302,7 +1302,7 @@ public:
                  uint64_t *blobs,
                  uint64_t *buffers,
                  uint64_t *bytes) override {
-    std::lock_guard<std::recursive_mutex> l(lock);
+    std::lock_guard l(lock);
     *onodes += onode_lru.size();
     *extents += num_extents;
     *blobs += num_blobs;
@@ -1712,19 +1712,19 @@ public:
     }
 
     void queue_new(TransContext *txc) {
-      std::lock_guard<std::mutex> l(qlock);
+      std::lock_guard l(qlock);
       txc->seq = ++last_seq;
       q.push_back(*txc);
     }
 
     void drain() {
-      std::unique_lock<std::mutex> l(qlock);
+      std::unique_lock l(qlock);
       while (!q.empty())
        qcond.wait(l);
     }
 
     void drain_preceding(TransContext *txc) {
-      std::unique_lock<std::mutex> l(qlock);
+      std::unique_lock l(qlock);
       while (!q.empty() && &q.front() != txc)
        qcond.wait(l);
     }
@@ -1740,7 +1740,7 @@ public:
     }
 
     void flush() {
-      std::unique_lock<std::mutex> l(qlock);
+      std::unique_lock l(qlock);
       while (true) {
        // set flag before the check because the condition
        // may become true outside qlock, and we need to make
@@ -1756,7 +1756,7 @@ public:
     }
 
     void flush_all_but_last() {
-      std::unique_lock<std::mutex> l(qlock);
+      std::unique_lock l(qlock);
       assert (q.size() >= 1);
       while (true) {
        // set flag before the check because the condition
@@ -1779,7 +1779,7 @@ public:
     }
 
     bool flush_commit(Context *c) {
-      std::lock_guard<std::mutex> l(qlock);
+      std::lock_guard l(qlock);
       if (q.empty()) {
        return true;
       }
diff --git a/src/os/bluestore/KernelDevice.cc b/src/os/bluestore/KernelDevice.cc
index f6ba2ce37d8..243d0f6f8cd 100644
--- a/src/os/bluestore/KernelDevice.cc
+++ b/src/os/bluestore/KernelDevice.cc
@@ -312,7 +312,7 @@ int KernelDevice::flush()
   // aio completion notification will not return before that aio is
   // stable on disk: whichever thread sees the flag first will block
   // followers until the aio is stable.
-  std::lock_guard<std::mutex> l(flush_mutex);
+  std::lock_guard l(flush_mutex);
 
   bool expect = true;
   if (!io_since_flush.compare_exchange_strong(expect, false)) {
@@ -385,7 +385,7 @@ void KernelDevice::_discard_stop()
 {
   dout(10) << __func__ << dendl;
   {
-    std::unique_lock<std::mutex> l(discard_lock);
+    std::unique_lock l(discard_lock);
     while (!discard_started) {
       discard_cond.wait(l);
     }
@@ -394,7 +394,7 @@ void KernelDevice::_discard_stop()
   }
   discard_thread.join();
   {
-    std::lock_guard<std::mutex> l(discard_lock);
+    std::lock_guard l(discard_lock);
     discard_stop = false;
   }
   dout(10) << __func__ << " stopped" << dendl;
@@ -403,7 +403,7 @@ void KernelDevice::_discard_stop()
 void KernelDevice::discard_drain()
 {
   dout(10) << __func__ << dendl;
-  std::unique_lock<std::mutex> l(discard_lock);
+  std::unique_lock l(discard_lock);
   while (!discard_queued.empty() || discard_running) {
     discard_cond.wait(l);
   }
@@ -438,7 +438,7 @@ void KernelDevice::_aio_thread()
        IOContext *ioc = static_cast<IOContext*>(aio[i]->priv);
        _aio_log_finish(ioc, aio[i]->offset, aio[i]->length);
        if (aio[i]->queue_item.is_linked()) {
-         std::lock_guard<std::mutex> l(debug_queue_lock);
+         std::lock_guard l(debug_queue_lock);
          debug_aio_unlink(*aio[i]);
        }
 
@@ -487,7 +487,7 @@ void KernelDevice::_aio_thread()
     }
     if (cct->_conf->bdev_debug_aio) {
       utime_t now = ceph_clock_now();
-      std::lock_guard<std::mutex> l(debug_queue_lock);
+      std::lock_guard l(debug_queue_lock);
       if (debug_oldest) {
        if (debug_stall_since == utime_t()) {
          debug_stall_since = now;
@@ -522,7 +522,7 @@ void KernelDevice::_aio_thread()
 
 void KernelDevice::_discard_thread()
 {
-  std::unique_lock<std::mutex> l(discard_lock);
+  std::unique_lock l(discard_lock);
   ceph_assert(!discard_started);
   discard_started = true;
   discard_cond.notify_all();
@@ -562,7 +562,7 @@ int KernelDevice::queue_discard(interval_set<uint64_t> &to_release)
   if (to_release.empty())
     return 0;
 
-  std::lock_guard<std::mutex> l(discard_lock);
+  std::lock_guard l(discard_lock);
   discard_queued.insert(to_release);
   discard_cond.notify_all();
   return 0;
@@ -651,7 +651,7 @@ void KernelDevice::aio_submit(IOContext *ioc)
     list<aio_t>::iterator p = ioc->running_aios.begin();
     while (p != e) {
       dout(30) << __func__ << " " << *p << dendl;
-      std::lock_guard<std::mutex> l(debug_queue_lock);
+      std::lock_guard l(debug_queue_lock);
       debug_aio_link(*p++);
     }
   }
diff --git a/src/os/bluestore/NVMEDevice.cc b/src/os/bluestore/NVMEDevice.cc
index cce6faece30..293259e43eb 100644
--- a/src/os/bluestore/NVMEDevice.cc
+++ b/src/os/bluestore/NVMEDevice.cc
@@ -595,7 +595,7 @@ int NVMEManager::try_get(const spdk_nvme_transport_id& trid, SharedDriverData **
       if (spdk_nvme_retry_count < 0)
        spdk_nvme_retry_count = SPDK_NVME_DEFAULT_RETRY_COUNT;
 
-      std::unique_lock<std::mutex> l(probe_queue_lock);
+      std::unique_lock l(probe_queue_lock);
       while (true) {
        if (!probe_queue.empty()) {
          ProbeContext* ctxt = probe_queue.front();
@@ -618,7 +618,7 @@ int NVMEManager::try_get(const spdk_nvme_transport_id& trid, SharedDriverData **
 
   ProbeContext ctx{trid, this, nullptr, false};
   {
-    std::unique_lock<std::mutex> l(probe_queue_lock);
+    std::unique_lock l(probe_queue_lock);
     probe_queue.push_back(&ctx);
     while (!ctx.done)
       probe_queue_cond.wait(l);
diff --git a/src/os/bluestore/StupidAllocator.cc b/src/os/bluestore/StupidAllocator.cc
index 899716e014e..411711e2857 100644
--- a/src/os/bluestore/StupidAllocator.cc
+++ b/src/os/bluestore/StupidAllocator.cc
@@ -65,7 +65,7 @@ int64_t StupidAllocator::allocate_int(
   uint64_t want_size, uint64_t alloc_unit, int64_t hint,
   uint64_t *offset, uint32_t *length)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   ldout(cct, 10) << __func__ << " want_size 0x" << std::hex << want_size
                 << " alloc_unit 0x" << alloc_unit
                 << " hint 0x" << hint << std::dec
@@ -229,7 +229,7 @@ int64_t StupidAllocator::allocate(
 
 void StupidAllocator::release(
   const interval_set<uint64_t>& release_set)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   for (interval_set<uint64_t>::const_iterator p = release_set.begin();
        p != release_set.end();
        ++p) {
@@ -244,7 +244,7 @@ void StupidAllocator::release(
 
 uint64_t StupidAllocator::get_free()
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   return num_free;
 }
 
@@ -255,7 +255,7 @@ double StupidAllocator::get_fragmentation(uint64_t alloc_unit)
   uint64_t max_intervals = 0;
   uint64_t intervals = 0;
   {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     max_intervals = p2roundup(num_free, alloc_unit) / alloc_unit;
     for (unsigned bin = 0; bin < free.size(); ++bin) {
       intervals += free[bin].num_intervals();
@@ -275,7 +275,7 @@ double StupidAllocator::get_fragmentation(uint64_t alloc_unit)
 
 void StupidAllocator::dump()
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   for (unsigned bin = 0; bin < free.size(); ++bin) {
     ldout(cct, 0) << __func__ << " free bin " << bin << ": "
                  << free[bin].num_intervals() << " extents" << dendl;
@@ -290,7 +290,7 @@ void StupidAllocator::dump()
 
 void StupidAllocator::init_add_free(uint64_t offset, uint64_t length)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   ldout(cct, 10) << __func__ << " 0x" << std::hex << offset << "~" << length
                 << std::dec << dendl;
   _insert_free(offset, length);
@@ -299,7 +299,7 @@ void StupidAllocator::init_add_free(uint64_t offset, uint64_t length)
 void StupidAllocator::init_rm_free(uint64_t offset, uint64_t length)
 {
-  std::lock_guard<std::mutex> l(lock);
+  std::lock_guard l(lock);
   ldout(cct, 10) << __func__ << " 0x" << std::hex << offset << "~" << length
                 << std::dec << dendl;
   interval_set_t rm;
diff --git a/src/os/bluestore/fastbmap_allocator_impl.h b/src/os/bluestore/fastbmap_allocator_impl.h
index 0ad3f9a7ad6..83b6bbbb221 100755
--- a/src/os/bluestore/fastbmap_allocator_impl.h
+++ b/src/os/bluestore/fastbmap_allocator_impl.h
@@ -481,20 +481,20 @@ class AllocatorLevel02 : public AllocatorLevel
 public:
   uint64_t debug_get_free(uint64_t pos0 = 0, uint64_t pos1 = 0)
   {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     return l1.debug_get_free(pos0 * l1._children_per_slot() * bits_per_slot,
       pos1 * l1._children_per_slot() * bits_per_slot);
   }
   uint64_t debug_get_allocated(uint64_t pos0 = 0, uint64_t pos1 = 0)
   {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     return l1.debug_get_allocated(pos0 * l1._children_per_slot() * bits_per_slot,
       pos1 * l1._children_per_slot() * bits_per_slot);
   }
 
   uint64_t get_available()
   {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     return available;
   }
   inline uint64_t get_min_alloc_size() const
@@ -622,7 +622,7 @@ protected:
 
     uint64_t l1_w = slotset_width * l1._children_per_slot();
 
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
 
     if (available < min_length) {
       return;
@@ -692,7 +692,7 @@ protected:
   void _free_l2(const interval_set<uint64_t> & rr)
   {
     uint64_t released = 0;
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     for (auto r : rr) {
       released += l1._free_l1(r.first, r.second);
       uint64_t l2_pos = r.first / l2_granularity;
@@ -708,7 +708,7 @@ protected:
   void _free_l2(const T& rr)
   {
     uint64_t released = 0;
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     for (auto r : rr) {
       released += l1._free_l1(r.offset, r.length);
      uint64_t l2_pos = r.offset / l2_granularity;
@@ -724,7 +724,7 @@ protected:
     uint64_t l2_pos = o / l2_granularity;
     uint64_t l2_pos_end = p2roundup(int64_t(o + len),
                                     int64_t(l2_granularity)) / l2_granularity;
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     auto allocated = l1._mark_alloc_l1(o, len);
     ceph_assert(available >= allocated);
     available -= allocated;
@@ -736,7 +736,7 @@ protected:
     uint64_t l2_pos = o / l2_granularity;
     uint64_t l2_pos_end = p2roundup(int64_t(o + len),
                                     int64_t(l2_granularity)) / l2_granularity;
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     available += l1._free_l1(o, len);
     _mark_l2_free(l2_pos, l2_pos_end);
   }
@@ -745,7 +745,7 @@ protected:
     last_pos = 0;
   }
   double _get_fragmentation() {
-    std::lock_guard<std::mutex> l(lock);
+    std::lock_guard l(lock);
     return l1.get_fragmentation();
   }
 };
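
---

Background on the change, for readers unfamiliar with it: since C++17,
class template argument deduction (CTAD) lets std::lock_guard and
std::unique_lock deduce their mutex type from the constructor argument,
so the explicit <std::mutex> / <std::recursive_mutex> arguments dropped
throughout this patch were redundant. A minimal standalone sketch
(illustrative example, not code from this patch; the names m, rm, cv,
and ready are invented for the demo):

  #include <condition_variable>
  #include <mutex>

  std::mutex m;
  std::recursive_mutex rm;
  std::condition_variable cv;
  bool ready = false;

  void guard_example() {
    // deduced as std::lock_guard<std::recursive_mutex>
    std::lock_guard g(rm);
  }

  void wait_example() {
    // deduced as std::unique_lock<std::mutex>; unique_lock (rather than
    // lock_guard) is still needed wherever the lock must be handed to a
    // condition variable wait or passed on to a helper, as with kv_cond
    // and _flush_and_sync_log(l) above.
    std::unique_lock l(m);
    cv.wait(l, [] { return ready; });
  }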