#include <string>
#include <mutex>
+#include "common/ceph_mutex.h"
#include "include/buffer.h"
#include "kv/KeyValueDB.h"
std::string meta_prefix, bitmap_prefix;
KeyValueDB *kvdb;
std::shared_ptr<KeyValueDB::MergeOperator> merge_op;
- std::mutex lock;
+ ceph::mutex lock = ceph::make_mutex("BitmapFreelistManager::lock");
uint64_t size; ///< size of device (bytes)
uint64_t bytes_per_block; ///< bytes per block (bdev_block_size)
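// Every hunk in this series follows one pattern: std::mutex becomes
// ceph::mutex, constructed through ceph::make_mutex("Class::member") so
// debug builds get a named, lockdep-checked mutex. A minimal sketch of
// the idea behind common/ceph_mutex.h follows (not the real header;
// named_mutex and SKETCH_DEBUG_MUTEX are illustrative stand-ins):
#include <mutex>
#include <string>
#include <utility>

namespace sketch {

// stand-in for the debug-build mutex (the real one also checks lock
// ordering); the name is kept for diagnostics
class named_mutex {
  std::string name;
  std::mutex m;
public:
  explicit named_mutex(std::string n) : name(std::move(n)) {}
  void lock() { m.lock(); }
  bool try_lock() { return m.try_lock(); }
  void unlock() { m.unlock(); }
};

#ifdef SKETCH_DEBUG_MUTEX
using mutex = named_mutex;
inline mutex make_mutex(std::string name) { return mutex(std::move(name)); }
#else
using mutex = std::mutex;
// std::mutex is neither copyable nor movable; returning a prvalue works
// because C++17 guarantees copy elision, and the name is simply dropped
inline mutex make_mutex(const std::string&) { return {}; }
#endif

} // namespace sketch

// usage, mirroring the hunk above:
//   sketch::mutex lock = sketch::make_mutex("BitmapFreelistManager::lock");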
#include <vector>
#include "acconfig.h"
+#include "common/ceph_mutex.h"
+
#ifdef HAVE_LIBAIO
#include "aio.h"
#endif
/// track in-flight io
struct IOContext {
private:
- std::mutex lock;
- std::condition_variable cond;
+ ceph::mutex lock = ceph::make_mutex("IOContext::lock");
+ ceph::condition_variable cond;
int r = 0;
public:
CephContext* cct;
typedef void (*aio_callback_t)(void *handle, void *aio);
private:
- std::mutex ioc_reap_lock;
+ ceph::mutex ioc_reap_lock = ceph::make_mutex("BlockDevice::ioc_reap_lock");
std::vector<IOContext*> ioc_reap_queue;
std::atomic_int ioc_reap_count = {0};
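// IOContext pairs its lock with cond so a submitter can block until
// in-flight aio drains. A minimal sketch of that shape, assuming a
// counter num_running and helpers aio_wait()/aio_wake(); ceph::mutex
// and ceph::condition_variable drop in wherever the std types appear.
// The same shape recurs later in this series for
// Onode::flushing_count / flush_cond.
#include <atomic>
#include <condition_variable>
#include <mutex>

struct ioctx_sketch {
  std::mutex lock;                  // ceph::make_mutex("IOContext::lock") in-tree
  std::condition_variable cond;     // ceph::condition_variable in-tree
  std::atomic<int> num_running{0};  // aios submitted but not yet completed

  void aio_wait() {                 // block until all in-flight aio is done
    std::unique_lock l(lock);
    cond.wait(l, [this] { return num_running.load() == 0; });
  }

  void aio_wake() {                 // completion path: one aio finished
    std::lock_guard l(lock);
    if (--num_running == 0)
      cond.notify_all();
  }
};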
*
* 8. Release the old log space. Clean up.
*/
-void BlueFS::_compact_log_async(std::unique_lock<std::mutex>& l)
+void BlueFS::_compact_log_async(std::unique_lock<ceph::mutex>& l)
{
dout(10) << __func__ << dendl;
File *log_file = log_writer->file.get();
_flush_and_sync_log(l);
}
-int BlueFS::_flush_and_sync_log(std::unique_lock<std::mutex>& l,
+int BlueFS::_flush_and_sync_log(std::unique_lock<ceph::mutex>& l,
uint64_t want_seq,
uint64_t jump_to)
{
return 0;
}
-int BlueFS::_fsync(FileWriter *h, std::unique_lock<std::mutex>& l)
+int BlueFS::_fsync(FileWriter *h, std::unique_lock<ceph::mutex>& l)
{
dout(10) << __func__ << " " << h << " " << h->file->fnode << dendl;
int r = _flush(h, true);
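// These three BlueFS signatures change in lockstep because they all
// thread the caller's unique_lock on BlueFS::lock through: the callee
// must be able to release it around a blocking wait (on log_cond while
// another thread has log_flushing set) and retake it before returning.
// A minimal sketch of that pattern, with simplified hypothetical bodies:
#include <condition_variable>
#include <mutex>

struct bluefs_sketch {
  std::mutex lock;                  // ceph::mutex in-tree
  std::condition_variable log_cond; // ceph::condition_variable in-tree
  bool log_flushing = false;

  int flush_and_sync_log(std::unique_lock<std::mutex>& l) {
    // wait() releases l while blocked and reacquires it on wakeup
    while (log_flushing)
      log_cond.wait(l);
    log_flushing = true;
    l.unlock();                     // do the log I/O without the lock
    /* ... append and flush the log ... */
    l.lock();
    log_flushing = false;
    log_cond.notify_all();
    return 0;
  }

  int fsync() {
    std::unique_lock l(lock);
    return flush_and_sync_log(l);   // hand the held lock to the callee
  }
};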
bufferlist::page_aligned_appender buffer_appender; ///< for const char* only

int writer_type = 0; ///< WRITER_*
- std::mutex lock;
+ ceph::mutex lock = ceph::make_mutex("BlueFS::FileWriter::lock");
std::array<IOContext*,MAX_BDEV> iocv; ///< for each bdev
std::array<bool, MAX_BDEV> dirty_devs;
};
private:
- std::mutex lock;
+ ceph::mutex lock = ceph::make_mutex("BlueFS::lock");
PerfCounters *logger = nullptr;
FileWriter *log_writer = 0; ///< writer for the log
bluefs_transaction_t log_t; ///< pending, unwritten log transaction
bool log_flushing = false; ///< true while flushing the log
- std::condition_variable log_cond;
+ ceph::condition_variable log_cond;
uint64_t new_log_jump_to = 0;
uint64_t old_log_jump_to = 0;
bluefs_fnode_t* node);
int _flush_range(FileWriter *h, uint64_t offset, uint64_t length);
int _flush(FileWriter *h, bool force);
- int _fsync(FileWriter *h, std::unique_lock<std::mutex>& l);
+ int _fsync(FileWriter *h, std::unique_lock<ceph::mutex>& l);
#ifdef HAVE_LIBAIO
void _claim_completed_aios(FileWriter *h, list<aio_t> *ls);
void wait_for_aio(FileWriter *h); // safe to call without a lock
#endif
- int _flush_and_sync_log(std::unique_lock<std::mutex>& l,
+ int _flush_and_sync_log(std::unique_lock<ceph::mutex>& l,
uint64_t want_seq = 0,
uint64_t jump_to = 0);
uint64_t _estimate_log_size();
bool _should_compact_log();
void _compact_log_dump_metadata(bluefs_transaction_t *t);
void _compact_log_sync();
- void _compact_log_async(std::unique_lock<std::mutex>& l);
+ void _compact_log_async(std::unique_lock<ceph::mutex>& l);
//void _aio_finish(void *priv);
/// a lookup table of SharedBlobs
struct SharedBlobSet {
- std::mutex lock; ///< protect lookup, insertion, removal
+ /// protect lookup, insertion, removal
+ ceph::mutex lock = ceph::make_mutex("BlueStore::SharedBlobSet::lock");
// we use a bare pointer because we don't want to affect the ref
// count
// track txc's that have not been committed to kv store (and whose
// effects cannot be read via the kvdb read methods)
std::atomic<int> flushing_count = {0};
- std::mutex flush_lock; ///< protect flush_txns
- std::condition_variable flush_cond; ///< wait here for uncommitted txns
+ /// protect flush_txns
+ ceph::mutex flush_lock = ceph::make_mutex("BlueStore::Onode::flush_lock");
+ ceph::condition_variable flush_cond; ///< wait here for uncommitted txns
Onode(Collection *c, const ghobject_t& o,
const mempool::bluestore_cache_other::string& k)
class OpSequencer : public RefCountedObject {
public:
- std::mutex qlock;
- std::condition_variable qcond;
+ ceph::mutex qlock = ceph::make_mutex("BlueStore::OpSequencer::qlock");
+ ceph::condition_variable qcond;
typedef boost::intrusive::list<
TransContext,
boost::intrusive::member_hook<
vector<Cache*> cache_shards;
- std::mutex zombie_osr_lock; ///< protect zombie_osr_set
+ /// protect zombie_osr_set
+ ceph::mutex zombie_osr_lock = ceph::make_mutex("BlueStore::zombie_osr_lock");
std::map<coll_t,OpSequencerRef> zombie_osr_set; ///< set of OpSequencers for deleted collections
std::atomic<uint64_t> nid_last = {0};
interval_set<uint64_t> bluefs_extents; ///< block extents owned by bluefs
interval_set<uint64_t> bluefs_extents_reclaiming; ///< currently reclaiming
- std::mutex deferred_lock;
+ ceph::mutex deferred_lock = ceph::make_mutex("BlueStore::deferred_lock");
std::atomic<uint64_t> deferred_seq = {0};
deferred_osr_queue_t deferred_queue; ///< osr's with deferred io pending
int deferred_queue_size = 0; ///< num txc's queued across all osrs
Finisher deferred_finisher, finisher;
KVSyncThread kv_sync_thread;
- std::mutex kv_lock;
- std::condition_variable kv_cond;
+ ceph::mutex kv_lock = ceph::make_mutex("BlueStore::kv_lock");
+ ceph::condition_variable kv_cond;
bool _kv_only = false;
bool kv_sync_started = false;
bool kv_stop = false;
deque<DeferredBatch*> deferred_done_queue; ///< deferred ios done
KVFinalizeThread kv_finalize_thread;
- std::mutex kv_finalize_lock;
- std::condition_variable kv_finalize_cond;
+ ceph::mutex kv_finalize_lock = ceph::make_mutex("BlueStore::kv_finalize_lock");
+ ceph::condition_variable kv_finalize_cond;
deque<TransContext*> kv_committing_to_finalize; ///< pending finalization
deque<DeferredBatch*> deferred_stable_to_finalize; ///< pending finalization
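// kv_sync_thread and kv_finalize_thread hand work off through the two
// deques above. A minimal sketch of that handoff, with a placeholder
// txc type and hypothetical function names; the consumer swaps the
// whole deque out so kv_finalize_lock is never held while finalizing:
#include <condition_variable>
#include <deque>
#include <mutex>

struct txc_sketch {};

struct kv_sketch {
  std::mutex kv_finalize_lock;               // ceph::mutex in-tree
  std::condition_variable kv_finalize_cond;  // ceph::condition_variable in-tree
  std::deque<txc_sketch*> kv_committing_to_finalize;
  bool kv_finalize_stop = false;

  // producer (kv sync thread): queue a committed batch for finalization
  void queue_finalize(std::deque<txc_sketch*>& batch) {
    std::lock_guard l(kv_finalize_lock);
    kv_committing_to_finalize.insert(kv_committing_to_finalize.end(),
                                     batch.begin(), batch.end());
    kv_finalize_cond.notify_one();
  }

  // consumer (kv finalize thread)
  void kv_finalize_entry() {
    std::unique_lock l(kv_finalize_lock);
    while (true) {
      kv_finalize_cond.wait(l, [this] {
        return kv_finalize_stop || !kv_committing_to_finalize.empty();
      });
      if (kv_finalize_stop && kv_committing_to_finalize.empty())
        break;
      std::deque<txc_sketch*> batch;
      batch.swap(kv_committing_to_finalize);
      l.unlock();                   // finalize outside the lock
      /* ... finalize each entry in batch ... */
      l.lock();
    }
  }
};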
double osd_memory_expected_fragmentation = 0; ///< expected memory fragmentation
uint64_t osd_memory_cache_min = 0; ///< Min memory to assign when autotuning cache
double osd_memory_cache_resize_interval = 0; ///< Time to wait between cache resizing
- std::mutex vstatfs_lock;
+ ceph::mutex vstatfs_lock = ceph::make_mutex("BlueStore::vstatfs_lock");
volatile_statfs vstatfs;
struct MempoolThread : public Thread {
interval_set<uint64_t> debug_inflight;
std::atomic<bool> io_since_flush = {false};
- std::mutex flush_mutex;
+ ceph::mutex flush_mutex = ceph::make_mutex("KernelDevice::flush_mutex");
aio_queue_t aio_queue;
aio_callback_t discard_callback;
bool discard_started;
bool discard_stop;
- std::mutex discard_lock;
- std::condition_variable discard_cond;
+ ceph::mutex discard_lock = ceph::make_mutex("KernelDevice::discard_lock");
+ ceph::condition_variable discard_cond;
bool discard_running = false;
interval_set<uint64_t> discard_queued;
interval_set<uint64_t> discard_finishing;
// stalled aio debugging
aio_list_t debug_queue;
- std::mutex debug_queue_lock;
+ ceph::mutex debug_queue_lock = ceph::make_mutex("KernelDevice::debug_queue_lock");
aio_t *debug_oldest = nullptr;
utime_t debug_stall_since;
void debug_aio_link(aio_t& aio);
int64_t return_code;
ceph::coarse_real_clock::time_point start;
IORequest io_request;
- std::mutex lock;
- std::condition_variable cond;
+ ceph::mutex lock = ceph::make_mutex("Task::lock");
+ ceph::condition_variable cond;
SharedDriverQueueData *queue = nullptr;
Task(NVMEDevice *dev, IOCommand c, uint64_t off, uint64_t l, int64_t rc = 0)
: device(dev), command(c), offset(off), len(l),
bool init = false;
std::vector<SharedDriverData*> shared_driver_datas;
std::thread dpdk_thread;
- std::mutex probe_queue_lock;
- std::condition_variable probe_queue_cond;
+ ceph::mutex probe_queue_lock = ceph::make_mutex("NVMEManager::probe_queue_lock");
+ ceph::condition_variable probe_queue_cond;
std::list<ProbeContext*> probe_queue;
public:
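// Task carries its own lock/cond so a submitter can block until the
// I/O completion callback fills in return_code. A minimal sketch of
// that single-shot wait, with a hypothetical done flag and
// wait_for_completion()/complete() helpers:
#include <condition_variable>
#include <cstdint>
#include <mutex>

struct task_sketch {
  std::mutex lock;                  // ceph::make_mutex("Task::lock") in-tree
  std::condition_variable cond;     // ceph::condition_variable in-tree
  bool done = false;
  int64_t return_code = 0;

  int64_t wait_for_completion() {   // submitter side
    std::unique_lock l(lock);
    cond.wait(l, [this] { return done; });
    return return_code;
  }

  void complete(int64_t rc) {       // completion callback side
    std::lock_guard l(lock);
    return_code = rc;
    done = true;
    cond.notify_all();
  }
};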
#include "include/interval_set.h"
#include "os/bluestore/bluestore_types.h"
#include "include/mempool.h"
+#include "common/ceph_mutex.h"
class StupidAllocator : public Allocator {
CephContext* cct;
- std::mutex lock;
+ ceph::mutex lock = ceph::make_mutex("StupidAllocator::lock");
int64_t num_free; ///< total bytes in freelist
#include "common/likely.h"
#include "os/bluestore/bluestore_types.h"
#include "include/mempool.h"
+#include "common/ceph_mutex.h"
typedef bluestore_pextent_t interval_t;
typedef PExtentVector interval_vector_t;
}
protected:
- std::mutex lock;
+ ceph::mutex lock = ceph::make_mutex("AllocatorLevel02::lock");
L1 l1;
slot_vector_t l2;
uint64_t l2_granularity = 0; // space per entry
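// For simple leaf locks like the two allocator locks above, nothing
// beyond the declaration changes: std::lock_guard and std::unique_lock
// work with ceph::mutex exactly as with std::mutex, since in release
// builds it is std::mutex. A minimal sketch with placeholder
// allocate()/release() bodies:
#include <algorithm>
#include <cstdint>
#include <mutex>

class alloc_sketch {
  std::mutex lock;                  // ceph::make_mutex("StupidAllocator::lock") in-tree
  int64_t num_free = 0;             // total bytes in freelist
public:
  int64_t allocate(int64_t want) {
    std::lock_guard l(lock);        // unchanged by the ceph::mutex switch
    int64_t got = std::min(want, num_free);
    num_free -= got;
    return got;
  }
  void release(int64_t len) {
    std::lock_guard l(lock);
    num_free += len;
  }
};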