common: remove per-mutex perf counters

The per-mutex perf counters have a measurable overhead even when turned
off: every Mutex::Lock() tests the mutex_perf_counter option before it
can take the uninstrumented fast path, and every mutex constructed with
a CephContext builds and registers its own PerfCounters instance. We do
not use these counters, so remove them.
Signed-off-by: Sage Weil <sage@redhat.com>
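---

Reviewer note, not part of the patch: the overhead claim is easy to see in
the removed code below. Even with mutex_perf_counter off, the old
Mutex::Lock() evaluated the three-term branch (logger && cct &&
cct->_conf->mutex_perf_counter) on every acquisition, and with it on it
also paid for two clock reads and a TryLock attempt. The following is a
minimal standalone C++ sketch of that cost, using plain pthreads and
std::chrono instead of Ceph's utime_t/PerfCounters; all names in it are
illustrative, not Ceph APIs.

// bench_mutex_overhead.cc -- hypothetical micro-benchmark, not from this patch.
// Compares a bare pthread_mutex_lock() against the shape of the old
// instrumented Mutex::Lock() fast path.
#include <chrono>
#include <cstdio>
#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
// Stands in for cct->_conf->mutex_perf_counter; false mirrors the default.
static volatile bool counter_enabled = false;

static void lock_plain() {
  pthread_mutex_lock(&m);  // what Mutex::Lock() does after this patch
}

static void lock_instrumented() {
  // Shape of the old Mutex::Lock(): this branch runs on every
  // acquisition even when the counter is disabled.
  if (counter_enabled) {
    auto start = std::chrono::steady_clock::now();
    if (pthread_mutex_trylock(&m) != 0) {  // 0 == acquired immediately
      pthread_mutex_lock(&m);              // blocked: take the slow path
      auto waited = std::chrono::steady_clock::now() - start;
      (void)waited;  // would feed logger->tinc(l_mutex_wait, ...)
    }
  } else {
    pthread_mutex_lock(&m);
  }
}

static double bench(void (*fn)()) {
  const int N = 10000000;
  auto t0 = std::chrono::steady_clock::now();
  for (int i = 0; i < N; ++i) {
    fn();
    pthread_mutex_unlock(&m);
  }
  std::chrono::duration<double> dt = std::chrono::steady_clock::now() - t0;
  return dt.count();
}

int main() {
  std::printf("plain:        %.3f s\n", bench(lock_plain));
  std::printf("instrumented: %.3f s\n", bench(lock_instrumented));
  return 0;
}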
#include "common/valgrind.h"
Mutex::Mutex(const std::string &n, bool r, bool ld,
- bool bt,
- CephContext *cct) :
+ bool bt) :
name(n), id(-1), recursive(r), lockdep(ld), backtrace(bt), nlock(0),
- locked_by(0), cct(cct), logger(0)
+ locked_by(0)
{
ANNOTATE_BENIGN_RACE_SIZED(&id, sizeof(id), "Mutex lockdep id");
ANNOTATE_BENIGN_RACE_SIZED(&nlock, sizeof(nlock), "Mutex nlock");
ANNOTATE_BENIGN_RACE_SIZED(&locked_by, sizeof(locked_by), "Mutex locked_by");
- if (cct) {
- PerfCountersBuilder b(cct, string("mutex-") + name,
- l_mutex_first, l_mutex_last);
- b.add_time_avg(l_mutex_wait, "wait", "Average time of mutex in locked state");
- logger = b.create_perf_counters();
- cct->get_perfcounters_collection()->add(logger);
- logger->set(l_mutex_wait, 0);
- }
if (recursive) {
// Mutexes of type PTHREAD_MUTEX_RECURSIVE do all the same checks as
// mutexes of type PTHREAD_MUTEX_ERRORCHECK.
ANNOTATE_BENIGN_RACE_SIZED(&_m, sizeof(_m), "Mutex primitive");
pthread_mutex_destroy(&_m);
- if (cct && logger) {
- cct->get_perfcounters_collection()->remove(logger);
- delete logger;
- }
if (lockdep && g_lockdep) {
lockdep_unregister(id);
}
}
void Mutex::Lock(bool no_lockdep) {
- int r;
-
if (lockdep && g_lockdep && !no_lockdep && !recursive) _will_lock();
-
- if (logger && cct && cct->_conf->mutex_perf_counter) {
- utime_t start;
- // instrumented mutex enabled
- start = ceph_clock_now();
- if (TryLock()) {
- goto out;
- }
-
- r = pthread_mutex_lock(&_m);
-
- logger->tinc(l_mutex_wait,
- ceph_clock_now() - start);
- } else {
- r = pthread_mutex_lock(&_m);
- }
-
+ int r = pthread_mutex_lock(&_m);
ceph_assert(r == 0);
if (lockdep && g_lockdep) _locked();
_post_lock();
-
-out:
- ;
}
void Mutex::Unlock() {
using namespace ceph;
-class CephContext;
-class PerfCounters;
-
-enum {
- l_mutex_first = 999082,
- l_mutex_wait,
- l_mutex_last
-};
-
class Mutex {
private:
std::string name;
pthread_mutex_t _m;
int nlock;
pthread_t locked_by;
- CephContext *cct;
- PerfCounters *logger;
// don't allow copying.
void operator=(const Mutex &M);
}
public:
- Mutex(const std::string &n, bool r = false, bool ld=true, bool bt=false,
- CephContext *cct = 0);
+ Mutex(const std::string &n, bool r = false, bool ld=true, bool bt=false);
~Mutex();
bool is_locked() const {
return (nlock > 0);
OPTION(rgw_rest_getusage_op_compat, OPT_BOOL) // dump description of total stats for s3 GetUsage API
-OPTION(mutex_perf_counter, OPT_BOOL) // enable/disable mutex perf counter
OPTION(throttler_perf_counter, OPT_BOOL) // enable/disable throttler perf counter
/* The following are tunables for torrent data */
-enum {
- l_mutex_first = 999082,
- l_mutex_wait,
- l_mutex_last
-};
-mutex_debugging_base::mutex_debugging_base(const std::string &n, bool bt,
- CephContext *cct) :
- id(-1), backtrace(bt), nlock(0), locked_by(thread::id()),
- cct(cct), logger(0) {
+mutex_debugging_base::mutex_debugging_base(const std::string &n, bool bt) :
+ id(-1), backtrace(bt), nlock(0), locked_by(thread::id()) {
if (n.empty()) {
uuid_d uu;
uu.generate_random();
name = string("Unnamed-Mutex-") + uu.to_string();
} else {
name = n;
}
- if (cct) {
- PerfCountersBuilder b(cct, string("mutex-") + name,
- l_mutex_first, l_mutex_last);
- b.add_time_avg(l_mutex_wait, "wait",
- "Average time of mutex in locked state");
- logger = b.create_perf_counters();
- cct->get_perfcounters_collection()->add(logger);
- logger->set(l_mutex_wait, 0);
- }
if (g_lockdep)
_register();
}
mutex_debugging_base::~mutex_debugging_base() {
ceph_assert(nlock == 0);
- if (cct && logger) {
- cct->get_perfcounters_collection()->remove(logger);
- delete logger;
- }
if (g_lockdep) {
lockdep_unregister(id);
}
}
ceph::mono_time mutex_debugging_base::before_lock_blocks() {
- if (logger && cct && cct->_conf->mutex_perf_counter)
- return ceph::mono_clock::now();
return ceph::mono_time::min();
}
void mutex_debugging_base::after_lock_blocks(ceph::mono_time start,
bool no_lockdep) {
- if (logger && cct && cct->_conf->mutex_perf_counter)
- logger->tinc(l_mutex_wait,
- ceph::mono_clock::now() - start);
if (!no_lockdep && g_lockdep)
_locked();
}
int nlock;
std::thread::id locked_by;
- CephContext *cct;
- PerfCounters *logger;
+
void _register();
void _will_lock(); // about to lock
void _locked(); // just locked
void _will_unlock(); // about to unlock
- mutex_debugging_base(const std::string &n = std::string(), bool bt = false,
- CephContext *cct = nullptr);
+ mutex_debugging_base(const std::string &n = std::string(), bool bt = false);
~mutex_debugging_base();
ceph::mono_time before_lock_blocks();
Mutex* impl;
public:
- mutex_debugging(const std::string &n = std::string(), bool bt = false,
- CephContext *cct = nullptr) :
- mutex_debugging_base(n, bt, cct), impl(static_cast<Mutex*>(this)) {}
+ mutex_debugging(const std::string &n = std::string(), bool bt = false) :
+ mutex_debugging_base(n, bt), impl(static_cast<Mutex*>(this)) {}
~mutex_debugging() = default;
static constexpr bool recursive = Recursive;
// Mutex concept is DefaultConstructible
- mutex_debug_impl(const std::string &n = std::string(), bool bt = false,
- CephContext *cct = nullptr) :
- mutex_debugging<mutex_debug_impl<Recursive> >(n, bt, cct) {
+ mutex_debug_impl(const std::string &n = std::string(), bool bt = false) :
+ mutex_debugging<mutex_debug_impl<Recursive> >(n, bt) {
pthread_mutexattr_t a;
pthread_mutexattr_init(&a);
int r;
"manager before this is reported as an ERR rather than "
"a WARN"),
- Option("mutex_perf_counter", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
- .set_default(false)
- .set_description(""),
-
Option("throttler_perf_counter", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
.set_default(true)
.set_description(""),
bool track_lock,
bool enable_lock_dep,
bool prioritize_write)
- : mutex_debugging_base{n, false /* backtrace */,
- nullptr /* cct for perf counter*/},
+ : mutex_debugging_base{n, false /* backtrace */},
track(track_lock),
lockdep(enable_lock_dep)
{
FileJournal(CephContext* cct, uuid_d fsid, Finisher *fin, Cond *sync_cond,
const char *f, bool dio=false, bool ai=true, bool faio=false) :
Journal(cct, fsid, fin, sync_cond),
- finisher_lock("FileJournal::finisher_lock", false, true, false, cct),
+ finisher_lock("FileJournal::finisher_lock", false, true, false),
journaled_seq(0),
plug_journal_completions(false),
- writeq_lock("FileJournal::writeq_lock", false, true, false, cct),
+ writeq_lock("FileJournal::writeq_lock", false, true, false),
completions_lock(
- "FileJournal::completions_lock", false, true, false, cct),
+ "FileJournal::completions_lock", false, true, false),
fn(f),
zero_buf(NULL),
max_size(0), block_size(0),
fd(-1),
writing_seq(0),
throttle(cct->_conf->filestore_caller_concurrency),
- write_lock("FileJournal::write_lock", false, true, false, cct),
+ write_lock("FileJournal::write_lock", false, true, false),
write_stop(true),
aio_stop(true),
write_thread(this),
uint64_t op_submitted;
public:
SubmitManager(CephContext* cct) :
- cct(cct), lock("JOS::SubmitManager::lock", false, true, false, cct),
+ cct(cct), lock("JOS::SubmitManager::lock", false, true, false),
op_seq(0), op_submitted(0)
{}
uint64_t op_submit_start();
public:
ApplyManager(CephContext* cct, Journal *&j, Finisher &f) :
cct(cct), journal(j), finisher(f),
- apply_lock("JOS::ApplyManager::apply_lock", false, true, false, cct),
+ apply_lock("JOS::ApplyManager::apply_lock", false, true, false),
blocked(false),
open_ops(0),
max_applied_seq(0),
- com_lock("JOS::ApplyManager::com_lock", false, true, false, cct),
+ com_lock("JOS::ApplyManager::com_lock", false, true, false),
committing_seq(0), committed_seq(0) {}
void reset() {
ceph_assert(open_ops == 0);
cct(cct),
logger(NULL),
stopping(true),
- lock("WBThrottle::lock", false, true, false, cct),
+ lock("WBThrottle::lock", false, true, false),
fs(XFS)
{
{
osd(osd),
shard_name(string("OSDShard.") + stringify(id)),
sdata_wait_lock_name(shard_name + "::sdata_wait_lock"),
- sdata_wait_lock(sdata_wait_lock_name.c_str(), false, true, false, cct),
+ sdata_wait_lock(sdata_wait_lock_name.c_str(), false, true, false),
osdmap_lock_name(shard_name + "::osdmap_lock"),
osdmap_lock(osdmap_lock_name.c_str(), false, false),
shard_lock_name(shard_name + "::shard_lock"),
- shard_lock(shard_lock_name.c_str(), false, true, false, cct),
+ shard_lock(shard_lock_name.c_str(), false, true, false),
context_queue(sdata_wait_lock, sdata_cond) {
if (opqueue == io_queue::weightedpriority) {
pqueue = std::make_unique<