}
AsyncOpTracker::~AsyncOpTracker() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard<Mutex> locker(m_lock);
ceph_assert(m_pending_ops == 0);
}
void AsyncOpTracker::start_op() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard<Mutex> locker(m_lock);
++m_pending_ops;
}
void AsyncOpTracker::finish_op() {
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard<Mutex> locker(m_lock);
ceph_assert(m_pending_ops > 0);
if (--m_pending_ops == 0) {
std::swap(on_finish, m_on_finish);
void AsyncOpTracker::wait_for_ops(Context *on_finish) {
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard<Mutex> locker(m_lock);
ceph_assert(m_on_finish == nullptr);
if (m_pending_ops > 0) {
m_on_finish = on_finish;
}
bool AsyncOpTracker::empty() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard<Mutex> locker(m_lock);
return (m_pending_ops == 0);
}
lock("AsyncReserver::lock") {}
void set_max(unsigned max) {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
max_allowed = max;
do_queues();
}
void set_min_priority(unsigned min) {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
min_priority = min;
do_queues();
}
void dump(Formatter *f) {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
_dump(f);
}
void _dump(Formatter *f) {
unsigned prio, ///< [in] priority
Context *on_preempt = 0 ///< [in] callback to be called if we are preempted (optional)
) {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
Reservation r(item, prio, on_reserved, on_preempt);
rdout(10) << __func__ << " queue " << r << dendl;
ceph_assert(!queue_pointers.count(item) &&
void cancel_reservation(
T item ///< [in] key for reservation to cancel
) {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
auto i = queue_pointers.find(item);
if (i != queue_pointers.end()) {
unsigned prio = i->second.first;
* Return true if there are reservations in progress
*/
bool has_reservation() {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
return !in_progress.empty();
}
static const unsigned MAX_PRIORITY = (unsigned)-1;
/// We overload complete in order to not delete the context
void complete(int r) override {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
done = true;
rval = r;
cond.Signal();
/// Returns rval once the Context is called
int wait() {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
while (!done)
cond.Wait(lock);
return rval;
int wait_for(double secs) {
utime_t interval;
interval.set_from_double(secs);
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
if (done) {
return rval;
}
void ContextCompletion::finish_adding_requests() {
bool complete;
{
- Mutex::Locker l(m_lock);
+ std::lock_guard<Mutex> l(m_lock);
m_building = false;
complete = (m_current_ops == 0);
}
}
void ContextCompletion::start_op() {
- Mutex::Locker l(m_lock);
+ std::lock_guard<Mutex> l(m_lock);
++m_current_ops;
}
void ContextCompletion::finish_op(int r) {
bool complete;
{
- Mutex::Locker l(m_lock);
+ std::lock_guard<Mutex> l(m_lock);
if (r < 0 && m_ret == 0 && (!m_ignore_enoent || r != -ENOENT)) {
m_ret = r;
}
void LogChannel::do_log(clog_type prio, const std::string& s)
{
- Mutex::Locker l(channel_lock);
+ std::lock_guard<Mutex> l(channel_lock);
if (CLOG_ERROR == prio) {
ldout(cct,-1) << "log " << prio << " : " << s << dendl;
} else {
Message *LogClient::get_mon_log_message(bool flush)
{
- Mutex::Locker l(log_lock);
+ std::lock_guard<Mutex> l(log_lock);
if (flush) {
if (log_queue.empty())
return nullptr;
bool LogClient::are_pending()
{
- Mutex::Locker l(log_lock);
+ std::lock_guard<Mutex> l(log_lock);
return last_log > last_log_sent;
}
version_t LogClient::queue(LogEntry &entry)
{
- Mutex::Locker l(log_lock);
+ std::lock_guard<Mutex> l(log_lock);
entry.seq = ++last_log;
log_queue.push_back(entry);
uint64_t LogClient::get_next_seq()
{
- Mutex::Locker l(log_lock);
+ std::lock_guard<Mutex> l(log_lock);
return ++last_log;
}
bool LogClient::handle_log_ack(MLogAck *m)
{
- Mutex::Locker l(log_lock);
+ std::lock_guard<Mutex> l(log_lock);
ldout(cct,10) << "handle_log_ack " << *m << dendl;
version_t last = m->last;
void OutputDataSocket::append_output(bufferlist& bl)
{
- Mutex::Locker l(m_lock);
+ std::lock_guard<Mutex> l(m_lock);
if (data_size + bl.length() > data_max_backlog) {
ldout(m_cct, 20) << "dropping data output, max backlog reached" << dendl;
Plugin *PluginRegistry::get_with_load(const std::string& type,
const std::string& name)
{
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
Plugin* ret = get(type, name);
if (!ret) {
int err = load(type, name);
const std::string &directory,
ostream &ss)
{
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
list<string> plugins_list;
get_str_list(plugins, plugins_list);
for (list<string>::iterator i = plugins_list.begin();
}
uint64_t Readahead::get_min_readahead_size(void) {
- Mutex::Locker lock(m_lock);
+ std::lock_guard<Mutex> lock(m_lock);
return m_readahead_min_bytes;
}
uint64_t Readahead::get_max_readahead_size(void) {
- Mutex::Locker lock(m_lock);
+ std::lock_guard<Mutex> lock(m_lock);
return m_readahead_max_bytes;
}
RefCountedCond() : complete(false), lock("RefCountedCond"), rval(0) {}
int wait() {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
while (!complete) {
cond.Wait(lock);
}
}
void done(int r) {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
rval = r;
complete = true;
cond.SignalAll();
m_avg(avg), m_timer(timer), m_timer_lock(timer_lock),
m_lock("token_bucket_throttle_lock")
{
- Mutex::Locker timer_locker(*m_timer_lock);
+ std::lock_guard<Mutex> timer_locker(*m_timer_lock);
schedule_timer();
}
{
// cancel the timer events.
{
- Mutex::Locker timer_locker(*m_timer_lock);
+ std::lock_guard<Mutex> timer_locker(*m_timer_lock);
cancel_timer();
}
list<Blocker> tmp_blockers;
{
- Mutex::Locker blockers_lock(m_lock);
+ std::lock_guard<Mutex> blockers_lock(m_lock);
tmp_blockers.splice(tmp_blockers.begin(), m_blockers, m_blockers.begin(), m_blockers.end());
}
}
void TokenBucketThrottle::set_max(uint64_t m) {
- Mutex::Locker lock(m_lock);
+ std::lock_guard<Mutex> lock(m_lock);
m_throttle.set_max(m);
}
list<Blocker> tmp_blockers;
{
// put m_avg tokens into bucket.
- Mutex::Locker lock(m_lock);
+ std::lock_guard<Mutex> lock(m_lock);
m_throttle.put(m_avg);
// check the m_blockers from head to tail, if blocker can get
// enough tokens, let it go.
bool wait = false;
uint64_t got = 0;
- Mutex::Locker lock(m_lock);
+ std::lock_guard<Mutex> lock(m_lock);
if (!m_blockers.empty()) {
// Keep the order of requests, add item after previous blocked requests.
wait = true;
}
void TracepointProvider::verify_config(const ConfigProxy& conf) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard<Mutex> locker(m_lock);
if (m_handle) {
return;
}
{
opsvc.break_thread();
opsvc.join();
- Mutex::Locker history_lock(ops_history_lock);
+ std::lock_guard<Mutex> history_lock(ops_history_lock);
arrived.clear();
duration.clear();
slow_op.clear();
void OpHistory::_insert_delayed(const utime_t& now, TrackedOpRef op)
{
- Mutex::Locker history_lock(ops_history_lock);
+ std::lock_guard<Mutex> history_lock(ops_history_lock);
if (shutdown)
return;
double opduration = op->get_duration();
void OpHistory::dump_ops(utime_t now, Formatter *f, set<string> filters, bool by_duration)
{
- Mutex::Locker history_lock(ops_history_lock);
+ std::lock_guard<Mutex> history_lock(ops_history_lock);
cleanup(now);
f->open_object_section("op_history");
f->dump_int("size", history_size);
void OpHistory::dump_slow_ops(utime_t now, Formatter *f, set<string> filters)
{
- Mutex::Locker history_lock(ops_history_lock);
+ std::lock_guard<Mutex> history_lock(ops_history_lock);
cleanup(now);
f->open_object_section("OpHistory slow ops");
f->dump_int("num to keep", history_slow_op_size);
for (uint32_t i = 0; i < num_optracker_shards; i++) {
ShardedTrackingData* sdata = sharded_in_flight_list[i];
ceph_assert(NULL != sdata);
- Mutex::Locker locker(sdata->ops_in_flight_lock_sharded);
+ std::lock_guard<Mutex> locker(sdata->ops_in_flight_lock_sharded);
for (auto& op : sdata->ops_in_flight_sharded) {
if (print_only_blocked && (now - op.get_initiated() <= complaint_time))
break;
ShardedTrackingData* sdata = sharded_in_flight_list[shard_index];
ceph_assert(NULL != sdata);
{
- Mutex::Locker locker(sdata->ops_in_flight_lock_sharded);
+ std::lock_guard<Mutex> locker(sdata->ops_in_flight_lock_sharded);
sdata->ops_in_flight_sharded.push_back(*i);
i->seq = current_seq;
}
ShardedTrackingData* sdata = sharded_in_flight_list[shard_index];
ceph_assert(NULL != sdata);
{
- Mutex::Locker locker(sdata->ops_in_flight_lock_sharded);
+ std::lock_guard<Mutex> locker(sdata->ops_in_flight_lock_sharded);
auto p = sdata->ops_in_flight_sharded.iterator_to(*i);
sdata->ops_in_flight_sharded.erase(p);
}
RWLock::RLocker l(lock);
for (const auto sdata : sharded_in_flight_list) {
ceph_assert(sdata);
- Mutex::Locker locker(sdata->ops_in_flight_lock_sharded);
+ std::lock_guard<Mutex> locker(sdata->ops_in_flight_lock_sharded);
if (!sdata->ops_in_flight_sharded.empty()) {
utime_t oldest_op_tmp =
sdata->ops_in_flight_sharded.front().get_initiated();
for (uint32_t iter = 0; iter < num_optracker_shards; iter++) {
ShardedTrackingData* sdata = sharded_in_flight_list[iter];
ceph_assert(NULL != sdata);
- Mutex::Locker locker(sdata->ops_in_flight_lock_sharded);
+ std::lock_guard<Mutex> locker(sdata->ops_in_flight_lock_sharded);
for (auto& op : sdata->ops_in_flight_sharded) {
if (!visit(op))
break;
for (uint32_t iter = 0; iter < num_optracker_shards; iter++) {
ShardedTrackingData* sdata = sharded_in_flight_list[iter];
ceph_assert(NULL != sdata);
- Mutex::Locker locker(sdata->ops_in_flight_lock_sharded);
+ std::lock_guard<Mutex> locker(sdata->ops_in_flight_lock_sharded);
for (auto& i : sdata->ops_in_flight_sharded) {
utime_t age = now - i.get_initiated();
return;
{
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
events.emplace_back(stamp, event);
current = events.back().c_str();
}
return;
{
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
events.emplace_back(stamp, event);
current = event;
}
const char *get_desc() const {
if (!desc || want_new_desc.load()) {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
_gen_desc();
}
return desc;
}
double get_duration() const {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
if (!events.empty() && events.rbegin()->compare("done") == 0)
return events.rbegin()->stamp - get_initiated();
else
}
virtual const char *state_string() const {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
return events.rbegin()->c_str();
}
void ThreadPool::set_ioprio(int cls, int priority)
{
- Mutex::Locker l(_lock);
+ std::lock_guard<Mutex> l(_lock);
ioprio_class = cls;
ioprio_priority = priority;
for (set<WorkThread*>::iterator p = _threads.begin();
void *_void_dequeue() override {
{
- Mutex::Locker l(_lock);
+ std::lock_guard<Mutex> l(_lock);
if (_empty())
return 0;
U u = _dequeue();
pool->remove_work_queue(this);
}
void queue(T item) {
- Mutex::Locker l(pool->_lock);
+ std::lock_guard<Mutex> l(pool->_lock);
_enqueue(item);
pool->_cond.SignalOne();
}
void queue_front(T item) {
- Mutex::Locker l(pool->_lock);
+ std::lock_guard<Mutex> l(pool->_lock);
_enqueue_front(item);
pool->_cond.SignalOne();
}
{
// if this queue is empty and not processing, don't wait for other
// queues to finish processing
- Mutex::Locker l(m_pool->_lock);
+ std::lock_guard<Mutex> l(m_pool->_lock);
if (m_processing == 0 && m_items.empty()) {
return;
}
m_pool->drain(this);
}
void queue(T *item) {
- Mutex::Locker l(m_pool->_lock);
+ std::lock_guard<Mutex> l(m_pool->_lock);
m_items.push_back(item);
m_pool->_cond.SignalOne();
}
bool empty() {
- Mutex::Locker l(m_pool->_lock);
+ std::lock_guard<Mutex> l(m_pool->_lock);
return _empty();
}
protected:
virtual void process(T *item) = 0;
void process_finish() {
- Mutex::Locker locker(m_pool->_lock);
+ std::lock_guard<Mutex> locker(m_pool->_lock);
_void_process_finish(nullptr);
}
return m_items.front();
}
void requeue(T *item) {
- Mutex::Locker pool_locker(m_pool->_lock);
+ std::lock_guard<Mutex> pool_locker(m_pool->_lock);
_void_process_finish(nullptr);
m_items.push_front(item);
}
void signal() {
- Mutex::Locker pool_locker(m_pool->_lock);
+ std::lock_guard<Mutex> pool_locker(m_pool->_lock);
m_pool->_cond.SignalOne();
}
Mutex &get_pool_lock() {
/// return number of threads currently running
int get_num_threads() {
- Mutex::Locker l(_lock);
+ std::lock_guard<Mutex> l(_lock);
return _num_threads;
}
/// assign a work queue to this thread pool
void add_work_queue(WorkQueue_* wq) {
- Mutex::Locker l(_lock);
+ std::lock_guard<Mutex> l(_lock);
work_queues.push_back(wq);
}
/// remove a work queue from this thread pool
void remove_work_queue(WorkQueue_* wq) {
- Mutex::Locker l(_lock);
+ std::lock_guard<Mutex> l(_lock);
unsigned i = 0;
while (work_queues[i] != wq)
i++;
}
/// wake up a waiter (without lock held)
void wake() {
- Mutex::Locker l(_lock);
+ std::lock_guard<Mutex> l(_lock);
_cond.Signal();
}
void _wait() {
void queue(Context *ctx, int result = 0) {
if (result != 0) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard<Mutex> locker(m_lock);
m_context_results[ctx] = result;
}
ThreadPool::PointerWQ<Context>::queue(ctx);
void _clear() override {
ThreadPool::PointerWQ<Context>::_clear();
- Mutex::Locker locker(m_lock);
+ std::lock_guard<Mutex> locker(m_lock);
m_context_results.clear();
}
void process(Context *ctx) override {
int result = 0;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard<Mutex> locker(m_lock);
ceph::unordered_map<Context *, int>::iterator it =
m_context_results.find(ctx);
if (it != m_context_results.end()) {
void *entry() override
{
while (1) {
- Mutex::Locker l(_lock);
+ std::lock_guard<Mutex> l(_lock);
if (_cct->_conf->heartbeat_interval) {
utime_t interval(_cct->_conf->heartbeat_interval, 0);
void reopen_logs()
{
- Mutex::Locker l(_lock);
+ std::lock_guard<Mutex> l(_lock);
_reopen_logs = true;
_cond.Signal();
}
void exit_thread()
{
- Mutex::Locker l(_lock);
+ std::lock_guard<Mutex> l(_lock);
_exit_thread = true;
_cond.Signal();
}
return &values;
}
int get_val(const std::string& key, char** buf, int len) const {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.get_val(values, key, buf, len);
}
int get_val(const std::string &key, std::string *val) const {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.get_val(values, key, val);
}
template<typename T>
const T get_val(const std::string& key) const {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.template get_val<T>(values, key);
}
template<typename T, typename Callback, typename...Args>
auto with_val(const string& key, Callback&& cb, Args&&... args) const {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.template with_val<T>(values, key,
std::forward<Callback>(cb),
std::forward<Args>(args)...);
return config.find_option(name);
}
void diff(Formatter *f, const std::string& name=string{}) const {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.diff(values, f, name);
}
void get_my_sections(std::vector <std::string> &sections) const {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
config.get_my_sections(values, sections);
}
int get_all_sections(std::vector<std::string>& sections) const {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.get_all_sections(sections);
}
int get_val_from_conf_file(const std::vector<std::string>& sections,
const std::string& key, std::string& out,
bool emeta) const {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.get_val_from_conf_file(values,
sections, key, out, emeta);
}
}
void early_expand_meta(std::string &val,
std::ostream *oss) const {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.early_expand_meta(values, val, oss);
}
// for those who want to re-expand special meta, e.g., $pid
void finalize_reexpand_meta() {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
if (config.finalize_reexpand_meta(values, obs_mgr)) {
obs_mgr.apply_changes(values.changed, *this, nullptr);
values.changed.clear();
}
}
void add_observer(md_config_obs_t* obs) {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
obs_mgr.add_observer(obs);
}
void remove_observer(md_config_obs_t* obs) {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
obs_mgr.remove_observer(obs);
}
void call_all_observers() {
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
// Have the scope of the lock extend to the scope of
// handle_conf_change since that function expects to be called with
// the lock held. (And the comment in config.h says that is the
config._clear_safe_to_start_threads();
}
void show_config(std::ostream& out) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
config.show_config(values, out);
}
void show_config(Formatter *f) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
config.show_config(values, f);
}
void config_options(Formatter *f) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
config.config_options(f);
}
int rm_val(const std::string& key) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.rm_val(values, key);
}
// Expand all metavariables. Make any pending observer callbacks.
void apply_changes(std::ostream* oss) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
// apply changes until the cluster name is assigned
if (!values.cluster.empty()) {
// meta expands could have modified anything. Copy it all out again.
}
int set_val(const std::string& key, const std::string& s,
std::stringstream* err_ss=nullptr) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.set_val(values, obs_mgr, key, s);
}
void set_val_default(const std::string& key, const std::string& val) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
config.set_val_default(values, obs_mgr, key, val);
}
void set_val_or_die(const std::string& key, const std::string& val) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
config.set_val_or_die(values, obs_mgr, key, val);
}
int set_mon_vals(CephContext *cct,
const map<std::string,std::string>& kv,
md_config_t::config_callback config_cb) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
int ret = config.set_mon_vals(cct, values, obs_mgr, kv, config_cb);
obs_mgr.apply_changes(values.changed, *this, nullptr);
values.changed.clear();
return ret;
}
int injectargs(const std::string &s, std::ostream *oss) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
int ret = config.injectargs(values, obs_mgr, s, oss);
obs_mgr.apply_changes(values.changed, *this, oss);
values.changed.clear();
return ret;
}
void parse_env(const char *env_var = "CEPH_ARGS") {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
config.parse_env(values, obs_mgr, env_var);
}
int parse_argv(std::vector<const char*>& args, int level=CONF_CMDLINE) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.parse_argv(values, obs_mgr, args, level);
}
int parse_config_files(const char *conf_files,
std::ostream *warnings, int flags) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
return config.parse_config_files(values, obs_mgr,
conf_files, warnings, flags);
}
return config.complain_about_parse_errors(cct);
}
void do_argv_commands() const {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
config.do_argv_commands(values);
}
void get_config_bl(uint64_t have_version,
bufferlist *bl,
uint64_t *got_version) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
config.get_config_bl(values, have_version, bl, got_version);
}
void get_defaults_bl(bufferlist *bl) {
- Mutex::Locker l{lock};
+ std::lock_guard<Mutex> l{lock};
config.get_defaults_bl(values, bl);
}
};
void DNSResolver::put_state(res_state s)
{
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
states.push_back(s);
}
#endif
#else
{
# ifndef HAVE_THREAD_SAFE_RES_QUERY
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
# endif
len = resolv_h->res_query(origname, ns_c_in, ns_t_cname, buf, sizeof(buf));
}
#else
{
# ifndef HAVE_THREAD_SAFE_RES_QUERY
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
# endif
len = resolv_h->res_query(hostname.c_str(), ns_c_in, type, nsbuf, sizeof(nsbuf));
}
#else
{
# ifndef HAVE_THREAD_SAFE_RES_QUERY
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
# endif
len = resolv_h->res_search(query_str.c_str(), ns_c_in, ns_t_srv, nsbuf,
sizeof(nsbuf));
template <class K, class V>
bool lru_map<K, V>::find(const K& key, V& value)
{
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
return _find(key, &value, NULL);
}
template <class K, class V>
bool lru_map<K, V>::find_and_update(const K& key, V *value, UpdateContext *ctx)
{
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
return _find(key, value, ctx);
}
template <class K, class V>
void lru_map<K, V>::add(const K& key, V& value)
{
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
_add(key, value);
}
template <class K, class V>
void lru_map<K, V>::erase(const K& key)
{
- Mutex::Locker l(lock);
+ std::lock_guard<Mutex> l(lock);
typename std::map<K, entry>::iterator iter = entries.find(key);
if (iter == entries.end())
return;
void PerfCountersCollection::add(class PerfCounters *l)
{
- Mutex::Locker lck(m_lock);
+ std::lock_guard<Mutex> lck(m_lock);
// make sure the name is unique
perf_counters_set_t::iterator i;
void PerfCountersCollection::remove(class PerfCounters *l)
{
- Mutex::Locker lck(m_lock);
+ std::lock_guard<Mutex> lck(m_lock);
for (unsigned int i = 0; i < l->m_data.size(); ++i) {
PerfCounters::perf_counter_data_any_d &data = l->m_data[i];
void PerfCountersCollection::clear()
{
- Mutex::Locker lck(m_lock);
+ std::lock_guard<Mutex> lck(m_lock);
perf_counters_set_t::iterator i = m_loggers.begin();
perf_counters_set_t::iterator i_end = m_loggers.end();
for (; i != i_end; ) {
bool PerfCountersCollection::reset(const std::string &name)
{
bool result = false;
- Mutex::Locker lck(m_lock);
+ std::lock_guard<Mutex> lck(m_lock);
perf_counters_set_t::iterator i = m_loggers.begin();
perf_counters_set_t::iterator i_end = m_loggers.end();
const std::string &logger,
const std::string &counter)
{
- Mutex::Locker lck(m_lock);
+ std::lock_guard<Mutex> lck(m_lock);
f->open_object_section("perfcounter_collection");
for (perf_counters_set_t::iterator l = m_loggers.begin();
void PerfCountersCollection::with_counters(std::function<void(
const PerfCountersCollection::CounterMap &)> fn) const
{
- Mutex::Locker lck(m_lock);
+ std::lock_guard<Mutex> lck(m_lock);
fn(by_path);
}
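A minimal sketch of why std::lock_guard<Mutex> works as a drop-in for Mutex::Locker: std::lock_guard only requires its template argument to be BasicLockable, i.e. to expose lock()/unlock(). The Mutex type below is a hypothetical stand-in, kept self-contained so the example compiles on its own; its name-string constructor mirrors the lock("AsyncReserver::lock")-style constructions visible above.
#include <mutex>

// Hypothetical stand-in for the project's Mutex type; assumes only that the
// real class is BasicLockable (lock()/unlock()), which std::lock_guard needs.
struct Mutex {
  explicit Mutex(const char* /*name*/) {}  // matches the lock("...") style above
  void lock() { m.lock(); }
  void unlock() { m.unlock(); }
 private:
  std::mutex m;
};

static Mutex example_lock("example::lock");
static int pending_ops = 0;

void start_op() {
  // Before: Mutex::Locker l(example_lock);
  // After:  the standard RAII guard, which acquires in the constructor and
  //         releases in the destructor, including on early return or throw.
  std::lock_guard<Mutex> l(example_lock);
  ++pending_ops;
}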