/// We override complete() so that the context is not deleted
void complete(int r) override {
- std::lock_guard<Mutex> l(lock);
+ std::lock_guard l(lock);
done = true;
rval = r;
cond.Signal();
/// Returns rval once the Context is called
int wait() {
- std::lock_guard<Mutex> l(lock);
+ std::lock_guard l(lock);
while (!done)
cond.Wait(lock);
return rval;
int wait_for(double secs) {
utime_t interval;
interval.set_from_double(secs);
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
if (done) {
return rval;
}
void ContextCompletion::finish_adding_requests() {
bool complete;
{
- std::lock_guard<Mutex> l(m_lock);
+ std::lock_guard l(m_lock);
m_building = false;
complete = (m_current_ops == 0);
}
}
void ContextCompletion::start_op() {
- std::lock_guard<Mutex> l(m_lock);
+ std::lock_guard l(m_lock);
++m_current_ops;
}
void ContextCompletion::finish_op(int r) {
bool complete;
{
- std::lock_guard<Mutex> l(m_lock);
+ std::lock_guard l(m_lock);
if (r < 0 && m_ret == 0 && (!m_ignore_enoent || r != -ENOENT)) {
m_ret = r;
}
}
uint64_t Readahead::get_min_readahead_size(void) {
- std::lock_guard<Mutex> lock(m_lock);
+ std::lock_guard lock(m_lock);
return m_readahead_min_bytes;
}
uint64_t Readahead::get_max_readahead_size(void) {
- std::lock_guard<Mutex> lock(m_lock);
+ std::lock_guard lock(m_lock);
return m_readahead_max_bytes;
}
TokenBucketThrottle::~TokenBucketThrottle() {
// cancel the timer events.
{
- std::lock_guard<Mutex> timer_locker(*m_timer_lock);
+ std::lock_guard timer_locker(*m_timer_lock);
cancel_timer();
}
list<Blocker> tmp_blockers;
{
- std::lock_guard<Mutex> blockers_lock(m_lock);
+ std::lock_guard blockers_lock(m_lock);
tmp_blockers.splice(tmp_blockers.begin(), m_blockers, m_blockers.begin(), m_blockers.end());
}
}
void TokenBucketThrottle::set_schedule_tick_min(uint64_t tick) {
- std::lock_guard<Mutex> lock(m_lock);
+ std::lock_guard lock(m_lock);
if (tick != 0) {
m_tick_min = tick;
}
void TokenBucketThrottle::add_tokens() {
list<Blocker> tmp_blockers;
{
- std::lock_guard<Mutex> lock(m_lock);
+ std::lock_guard lock(m_lock);
// put tokens into bucket.
m_throttle.put(tokens_this_tick());
// check the m_blockers from head to tail, if blocker can get
bool wait = false;
uint64_t got = 0;
- std::lock_guard<Mutex> lock(m_lock);
+ std::lock_guard lock(m_lock);
if (!m_blockers.empty()) {
// Preserve request order: add the item after previously blocked requests.
wait = true;
void *_void_dequeue() override {
{
- std::lock_guard<Mutex> l(_lock);
+ std::lock_guard l(_lock);
if (_empty())
return 0;
U u = _dequeue();
pool->remove_work_queue(this);
}
void queue(T item) {
- std::lock_guard<Mutex> l(pool->_lock);
+ std::lock_guard l(pool->_lock);
_enqueue(item);
pool->_cond.SignalOne();
}
void queue_front(T item) {
- std::lock_guard<Mutex> l(pool->_lock);
+ std::lock_guard l(pool->_lock);
_enqueue_front(item);
pool->_cond.SignalOne();
}
{
// if this queue is empty and not processing, don't wait for other
// queues to finish processing
- std::lock_guard<Mutex> l(m_pool->_lock);
+ std::lock_guard l(m_pool->_lock);
if (m_processing == 0 && m_items.empty()) {
return;
}
m_pool->drain(this);
}
void queue(T *item) {
- std::lock_guard<Mutex> l(m_pool->_lock);
+ std::lock_guard l(m_pool->_lock);
m_items.push_back(item);
m_pool->_cond.SignalOne();
}
bool empty() {
- std::lock_guard<Mutex> l(m_pool->_lock);
+ std::lock_guard l(m_pool->_lock);
return _empty();
}
protected:
virtual void process(T *item) = 0;
void process_finish() {
- std::lock_guard<Mutex> locker(m_pool->_lock);
+ std::lock_guard locker(m_pool->_lock);
_void_process_finish(nullptr);
}
return m_items.front();
}
void requeue(T *item) {
- std::lock_guard<Mutex> pool_locker(m_pool->_lock);
+ std::lock_guard pool_locker(m_pool->_lock);
_void_process_finish(nullptr);
m_items.push_front(item);
}
void signal() {
- std::lock_guard<Mutex> pool_locker(m_pool->_lock);
+ std::lock_guard pool_locker(m_pool->_lock);
m_pool->_cond.SignalOne();
}
Mutex &get_pool_lock() {
/// return number of threads currently running
int get_num_threads() {
- std::lock_guard<Mutex> l(_lock);
+ std::lock_guard l(_lock);
return _num_threads;
}
/// assign a work queue to this thread pool
void add_work_queue(WorkQueue_* wq) {
- std::lock_guard<Mutex> l(_lock);
+ std::lock_guard l(_lock);
work_queues.push_back(wq);
}
/// remove a work queue from this thread pool
void remove_work_queue(WorkQueue_* wq) {
- std::lock_guard<Mutex> l(_lock);
+ std::lock_guard l(_lock);
unsigned i = 0;
while (work_queues[i] != wq)
i++;
}
/// wake up a waiter (without lock held)
void wake() {
- std::lock_guard<Mutex> l(_lock);
+ std::lock_guard l(_lock);
_cond.Signal();
}
void _wait() {
void queue(Context *ctx, int result = 0) {
if (result != 0) {
- std::lock_guard<Mutex> locker(m_lock);
+ std::lock_guard locker(m_lock);
m_context_results[ctx] = result;
}
ThreadPool::PointerWQ<Context>::queue(ctx);
void _clear() override {
ThreadPool::PointerWQ<Context>::_clear();
- std::lock_guard<Mutex> locker(m_lock);
+ std::lock_guard locker(m_lock);
m_context_results.clear();
}
void process(Context *ctx) override {
int result = 0;
{
- std::lock_guard<Mutex> locker(m_lock);
+ std::lock_guard locker(m_lock);
ceph::unordered_map<Context *, int>::iterator it =
m_context_results.find(ctx);
if (it != m_context_results.end()) {
#define CEPH_BUFFER_APPEND_SIZE (CEPH_BUFFER_ALLOC_UNIT - sizeof(raw_combined))
#ifdef BUFFER_DEBUG
-# define bdout { std::lock_guard<ceph::spinlock> lg(ceph::spinlock()); std::cout
+# define bdout { std::lock_guard lg(ceph::spinlock()); std::cout
# define bendl std::endl; }
#else
# define bdout if (0) { std::cout
const std::set <std::string> &changed) override {
if (changed.count(
"enable_experimental_unrecoverable_data_corrupting_features")) {
- std::lock_guard<ceph::spinlock> lg(cct->_feature_lock);
+ std::lock_guard lg(cct->_feature_lock);
get_str_set(
conf->enable_experimental_unrecoverable_data_corrupting_features,
cct->_experimental_features);
void CephContext::start_service_thread()
{
{
- std::lock_guard<ceph::spinlock> lg(_service_thread_lock);
+ std::lock_guard lg(_service_thread_lock);
if (_service_thread) {
return;
}
void CephContext::reopen_logs()
{
- std::lock_guard<ceph::spinlock> lg(_service_thread_lock);
+ std::lock_guard lg(_service_thread_lock);
if (_service_thread)
_service_thread->reopen_logs();
}
void CephContext::notify_pre_fork()
{
{
- std::lock_guard<ceph::spinlock> lg(_fork_watchers_lock);
+ std::lock_guard lg(_fork_watchers_lock);
for (auto &&t : _fork_watchers) {
t->handle_pre_fork();
}
};
void register_fork_watcher(ForkWatcher *w) {
- std::lock_guard<ceph::spinlock> lg(_fork_watchers_lock);
+ std::lock_guard lg(_fork_watchers_lock);
_fork_watchers.push_back(w);
}
template<typename Callable, typename... Args>
uint64_t add_event(typename TC::time_point when,
Callable&& f, Args&&... args) {
- std::lock_guard<std::mutex> l(lock);
+ std::lock_guard l(lock);
event& e = *(new event(
when, ++next_id,
std::forward<std::function<void()> >(
// Adjust the timeout of a currently-scheduled event (absolute)
bool adjust_event(uint64_t id, typename TC::time_point when) {
- std::lock_guard<std::mutex> l(lock);
+ std::lock_guard l(lock);
event key(id);
typename event_set_type::iterator it = events.find(key);
// never submitted it) you will receive false. Otherwise you will
// receive true and it is guaranteed the event will not execute.
bool cancel_event(const uint64_t id) {
- std::lock_guard<std::mutex> l(lock);
+ std::lock_guard l(lock);
event dummy(id);
auto p = events.find(dummy);
if (p == events.end()) {
uint64_t reschedule_me(typename TC::time_point when) {
if (std::this_thread::get_id() != thread.get_id())
throw std::make_error_condition(std::errc::operation_not_permitted);
- std::lock_guard<std::mutex> l(lock);
+ std::lock_guard l(lock);
running->t = when;
uint64_t id = ++next_id;
running->id = id;
// Remove all events from the queue.
void cancel_all_events() {
- std::lock_guard<std::mutex> l(lock);
+ std::lock_guard l(lock);
while (!events.empty()) {
auto p = events.begin();
event& e = *p;
return &values;
}
int get_val(const std::string& key, char** buf, int len) const {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.get_val(values, key, buf, len);
}
int get_val(const std::string &key, std::string *val) const {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.get_val(values, key, val);
}
template<typename T>
const T get_val(const std::string& key) const {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.template get_val<T>(values, key);
}
template<typename T, typename Callback, typename...Args>
auto with_val(const string& key, Callback&& cb, Args&&... args) const {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.template with_val<T>(values, key,
std::forward<Callback>(cb),
std::forward<Args>(args)...);
return config.find_option(name);
}
void diff(Formatter *f, const std::string& name=string{}) const {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.diff(values, f, name);
}
void get_my_sections(std::vector <std::string> §ions) const {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
config.get_my_sections(values, sections);
}
int get_all_sections(std::vector<std::string>& sections) const {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.get_all_sections(sections);
}
int get_val_from_conf_file(const std::vector<std::string>& sections,
const std::string& key, std::string& out,
bool emeta) const {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.get_val_from_conf_file(values,
sections, key, out, emeta);
}
}
void early_expand_meta(std::string &val,
std::ostream *oss) const {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.early_expand_meta(values, val, oss);
}
// for those want to reexpand special meta, e.g, $pid
void finalize_reexpand_meta() {
- std::lock_guard<Mutex> l(lock);
+ std::lock_guard l(lock);
if (config.finalize_reexpand_meta(values, obs_mgr)) {
obs_mgr.apply_changes(values.changed, *this, nullptr);
values.changed.clear();
}
}
void add_observer(md_config_obs_t* obs) {
- std::lock_guard<Mutex> l(lock);
+ std::lock_guard l(lock);
obs_mgr.add_observer(obs);
}
void remove_observer(md_config_obs_t* obs) {
- std::lock_guard<Mutex> l(lock);
+ std::lock_guard l(lock);
obs_mgr.remove_observer(obs);
}
void call_all_observers() {
- std::lock_guard<Mutex> l(lock);
+ std::lock_guard l(lock);
// Have the scope of the lock extend to the scope of
// handle_conf_change since that function expects to be called with
// the lock held. (And the comment in config.h says that is the
config._clear_safe_to_start_threads();
}
void show_config(std::ostream& out) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
config.show_config(values, out);
}
void show_config(Formatter *f) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
config.show_config(values, f);
}
void config_options(Formatter *f) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
config.config_options(f);
}
int rm_val(const std::string& key) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.rm_val(values, key);
}
// Expand all metavariables. Make any pending observer callbacks.
void apply_changes(std::ostream* oss) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
// apply changes until the cluster name is assigned
if (!values.cluster.empty()) {
// meta expands could have modified anything. Copy it all out again.
}
int set_val(const std::string& key, const std::string& s,
std::stringstream* err_ss=nullptr) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.set_val(values, obs_mgr, key, s);
}
void set_val_default(const std::string& key, const std::string& val) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
config.set_val_default(values, obs_mgr, key, val);
}
void set_val_or_die(const std::string& key, const std::string& val) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
config.set_val_or_die(values, obs_mgr, key, val);
}
int set_mon_vals(CephContext *cct,
const map<std::string,std::string>& kv,
md_config_t::config_callback config_cb) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
int ret = config.set_mon_vals(cct, values, obs_mgr, kv, config_cb);
obs_mgr.apply_changes(values.changed, *this, nullptr);
values.changed.clear();
return ret;
}
int injectargs(const std::string &s, std::ostream *oss) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
int ret = config.injectargs(values, obs_mgr, s, oss);
obs_mgr.apply_changes(values.changed, *this, oss);
values.changed.clear();
return ret;
}
void parse_env(const char *env_var = "CEPH_ARGS") {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
config.parse_env(values, obs_mgr, env_var);
}
int parse_argv(std::vector<const char*>& args, int level=CONF_CMDLINE) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.parse_argv(values, obs_mgr, args, level);
}
int parse_config_files(const char *conf_files,
std::ostream *warnings, int flags) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
return config.parse_config_files(values, obs_mgr,
conf_files, warnings, flags);
}
return config.complain_about_parse_errors(cct);
}
void do_argv_commands() const {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
config.do_argv_commands(values);
}
void get_config_bl(uint64_t have_version,
bufferlist *bl,
uint64_t *got_version) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
config.get_config_bl(values, have_version, bl, got_version);
}
void get_defaults_bl(bufferlist *bl) {
- std::lock_guard<Mutex> l{lock};
+ std::lock_guard l{lock};
config.get_defaults_bl(values, bl);
}
};
// writes are serialized
const T& operator=(const T& other) {
- std::lock_guard<std::mutex> l(lock);
+ std::lock_guard l(lock);
history.push_back(other);
current = &history.back();
return *current;
void prune() {
// note: this is not necessarily thread-safe wrt readers
- std::lock_guard<std::mutex> l(lock);
+ std::lock_guard l(lock);
while (history.size() > 1) {
history.pop_front();
}
// writes are serialized
const T& operator=(const T& other) {
- std::lock_guard<std::mutex> l(lock);
+ std::lock_guard l(lock);
history.push_back(other);
current = &history.back();
return *current;
void prune() {
// note: this is not necessarily thread-safe wrt readers
- std::lock_guard<std::mutex> l(lock);
+ std::lock_guard l(lock);
while (history.size() > 1) {
history.pop_front();
}
total->bytes += shard[i].bytes;
}
if (debug_mode) {
- std::lock_guard<std::mutex> shard_lock(lock);
+ std::lock_guard shard_lock(lock);
for (auto &p : type_map) {
std::string n = ceph_demangle(p.second.type_name);
stats_t &s = (*by_type)[n];