So begins the ruthless annihilation of g_ceph_context.
Signed-off-by: Adam C. Emerson <aemerson@redhat.com>
class buffer::list *inbl, class buffer::list *outbl);
class PGLSFilter {
+ CephContext* cct;
protected:
string xattr;
public:
#include "common/config.h"
#include "common/debug.h"
-#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix *_dout
int ClassHandler::open_all_classes()
{
- dout(10) << __func__ << dendl;
+ ldout(cct, 10) << __func__ << dendl;
DIR *dir = ::opendir(cct->_conf->osd_class_dir.c_str());
if (!dir)
return -errno;
char cname[PATH_MAX + 1];
strncpy(cname, pde->d_name + sizeof(CLS_PREFIX) - 1, sizeof(cname) -1);
cname[strlen(cname) - (sizeof(CLS_SUFFIX) - 1)] = '\0';
- dout(10) << __func__ << " found " << cname << dendl;
+ ldout(cct, 10) << __func__ << " found " << cname << dendl;
ClassData *cls;
// skip classes that aren't in 'osd class load list'
r = open_class(cname, &cls);
cls = &iter->second;
} else {
if (check_allowed && !in_class_list(cname, cct->_conf->osd_class_load_list)) {
- dout(0) << "_get_class not permitted to load " << cname << dendl;
+ ldout(cct, 0) << "_get_class not permitted to load " << cname << dendl;
return NULL;
}
cls = &classes[cname];
- dout(10) << "_get_class adding new class name " << cname << " " << cls << dendl;
+ ldout(cct, 10) << "_get_class adding new class name " << cname << " " << cls << dendl;
cls->name = cname;
cls->handler = this;
cls->whitelisted = in_class_list(cname, cct->_conf->osd_class_default_list);
snprintf(fname, sizeof(fname), "%s/" CLS_PREFIX "%s" CLS_SUFFIX,
cct->_conf->osd_class_dir.c_str(),
cls->name.c_str());
- dout(10) << "_load_class " << cls->name << " from " << fname << dendl;
+ ldout(cct, 10) << "_load_class " << cls->name << " from " << fname << dendl;
cls->handle = dlopen(fname, RTLD_NOW);
if (!cls->handle) {
int r = ::stat(fname, &st);
if (r < 0) {
r = -errno;
- dout(0) << __func__ << " could not stat class " << fname
- << ": " << cpp_strerror(r) << dendl;
+ ldout(cct, 0) << __func__ << " could not stat class " << fname
+ << ": " << cpp_strerror(r) << dendl;
} else {
- dout(0) << "_load_class could not open class " << fname
- << " (dlopen failed): " << dlerror() << dendl;
- r = -EIO;
+ ldout(cct, 0) << "_load_class could not open class " << fname
+ << " (dlopen failed): " << dlerror() << dendl;
+ r = -EIO;
}
cls->status = ClassData::CLASS_MISSING;
return r;
cls->status = ClassData::CLASS_MISSING_DEPS;
return r;
}
-
- dout(10) << "_load_class " << cls->name << " satisfied dependency " << dc->name << dendl;
+
+ ldout(cct, 10) << "_load_class " << cls->name << " satisfied dependency " << dc->name << dendl;
cls->missing_dependencies.erase(p++);
}
-
+
// initialize
void (*cls_init)() = (void (*)())dlsym(cls->handle, "__cls_init");
if (cls_init) {
cls->status = ClassData::CLASS_INITIALIZING;
cls_init();
}
-
- dout(10) << "_load_class " << cls->name << " success" << dendl;
+
+ ldout(cct, 10) << "_load_class " << cls->name << " success" << dendl;
cls->status = ClassData::CLASS_OPEN;
return 0;
}
assert(mutex.is_locked());
ClassData *cls = _get_class(cname, false);
- dout(10) << "register_class " << cname << " status " << cls->status << dendl;
+ ldout(cct, 10) << "register_class " << cname << " status " << cls->status << dendl;
if (cls->status != ClassData::CLASS_INITIALIZING) {
- dout(0) << "class " << cname << " isn't loaded; is the class registering under the wrong name?" << dendl;
+ ldout(cct, 0) << "class " << cname << " isn't loaded; is the class registering under the wrong name?" << dendl;
return NULL;
}
return cls;
{
/* no need for locking, called under the class_init mutex */
if (!flags) {
- derr << "register_method " << name << "." << mname << " flags " << flags << " " << (void*)func
- << " FAILED -- flags must be non-zero" << dendl;
+ lderr(handler->cct) << "register_method " << name << "." << mname
+ << " flags " << flags << " " << (void*)func
+ << " FAILED -- flags must be non-zero" << dendl;
return NULL;
}
- dout(10) << "register_method " << name << "." << mname << " flags " << flags << " " << (void*)func << dendl;
+ ldout(handler->cct, 10) << "register_method " << name << "." << mname << " flags " << flags << " " << (void*)func << dendl;
ClassMethod& method = methods_map[mname];
method.func = func;
method.name = mname;
cls_method_cxx_call_t func)
{
/* no need for locking, called under the class_init mutex */
- dout(10) << "register_cxx_method " << name << "." << mname << " flags " << flags << " " << (void*)func << dendl;
+ ldout(handler->cct, 10) << "register_cxx_method " << name << "." << mname << " flags " << flags << " " << (void*)func << dendl;
ClassMethod& method = methods_map[mname];
method.cxx_func = func;
method.name = mname;
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLASSHANDLER_H
#define CEPH_CLASSHANDLER_H
#include "PrimaryLogPG.h"
-#define dout_context g_ceph_context
+#define dout_context cct
#define dout_subsys ceph_subsys_osd
#define DOUT_PREFIX_ARGS this
#undef dout_prefix
CephContext *cct,
ErasureCodeInterfaceRef ec_impl,
uint64_t stripe_width)
- : PGBackend(pg, store, coll, ch),
- cct(cct),
+ : PGBackend(cct, pg, store, coll, ch),
ec_impl(ec_impl),
sinfo(ec_impl->get_data_chunk_count(), stripe_width) {
assert((ec_impl->get_data_chunk_count() *
bool try_finish_rmw();
void check_ops();
- CephContext *cct;
ErasureCodeInterfaceRef ec_impl;
#define tracepoint(...)
#endif
-#define dout_context g_ceph_context
+#define dout_context cct
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout, whoami, get_osdmap_epoch())
<< ", top is " << level
<< " with pgs " << top.size()
<< ", ops " << agent_ops << "/"
- << g_conf->osd_agent_max_ops
+ << cct->_conf->osd_agent_max_ops
<< (agent_active ? " active" : " NOT ACTIVE")
<< dendl;
dout(20) << __func__ << " oids " << agent_oids << dendl;
- int max = g_conf->osd_agent_max_ops - agent_ops;
+ int max = cct->_conf->osd_agent_max_ops - agent_ops;
int agent_flush_quota = max;
if (!flush_mode_high_count)
- agent_flush_quota = g_conf->osd_agent_max_low_ops - agent_ops;
+ agent_flush_quota = cct->_conf->osd_agent_max_low_ops - agent_ops;
if (agent_flush_quota <= 0 || top.empty() || !agent_active) {
agent_cond.Wait(agent_lock);
continue;
agent_lock.Unlock();
if (!pg->agent_work(max, agent_flush_quota)) {
dout(10) << __func__ << " " << pg->get_pgid()
- << " no agent_work, delay for " << g_conf->osd_agent_delay_time
+ << " no agent_work, delay for " << cct->_conf->osd_agent_delay_time
<< " seconds" << dendl;
osd->logger->inc(l_osd_tier_delay);
// Queue a timer to call agent_choose_mode for this pg in 5 seconds
agent_timer_lock.Lock();
Context *cb = new AgentTimeoutCB(pg);
- agent_timer.add_event_after(g_conf->osd_agent_delay_time, cb);
+ agent_timer.add_event_after(cct->_conf->osd_agent_delay_time, cb);
agent_timer_lock.Unlock();
}
agent_lock.Lock();
last_recalibrate = now;
unsigned prob = promote_probability_millis;
- uint64_t target_obj_sec = g_conf->osd_tier_promote_max_objects_sec;
- uint64_t target_bytes_sec = g_conf->osd_tier_promote_max_bytes_sec;
+ uint64_t target_obj_sec = cct->_conf->osd_tier_promote_max_objects_sec;
+ uint64_t target_bytes_sec = cct->_conf->osd_tier_promote_max_bytes_sec;
unsigned min_prob = 1;
goto free_store;
}
- store->set_cache_shards(g_conf->osd_op_num_shards);
+ store->set_cache_shards(cct->_conf->osd_op_num_shards);
ret = store->mount();
if (ret) {
&command_tp),
replay_queue_lock("OSD::replay_queue_lock"),
remove_wq(
+ cct,
store,
cct->_conf->osd_remove_thread_timeout,
cct->_conf->osd_remove_thread_suicide_timeout,
{
#ifdef HAVE_LIBFUSE
int r;
- string mntpath = g_conf->osd_data + "/fuse";
- if (fuse_store && (stop || !g_conf->osd_objectstore_fuse)) {
+ string mntpath = cct->_conf->osd_data + "/fuse";
+ if (fuse_store && (stop || !cct->_conf->osd_objectstore_fuse)) {
dout(1) << __func__ << " disabling" << dendl;
fuse_store->stop();
delete fuse_store;
}
return 0;
}
- if (!fuse_store && g_conf->osd_objectstore_fuse) {
+ if (!fuse_store && cct->_conf->osd_objectstore_fuse) {
dout(1) << __func__ << " enabling" << dendl;
r = ::mkdir(mntpath.c_str(), 0700);
if (r < 0)
<< (journal_path.empty() ? "(no journal)" : journal_path) << dendl;
assert(store); // call pre_init() first!
- store->set_cache_shards(g_conf->osd_op_num_shards);
+ store->set_cache_shards(cct->_conf->osd_op_num_shards);
int r = store->mount();
if (r < 0) {
// sanity check long object name handling
{
hobject_t l;
- l.oid.name = string(g_conf->osd_max_object_name_len, 'n');
- l.set_key(string(g_conf->osd_max_object_name_len, 'k'));
- l.nspace = string(g_conf->osd_max_object_namespace_len, 's');
+ l.oid.name = string(cct->_conf->osd_max_object_name_len, 'n');
+ l.set_key(string(cct->_conf->osd_max_object_name_len, 'k'));
+ l.nspace = string(cct->_conf->osd_max_object_namespace_len, 's');
r = store->validate_hobject_key(l);
if (r < 0) {
derr << "backend (" << store->get_type() << ") is unable to support max "
<< "object name[space] len" << dendl;
derr << " osd max object name len = "
- << g_conf->osd_max_object_name_len << dendl;
+ << cct->_conf->osd_max_object_name_len << dendl;
derr << " osd max object namespace len = "
- << g_conf->osd_max_object_namespace_len << dendl;
+ << cct->_conf->osd_max_object_namespace_len << dendl;
derr << cpp_strerror(r) << dendl;
- if (g_conf->osd_check_max_object_name_len_on_startup) {
+ if (cct->_conf->osd_check_max_object_name_len_on_startup) {
goto out;
}
derr << "osd_check_max_object_name_len_on_startup = false, starting anyway"
dout(10) << "syncing store" << dendl;
enable_disable_fuse(true);
- if (g_conf->osd_journal_flush_on_shutdown) {
+ if (cct->_conf->osd_journal_flush_on_shutdown) {
dout(10) << "flushing journal" << dendl;
store->flush_journal();
}
int OSD::update_crush_location()
{
- if (!g_conf->osd_crush_update_on_start) {
+ if (!cct->_conf->osd_crush_update_on_start) {
dout(10) << __func__ << " osd_crush_update_on_start = false" << dendl;
return 0;
}
char weight[32];
- if (g_conf->osd_crush_initial_weight >= 0) {
- snprintf(weight, sizeof(weight), "%.4lf", g_conf->osd_crush_initial_weight);
+ if (cct->_conf->osd_crush_initial_weight >= 0) {
+ snprintf(weight, sizeof(weight), "%.4lf", cct->_conf->osd_crush_initial_weight);
} else {
struct store_statfs_t st;
int r = store->statfs(&st);
}
}
-void OSD::recursive_remove_collection(ObjectStore *store, spg_t pgid, coll_t tmp)
+void OSD::recursive_remove_collection(CephContext* cct,
+ ObjectStore *store, spg_t pgid,
+ coll_t tmp)
{
OSDriver driver(
store,
ceph::shared_ptr<ObjectStore::Sequencer> osr (std::make_shared<
ObjectStore::Sequencer>("rm"));
ObjectStore::Transaction t;
- SnapMapper mapper(&driver, 0, 0, 0, pgid.shard);
+ SnapMapper mapper(cct, &driver, 0, 0, 0, pgid.shard);
vector<ghobject_t> objects;
store->collection_list(tmp, ghobject_t(), ghobject_t::get_max(), true,
if (r != 0 && r != -ENOENT)
ceph_abort();
t.remove(tmp, *p);
- if (removed > g_conf->osd_target_transaction_size) {
+ if (removed > cct->_conf->osd_target_transaction_size) {
int r = store->apply_transaction(osr.get(), std::move(t));
assert(r == 0);
t = ObjectStore::Transaction();
ceph_abort();
}
- PGPool p = PGPool(createmap, id);
+ PGPool p = PGPool(cct, createmap, id);
dout(10) << "_get_pool " << p.id << dendl;
return p;
if (it->is_temp(&pgid) ||
(it->is_pg(&pgid) && PG::_has_removal_flag(store, pgid))) {
dout(10) << "load_pgs " << *it << " clearing temp" << dendl;
- recursive_remove_collection(store, pgid, *it);
+ recursive_remove_collection(cct, store, pgid, *it);
continue;
}
bool report = false;
utime_t now = ceph_clock_now();
pg_stat_queue_lock.Lock();
- double backoff = stats_ack_timeout / g_conf->osd_mon_ack_timeout;
+ double backoff = stats_ack_timeout / cct->_conf->osd_mon_ack_timeout;
double adjusted_min = cct->_conf->osd_mon_report_interval_min * backoff;
// note: we shouldn't adjust max because it must remain < the
// mon's mon_osd_report_timeout (which defaults to 1.5x our
last_pg_stats_ack = now; // reset clock
last_pg_stats_sent = utime_t();
stats_ack_timeout =
- MAX(g_conf->osd_mon_ack_timeout,
- stats_ack_timeout * g_conf->osd_stats_ack_timeout_factor);
+ MAX(cct->_conf->osd_mon_ack_timeout,
+ stats_ack_timeout * cct->_conf->osd_stats_ack_timeout_factor);
outstanding_pg_stats.clear();
}
if (now - last_pg_stats_sent > max) {
ObjectStore::Transaction t;
PGLog::clear_info_log(pg->info.pgid, &t);
- if (g_conf->osd_inject_failure_on_pg_removal) {
+ if (cct->_conf->osd_inject_failure_on_pg_removal) {
generic_derr << "osd_inject_failure_on_pg_removal" << dendl;
exit(1);
}
(*pm)["hb_back_addr"] = stringify(hb_back_server_messenger->get_myaddr());
// backend
- (*pm)["osd_objectstore"] = g_conf->osd_objectstore;
+ (*pm)["osd_objectstore"] = cct->_conf->osd_objectstore;
store->collect_metadata(pm);
- collect_sys_info(pm, g_ceph_context);
+ collect_sys_info(pm, cct);
dout(10) << __func__ << " " << *pm << dendl;
}
// decay timeout slowly (analogous to TCP)
stats_ack_timeout =
- MAX(g_conf->osd_mon_ack_timeout,
- stats_ack_timeout * g_conf->osd_stats_ack_timeout_decay);
+ MAX(cct->_conf->osd_mon_ack_timeout,
+ stats_ack_timeout * cct->_conf->osd_stats_ack_timeout_decay);
dout(20) << __func__ << " timeout now " << stats_ack_timeout << dendl;
if (ack->get_tid() > pg_stat_tid_flushed) {
ceph::shared_ptr<ObjectStore::Sequencer> osr (std::make_shared<
ObjectStore::Sequencer>("bench"));
- uint32_t duration = g_conf->osd_bench_duration;
+ uint32_t duration = cct->_conf->osd_bench_duration;
- if (bsize > (int64_t) g_conf->osd_bench_max_block_size) {
+ if (bsize > (int64_t) cct->_conf->osd_bench_max_block_size) {
// let us limit the block size because the next checks rely on it
// having a sane value. If we allow any block size to be set things
// can still go sideways.
ss << "block 'size' values are capped at "
- << prettybyte_t(g_conf->osd_bench_max_block_size) << ". If you wish to use"
+ << prettybyte_t(cct->_conf->osd_bench_max_block_size) << ". If you wish to use"
<< " a higher value, please adjust 'osd_bench_max_block_size'";
r = -EINVAL;
goto out;
// IOPS and duration, so that the OSD doesn't get hung up on this,
// preventing timeouts from going off
int64_t max_count =
- bsize * duration * g_conf->osd_bench_small_size_max_iops;
+ bsize * duration * cct->_conf->osd_bench_small_size_max_iops;
if (count > max_count) {
ss << "'count' values greater than " << max_count
<< " for a block size of " << prettybyte_t(bsize) << ", assuming "
- << g_conf->osd_bench_small_size_max_iops << " IOPS,"
+ << cct->_conf->osd_bench_small_size_max_iops << " IOPS,"
<< " for " << duration << " seconds,"
<< " can cause ill effects on osd. "
<< " Please adjust 'osd_bench_small_size_max_iops' with a higher"
// way too big. Given we already check how big the block size
// is, it's safe to assume everything will check out.
int64_t max_count =
- g_conf->osd_bench_large_size_max_throughput * duration;
+ cct->_conf->osd_bench_large_size_max_throughput * duration;
if (count > max_count) {
ss << "'count' values greater than " << max_count
<< " for a block size of " << prettybyte_t(bsize) << ", assuming "
- << prettybyte_t(g_conf->osd_bench_large_size_max_throughput) << "/s,"
+ << prettybyte_t(cct->_conf->osd_bench_large_size_max_throughput) << "/s,"
<< " for " << duration << " seconds,"
<< " can cause ill effects on osd. "
<< " Please adjust 'osd_bench_large_size_max_throughput'"
return false;
}
-OSDService::ScrubJob::ScrubJob(const spg_t& pg, const utime_t& timestamp,
+OSDService::ScrubJob::ScrubJob(CephContext* cct,
+ const spg_t& pg, const utime_t& timestamp,
double pool_scrub_min_interval,
double pool_scrub_max_interval, bool must)
- : pgid(pg),
+ : cct(cct),
+ pgid(pg),
sched_time(timestamp),
deadline(timestamp)
{
// if not explicitly requested, postpone the scrub with a random delay
if (!must) {
double scrub_min_interval = pool_scrub_min_interval > 0 ?
- pool_scrub_min_interval : g_conf->osd_scrub_min_interval;
+ pool_scrub_min_interval : cct->_conf->osd_scrub_min_interval;
double scrub_max_interval = pool_scrub_max_interval > 0 ?
- pool_scrub_max_interval : g_conf->osd_scrub_max_interval;
+ pool_scrub_max_interval : cct->_conf->osd_scrub_max_interval;
sched_time += scrub_min_interval;
double r = rand() / (double)RAND_MAX;
sched_time +=
- scrub_min_interval * g_conf->osd_scrub_interval_randomize_ratio * r;
+ scrub_min_interval * cct->_conf->osd_scrub_interval_randomize_ratio * r;
deadline += scrub_max_interval;
}
}
o->encode(fbl, inc.encode_features | CEPH_FEATURE_RESERVED);
bool injected_failure = false;
- if (g_conf->osd_inject_bad_map_crc_probability > 0 &&
- (rand() % 10000) < g_conf->osd_inject_bad_map_crc_probability*10000.0) {
+ if (cct->_conf->osd_inject_bad_map_crc_probability > 0 &&
+ (rand() % 10000) < cct->_conf->osd_inject_bad_map_crc_probability*10000.0) {
derr << __func__ << " injecting map crc failure" << dendl;
injected_failure = true;
}
//add markdown log
utime_t now = ceph_clock_now();
- utime_t grace = utime_t(g_conf->osd_max_markdown_period, 0);
+ utime_t grace = utime_t(cct->_conf->osd_max_markdown_period, 0);
osd_markdown_log.push_back(now);
//clear all out-of-date log
while (!osd_markdown_log.empty() &&
osd_markdown_log.front() + grace < now)
osd_markdown_log.pop_front();
- if ((int)osd_markdown_log.size() > g_conf->osd_max_markdown_count) {
+ if ((int)osd_markdown_log.size() > cct->_conf->osd_max_markdown_count) {
dout(0) << __func__ << " marked down "
<< osd_markdown_log.size()
<< " > osd_max_markdown_count "
- << g_conf->osd_max_markdown_count
+ << cct->_conf->osd_max_markdown_count
<< " in last " << grace << " seconds, shutting down"
<< dendl;
do_restart = false;
epoch_t min_epoch = service.get_min_pg_epoch();
epoch_t max;
if (min_epoch) {
- max = min_epoch + g_conf->osd_map_max_advance;
+ max = min_epoch + cct->_conf->osd_map_max_advance;
} else {
- max = next_epoch + g_conf->osd_map_max_advance;
+ max = next_epoch + cct->_conf->osd_map_max_advance;
}
for (;
dout(20) << __func__ << " missing map " << next_epoch << dendl;
// make sure max is bumped up so that we can get past any
// gap in maps
- max = MAX(max, next_epoch + g_conf->osd_map_max_advance);
+ max = MAX(max, next_epoch + cct->_conf->osd_map_max_advance);
continue;
}
ThreadPool::TPHandle &handle)
{
uint64_t started = 0;
- if (g_conf->osd_recovery_sleep > 0) {
+ if (cct->_conf->osd_recovery_sleep > 0) {
handle.suspend_tp_timeout();
pg->unlock();
utime_t t;
- t.set_from_double(g_conf->osd_recovery_sleep);
+ t.set_from_double(cct->_conf->osd_recovery_sleep);
t.sleep();
dout(20) << __func__ << " slept for " << t << dendl;
pg->lock();
}
// ok, we didn't have the PG.
- if (!g_conf->osd_debug_misdirected_ops) {
+ if (!cct->_conf->osd_debug_misdirected_ops) {
return;
}
// let's see if it's our fault or the client's. note that this might
uuid_d fsid;
string host;
- if (parse_log_client_options(g_ceph_context, log_to_monitors, log_to_syslog,
+ if (parse_log_client_options(cct, log_to_monitors, log_to_syslog,
log_channel, log_prio, log_to_graylog,
log_to_graylog_host, log_to_graylog_port,
fsid, host) == 0)
void OSD::check_config()
{
// some sanity checks
- if (g_conf->osd_map_cache_size <= g_conf->osd_map_max_advance + 2) {
- clog->warn() << "osd_map_cache_size (" << g_conf->osd_map_cache_size << ")"
+ if (cct->_conf->osd_map_cache_size <= cct->_conf->osd_map_max_advance + 2) {
+ clog->warn() << "osd_map_cache_size (" << cct->_conf->osd_map_cache_size << ")"
<< " is not > osd_map_max_advance ("
- << g_conf->osd_map_max_advance << ")";
+ << cct->_conf->osd_map_max_advance << ")";
}
- if (g_conf->osd_map_cache_size <= (int)g_conf->osd_pg_epoch_persisted_max_stale + 2) {
- clog->warn() << "osd_map_cache_size (" << g_conf->osd_map_cache_size << ")"
- << " is not > osd_pg_epoch_persisted_max_stale ("
- << g_conf->osd_pg_epoch_persisted_max_stale << ")";
+ if (cct->_conf->osd_map_cache_size <= (int)cct->_conf->osd_pg_epoch_persisted_max_stale + 2) {
+ clog->warn() << "osd_map_cache_size (" << cct->_conf->osd_map_cache_size << ")"
+ << " is not > osd_pg_epoch_persisted_max_stale ("
+ << cct->_conf->osd_pg_epoch_persisted_max_stale << ")";
}
}
public:
struct ScrubJob {
+ CephContext* cct;
/// pg to be scrubbed
spg_t pgid;
/// a time scheduled for scrub. but the scrub could be delayed if system
/// the hard upper bound of scrub time
utime_t deadline;
ScrubJob() {}
- explicit ScrubJob(const spg_t& pg, const utime_t& timestamp,
+ explicit ScrubJob(CephContext* cct, const spg_t& pg,
+ const utime_t& timestamp,
double pool_scrub_min_interval = 0,
double pool_scrub_max_interval = 0, bool must = true);
/// order the jobs by sched_time
/// @returns the scrub_reg_stamp used for unregister the scrub job
utime_t reg_pg_scrub(spg_t pgid, utime_t t, double pool_scrub_min_interval,
double pool_scrub_max_interval, bool must) {
- ScrubJob scrub(pgid, t, pool_scrub_min_interval, pool_scrub_max_interval,
+ ScrubJob scrub(cct, pgid, t, pool_scrub_min_interval, pool_scrub_max_interval,
must);
Mutex::Locker l(sched_scrub_lock);
sched_scrub_pg.insert(scrub);
}
void unreg_pg_scrub(spg_t pgid, utime_t t) {
Mutex::Locker l(sched_scrub_lock);
- size_t removed = sched_scrub_pg.erase(ScrubJob(pgid, t));
+ size_t removed = sched_scrub_pg.erase(ScrubJob(cct, pgid, t));
assert(removed);
}
bool first_scrub_stamp(ScrubJob *out) {
hobject_t oid(sobject_t("infos", CEPH_NOSNAP));
return ghobject_t(oid);
}
- static void recursive_remove_collection(ObjectStore *store,
+ static void recursive_remove_collection(CephContext* cct,
+ ObjectStore *store,
spg_t pgid,
coll_t tmp);
explicit Session(CephContext *cct) :
RefCountedObject(cct),
- auid(-1), con(0),
+ auid(-1), con(0), wstate(cct),
session_dispatch_lock("Session::session_dispatch_lock"),
last_sent_epoch(0), received_map_epoch(0)
{}
// -- removing --
struct RemoveWQ :
public ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> > {
+ CephContext* cct;
ObjectStore *&store;
list<pair<PGRef, DeletingStateRef> > remove_queue;
- RemoveWQ(ObjectStore *&o, time_t ti, time_t si, ThreadPool *tp)
+ RemoveWQ(CephContext* cct, ObjectStore *&o, time_t ti, time_t si,
+ ThreadPool *tp)
: ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> >(
- "OSD::RemoveWQ", ti, si, tp),
- store(o) {}
+ "OSD::RemoveWQ", ti, si, tp), cct(cct), store(o) {}
bool _empty() override {
return remove_queue.empty();
#include <sstream>
-#define dout_context g_ceph_context
+#define dout_context cct
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
newly_removed_snaps.subtract(cached_removed_snaps);
cached_removed_snaps.union_of(newly_removed_snaps);
} else {
- lgeneric_subdout(g_ceph_context, osd, 0) << __func__
+ lgeneric_subdout(cct, osd, 0) << __func__
<< " cached_removed_snaps shrank from " << cached_removed_snaps
<< " to " << newly_removed_snaps << dendl;
cached_removed_snaps = newly_removed_snaps;
newly_removed_snaps.clear();
}
cached_epoch = map->get_epoch();
- lgeneric_subdout(g_ceph_context, osd, 20)
+ lgeneric_subdout(cct, osd, 20)
<< "PGPool::update cached_removed_snaps "
<< cached_removed_snaps
<< " newly_removed_snaps "
cct(o->cct),
osdriver(osd->store, coll_t(), OSD::make_snapmapper_oid()),
snap_mapper(
+ cct,
&osdriver,
p.ps(),
p.get_split_bits(curmap->get_pg_num(_pool.id)),
auto missing_loc_entry = missing_loc.find(hoid);
if (missing_loc_entry == missing_loc.end()) return false;
const set<pg_shard_t> &locs = missing_loc_entry->second;
- dout(10) << __func__ << ": locs:" << locs << dendl;
+ ldout(pg->cct, 10) << __func__ << ": locs:" << locs << dendl;
set<pg_shard_t> have_acting;
for (set<pg_shard_t>::const_iterator i = locs.begin();
i != locs.end();
void PG::MissingLoc::add_batch_sources_info(
const set<pg_shard_t> &sources, ThreadPool::TPHandle* handle)
{
- dout(10) << __func__ << ": adding sources in batch " << sources.size() << dendl;
+ ldout(pg->cct, 10) << __func__ << ": adding sources in batch "
+ << sources.size() << dendl;
unsigned loop = 0;
for (map<hobject_t, pg_missing_item, hobject_t::ComparatorWithDefault>::const_iterator i = needs_recovery_map.begin();
i != needs_recovery_map.end();
++i) {
- if (handle && ++loop >= g_conf->osd_loop_before_reset_tphandle) {
+ if (handle && ++loop >= pg->cct->_conf->osd_loop_before_reset_tphandle) {
handle->reset_tp_timeout();
loop = 0;
}
++p) {
const hobject_t &soid(p->first);
eversion_t need = p->second.need;
- if (handle && ++loop >= g_conf->osd_loop_before_reset_tphandle) {
+ if (handle && ++loop >= pg->cct->_conf->osd_loop_before_reset_tphandle) {
handle->reset_tp_timeout();
loop = 0;
}
if (oinfo.last_update < need) {
- dout(10) << "search_for_missing " << soid << " " << need
- << " also missing on osd." << fromosd
- << " (last_update " << oinfo.last_update << " < needed " << need << ")"
- << dendl;
+ ldout(pg->cct, 10) << "search_for_missing " << soid << " " << need
+ << " also missing on osd." << fromosd
+ << " (last_update " << oinfo.last_update
+ << " < needed " << need << ")" << dendl;
continue;
}
if (!oinfo.last_backfill.is_max() &&
oinfo.last_backfill_bitwise != sort_bitwise) {
- dout(10) << "search_for_missing " << soid << " " << need
- << " also missing on osd." << fromosd
- << " (last_backfill " << oinfo.last_backfill
- << " but with wrong sort order)"
- << dendl;
+ ldout(pg->cct, 10) << "search_for_missing " << soid << " " << need
+ << " also missing on osd." << fromosd
+ << " (last_backfill " << oinfo.last_backfill
+ << " but with wrong sort order)"
+ << dendl;
continue;
}
if (cmp(p->first, oinfo.last_backfill, sort_bitwise) >= 0) {
// FIXME: this is _probably_ true, although it could conceivably
// be in the undefined region! Hmm!
- dout(10) << "search_for_missing " << soid << " " << need
- << " also missing on osd." << fromosd
- << " (past last_backfill " << oinfo.last_backfill << ")"
- << dendl;
+ ldout(pg->cct, 10) << "search_for_missing " << soid << " " << need
+ << " also missing on osd." << fromosd
+ << " (past last_backfill " << oinfo.last_backfill
+ << ")" << dendl;
continue;
}
if (oinfo.last_complete < need) {
if (omissing.is_missing(soid)) {
- dout(10) << "search_for_missing " << soid << " " << need
- << " also missing on osd." << fromosd << dendl;
+ ldout(pg->cct, 10) << "search_for_missing " << soid << " " << need
+ << " also missing on osd." << fromosd << dendl;
continue;
}
}
- dout(10) << "search_for_missing " << soid << " " << need
- << " is on osd." << fromosd << dendl;
+ ldout(pg->cct, 10) << "search_for_missing " << soid << " " << need
+ << " is on osd." << fromosd << dendl;
missing_loc[soid].insert(fromosd);
missing_loc_sources.insert(fromosd);
found_missing = true;
}
- dout(20) << "needs_recovery_map missing " << needs_recovery_map << dendl;
+ ldout(pg->cct, 20) << "needs_recovery_map missing " << needs_recovery_map
+ << dendl;
return found_missing;
}
}
prior_set.reset(
new PriorSet(
+ cct,
pool.info.ec_pool(),
get_pgbackend()->get_is_recoverable_predicate(),
*get_osdmap(),
{
// set a max on the number of blocking peers we report. if we go
// over, report a random subset. keep the result sorted.
- unsigned keep = MIN(blocked_by.size(), g_conf->osd_max_pg_blocked_by);
+ unsigned keep = MIN(blocked_by.size(), cct->_conf->osd_max_pg_blocked_by);
unsigned skip = blocked_by.size() - keep;
info.stats.blocked_by.clear();
info.stats.blocked_by.resize(keep);
bool publish = false;
utime_t cutoff = now;
- cutoff -= g_conf->osd_pg_stat_report_interval_max;
+ cutoff -= cct->_conf->osd_pg_stat_report_interval_max;
if (pg_stats_publish_valid && info.stats == pg_stats_publish &&
info.stats.last_fresh > cutoff) {
dout(15) << "publish_stats_to_osd " << pg_stats_publish.reported_epoch
#pragma GCC diagnostic pop
#pragma GCC diagnostic warning "-Wpragmas"
-int PG::_prepare_write_info(map<string,bufferlist> *km,
+int PG::_prepare_write_info(CephContext* cct,
+ map<string,bufferlist> *km,
epoch_t epoch,
pg_info_t &info, pg_info_t &last_written_info,
map<epoch_t,pg_interval_t> &past_intervals,
unstable_stats.clear();
bool need_update_epoch = last_epoch < get_osdmap()->get_epoch();
- int ret = _prepare_write_info(km, get_osdmap()->get_epoch(),
+ int ret = _prepare_write_info(cct, km, get_osdmap()->get_epoch(),
info,
last_written_info,
past_intervals,
dirty_big_info, need_update_epoch,
- g_conf->osd_fast_info,
+ cct->_conf->osd_fast_info,
osd->logger);
assert(ret == 0);
if (need_update_epoch)
auto last = logv.rbegin();
if (is_primary() && last != logv.rend()) {
projected_log.skip_can_rollback_to_to_head();
- projected_log.trim(last->version, nullptr);
+ projected_log.trim(cct, last->version, nullptr);
}
if (transaction_applied && roll_forward_to > pg_log.get_can_rollback_to()) {
utime_t reg_stamp;
if (scrubber.must_scrub ||
- (info.stats.stats_invalid && g_conf->osd_scrub_invalid_stats)) {
+ (info.stats.stats_invalid && cct->_conf->osd_scrub_invalid_stats)) {
reg_stamp = ceph_clock_now();
} else {
reg_stamp = info.history.last_scrub_stamp;
*/
void PG::scrub(epoch_t queued, ThreadPool::TPHandle &handle)
{
- if (g_conf->osd_scrub_sleep > 0 &&
+ if (cct->_conf->osd_scrub_sleep > 0 &&
(scrubber.state == PG::Scrubber::NEW_CHUNK ||
scrubber.state == PG::Scrubber::INACTIVE)) {
dout(20) << __func__ << " state is INACTIVE|NEW_CHUNK, sleeping" << dendl;
unlock();
utime_t t;
- t.set_from_double(g_conf->osd_scrub_sleep);
+ t.set_from_double(cct->_conf->osd_scrub_sleep);
handle.suspend_tp_timeout();
t.sleep();
handle.reset_tp_timeout();
do_sort_bitwise = osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE);
if (do_sort_bitwise) {
assert(get_min_upacting_features() & CEPH_FEATURE_OSD_BITWISE_HOBJ_SORT);
- if (g_conf->osd_debug_randomize_hobject_sort_order) {
+ if (cct->_conf->osd_debug_randomize_hobject_sort_order) {
// randomly use a nibblewise sort (when we otherwise might have
// done bitwise) based on some *deterministic* function such that
// all peers/osds will agree.
bool PG::can_discard_op(OpRequestRef& op)
{
MOSDOp *m = static_cast<MOSDOp*>(op->get_req());
- if (g_conf->osd_discard_disconnected_ops && OSD::op_is_discardable(m)) {
+ if (cct->_conf->osd_discard_disconnected_ops && OSD::op_is_discardable(m)) {
dout(20) << " discard " << *m << dendl;
return true;
}
boost::statechart::result
PG::RecoveryState::Started::react(const IntervalFlush&)
{
- dout(10) << "Ending blocked outgoing recovery messages" << dendl;
+ PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "Ending blocked outgoing recovery messages" << dendl;
context< RecoveryMachine >().pg->recovery_state.end_block_outgoing();
return discard_event();
}
boost::statechart::result PG::RecoveryState::Started::react(const AdvMap& advmap)
{
- dout(10) << "Started advmap" << dendl;
PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "Started advmap" << dendl;
pg->check_full_transition(advmap.lastmap, advmap.osdmap);
if (pg->should_restart_peering(
advmap.up_primary,
advmap.newacting,
advmap.lastmap,
advmap.osdmap)) {
- dout(10) << "should_restart_peering, transitioning to Reset" << dendl;
+ ldout(pg->cct, 10) << "should_restart_peering, transitioning to Reset"
+ << dendl;
post_event(advmap);
return transit< Reset >();
}
boost::statechart::result
PG::RecoveryState::Reset::react(const IntervalFlush&)
{
- dout(10) << "Ending blocked outgoing recovery messages" << dendl;
+ PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "Ending blocked outgoing recovery messages" << dendl;
context< RecoveryMachine >().pg->recovery_state.end_block_outgoing();
return discard_event();
}
boost::statechart::result PG::RecoveryState::Reset::react(const AdvMap& advmap)
{
PG *pg = context< RecoveryMachine >().pg;
- dout(10) << "Reset advmap" << dendl;
+ ldout(pg->cct, 10) << "Reset advmap" << dendl;
// make sure we have past_intervals filled in. hopefully this will happen
// _before_ we are active.
advmap.newacting,
advmap.lastmap,
advmap.osdmap)) {
- dout(10) << "should restart peering, calling start_peering_interval again"
- << dendl;
+ ldout(pg->cct, 10) << "should restart peering, calling start_peering_interval again"
+ << dendl;
pg->start_peering_interval(
advmap.lastmap,
advmap.newup, advmap.up_primary,
PG *pg = context< RecoveryMachine >().pg;
if (pg->is_primary()) {
- dout(1) << "transitioning to Primary" << dendl;
+ ldout(pg->cct, 1) << "transitioning to Primary" << dendl;
post_event(MakePrimary());
} else { //is_stray
- dout(1) << "transitioning to Stray" << dendl;
+ ldout(pg->cct, 1) << "transitioning to Stray" << dendl;
post_event(MakeStray());
}
}
boost::statechart::result PG::RecoveryState::Primary::react(const MNotifyRec& notevt)
{
- dout(7) << "handle_pg_notify from osd." << notevt.from << dendl;
PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 7) << "handle_pg_notify from osd." << notevt.from << dendl;
if (pg->peer_info.count(notevt.from) &&
pg->peer_info[notevt.from].last_update == notevt.notify.info.last_update) {
- dout(10) << *pg << " got dup osd." << notevt.from << " info " << notevt.notify.info
- << ", identical to ours" << dendl;
+ ldout(pg->cct, 10) << *pg << " got dup osd." << notevt.from << " info "
+ << notevt.notify.info << ", identical to ours" << dendl;
} else {
pg->proc_replica_info(
notevt.from, notevt.notify.info, notevt.notify.epoch_sent);
boost::statechart::result PG::RecoveryState::Primary::react(const ActMap&)
{
- dout(7) << "handle ActMap primary" << dendl;
PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 7) << "handle ActMap primary" << dendl;
pg->publish_stats_to_osd();
pg->take_waiters();
return discard_event();
boost::statechart::result PG::RecoveryState::Peering::react(const AdvMap& advmap)
{
PG *pg = context< RecoveryMachine >().pg;
- dout(10) << "Peering advmap" << dendl;
+ ldout(pg->cct, 10) << "Peering advmap" << dendl;
if (prior_set.get()->affected_by_map(advmap.osdmap, pg)) {
- dout(1) << "Peering, affected_by_map, going to Reset" << dendl;
+ ldout(pg->cct, 1) << "Peering, affected_by_map, going to Reset" << dendl;
post_event(advmap);
return transit< Reset >();
}
void PG::RecoveryState::Peering::exit()
{
- dout(10) << "Leaving Peering" << dendl;
- context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "Leaving Peering" << dendl;
+ context< RecoveryMachine >().log_exit(state_name, enter_time);
pg->state_clear(PG_STATE_PEERING);
pg->clear_probe_targets();
context< RecoveryMachine >().log_enter(state_name);
}
-boost::statechart::result
+boost::statechart::result
PG::RecoveryState::RepNotRecovering::react(const RequestBackfillPrio &evt)
{
PG *pg = context< RecoveryMachine >().pg;
double ratio, max_ratio;
- if (g_conf->osd_debug_reject_backfill_probability > 0 &&
- (rand()%1000 < (g_conf->osd_debug_reject_backfill_probability*1000.0))) {
- dout(10) << "backfill reservation rejected: failure injection" << dendl;
+ if (pg->cct->_conf->osd_debug_reject_backfill_probability > 0 &&
+ (rand()%1000 < (pg->cct->_conf->osd_debug_reject_backfill_probability*1000.0))) {
+ ldout(pg->cct, 10) << "backfill reservation rejected: failure injection"
+ << dendl;
post_event(RemoteReservationRejected());
} else if (pg->osd->too_full_for_backfill(&ratio, &max_ratio) &&
!pg->cct->_conf->osd_debug_skip_full_check_in_backfill_reservation) {
- dout(10) << "backfill reservation rejected: full ratio is "
- << ratio << ", which is greater than max allowed ratio "
- << max_ratio << dendl;
+ ldout(pg->cct, 10) << "backfill reservation rejected: full ratio is "
+ << ratio << ", which is greater than max allowed ratio "
+ << max_ratio << dendl;
post_event(RemoteReservationRejected());
} else {
pg->osd->remote_reserver.request_reservation(
PG *pg = context< RecoveryMachine >().pg;
double ratio, max_ratio;
- if (g_conf->osd_debug_reject_backfill_probability > 0 &&
- (rand()%1000 < (g_conf->osd_debug_reject_backfill_probability*1000.0))) {
- dout(10) << "backfill reservation rejected after reservation: "
- << "failure injection" << dendl;
+ if (pg->cct->_conf->osd_debug_reject_backfill_probability > 0 &&
+ (rand()%1000 < (pg->cct->_conf->osd_debug_reject_backfill_probability*1000.0))) {
+ ldout(pg->cct, 10) << "backfill reservation rejected after reservation: "
+ << "failure injection" << dendl;
pg->osd->remote_reserver.cancel_reservation(pg->info.pgid);
post_event(RemoteReservationRejected());
return discard_event();
} else if (pg->osd->too_full_for_backfill(&ratio, &max_ratio) &&
!pg->cct->_conf->osd_debug_skip_full_check_in_backfill_reservation) {
- dout(10) << "backfill reservation rejected after reservation: full ratio is "
- << ratio << ", which is greater than max allowed ratio "
- << max_ratio << dendl;
+ ldout(pg->cct, 10) << "backfill reservation rejected after reservation: full ratio is "
+ << ratio << ", which is greater than max allowed ratio "
+ << max_ratio << dendl;
pg->osd->remote_reserver.cancel_reservation(pg->info.pgid);
post_event(RemoteReservationRejected());
return discard_event();
assert(!pg->backfill_reserving);
assert(!pg->backfill_reserved);
assert(pg->is_primary());
- dout(10) << "In Active, about to call activate" << dendl;
+ ldout(pg->cct, 10) << "In Active, about to call activate" << dendl;
pg->start_flush(
context< RecoveryMachine >().get_cur_transaction(),
context< RecoveryMachine >().get_on_applied_context_list(),
}
}
pg->publish_stats_to_osd();
- dout(10) << "Activate Finished" << dendl;
+ ldout(pg->cct, 10) << "Activate Finished" << dendl;
}
boost::statechart::result PG::RecoveryState::Active::react(const AdvMap& advmap)
{
PG *pg = context< RecoveryMachine >().pg;
- dout(10) << "Active advmap" << dendl;
+ ldout(pg->cct, 10) << "Active advmap" << dendl;
if (!pg->pool.newly_removed_snaps.empty()) {
pg->snap_trimq.union_of(pg->pool.newly_removed_snaps);
- dout(10) << *pg << " snap_trimq now " << pg->snap_trimq << dendl;
+ ldout(pg->cct, 10) << *pg << " snap_trimq now " << pg->snap_trimq << dendl;
pg->dirty_info = true;
pg->dirty_big_info = true;
}
// if we haven't reported our PG stats in a long time, do so now.
if (pg->info.stats.reported_epoch + pg->cct->_conf->osd_pg_stat_report_interval_max < advmap.osdmap->get_epoch()) {
- dout(20) << "reporting stats to osd after " << (advmap.osdmap->get_epoch() - pg->info.stats.reported_epoch)
- << " epochs" << dendl;
+ ldout(pg->cct, 20) << "reporting stats to osd after " << (advmap.osdmap->get_epoch() - pg->info.stats.reported_epoch)
+ << " epochs" << dendl;
need_publish = true;
}
boost::statechart::result PG::RecoveryState::Active::react(const ActMap&)
{
PG *pg = context< RecoveryMachine >().pg;
- dout(10) << "Active: handling ActMap" << dendl;
+ ldout(pg->cct, 10) << "Active: handling ActMap" << dendl;
assert(pg->is_primary());
if (pg->have_unfound()) {
}
if (pg->is_active()) {
- dout(10) << "Active: kicking snap trim" << dendl;
+ ldout(pg->cct, 10) << "Active: kicking snap trim" << dendl;
pg->kick_snap_trim();
}
PG *pg = context< RecoveryMachine >().pg;
assert(pg->is_primary());
if (pg->peer_info.count(notevt.from)) {
- dout(10) << "Active: got notify from " << notevt.from
- << ", already have info from that osd, ignoring"
- << dendl;
+ ldout(pg->cct, 10) << "Active: got notify from " << notevt.from
+ << ", already have info from that osd, ignoring"
+ << dendl;
} else if (pg->peer_purged.count(notevt.from)) {
- dout(10) << "Active: got notify from " << notevt.from
- << ", already purged that peer, ignoring"
- << dendl;
+ ldout(pg->cct, 10) << "Active: got notify from " << notevt.from
+ << ", already purged that peer, ignoring"
+ << dendl;
} else {
- dout(10) << "Active: got notify from " << notevt.from
- << ", calling proc_replica_info and discover_all_missing"
- << dendl;
+ ldout(pg->cct, 10) << "Active: got notify from " << notevt.from
+ << ", calling proc_replica_info and discover_all_missing"
+ << dendl;
pg->proc_replica_info(
notevt.from, notevt.notify.info, notevt.notify.epoch_sent);
if (pg->have_unfound()) {
// may be telling us they have activated (and committed) but we can't
// share that until _everyone_ does the same.
if (pg->is_actingbackfill(infoevt.from)) {
- dout(10) << " peer osd." << infoevt.from << " activated and committed"
- << dendl;
+ ldout(pg->cct, 10) << " peer osd." << infoevt.from
+ << " activated and committed" << dendl;
pg->peer_activated.insert(infoevt.from);
pg->blocked_by.erase(infoevt.from.shard);
pg->publish_stats_to_osd();
boost::statechart::result PG::RecoveryState::Active::react(const MLogRec& logevt)
{
- dout(10) << "searching osd." << logevt.from
- << " log for unfound items" << dendl;
PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "searching osd." << logevt.from
+ << " log for unfound items" << dendl;
pg->proc_replica_log(
*context<RecoveryMachine>().get_cur_transaction(),
logevt.msg->info, logevt.msg->log, logevt.msg->missing, logevt.from);
boost::statechart::result PG::RecoveryState::ReplicaActive::react(
const Activate& actevt) {
- dout(10) << "In ReplicaActive, about to call activate" << dendl;
PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "In ReplicaActive, about to call activate" << dendl;
map<int, map<spg_t, pg_query_t> > query_map;
pg->activate(*context< RecoveryMachine >().get_cur_transaction(),
actevt.activation_epoch,
*context< RecoveryMachine >().get_on_safe_context_list(),
query_map, NULL, NULL);
- dout(10) << "Activate Finished" << dendl;
+ ldout(pg->cct, 10) << "Activate Finished" << dendl;
return discard_event();
}
boost::statechart::result PG::RecoveryState::ReplicaActive::react(const MLogRec& logevt)
{
PG *pg = context< RecoveryMachine >().pg;
- dout(10) << "received log from " << logevt.from << dendl;
+ ldout(pg->cct, 10) << "received log from " << logevt.from << dendl;
ObjectStore::Transaction* t = context<RecoveryMachine>().get_cur_transaction();
pg->merge_log(*t,logevt.msg->info, logevt.msg->log, logevt.from);
assert(pg->pg_log.get_head() == pg->info.last_update);
{
PG *pg = context< RecoveryMachine >().pg;
MOSDPGLog *msg = logevt.msg.get();
- dout(10) << "got info+log from osd." << logevt.from << " " << msg->info << " " << msg->log << dendl;
+ ldout(pg->cct, 10) << "got info+log from osd." << logevt.from << " " << msg->info << " " << msg->log << dendl;
ObjectStore::Transaction* t = context<RecoveryMachine>().get_cur_transaction();
if (msg->info.last_backfill == hobject_t()) {
boost::statechart::result PG::RecoveryState::Stray::react(const MInfoRec& infoevt)
{
PG *pg = context< RecoveryMachine >().pg;
- dout(10) << "got info from osd." << infoevt.from << " " << infoevt.info << dendl;
+ ldout(pg->cct, 10) << "got info from osd." << infoevt.from << " " << infoevt.info << dendl;
if (pg->info.last_update > infoevt.info.last_update) {
// rewind divergent log entries
continue;
}
if (pg->peer_info.count(peer)) {
- dout(10) << " have osd." << peer << " info " << pg->peer_info[peer] << dendl;
+ ldout(pg->cct, 10) << " have osd." << peer << " info " << pg->peer_info[peer] << dendl;
continue;
}
if (peer_info_requested.count(peer)) {
- dout(10) << " already requested info from osd." << peer << dendl;
+ ldout(pg->cct, 10) << " already requested info from osd." << peer << dendl;
pg->blocked_by.insert(peer.osd);
} else if (!pg->get_osdmap()->is_up(peer.osd)) {
- dout(10) << " not querying info from down osd." << peer << dendl;
+ ldout(pg->cct, 10) << " not querying info from down osd." << peer << dendl;
} else {
- dout(10) << " querying info from osd." << peer << dendl;
+ ldout(pg->cct, 10) << " querying info from osd." << peer << dendl;
context< RecoveryMachine >().send_query(
peer, pg_query_t(pg_query_t::INFO,
it->shard, pg->pg_whoami.shard,
// we got something new ...
unique_ptr<PriorSet> &prior_set = context< Peering >().prior_set;
if (old_start < pg->info.history.last_epoch_started) {
- dout(10) << " last_epoch_started moved forward, rebuilding prior" << dendl;
+ ldout(pg->cct, 10) << " last_epoch_started moved forward, rebuilding prior" << dendl;
pg->build_prior(prior_set);
// filter out any osds that got dropped from the probe set from
set<pg_shard_t>::iterator p = peer_info_requested.begin();
while (p != peer_info_requested.end()) {
if (prior_set->probe.count(*p) == 0) {
- dout(20) << " dropping osd." << *p << " from info_requested, no longer in probe set" << dendl;
+ ldout(pg->cct, 20) << " dropping osd." << *p << " from info_requested, no longer in probe set" << dendl;
peer_info_requested.erase(p++);
} else {
++p;
}
get_infos();
}
- dout(20) << "Adding osd: " << infoevt.from.osd << " peer features: "
- << hex << infoevt.features << dec << dendl;
+ ldout(pg->cct, 20) << "Adding osd: " << infoevt.from.osd << " peer features: "
+ << hex << infoevt.features << dec << dendl;
pg->apply_peer_features(infoevt.features);
// are we done getting everything?
if (!p->second.maybe_went_rw)
continue;
pg_interval_t& interval = p->second;
- dout(10) << " last maybe_went_rw interval was " << interval << dendl;
+ ldout(pg->cct, 10) << " last maybe_went_rw interval was " << interval << dendl;
OSDMapRef osdmap = pg->get_osdmap();
/*
}
}
if (!any_up_complete_now && any_down_now) {
- dout(10) << " no osds up+complete from interval " << interval << dendl;
+ ldout(pg->cct, 10) << " no osds up+complete from interval " << interval << dendl;
post_event(IsDown());
return discard_event();
}
break;
}
}
- dout(20) << "Common peer features: " << hex << pg->get_min_peer_features() << dec << dendl;
- dout(20) << "Common acting features: " << hex << pg->get_min_acting_features() << dec << dendl;
- dout(20) << "Common upacting features: " << hex << pg->get_min_upacting_features() << dec << dendl;
+ ldout(pg->cct, 20) << "Common peer features: " << hex << pg->get_min_peer_features() << dec << dendl;
+ ldout(pg->cct, 20) << "Common acting features: " << hex << pg->get_min_acting_features() << dec << dendl;
+ ldout(pg->cct, 20) << "Common upacting features: " << hex << pg->get_min_upacting_features() << dec << dendl;
post_event(GotInfo());
}
}
// am i broken?
if (pg->info.last_update < best.log_tail) {
- dout(10) << " not contiguous with osd." << auth_log_shard << ", down" << dendl;
+ ldout(pg->cct, 10) << " not contiguous with osd." << auth_log_shard << ", down" << dendl;
post_event(IsIncomplete());
return;
}
}
// how much?
- dout(10) << " requesting log from osd." << auth_log_shard << dendl;
+ ldout(pg->cct, 10) << " requesting log from osd." << auth_log_shard << dendl;
context<RecoveryMachine>().send_query(
auth_log_shard,
pg_query_t(
boost::statechart::result PG::RecoveryState::GetLog::react(const AdvMap& advmap)
{
+ PG *pg = context< RecoveryMachine >().pg;
// make sure our log source didn't go down. we need to check
// explicitly because it may not be part of the prior set, which
// means the Peering state check won't catch it going down.
if (!advmap.osdmap->is_up(auth_log_shard.osd)) {
- dout(10) << "GetLog: auth_log_shard osd."
- << auth_log_shard.osd << " went down" << dendl;
+ ldout(pg->cct, 10) << "GetLog: auth_log_shard osd."
+ << auth_log_shard.osd << " went down" << dendl;
post_event(advmap);
return transit< Reset >();
}
boost::statechart::result PG::RecoveryState::GetLog::react(const MLogRec& logevt)
{
+ PG *pg = context< RecoveryMachine >().pg;
assert(!msg);
if (logevt.from != auth_log_shard) {
- dout(10) << "GetLog: discarding log from "
- << "non-auth_log_shard osd." << logevt.from << dendl;
+ ldout(pg->cct, 10) << "GetLog: discarding log from "
+ << "non-auth_log_shard osd." << logevt.from << dendl;
return discard_event();
}
- dout(10) << "GetLog: received master log from osd"
- << logevt.from << dendl;
+ ldout(pg->cct, 10) << "GetLog: received master log from osd"
+ << logevt.from << dendl;
msg = logevt.msg;
post_event(GotLog());
return discard_event();
boost::statechart::result PG::RecoveryState::GetLog::react(const GotLog&)
{
- dout(10) << "leaving GetLog" << dendl;
PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "leaving GetLog" << dendl;
if (msg) {
- dout(10) << "processing master log" << dendl;
+ ldout(pg->cct, 10) << "processing master log" << dendl;
pg->proc_master_log(*context<RecoveryMachine>().get_cur_transaction(),
- msg->info, msg->log, msg->missing,
+ msg->info, msg->log, msg->missing,
auth_log_shard);
}
pg->start_flush(
PG *pg = context< RecoveryMachine >().pg;
OSDMapRef osdmap = advmap.osdmap;
- dout(10) << "verifying no want_acting " << pg->want_acting << " targets didn't go down" << dendl;
+ ldout(pg->cct, 10) << "verifying no want_acting " << pg->want_acting << " targets didn't go down" << dendl;
for (vector<int>::iterator p = pg->want_acting.begin(); p != pg->want_acting.end(); ++p) {
if (!osdmap->is_up(*p)) {
- dout(10) << " want_acting target osd." << *p << " went down, resetting" << dendl;
+ ldout(pg->cct, 10) << " want_acting target osd." << *p << " went down, resetting" << dendl;
post_event(advmap);
return transit< Reset >();
}
boost::statechart::result PG::RecoveryState::WaitActingChange::react(const MLogRec& logevt)
{
- dout(10) << "In WaitActingChange, ignoring MLocRec" << dendl;
+ PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "In WaitActingChange, ignoring MLogRec" << dendl;
return discard_event();
}
boost::statechart::result PG::RecoveryState::WaitActingChange::react(const MInfoRec& evt)
{
- dout(10) << "In WaitActingChange, ignoring MInfoRec" << dendl;
+ PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "In WaitActingChange, ignoring MInfoRec" << dendl;
return discard_event();
}
boost::statechart::result PG::RecoveryState::WaitActingChange::react(const MNotifyRec& evt)
{
- dout(10) << "In WaitActingChange, ignoring MNotifyRec" << dendl;
+ PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "In WaitActingChange, ignoring MNotifyRec" << dendl;
return discard_event();
}
}
boost::statechart::result PG::RecoveryState::Incomplete::react(const MNotifyRec& notevt) {
- dout(7) << "handle_pg_notify from osd." << notevt.from << dendl;
PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 7) << "handle_pg_notify from osd." << notevt.from << dendl;
if (pg->peer_info.count(notevt.from) &&
pg->peer_info[notevt.from].last_update == notevt.notify.info.last_update) {
- dout(10) << *pg << " got dup osd." << notevt.from << " info " << notevt.notify.info
- << ", identical to ours" << dendl;
+ ldout(pg->cct, 10) << *pg << " got dup osd." << notevt.from << " info " << notevt.notify.info
+ << ", identical to ours" << dendl;
return discard_event();
} else {
pg->proc_replica_info(
continue; // no pg data, nothing divergent
if (pi.last_update < pg->pg_log.get_tail()) {
- dout(10) << " osd." << *i << " is not contiguous, will restart backfill" << dendl;
+ ldout(pg->cct, 10) << " osd." << *i << " is not contiguous, will restart backfill" << dendl;
pg->peer_missing[*i];
continue;
}
if (pi.last_backfill == hobject_t()) {
- dout(10) << " osd." << *i << " will fully backfill; can infer empty missing set" << dendl;
+ ldout(pg->cct, 10) << " osd." << *i << " will fully backfill; can infer empty missing set" << dendl;
pg->peer_missing[*i];
continue;
}
// pull anything.
// FIXME: we can do better here. if last_update==last_complete we
// can infer the rest!
- dout(10) << " osd." << *i << " has no missing, identical log" << dendl;
+ ldout(pg->cct, 10) << " osd." << *i << " has no missing, identical log" << dendl;
pg->peer_missing[*i];
continue;
}
eversion_t since(pi.last_epoch_started, 0);
assert(pi.last_update >= pg->info.log_tail); // or else choose_acting() did a bad thing
if (pi.log_tail <= since) {
- dout(10) << " requesting log+missing since " << since << " from osd." << *i << dendl;
+ ldout(pg->cct, 10) << " requesting log+missing since " << since << " from osd." << *i << dendl;
context< RecoveryMachine >().send_query(
*i,
pg_query_t(
since, pg->info.history,
pg->get_osdmap()->get_epoch()));
} else {
- dout(10) << " requesting fulllog+missing from osd." << *i
- << " (want since " << since << " < log.tail " << pi.log_tail << ")"
- << dendl;
+ ldout(pg->cct, 10) << " requesting fulllog+missing from osd." << *i
+ << " (want since " << since << " < log.tail "
+ << pi.log_tail << ")" << dendl;
context< RecoveryMachine >().send_query(
*i, pg_query_t(
pg_query_t::FULLLOG,
if (peer_missing_requested.empty()) {
if (pg->need_up_thru) {
- dout(10) << " still need up_thru update before going active" << dendl;
+ ldout(pg->cct, 10) << " still need up_thru update before going active"
+ << dendl;
post_event(NeedUpThru());
return;
}
if (peer_missing_requested.empty()) {
if (pg->need_up_thru) {
- dout(10) << " still need up_thru update before going active" << dendl;
+ ldout(pg->cct, 10) << " still need up_thru update before going active"
+ << dendl;
post_event(NeedUpThru());
} else {
- dout(10) << "Got last missing, don't need missing "
- << "posting Activate" << dendl;
+ ldout(pg->cct, 10) << "Got last missing, don't need missing "
+ << "posting Activate" << dendl;
post_event(Activate(pg->get_osdmap()->get_epoch()));
}
}
boost::statechart::result PG::RecoveryState::WaitUpThru::react(const MLogRec& logevt)
{
- dout(10) << "Noting missing from osd." << logevt.from << dendl;
PG *pg = context< RecoveryMachine >().pg;
+ ldout(pg->cct, 10) << "Noting missing from osd." << logevt.from << dendl;
pg->peer_missing[logevt.from].claim(logevt.msg->missing);
pg->peer_info[logevt.from] = logevt.msg->info;
return discard_event();
/*----RecoveryState::RecoveryMachine Methods-----*/
#undef dout_prefix
-#define dout_prefix *_dout << pg->gen_prefix()
+#define dout_prefix *_dout << pg->gen_prefix()
void PG::RecoveryState::RecoveryMachine::log_enter(const char *state_name)
{
- dout(5) << "enter " << state_name << dendl;
+ ldout(pg->cct, 5) << "enter " << state_name << dendl;
pg->osd->pg_recovery_stats.log_enter(state_name);
}
void PG::RecoveryState::RecoveryMachine::log_exit(const char *state_name, utime_t enter_time)
{
utime_t dur = ceph_clock_now() - enter_time;
- dout(5) << "exit " << state_name << " " << dur << " " << event_count << " " << event_time << dendl;
+ ldout(pg->cct, 5) << "exit " << state_name << " " << dur << " " << event_count << " " << event_time << dendl;
pg->osd->pg_recovery_stats.log_exit(state_name, ceph_clock_now() - enter_time,
event_count, event_time);
event_count = 0;
#undef dout_prefix
#define dout_prefix (*_dout << (debug_pg ? debug_pg->gen_prefix() : string()) << " PriorSet: ")
-PG::PriorSet::PriorSet(bool ec_pool,
+PG::PriorSet::PriorSet(CephContext* cct,
+ bool ec_pool,
IsPGRecoverablePredicate *c,
const OSDMap &osdmap,
const map<epoch_t, pg_interval_t> &past_intervals,
const vector<int> &acting,
const pg_info_t &info,
const PG *debug_pg)
- : ec_pool(ec_pool), pg_down(false), pcontdec(c)
+ : cct(cct), ec_pool(ec_pool), pg_down(false), pcontdec(c)
{
/*
* We have to be careful to gracefully deal with situations like
};
struct PGPool {
+ CephContext* cct;
epoch_t cached_epoch;
int64_t id;
string name;
interval_set<snapid_t> cached_removed_snaps; // current removed_snaps set
interval_set<snapid_t> newly_removed_snaps; // newly removed in the last epoch
- PGPool(OSDMapRef map, int64_t i)
- : cached_epoch(map->get_epoch()),
+ PGPool(CephContext* cct, OSDMapRef map, int64_t i)
+ : cct(cct),
+ cached_epoch(map->get_epoch()),
id(i),
name(map->get_pool_name(id)),
auid(map->get_pg_pool(id)->auid) {
// [primary only] content recovery state
protected:
struct PriorSet {
+ CephContext* cct;
const bool ec_pool;
set<pg_shard_t> probe; /// current+prior OSDs we need to probe.
set<int> down; /// down osds that would normally be in @a probe and might be interesting.
bool pg_down; /// some down osds are included in @a cur; the DOWN pg state bit should be set.
boost::scoped_ptr<IsPGRecoverablePredicate> pcontdec;
- PriorSet(bool ec_pool,
+ PriorSet(CephContext* cct,
+ bool ec_pool,
IsPGRecoverablePredicate *c,
const OSDMap &osdmap,
const map<epoch_t, pg_interval_t> &past_intervals,
const vector<int> &up,
const vector<int> &acting,
const pg_info_t &info,
- const PG *debug_pg=NULL);
+ const PG *debug_pg = nullptr);
bool affected_by_map(const OSDMapRef osdmap, const PG *debug_pg=0) const;
};
public:
static int _prepare_write_info(
+ CephContext* cct,
map<string,bufferlist> *km,
epoch_t epoch,
pg_info_t &info,
bool dirty_big_info,
bool dirty_epoch,
bool try_fast_info,
- PerfCounters *logger = NULL);
+ PerfCounters *logger = nullptr);
void write_if_dirty(ObjectStore::Transaction& t);
PGLog::IndexedLog projected_log;
#include "PGLog.h"
#include "common/LogClient.h"
-#define dout_context g_ceph_context
+#define dout_context cct
#define dout_subsys ceph_subsys_osd
#define DOUT_PREFIX_ARGS this
#undef dout_prefix
stringstream ss;
ceph::ErasureCodePluginRegistry::instance().factory(
profile.find("plugin")->second,
- g_conf->erasure_code_dir,
+ cct->_conf->erasure_code_dir,
profile,
&ec_impl,
&ss);
update = MAYBE;
}
if (auth_object.digest_present && auth_object.omap_digest_present &&
- g_conf->osd_debug_scrub_chance_rewrite_digest &&
+ cct->_conf->osd_debug_scrub_chance_rewrite_digest &&
(((unsigned)rand() % 100) >
- g_conf->osd_debug_scrub_chance_rewrite_digest)) {
+ cct->_conf->osd_debug_scrub_chance_rewrite_digest)) {
dout(20) << __func__ << " randomly updating digest on " << *k << dendl;
update = MAYBE;
}
if (update != NO) {
utime_t age = now - auth_oi.local_mtime;
if (update == FORCE ||
- age > g_conf->osd_deep_scrub_update_digest_min_age) {
+ age > cct->_conf->osd_deep_scrub_update_digest_min_age) {
dout(20) << __func__ << " will update digest on " << *k << dendl;
missing_digest[*k] = make_pair(auth_object.digest,
auth_object.omap_digest);
} else {
dout(20) << __func__ << " missing digest but age " << age
- << " < " << g_conf->osd_deep_scrub_update_digest_min_age
+ << " < " << cct->_conf->osd_deep_scrub_update_digest_min_age
<< " on " << *k << dendl;
}
}
* 4) Handling scrub, deep-scrub, repair
*/
class PGBackend {
+ public:
+ CephContext* cct;
protected:
ObjectStore *store;
const coll_t coll;
};
Listener *parent;
Listener *get_parent() const { return parent; }
- PGBackend(Listener *l, ObjectStore *store, coll_t coll,
+ PGBackend(CephContext* cct, Listener *l, ObjectStore *store, coll_t coll,
ObjectStore::CollectionHandle &ch) :
+ cct(cct),
store(store),
coll(coll),
ch(ch),
#include "include/unordered_map.h"
#include "common/ceph_context.h"
-#define dout_context g_ceph_context
+#define dout_context cct
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
}
void PGLog::IndexedLog::trim(
+ CephContext* cct,
eversion_t s,
set<eversion_t> *trimmed)
{
assert(trim_to <= info.last_complete);
dout(10) << "trim " << log << " to " << trim_to << dendl;
- log.trim(trim_to, &trimmed);
+ log.trim(cct, trim_to, &trimmed);
info.log_tail = log.tail;
}
}
#define PGLOG_INDEXED_EXTRA_CALLER_OPS (1 << 2)
#define PGLOG_INDEXED_ALL (PGLOG_INDEXED_OBJECTS | PGLOG_INDEXED_CALLER_OPS | PGLOG_INDEXED_EXTRA_CALLER_OPS)
-#define dout_context g_ceph_context
-
class CephContext;
struct PGLog : DoutPrefixProvider {
}
void trim(
+ CephContext* cct,
eversion_t s,
set<eversion_t> *trimmed);
continue;
if (i.second.need > log.tail ||
cmp(i.first, info.last_backfill, info.last_backfill_bitwise) > 0) {
- derr << __func__ << ": invalid missing set entry found "
- << i.first
- << dendl;
+ lderr(dpp->get_cct()) << __func__ << ": invalid missing set entry found "
+ << i.first
+ << dendl;
assert(0 == "invalid missing set entry found");
}
bufferlist bv;
}
};
-#undef dout_context
-
#endif // CEPH_PG_LOG_H
#define tracepoint(...)
#endif
-#define dout_context g_ceph_context
+#define dout_context cct
#define dout_subsys ceph_subsys_osd
#define DOUT_PREFIX_ARGS this, osd->whoami, get_osdmap()
#undef dout_prefix
class PGLSParentFilter : public PGLSFilter {
inodeno_t parent_ino;
public:
- PGLSParentFilter() {
+ CephContext* cct;
+ PGLSParentFilter(CephContext* cct) : cct(cct) {
xattr = "_parent";
}
virtual int init(bufferlist::iterator ¶ms)
}
if (type.compare("parent") == 0) {
- filter = new PGLSParentFilter();
+ filter = new PGLSParentFilter(cct);
} else if (type.compare("plain") == 0) {
filter = new PGLSPlainFilter();
} else {
pgbackend(
PGBackend::build_pg_backend(
_pool.info, curmap, this, coll_t(p), ch, o->store, cct)),
- object_contexts(o->cct, g_conf->osd_pg_object_context_cache_count),
+ object_contexts(o->cct, o->cct->_conf->osd_pg_object_context_cache_count),
snapset_contexts_lock("PrimaryLogPG::snapset_contexts_lock"),
backfills_in_flight(hobject_t::Comparator(true)),
pending_backfill_updates(hobject_t::Comparator(true)),
info.pgid.pool(), m->get_object_locator().nspace);
// object name too long?
- if (m->get_oid().name.size() > g_conf->osd_max_object_name_len) {
+ if (m->get_oid().name.size() > cct->_conf->osd_max_object_name_len) {
dout(4) << "do_op name is longer than "
- << g_conf->osd_max_object_name_len
+ << cct->_conf->osd_max_object_name_len
<< " bytes" << dendl;
osd->reply_op_error(op, -ENAMETOOLONG);
return;
}
- if (m->get_object_locator().key.size() > g_conf->osd_max_object_name_len) {
+ if (m->get_object_locator().key.size() >
+ cct->_conf->osd_max_object_name_len) {
dout(4) << "do_op locator is longer than "
- << g_conf->osd_max_object_name_len
+ << cct->_conf->osd_max_object_name_len
<< " bytes" << dendl;
osd->reply_op_error(op, -ENAMETOOLONG);
return;
}
if (m->get_object_locator().nspace.size() >
- g_conf->osd_max_object_namespace_len) {
+ cct->_conf->osd_max_object_namespace_len) {
dout(4) << "do_op namespace is longer than "
- << g_conf->osd_max_object_namespace_len
+ << cct->_conf->osd_max_object_namespace_len
<< " bytes" << dendl;
osd->reply_op_error(op, -ENAMETOOLONG);
return;
if (deleting || pg_has_reset_since(queued)) {
return;
}
- if (g_conf->osd_snap_trim_sleep > 0) {
+ if (cct->_conf->osd_snap_trim_sleep > 0) {
unlock();
utime_t t;
- t.set_from_double(g_conf->osd_snap_trim_sleep);
+ t.set_from_double(cct->_conf->osd_snap_trim_sleep);
t.sleep();
lock();
dout(20) << __func__ << " slept for " << t << dendl;
tracepoint(osd, do_osd_op_pre_omapgetkeys, soid.oid.name.c_str(), soid.snap.val, "???", 0);
goto fail;
}
- if (max_return > g_conf->osd_max_omap_entries_per_request) {
- max_return = g_conf->osd_max_omap_entries_per_request;
+ if (max_return > cct->_conf->osd_max_omap_entries_per_request) {
+ max_return = cct->_conf->osd_max_omap_entries_per_request;
}
tracepoint(osd, do_osd_op_pre_omapgetkeys, soid.oid.name.c_str(), soid.snap.val, start_after.c_str(), max_return);
iter->upper_bound(start_after);
for (num = 0;
num < max_return &&
- bl.length() < g_conf->osd_max_omap_bytes_per_request &&
+ bl.length() < cct->_conf->osd_max_omap_bytes_per_request &&
iter->valid();
++num, iter->next(false)) {
::encode(iter->key(), bl);
tracepoint(osd, do_osd_op_pre_omapgetvals, soid.oid.name.c_str(), soid.snap.val, "???", 0, "???");
goto fail;
}
- if (max_return > g_conf->osd_max_omap_entries_per_request) {
- max_return = g_conf->osd_max_omap_entries_per_request;
+ if (max_return > cct->_conf->osd_max_omap_entries_per_request) {
+ max_return = cct->_conf->osd_max_omap_entries_per_request;
}
tracepoint(osd, do_osd_op_pre_omapgetvals, soid.oid.name.c_str(), soid.snap.val, start_after.c_str(), max_return, filter_prefix.c_str());
if (filter_prefix > start_after) iter->lower_bound(filter_prefix);
for (num = 0;
num < max_return &&
- bl.length() < g_conf->osd_max_omap_bytes_per_request &&
+ bl.length() < cct->_conf->osd_max_omap_bytes_per_request &&
iter->valid() &&
iter->key().substr(0, filter_prefix.size()) == filter_prefix;
++num, iter->next(false)) {
goto fail;
}
tracepoint(osd, do_osd_op_pre_omapsetvals, soid.oid.name.c_str(), soid.snap.val);
- if (g_ceph_context->_conf->subsys.should_gather(dout_subsys, 20)) {
+ if (cct->_conf->subsys.should_gather(dout_subsys, 20)) {
dout(20) << "setting vals: " << dendl;
map<string,bufferlist> to_set;
bufferlist::iterator pt = to_set_bl.begin();
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_RWORDERED)
flags |= CEPH_OSD_FLAG_RWORDERED;
- C_GatherBuilder gather(g_ceph_context);
+ C_GatherBuilder gather(cct);
if (cop->cursor.is_initial() && cop->mirror_snapset) {
// list snaps too.
r = -EIO;
goto out;
}
- if (g_conf->osd_debug_inject_copyfrom_error) {
+ if (cct->_conf->osd_debug_inject_copyfrom_error) {
derr << __func__ << " injecting copyfrom failure" << dendl;
r = -EIO;
goto out;
++p;
continue;
}
- dout(10) << "check_recovery_sources source osd." << *p << " now down" << dendl;
+ ldout(pg->cct, 10) << "check_recovery_sources source osd." << *p << " now down" << dendl;
now_down.insert(*p);
missing_loc_sources.erase(p++);
}
if (now_down.empty()) {
- dout(10) << "check_recovery_sources no source osds (" << missing_loc_sources << ") went down" << dendl;
+ ldout(pg->cct, 10) << "check_recovery_sources no source osds (" << missing_loc_sources << ") went down" << dendl;
} else {
- dout(10) << "check_recovery_sources sources osds " << now_down << " now down, remaining sources are "
- << missing_loc_sources << dendl;
+ ldout(pg->cct, 10) << "check_recovery_sources sources osds " << now_down << " now down, remaining sources are "
+ << missing_loc_sources << dendl;
// filter missing_loc
map<hobject_t, set<pg_shard_t>, hobject_t::BitwiseComparator>::iterator p = missing_loc.begin();
p->target_size = (double)unique * (double)pool.info.hit_set_period
/ (double)dur;
}
- if (p->target_size < static_cast<uint64_t>(g_conf->osd_hit_set_min_size))
- p->target_size = g_conf->osd_hit_set_min_size;
+ if (p->target_size <
+ static_cast<uint64_t>(cct->_conf->osd_hit_set_min_size))
+ p->target_size = cct->_conf->osd_hit_set_min_size;
- if (p->target_size > static_cast<uint64_t>(g_conf->osd_hit_set_max_size))
- p->target_size = g_conf->osd_hit_set_max_size;
+ if (p->target_size
+ > static_cast<uint64_t>(cct->_conf->osd_hit_set_max_size))
+ p->target_size = cct->_conf->osd_hit_set_max_size;
p->seed = now.sec();
}
}
- if (++agent_state->hist_age > g_conf->osd_agent_hist_halflife) {
+ if (++agent_state->hist_age > cct->_conf->osd_agent_hist_halflife) {
dout(20) << __func__ << " resetting atime and temp histograms" << dendl;
agent_state->hist_age = 0;
agent_state->temp_hist.decay();
// flush mode
uint64_t flush_target = pool.info.cache_target_dirty_ratio_micro;
uint64_t flush_high_target = pool.info.cache_target_dirty_high_ratio_micro;
- uint64_t flush_slop = (float)flush_target * g_conf->osd_agent_slop;
+ uint64_t flush_slop = (float)flush_target * cct->_conf->osd_agent_slop;
if (restart || agent_state->flush_mode == TierAgentState::FLUSH_MODE_IDLE) {
flush_target += flush_slop;
flush_high_target += flush_slop;
// evict mode
uint64_t evict_target = pool.info.cache_target_full_ratio_micro;
- uint64_t evict_slop = (float)evict_target * g_conf->osd_agent_slop;
+ uint64_t evict_slop = (float)evict_target * cct->_conf->osd_agent_slop;
if (restart || agent_state->evict_mode == TierAgentState::EVICT_MODE_IDLE)
evict_target += evict_slop;
else
uint64_t over = full_micro - evict_target;
uint64_t span = 1000000 - evict_target;
evict_effort = MAX(over * 1000000 / span,
- (unsigned)(1000000.0 * g_conf->osd_agent_min_evict_effort));
+ (unsigned)(1000000.0 * cct->_conf->osd_agent_min_evict_effort));
// quantize effort to avoid too much reordering in the agent_queue.
- uint64_t inc = g_conf->osd_agent_quantize_effort * 1000000;
+ uint64_t inc = cct->_conf->osd_agent_quantize_effort * 1000000;
assert(inc > 0);
uint64_t was = evict_effort;
evict_effort -= evict_effort % inc;
void PrimaryLogPG::SnapTrimmer::log_enter(const char *state_name)
{
- dout(20) << "enter " << state_name << dendl;
+ ldout(pg->cct, 20) << "enter " << state_name << dendl;
}
void PrimaryLogPG::SnapTrimmer::log_exit(const char *state_name, utime_t enter_time)
{
- dout(20) << "exit " << state_name << dendl;
+ ldout(pg->cct, 20) << "exit " << state_name << dendl;
}
/*---SnapTrimmer states---*/
boost::statechart::result PrimaryLogPG::NotTrimming::react(const KickTrim&)
{
PrimaryLogPG *pg = context< SnapTrimmer >().pg;
- dout(10) << "NotTrimming react KickTrim" << dendl;
+ ldout(pg->cct, 10) << "NotTrimming react KickTrim" << dendl;
assert(pg->is_primary() && pg->is_active());
if (!pg->is_clean() ||
pg->snap_trimq.empty()) {
- dout(10) << "NotTrimming not clean or nothing to trim" << dendl;
+ ldout(pg->cct, 10) << "NotTrimming not clean or nothing to trim" << dendl;
return discard_event();
}
if (pg->scrubber.active) {
- dout(10) << " scrubbing, will requeue snap_trimmer after" << dendl;
+ ldout(pg->cct, 10) << " scrubbing, will requeue snap_trimmer after" << dendl;
pg->scrubber.queue_snap_trim = true;
return transit< WaitScrub >();
} else {
context<SnapTrimmer>().snap_to_trim = pg->snap_trimq.range_start();
- dout(10) << "NotTrimming: trimming "
+ ldout(pg->cct, 10) << "NotTrimming: trimming "
<< pg->snap_trimq.range_start()
<< dendl;
return transit< AwaitAsyncWork >();
boost::statechart::result PrimaryLogPG::AwaitAsyncWork::react(const DoSnapWork&)
{
- dout(10) << "AwaitAsyncWork react" << dendl;
PrimaryLogPGRef pg = context< SnapTrimmer >().pg;
+ ldout(pg->cct, 10) << "AwaitAsyncWork react" << dendl;
snapid_t snap_to_trim = context<SnapTrimmer>().snap_to_trim;
auto &in_flight = context<SnapTrimmer>().in_flight;
assert(in_flight.empty());
assert(pg->is_primary() && pg->is_active());
if (!pg->is_clean() ||
pg->scrubber.active) {
- dout(10) << "something changed, reverting to NotTrimming" << dendl;
+ ldout(pg->cct, 10) << "something changed, reverting to NotTrimming" << dendl;
post_event(KickTrim());
return transit< NotTrimming >();
}
- dout(10) << "AwaitAsyncWork: trimming snap " << snap_to_trim << dendl;
+ ldout(pg->cct, 10) << "AwaitAsyncWork: trimming snap " << snap_to_trim << dendl;
vector<hobject_t> to_trim;
- unsigned max = g_conf->osd_pg_max_concurrent_snap_trims;
+ unsigned max = pg->cct->_conf->osd_pg_max_concurrent_snap_trims;
to_trim.reserve(max);
int r = pg->snap_mapper.get_next_objects_to_trim(
snap_to_trim,
max,
&to_trim);
if (r != 0 && r != -ENOENT) {
- derr << "get_next_objects_to_trim returned "
- << cpp_strerror(r) << dendl;
+ lderr(pg->cct) << "get_next_objects_to_trim returned "
+ << cpp_strerror(r) << dendl;
assert(0 == "get_next_objects_to_trim returned an invalid code");
} else if (r == -ENOENT) {
// Done!
- dout(10) << "got ENOENT" << dendl;
+ ldout(pg->cct, 10) << "got ENOENT" << dendl;
- dout(10) << "adding snap " << snap_to_trim
- << " to purged_snaps"
- << dendl;
+ ldout(pg->cct, 10) << "adding snap " << snap_to_trim
+ << " to purged_snaps"
+ << dendl;
pg->info.purged_snaps.insert(snap_to_trim);
pg->snap_trimq.erase(snap_to_trim);
- dout(10) << "purged_snaps now "
- << pg->info.purged_snaps << ", snap_trimq now "
- << pg->snap_trimq << dendl;
+ ldout(pg->cct, 10) << "purged_snaps now "
+ << pg->info.purged_snaps << ", snap_trimq now "
+ << pg->snap_trimq << dendl;
ObjectStore::Transaction t;
pg->dirty_big_info = true;
for (auto &&object: to_trim) {
// Get next
- dout(10) << "AwaitAsyncWork react trimming " << object << dendl;
+ ldout(pg->cct, 10) << "AwaitAsyncWork react trimming " << object << dendl;
OpContextUPtr ctx = pg->trim_object(in_flight.empty(), object);
if (!ctx) {
- dout(10) << "could not get write lock on obj "
- << object << dendl;
+ ldout(pg->cct, 10) << "could not get write lock on obj "
+ << object << dendl;
if (in_flight.empty()) {
- dout(10) << "waiting for it to clear"
- << dendl;
+ ldout(pg->cct, 10) << "waiting for it to clear"
+ << dendl;
return transit< WaitRWLock >();
} else {
- dout(10) << "letting the ones we already started finish"
- << dendl;
+ ldout(pg->cct, 10) << "letting the ones we already started finish"
+ << dendl;
return transit< WaitRepops >();
}
}
#include "messages/MOSDPGPull.h"
#include "messages/MOSDPGPushReply.h"
-#define dout_context g_ceph_context
+#define dout_context cct
#define dout_subsys ceph_subsys_osd
#define DOUT_PREFIX_ARGS this
#undef dout_prefix
ObjectStore::CollectionHandle &c,
ObjectStore *store,
CephContext *cct) :
- PGBackend(pg, store, coll, c),
- cct(cct) {}
+ PGBackend(cct, pg, store, coll, c) {}
void ReplicatedBackend::run_recovery_op(
PGBackend::RecoveryHandle *_h,
};
friend struct C_ReplicatedBackend_OnPullComplete;
public:
- CephContext *cct;
-
ReplicatedBackend(
PGBackend::Listener *pg,
coll_t coll,
#include "SnapMapper.h"
-#define dout_context g_ceph_context
+#define dout_context cct
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix *_dout << "snap_mapper."
*/
class SnapMapper {
public:
+ CephContext* cct;
struct object_snaps {
hobject_t oid;
std::set<snapid_t> snaps;
const shard_id_t shard;
const string shard_prefix;
SnapMapper(
+ CephContext* cct,
MapCacher::StoreDriver<std::string, bufferlist> *driver,
uint32_t match, ///< [in] pgid
uint32_t bits, ///< [in] current split bits
int64_t pool, ///< [in] pool
shard_id_t shard ///< [in] shard
)
- : backend(driver), mask_bits(bits), match(match), pool(pool),
+ : cct(cct), backend(driver), mask_bits(bits), match(match), pool(pool),
shard(shard), shard_prefix(make_shard_prefix(shard)) {
update_bits(mask_bits);
}
virtual void cancel() = 0;
};
-#define dout_context g_ceph_context
+#define dout_context osd->cct
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
}
void finish(int) { ceph_abort(); /* not used */ }
void complete(int) {
- dout(10) << "HandleWatchTimeout" << dendl;
- boost::intrusive_ptr<PrimaryLogPG> pg(watch->pg);
OSDService *osd(watch->osd);
+ ldout(osd->cct, 10) << "HandleWatchTimeout" << dendl;
+ boost::intrusive_ptr<PrimaryLogPG> pg(watch->pg);
osd->watch_lock.Unlock();
pg->lock();
watch->cb = NULL;
canceled = true;
}
void finish(int) {
+ OSDService *osd(watch->osd);
dout(10) << "HandleWatchTimeoutDelayed" << dendl;
assert(watch->pg->is_locked());
watch->cb = NULL;
if ((*i)->is_connected(con)) {
(*i)->disconnect();
} else {
- generic_derr << __func__ << " not still connected to " << (*i) << dendl;
+ lgeneric_derr(cct) << __func__ << " not still connected to " << (*i) << dendl;
}
}
pg->unlock();
Mutex lock;
std::set<WatchRef> watches;
public:
- WatchConState() : lock("WatchConState") {}
+ CephContext* cct;
+ WatchConState(CephContext* cct) : lock("WatchConState"), cct(cct) {}
/// Add a watch
void addWatch(
uint32_t mask,
uint32_t bits)
: driver(driver),
- mapper(new SnapMapper(driver, mask, bits, 0, shard_id_t(1))),
+ mapper(new SnapMapper(g_ceph_context, driver, mask, bits, 0, shard_id_t(1))),
mask(mask), bits(bits),
lock("lock") {}
if (it->is_temp(&pgid) ||
(it->is_pg(&pgid) && PG::_has_removal_flag(store, pgid))) {
cout << "finish_remove_pgs " << *it << " removing " << pgid << std::endl;
- OSD::recursive_remove_collection(store, pgid, *it);
+ OSD::recursive_remove_collection(g_ceph_context, store, pgid, *it);
continue;
}
map<string,bufferlist> km;
pg_info_t last_written_info;
int ret = PG::_prepare_write_info(
+ g_ceph_context,
&km, epoch,
info,
last_written_info,
OSD::make_snapmapper_oid());
spg_t pg;
coll.is_pg_prefix(&pg);
- SnapMapper mapper(&driver, 0, 0, 0, pg.shard);
+ SnapMapper mapper(g_ceph_context, &driver, 0, 0, 0, pg.shard);
if (ob.hoid.hobj.is_temp()) {
cerr << "ERROR: Export contains temporary object '" << ob.hoid << "'" << std::endl;
store,
coll_t(),
OSD::make_snapmapper_oid());
- SnapMapper mapper(&driver, 0, 0, 0, pg.shard);
+ SnapMapper mapper(g_ceph_context, &driver, 0, 0, 0, pg.shard);
struct stat st;
int r = store->stat(coll, ghobj, &st);