#define dout_context g_ceph_context
#undef dout_prefix
-#define dout_prefix *_dout << "mds." << mds->get_nodeid() << ".bal "
+#define dout_prefix *_dout << "mds." << mds->get_nodeid() << ".bal " << __func__ << " "
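+// __func__ is now part of the log prefix, so messages below no longer repeat their function name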
#undef dout
#define dout(lvl) \
do {\
load.all.add(d->pop_nested);
}
} else {
- dout(20) << "get_load no root, no load" << dendl;
+ dout(20) << "no root, no load" << dendl;
}
uint64_t num_requests = mds->get_num_requests();
last_get_load = now;
}
- dout(15) << "get_load " << load << dendl;
+ dout(15) << load << dendl;
return load;
}
}
bal_code.assign(lua_src.to_str());
bal_version.assign(oid.name);
- dout(10) << "localized balancer, bal_code=" << bal_code << dendl;
+ dout(10) "bal_code=" << bal_code << dendl;
}
return r;
}
void MDBalancer::send_heartbeat()
{
if (mds->is_cluster_degraded()) {
- dout(10) << "send_heartbeat degraded" << dendl;
+ dout(10) << "degraded" << dendl;
return;
}
if (!mds->mdcache->is_open()) {
- dout(5) << "not open" << dendl;
+ dout(10) << "not open" << dendl;
mds->mdcache->wait_for_open(new C_Bal_SendHeartbeat(mds));
return;
}
mds_import_map[ mds->get_nodeid() ] = import_map;
- dout(5) << "mds." << mds->get_nodeid() << " epoch " << beat_epoch << " load " << load << dendl;
- for (map<mds_rank_t, float>::iterator it = import_map.begin();
- it != import_map.end();
- ++it) {
- dout(5) << " import_map from " << it->first << " -> " << it->second << dendl;
+ dout(3) << " epoch " << beat_epoch << " load " << load << dendl;
+ for (const auto& [rank, load] : import_map) {
+ dout(5) << " import_map from " << rank << " -> " << load << dendl;
}
};
if (merge_pending.count(frag) == 0) {
- dout(20) << __func__ << " enqueued dir " << *dir << dendl;
+ dout(20) << " enqueued dir " << *dir << dendl;
merge_pending.insert(frag);
mds->timer.add_event_after(bal_fragment_interval,
new LambdaContext(std::move(callback)));
} else {
- dout(20) << __func__ << " dir already in queue " << *dir << dendl;
+ dout(20) << " dir already in queue " << *dir << dendl;
}
}
mds_rank_t whoami = mds->get_nodeid();
rebalance_time = clock::now();
- dout(5) << " prep_rebalance: cluster loads are" << dendl;
+ dout(7) << "cluster loads are" << dendl;
mds->mdcache->migrator->clear_export_queue();
mds_meta_load[i] = l;
if (whoami == 0)
- dout(5) << " mds." << i
+ dout(7) << " mds." << i
<< " " << load
<< " = " << load.mds_load()
<< " ~ " << l << dendl;
// target load
target_load = total_load / (double)cluster_size;
- dout(5) << "prep_rebalance: my load " << my_load
+ dout(7) << "my load " << my_load
<< " target " << target_load
<< " total " << total_load
<< dendl;
// under or over?
- for (auto p : load_map) {
- if (p.first < target_load * (1.0 + g_conf()->mds_bal_min_rebalance)) {
- dout(5) << " mds." << p.second << " is underloaded or barely overloaded." << dendl;
- mds_last_epoch_under_map[p.second] = beat_epoch;
+ for (const auto& [load, rank] : load_map) {
+ if (load < target_load * (1.0 + g_conf()->mds_bal_min_rebalance)) {
+ dout(7) << " mds." << rank << " is underloaded or barely overloaded." << dendl;
+ mds_last_epoch_under_map[rank] = beat_epoch;
}
}
int last_epoch_under = mds_last_epoch_under_map[whoami];
if (last_epoch_under == beat_epoch) {
- dout(5) << " i am underloaded or barely overloaded, doing nothing." << dendl;
+ dout(7) << " i am underloaded or barely overloaded, doing nothing." << dendl;
return;
}
// am i over long enough?
if (last_epoch_under && beat_epoch - last_epoch_under < 2) {
- dout(5) << " i am overloaded, but only for " << (beat_epoch - last_epoch_under) << " epochs" << dendl;
+ dout(7) << " i am overloaded, but only for " << (beat_epoch - last_epoch_under) << " epochs" << dendl;
return;
}
- dout(5) << " i am sufficiently overloaded" << dendl;
+ dout(7) << " i am sufficiently overloaded" << dendl;
// first separate exporters and importers
/* execute the balancer */
Mantle mantle;
int ret = mantle.balance(bal_code, mds->get_nodeid(), metrics, state.targets);
- dout(5) << " mantle decided that new targets=" << state.targets << dendl;
+ dout(7) << " mantle decided that new targets=" << state.targets << dendl;
/* mantle doesn't know about cluster size, so check target len here */
if ((int) state.targets.size() != cluster_size)
// search imports from target
if (import_from_map.count(target)) {
- dout(5) << " aha, looking through imports from target mds." << target << dendl;
+ dout(7) << " aha, looking through imports from target mds." << target << dendl;
for (auto p = import_from_map.equal_range(target);
p.first != p.second; ) {
CDir *dir = p.first->second.first;
double pop = p.first->second.second;
- dout(5) << "considering " << *dir << " from " << (*p.first).first << dendl;
+ dout(7) << "considering " << *dir << " from " << (*p.first).first << dendl;
auto plast = p.first++;
if (dir->inode->is_base())
ceph_assert(dir->inode->authority().first == target); // cuz that's how i put it in the map, dummy
if (pop <= amount-have) {
- dout(5) << "reexporting " << *dir << " pop " << pop
+ dout(7) << "reexporting " << *dir << " pop " << pop
<< " back to mds." << target << dendl;
mds->mdcache->migrator->export_dir_nicely(dir, target);
have += pop;
q.first++;
}
} else {
- dout(5) << "can't reexport " << *dir << ", too big " << pop << dendl;
+ dout(7) << "can't reexport " << *dir << ", too big " << pop << dendl;
}
if (amount-have < MIN_OFFLOAD)
break;
double pop = p->first;
if (pop <= amount-have && pop > MIN_REEXPORT) {
- dout(0) << "reexporting " << *dir << " pop " << pop
+ dout(5) << "reexporting " << *dir << " pop " << pop
<< " to mds." << target << dendl;
have += pop;
mds->mdcache->migrator->export_dir_nicely(dir, target);
}
}
- dout(5) << "rebalance done" << dendl;
+ dout(7) << "done" << dendl;
mds->mdcache->show_subtrees();
}
multimap<double, CDir*> smaller;
double dir_pop = dir->pop_auth_subtree.meta_load();
- dout(7) << " find_exports in " << dir_pop << " " << *dir << " need " << need << " (" << needmin << " - " << needmax << ")" << dendl;
+ dout(7) << "in " << dir_pop << " " << *dir << " need " << need << " (" << needmin << " - " << needmax << ")" << dendl;
double subdir_sum = 0;
for (elist<CInode*>::iterator it = dir->pop_lru_subdirs.begin_use_current();
if (dir->should_split_fast()) {
queue_split(dir, true);
} else {
- dout(10) << __func__ << ": fragment already enqueued to split: "
+ dout(10) << ": fragment already enqueued to split: "
<< *dir << dendl;
}
}
const bool hot = (v > g_conf()->mds_bal_split_rd && type == META_POP_IRD) ||
(v > g_conf()->mds_bal_split_wr && type == META_POP_IWR);
- dout(20) << "hit_dir " << type << " pop is " << v << ", frag " << dir->get_frag()
+ dout(20) << type << " pop is " << v << ", frag " << dir->get_frag()
<< " size " << dir->get_frag_size() << " " << dir->pop_me << dendl;
maybe_fragment(dir, hot);
//if (dir->ino() == inodeno_t(0x10000000002))
if (pop_sp > 0) {
- dout(20) << "hit_dir " << type << " pop " << dir_pop << " spread " << pop_sp
+ dout(20) << type << " pop " << dir_pop << " spread " << pop_sp
<< " " << dir->pop_spread.last[0]
<< " " << dir->pop_spread.last[1]
<< " " << dir->pop_spread.last[2]
if (mds->mdcache->get_root()) {
mds->mdcache->get_root()->get_dirfrags(dfs);
} else {
- dout(5) << "dump_load no root" << dendl;
+ dout(10) << "no root" << dendl;
}
f->open_object_section("loads");
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
-#define dout_prefix *_dout << "mds." << mds->get_nodeid() << ".migrator "
+#define dout_prefix *_dout << "mds." << mds->get_nodeid() << ".mig " << __func__ << " "
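+// as in MDBalancer, __func__ is included in the prefix; per-message function names are dropped below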
class MigratorContext : public MDSContext {
void Migrator::export_empty_import(CDir *dir)
{
- dout(7) << "export_empty_import " << *dir << dendl;
+ dout(7) << *dir << dendl;
ceph_assert(dir->is_subtree_root());
if (dir->inode->is_auth()) {
void Migrator::export_try_cancel(CDir *dir, bool notify_peer)
{
- dout(10) << "export_try_cancel " << *dir << dendl;
+ dout(10) << *dir << dendl;
map<CDir*,export_state_t>::iterator it = export_state.find(dir);
ceph_assert(it != export_state.end());
void Migrator::handle_mds_failure_or_stop(mds_rank_t who)
{
- dout(5) << "handle_mds_failure_or_stop mds." << who << dendl;
+ dout(5) << "mds." << who << dendl;
// check my exports
void Migrator::show_importing()
{
- dout(10) << "show_importing" << dendl;
+ dout(10) << dendl;
for (map<dirfrag_t,import_state_t>::iterator p = import_state.begin();
p != import_state.end();
++p) {
void Migrator::show_exporting()
{
- dout(10) << "show_exporting" << dendl;
- for (map<CDir*,export_state_t>::iterator p = export_state.begin();
- p != export_state.end();
- ++p)
- dout(10) << " exporting to " << p->second.peer
- << ": (" << p->second.state << ") " << get_export_statename(p->second.state)
- << " " << p->first->dirfrag() << " " << *p->first << dendl;
+ dout(10) << dendl;
+ for (const auto& [dir, state] : export_state) {
+ dout(10) << " exporting to " << state.peer
+ << ": (" << state.state << ") " << get_export_statename(state.state)
+ << " " << dir->dirfrag() << " " << *dir << dendl;
+ }
}
void Migrator::export_dir_nicely(CDir *dir, mds_rank_t dest)
{
// enqueue
- dout(7) << "export_dir_nicely " << *dir << " to " << dest << dendl;
+ dout(7) << *dir << " to " << dest << dendl;
export_queue.push_back(pair<dirfrag_t,mds_rank_t>(dir->dirfrag(), dest));
maybe_do_queued_export();
if (!dir) continue;
if (!dir->is_auth()) continue;
- dout(0) << "nicely exporting to mds." << dest << " " << *dir << dendl;
+ dout(7) << "nicely exporting to mds." << dest << " " << *dir << dendl;
export_dir(dir, dest);
}
*/
void Migrator::export_dir(CDir *dir, mds_rank_t dest)
{
- dout(7) << "export_dir " << *dir << " to " << dest << dendl;
ceph_assert(dir->is_auth());
ceph_assert(dest != mds->get_nodeid());
+ CDir* parent = dir->inode->get_projected_parent_dir();
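+ // look up the projected parent once; it is only needed for the stray-directory check in the chain below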
if (!mds->is_stopping() && !dir->inode->is_exportable(dest)) {
- dout(25) << "dir is export pinned" << dendl;
+ dout(7) << "Cannot export to mds." << dest << " " << *dir << ": dir is export pinned" << dendl;
return;
- }
-
- if (!(mds->is_active() || mds->is_stopping())) {
- dout(7) << "i'm not active, no exports for now" << dendl;
+ } else if (!(mds->is_active() || mds->is_stopping())) {
+ dout(7) << "Cannot export to mds." << dest << " " << *dir << ": not active" << dendl;
return;
- }
- if (mds->mdcache->is_readonly()) {
- dout(7) << "read-only FS, no exports for now" << dendl;
+ } else if (mds->mdcache->is_readonly()) {
+ dout(7) << "Cannot export to mds." << dest << " " << *dir << ": read-only FS, no exports for now" << dendl;
return;
- }
- if (!mds->mdsmap->is_active(dest)) {
- dout(7) << "dest not active, no exports for now" << dendl;
+ } else if (!mds->mdsmap->is_active(dest)) {
+ dout(7) << "Cannot export to mds." << dest << " " << *dir << ": destination not active" << dendl;
return;
- }
- if (mds->is_cluster_degraded()) {
- dout(7) << "cluster degraded, no exports for now" << dendl;
+ } else if (mds->is_cluster_degraded()) {
+ dout(7) << "Cannot export to mds." << dest << " " << *dir << ": cluster degraded" << dendl;
return;
- }
- if (dir->inode->is_system()) {
- dout(7) << "i won't export system dirs (root, mdsdirs, stray, /.ceph, etc.)" << dendl;
- //ceph_abort();
+ } else if (dir->inode->is_system()) {
+ dout(7) << "Cannot export to mds." << dest << " " << *dir << ": is a system directory" << dendl;
return;
- }
-
- CDir* parent_dir = dir->inode->get_projected_parent_dir();
- if (parent_dir && parent_dir->inode->is_stray()) {
- if (parent_dir->get_parent_dir()->ino() != MDS_INO_MDSDIR(dest)) {
- dout(7) << "i won't export anything in stray" << dendl;
- return;
- }
- }
-
- if (dir->is_frozen() ||
- dir->is_freezing()) {
- dout(7) << " can't export, freezing|frozen. wait for other exports to finish first." << dendl;
+ } else if (dir->is_frozen() || dir->is_freezing()) {
+ dout(7) << "Cannot export to mds." << dest << " " << *dir << ": is frozen" << dendl;
return;
- }
- if (dir->state_test(CDir::STATE_EXPORTING)) {
- dout(7) << "already exporting" << dendl;
+ } else if (dir->state_test(CDir::STATE_EXPORTING)) {
+ dout(7) << "Cannot export to mds." << dest << " " << *dir << ": already exporting" << dendl;
+ return;
+ } else if (parent && parent->inode->is_stray()
+ && parent->get_parent_dir()->ino() != MDS_INO_MDSDIR(dest)) {
+ dout(7) << "Cannot export to mds." << dest << " " << *dir << ": in stray directory" << dendl;
return;
}
- if (g_conf()->mds_thrash_exports) {
+ if (unlikely(g_conf()->mds_thrash_exports)) {
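+ // mds_thrash_exports is a testing option, so hint that this branch is rarely taken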
// create random subtree bound (which will not be exported)
std::vector<CDir*> ls;
for (auto p = dir->begin(); p != dir->end(); ++p) {
ceph_assert(bd->is_auth());
dir->state_set(CDir::STATE_AUXSUBTREE);
mds->mdcache->adjust_subtree_auth(dir, mds->get_nodeid());
- dout(0) << "export_dir: create aux subtree " << *bd << " under " << *dir << dendl;
+ dout(7) << "create aux subtree " << *bd << " under " << *dir << dendl;
}
}
}
+ dout(4) << "Starting export to mds." << dest << " " << *dir << dendl;
+
mds->hit_export_target(dest, -1);
dir->auth_pin(this);
void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count)
{
CDir *dir = mdr->more()->export_dir;
- dout(7) << "dispatch_export_dir " << *mdr << " " << *dir << dendl;
+ dout(7) << *mdr << " " << *dir << dendl;
map<CDir*,export_state_t>::iterator it = export_state.find(dir);
if (it == export_state.end() || it->second.tid != mdr->reqid.tid) {
if (!mds->is_export_target(dest)) {
dout(7) << "dest is not yet an export target" << dendl;
if (count > 3) {
- dout(5) << "dest has not been added as export target after three MDSMap epochs, canceling export" << dendl;
+ dout(7) << "dest has not been added as export target after three MDSMap epochs, canceling export" << dendl;
export_try_cancel(dir);
return;
}
mds_rank_t dest(m->get_source().num());
ceph_assert(dir);
- dout(7) << "export_discover_ack from " << m->get_source()
+ dout(7) << "from " << m->get_source()
<< " on " << *dir << dendl;
mds->hit_export_target(dest, -1);
void Migrator::export_sessions_flushed(CDir *dir, uint64_t tid)
{
- dout(7) << "export_sessions_flushed " << *dir << dendl;
+ dout(7) << *dir << dendl;
map<CDir*,export_state_t>::iterator it = export_state.find(dir);
if (it == export_state.end() ||
{
ENCODE_START(1, 1, final_bl);
- dout(7) << __func__ << " started to encode dir " << *bound << dendl;
+ dout(7) << " started to encode dir " << *bound << dendl;
CDir *cur = bound;
bufferlist tracebl;
char start = '-';
void Migrator::export_frozen(CDir *dir, uint64_t tid)
{
- dout(7) << "export_frozen on " << *dir << dendl;
+ dout(7) << *dir << dendl;
map<CDir*,export_state_t>::iterator it = export_state.find(dir);
if (it == export_state.end() || it->second.tid != tid) {
mds_rank_t dest(m->get_source().num());
ceph_assert(dir);
- dout(7) << "export_prep_ack " << *dir << dendl;
+ dout(7) << *dir << dendl;
mds->hit_export_target(dest, -1);
{
auto it = export_state.find(dir);
ceph_assert(it != export_state.end());
- dout(7) << "export_go " << *dir << " to " << it->second.peer << dendl;
+ dout(7) << *dir << " to " << it->second.peer << dendl;
// first sync log to flush out e.g. any cap imports
mds->mdlog->wait_for_safe(new C_M_ExportGo(this, dir, it->second.tid));
ceph_assert(it->second.state == EXPORT_WARNING);
mds_rank_t dest = it->second.peer;
- dout(7) << "export_go_synced " << *dir << " to " << dest << dendl;
+ dout(7) << *dir << " to " << dest << dendl;
cache->show_subtrees();
map<client_t,client_metadata_t>& exported_client_metadata_map)
{
ENCODE_START(1, 1, enc_state);
- dout(7) << __func__ << " " << *in << dendl;
+ dout(7) << *in << dendl;
ceph_assert(!in->is_replica(mds->get_nodeid()));
encode(in->inode.ino, enc_state);
map<client_t,client_metadata_t>& exported_client_metadata_map)
{
ENCODE_START(1, 1, bl);
- dout(20) << "encode_export_inode_caps " << *in << dendl;
+ dout(20) << *in << dendl;
// encode caps
map<client_t,Capability::Export> cap_map;
in->export_client_caps(cap_map);
void Migrator::finish_export_inode_caps(CInode *in, mds_rank_t peer,
map<client_t,Capability::Import>& peer_imported)
{
- dout(20) << "finish_export_inode_caps " << *in << dendl;
+ dout(20) << *in << dendl;
in->state_clear(CInode::STATE_EXPORTINGCAPS);
in->put(CInode::PIN_EXPORTINGCAPS);
// tell (all) clients about migrating caps..
for (const auto &p : in->get_client_caps()) {
const Capability *cap = &p.second;
- dout(7) << "finish_export_inode_caps telling client." << p.first
+ dout(7) << "telling client." << p.first
<< " exported caps on " << *in << dendl;
auto m = make_message<MClientCaps>(CEPH_CAP_OP_EXPORT, in->ino(), 0,
cap->get_cap_id(), cap->get_mseq(),
map<client_t,Capability::Import>& peer_imported,
MDSContext::vec& finished)
{
- dout(12) << "finish_export_inode " << *in << dendl;
+ dout(12) << *in << dendl;
// clean
if (in->is_dirty())
std::vector<CDir*> subdirs;
ENCODE_START(1, 1, exportbl);
- dout(7) << __func__ << " " << *dir << " " << dir->get_num_head_items() << " head items" << dendl;
+ dout(7) << *dir << " " << dir->get_num_head_items() << " head items" << dendl;
ceph_assert(dir->get_projected_version() == dir->get_version());
num_exported++;
// -- dentry
- dout(7) << __func__ << " exporting " << *dn << dendl;
+ dout(7) << " exporting " << *dn << dendl;
// dn name
encode(dn->get_name(), exportbl);
map<inodeno_t,map<client_t,Capability::Import> >& peer_imported,
MDSContext::vec& finished, int *num_dentries)
{
- dout(10) << "finish_export_dir " << *dir << dendl;
+ dout(10) << *dir << dendl;
// release open_by
dir->clear_replica_map();
ceph_assert(dir->is_frozen_tree_root()); // i'm exporting!
// yay!
- dout(7) << "handle_export_ack " << *dir << dendl;
+ dout(7) << *dir << dendl;
mds->hit_export_target(dest, -1);
void Migrator::export_notify_abort(CDir *dir, export_state_t& stat, set<CDir*>& bounds)
{
- dout(7) << "export_notify_abort " << *dir << dendl;
+ dout(7) << *dir << dendl;
ceph_assert(stat.state == EXPORT_CANCELLING);
*/
void Migrator::export_reverse(CDir *dir, export_state_t& stat)
{
- dout(7) << "export_reverse " << *dir << dendl;
+ dout(7) << *dir << dendl;
set<CInode*> to_eval;
*/
void Migrator::export_logged_finish(CDir *dir)
{
- dout(7) << "export_logged_finish " << *dir << dendl;
+ dout(7) << *dir << dendl;
export_state_t& stat = export_state[dir];
if (stat.state == EXPORT_WARNING &&
stat.warning_ack_waiting.erase(from)) {
// exporting. process warning.
- dout(7) << "handle_export_notify_ack from " << m->get_source()
+ dout(7) << "from " << m->get_source()
<< ": exporting, processing warning on " << *dir << dendl;
if (stat.warning_ack_waiting.empty())
export_go(dir); // start export.
} else if (stat.state == EXPORT_NOTIFYING &&
stat.notify_ack_waiting.erase(from)) {
// exporting. process notify.
- dout(7) << "handle_export_notify_ack from " << m->get_source()
+ dout(7) << "from " << m->get_source()
<< ": exporting, processing notify on " << *dir << dendl;
if (stat.notify_ack_waiting.empty())
export_finish(dir);
} else if (stat.state == EXPORT_CANCELLING &&
m->get_new_auth().second == CDIR_AUTH_UNKNOWN && // not warning ack
stat.notify_ack_waiting.erase(from)) {
- dout(7) << "handle_export_notify_ack from " << m->get_source()
+ dout(7) << "from " << m->get_source()
<< ": cancelling export, processing notify on " << *dir << dendl;
if (stat.notify_ack_waiting.empty()) {
export_cancel_finish(export_state_entry);
import_state_t& stat = import_state_entry->second;
if (stat.state == IMPORT_ABORTING) {
// reversing import
- dout(7) << "handle_export_notify_ack from " << m->get_source()
+ dout(7) << "from " << m->get_source()
<< ": aborting import on " << *dir << dendl;
ceph_assert(stat.bystanders.count(from));
stat.bystanders.erase(from);
void Migrator::export_finish(CDir *dir)
{
- dout(5) << "export_finish " << *dir << dendl;
+ dout(3) << *dir << dendl;
assert (g_conf()->mds_kill_export_at != 12);
map<CDir*,export_state_t>::iterator it = export_state.find(dir);
// discard delayed expires
cache->discard_delayed_expire(dir);
- dout(7) << "export_finish unfreezing" << dendl;
+ dout(7) << "unfreezing" << dendl;
// unfreeze tree, with possible subtree merge.
// (we do this _after_ removing EXPORTBOUND pins, to allow merges)
mds_rank_t from = m->get_source_mds();
ceph_assert(from != mds->get_nodeid());
- dout(7) << "handle_export_discover on " << m->get_path() << dendl;
+ dout(7) << m->get_path() << dendl;
// note import state
dirfrag_t df = m->get_dirfrag();
C_MDS_ExportDiscoverFactory cf(this, m);
if (!mds->mdcache->is_open()) {
- dout(5) << " waiting for root" << dendl;
+ dout(10) << " waiting for root" << dendl;
mds->mdcache->wait_for_open(cf.build());
return;
}
&trace);
if (r > 0) return;
if (r < 0) {
- dout(7) << "handle_export_discover failed to discover or not dir " << m->get_path() << ", NAK" << dendl;
+ dout(7) << "failed to discover or not dir " << m->get_path() << ", NAK" << dendl;
ceph_abort(); // this shouldn't happen if the auth pins its path properly!!!!
}
}
// yay
- dout(7) << "handle_export_discover have " << df << " inode " << *in << dendl;
+ dout(7) << "have " << df << " inode " << *in << dendl;
p_state->state = IMPORT_DISCOVERED;
void Migrator::handle_export_cancel(const cref_t<MExportDirCancel> &m)
{
- dout(7) << "handle_export_cancel on " << m->get_dirfrag() << dendl;
+ dout(7) << "on " << m->get_dirfrag() << dendl;
dirfrag_t df = m->get_dirfrag();
map<dirfrag_t,import_state_t>::iterator it = import_state.find(df);
if (it == import_state.end()) {
ceph_assert(diri);
auto p = m->basedir.cbegin();
cache->decode_replica_dir(dir, p, diri, oldauth, finished);
- dout(7) << "handle_export_prep on " << *dir << " (first pass)" << dendl;
+ dout(7) << "on " << *dir << " (first pass)" << dendl;
} else {
if (it == import_state.end() ||
it->second.peer != oldauth ||
it->second.tid != m->get_tid()) {
- dout(7) << "handle_export_prep obsolete message, dropping" << dendl;
+ dout(7) << "obsolete message, dropping" << dendl;
return;
}
ceph_assert(it->second.state == IMPORT_PREPPING);
dir = cache->get_dirfrag(m->get_dirfrag());
ceph_assert(dir);
- dout(7) << "handle_export_prep on " << *dir << " (subsequent pass)" << dendl;
+ dout(7) << "on " << *dir << " (subsequent pass)" << dendl;
diri = dir->get_inode();
}
ceph_assert(dir->is_auth() == false);
ceph_assert(dir);
mds_rank_t oldauth = mds_rank_t(m->get_source().num());
- dout(7) << "handle_export_dir importing " << *dir << " from " << oldauth << dendl;
+ dout(7) << "importing " << *dir << " from " << oldauth << dendl;
ceph_assert(!dir->is_auth());
ceph_assert(dir->freeze_tree_state);
// adjust popularity
mds->balancer->add_import(dir);
- dout(7) << "handle_export_dir did " << *dir << dendl;
+ dout(7) << "did " << *dir << dendl;
// note state
it->second.state = IMPORT_LOGGINGSTART;
*/
void Migrator::import_reverse(CDir *dir)
{
- dout(7) << "import_reverse " << *dir << dendl;
+ dout(7) << *dir << dendl;
import_state_t& stat = import_state[dir->dirfrag()];
stat.state = IMPORT_ABORTING;
void Migrator::import_notify_finish(CDir *dir, set<CDir*>& bounds)
{
- dout(7) << "import_notify_finish " << *dir << dendl;
+ dout(7) << *dir << dendl;
import_state_t& stat = import_state[dir->dirfrag()];
for (set<mds_rank_t>::iterator p = stat.bystanders.begin();
void Migrator::import_notify_abort(CDir *dir, set<CDir*>& bounds)
{
- dout(7) << "import_notify_abort " << *dir << dendl;
+ dout(7) << *dir << dendl;
import_state_t& stat = import_state[dir->dirfrag()];
for (set<mds_rank_t>::iterator p = stat.bystanders.begin();
void Migrator::import_reverse_unfreeze(CDir *dir)
{
- dout(7) << "import_reverse_unfreeze " << *dir << dendl;
+ dout(7) << *dir << dendl;
ceph_assert(!dir->is_auth());
cache->discard_delayed_expire(dir);
dir->unfreeze_tree();
void Migrator::import_reverse_final(CDir *dir)
{
- dout(7) << "import_reverse_final " << *dir << dendl;
+ dout(7) << *dir << dendl;
// clean up
map<dirfrag_t, import_state_t>::iterator it = import_state.find(dir->dirfrag());
void Migrator::import_logged_start(dirfrag_t df, CDir *dir, mds_rank_t from,
map<client_t,pair<Session*,uint64_t> >& imported_session_map)
{
- dout(7) << "import_logged " << *dir << dendl;
+ dout(7) << *dir << dendl;
map<dirfrag_t, import_state_t>::iterator it = import_state.find(dir->dirfrag());
if (it == import_state.end() ||
{
CDir *dir = cache->get_dirfrag(m->get_dirfrag());
ceph_assert(dir);
- dout(7) << "handle_export_finish on " << *dir << (m->is_last() ? " last" : "") << dendl;
+ dout(7) << *dir << (m->is_last() ? " last" : "") << dendl;
map<dirfrag_t,import_state_t>::iterator it = import_state.find(m->get_dirfrag());
ceph_assert(it != import_state.end());
void Migrator::import_finish(CDir *dir, bool notify, bool last)
{
- dout(7) << "import_finish on " << *dir << dendl;
+ dout(7) << *dir << dendl;
map<dirfrag_t,import_state_t>::iterator it = import_state.find(dir->dirfrag());
ceph_assert(it != import_state.end());
CInode *in;
bool added = false;
DECODE_START(1, blp);
- dout(15) << __func__ << " on " << *dn << dendl;
+ dout(15) << " on " << *dn << dendl;
inodeno_t ino;
snapid_t last;
map<client_t,Capability::Import> &import_map)
{
for (auto& it : export_map) {
- dout(10) << "finish_import_inode_caps for client." << it.first << " on " << *in << dendl;
+ dout(10) << "for client." << it.first << " on " << *in << dendl;
auto p = session_map.find(it.first);
if (p == session_map.end()) {
CDir *dir = diri->get_or_open_dirfrag(mds->mdcache, df.frag);
ceph_assert(dir);
- dout(7) << __func__ << " " << *dir << dendl;
+ dout(7) << *dir << dendl;
if (!dir->freeze_tree_state) {
ceph_assert(dir->get_version() == 0);
if (dn->lock.get_state() != LOCK_SYNC)
mds->locker->try_eval(&dn->lock, NULL);
- dout(15) << __func__ << " got " << *dn << dendl;
+ dout(15) << " got " << *dn << dendl;
// points to...
char icode;
dir->inode->maybe_export_pin();
- dout(7) << __func__ << " done " << *dir << dendl;
+ dout(7) << " done " << *dir << dendl;
DECODE_FINISH(blp);
}
mds_authority_t new_auth = m->get_new_auth();
if (!dir) {
- dout(7) << "handle_export_notify " << old_auth << " -> " << new_auth
+ dout(7) << old_auth << " -> " << new_auth
<< " on missing dir " << m->get_dirfrag() << dendl;
} else if (dir->authority() != old_auth) {
- dout(7) << "handle_export_notify old_auth was " << dir->authority()
+ dout(7) << "old_auth was " << dir->authority()
<< " != " << old_auth << " -> " << new_auth
<< " on " << *dir << dendl;
} else {
- dout(7) << "handle_export_notify " << old_auth << " -> " << new_auth
+ dout(7) << old_auth << " -> " << new_auth
<< " on " << *dir << dendl;
// adjust auth
set<CDir*> have;
mds->send_message_mds(make_message<MExportDirNotifyAck>(m->get_dirfrag(), m->get_tid(), m->get_new_auth()), from);
} else {
// aborted. no ack.
- dout(7) << "handle_export_notify no ack requested" << dendl;
+ dout(7) << "no ack requested" << dendl;
}
}
void Migrator::export_caps(CInode *in)
{
mds_rank_t dest = in->authority().first;
- dout(7) << "export_caps to mds." << dest << " " << *in << dendl;
+ dout(7) << "to mds." << dest << " " << *in << dendl;
ceph_assert(in->is_any_caps());
ceph_assert(!in->is_auth());
if (in) {
ceph_assert(!in->is_auth());
- dout(10) << "handle_export_caps_ack " << *ack << " from "
+ dout(10) << *ack << " from "
<< ack->get_source() << " on " << *in << dendl;
map<client_t,Capability::Import> imported_caps;
if (!cap || cap->get_cap_id() != caps_ids.at(it.first))
continue;
- dout(7) << __func__ << " telling client." << it.first
+ dout(7) << " telling client." << it.first
<< " exported caps on " << *in << dendl;
auto m = make_message<MClientCaps>(CEPH_CAP_OP_EXPORT, in->ino(), 0,
cap->get_cap_id(), cap->get_mseq(),
if (!in)
return;
- dout(10) << "handle_gather_caps " << *m << " from " << m->get_source()
+ dout(10) << *m << " from " << m->get_source()
<< " on " << *in << dendl;
if (in->is_any_caps() &&
void Migrator::handle_export_caps(const cref_t<MExportCaps> &ex)
{
- dout(10) << "handle_export_caps " << *ex << " from " << ex->get_source() << dendl;
+ dout(10) << *ex << " from " << ex->get_source() << dendl;
CInode *in = cache->get_inode(ex->ino);
ceph_assert(in);
map<client_t,pair<Session*,uint64_t> >& imported_session_map,
map<CInode*, map<client_t,Capability::Export> >& peer_exports)
{
- dout(10) << "logged_import_caps on " << *in << dendl;
+ dout(10) << *in << dendl;
// see export_go() vs export_go_synced()
ceph_assert(in->is_auth());