From: Patrick Donnelly Date: Fri, 21 Feb 2020 19:06:09 +0000 (-0800) Subject: mds: relevel debug message levels for balancer/migrator X-Git-Tag: v15.1.1~265^2 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=d9d180d1c895866d062beb40da894071ec2ede7d;p=ceph.git mds: relevel debug message levels for balancer/migrator Some of these messages are too verbose or inconsistent with other messages. Also, clean up function name printing and be consistent for all functions. Signed-off-by: Patrick Donnelly --- diff --git a/src/mds/MDBalancer.cc b/src/mds/MDBalancer.cc index 9e533c0cf4b8..a1b8508f54e3 100644 --- a/src/mds/MDBalancer.cc +++ b/src/mds/MDBalancer.cc @@ -41,7 +41,7 @@ using std::chrono::duration_cast; #define dout_context g_ceph_context #undef dout_prefix -#define dout_prefix *_dout << "mds." << mds->get_nodeid() << ".bal " +#define dout_prefix *_dout << "mds." << mds->get_nodeid() << ".bal " << __func__ << " " #undef dout #define dout(lvl) \ do {\ @@ -259,7 +259,7 @@ mds_load_t MDBalancer::get_load() load.all.add(d->pop_nested); } } else { - dout(20) << "get_load no root, no load" << dendl; + dout(20) << "no root, no load" << dendl; } uint64_t num_requests = mds->get_num_requests(); @@ -311,7 +311,7 @@ mds_load_t MDBalancer::get_load() last_get_load = now; } - dout(15) << "get_load " << load << dendl; + dout(15) << load << dendl; return load; } @@ -351,7 +351,7 @@ int MDBalancer::localize_balancer() } bal_code.assign(lua_src.to_str()); bal_version.assign(oid.name); - dout(10) << "localized balancer, bal_code=" << bal_code << dendl; + dout(10) << "bal_code=" << bal_code << dendl; } return r; } @@ -359,12 +359,12 @@ int MDBalancer::localize_balancer() void MDBalancer::send_heartbeat() { if (mds->is_cluster_degraded()) { - dout(10) << "send_heartbeat degraded" << dendl; + dout(10) << "degraded" << dendl; return; } if (!mds->mdcache->is_open()) { - dout(5) << "not open" << dendl; + dout(10) << "not open" << dendl; 
mds->mdcache->wait_for_open(new C_Bal_SendHeartbeat(mds)); return; } @@ -395,11 +395,9 @@ void MDBalancer::send_heartbeat() mds_import_map[ mds->get_nodeid() ] = import_map; - dout(5) << "mds." << mds->get_nodeid() << " epoch " << beat_epoch << " load " << load << dendl; - for (map::iterator it = import_map.begin(); - it != import_map.end(); - ++it) { - dout(5) << " import_map from " << it->first << " -> " << it->second << dendl; + dout(3) << " epoch " << beat_epoch << " load " << load << dendl; + for (const auto& [rank, load] : import_map) { + dout(5) << " import_map from " << rank << " -> " << load << dendl; } @@ -613,12 +611,12 @@ void MDBalancer::queue_merge(CDir *dir) }; if (merge_pending.count(frag) == 0) { - dout(20) << __func__ << " enqueued dir " << *dir << dendl; + dout(20) << " enqueued dir " << *dir << dendl; merge_pending.insert(frag); mds->timer.add_event_after(bal_fragment_interval, new LambdaContext(std::move(callback))); } else { - dout(20) << __func__ << " dir already in queue " << *dir << dendl; + dout(20) << " dir already in queue " << *dir << dendl; } } @@ -638,7 +636,7 @@ void MDBalancer::prep_rebalance(int beat) mds_rank_t whoami = mds->get_nodeid(); rebalance_time = clock::now(); - dout(5) << " prep_rebalance: cluster loads are" << dendl; + dout(7) << "cluster loads are" << dendl; mds->mdcache->migrator->clear_export_queue(); @@ -666,7 +664,7 @@ void MDBalancer::prep_rebalance(int beat) mds_meta_load[i] = l; if (whoami == 0) - dout(5) << " mds." << i + dout(7) << " mds." << i << " " << load << " = " << load.mds_load() << " ~ " << l << dendl; @@ -679,31 +677,31 @@ void MDBalancer::prep_rebalance(int beat) // target load target_load = total_load / (double)cluster_size; - dout(5) << "prep_rebalance: my load " << my_load + dout(7) << "my load " << my_load << " target " << target_load << " total " << total_load << dendl; // under or over? 
- for (auto p : load_map) { - if (p.first < target_load * (1.0 + g_conf()->mds_bal_min_rebalance)) { - dout(5) << " mds." << p.second << " is underloaded or barely overloaded." << dendl; - mds_last_epoch_under_map[p.second] = beat_epoch; + for (const auto& [load, rank] : load_map) { + if (load < target_load * (1.0 + g_conf()->mds_bal_min_rebalance)) { + dout(7) << " mds." << rank << " is underloaded or barely overloaded." << dendl; + mds_last_epoch_under_map[rank] = beat_epoch; } } int last_epoch_under = mds_last_epoch_under_map[whoami]; if (last_epoch_under == beat_epoch) { - dout(5) << " i am underloaded or barely overloaded, doing nothing." << dendl; + dout(7) << " i am underloaded or barely overloaded, doing nothing." << dendl; return; } // am i over long enough? if (last_epoch_under && beat_epoch - last_epoch_under < 2) { - dout(5) << " i am overloaded, but only for " << (beat_epoch - last_epoch_under) << " epochs" << dendl; + dout(7) << " i am overloaded, but only for " << (beat_epoch - last_epoch_under) << " epochs" << dendl; return; } - dout(5) << " i am sufficiently overloaded" << dendl; + dout(7) << " i am sufficiently overloaded" << dendl; // first separate exporters and importers @@ -825,7 +823,7 @@ int MDBalancer::mantle_prep_rebalance() /* execute the balancer */ Mantle mantle; int ret = mantle.balance(bal_code, mds->get_nodeid(), metrics, state.targets); - dout(5) << " mantle decided that new targets=" << state.targets << dendl; + dout(7) << " mantle decided that new targets=" << state.targets << dendl; /* mantle doesn't know about cluster size, so check target len here */ if ((int) state.targets.size() != cluster_size) @@ -900,12 +898,12 @@ void MDBalancer::try_rebalance(balance_state_t& state) // search imports from target if (import_from_map.count(target)) { - dout(5) << " aha, looking through imports from target mds." << target << dendl; + dout(7) << " aha, looking through imports from target mds." 
<< target << dendl; for (auto p = import_from_map.equal_range(target); p.first != p.second; ) { CDir *dir = p.first->second.first; double pop = p.first->second.second; - dout(5) << "considering " << *dir << " from " << (*p.first).first << dendl; + dout(7) << "considering " << *dir << " from " << (*p.first).first << dendl; auto plast = p.first++; if (dir->inode->is_base()) @@ -913,7 +911,7 @@ void MDBalancer::try_rebalance(balance_state_t& state) ceph_assert(dir->inode->authority().first == target); // cuz that's how i put it in the map, dummy if (pop <= amount-have) { - dout(5) << "reexporting " << *dir << " pop " << pop + dout(7) << "reexporting " << *dir << " pop " << pop << " back to mds." << target << dendl; mds->mdcache->migrator->export_dir_nicely(dir, target); have += pop; @@ -927,7 +925,7 @@ void MDBalancer::try_rebalance(balance_state_t& state) q.first++; } } else { - dout(5) << "can't reexport " << *dir << ", too big " << pop << dendl; + dout(7) << "can't reexport " << *dir << ", too big " << pop << dendl; } if (amount-have < MIN_OFFLOAD) break; @@ -956,7 +954,7 @@ void MDBalancer::try_rebalance(balance_state_t& state) double pop = p->first; if (pop <= amount-have && pop > MIN_REEXPORT) { - dout(0) << "reexporting " << *dir << " pop " << pop + dout(5) << "reexporting " << *dir << " pop " << pop << " to mds." 
<< target << dendl; have += pop; mds->mdcache->migrator->export_dir_nicely(dir, target); @@ -1002,7 +1000,7 @@ void MDBalancer::try_rebalance(balance_state_t& state) } } - dout(5) << "rebalance done" << dendl; + dout(7) << "done" << dendl; mds->mdcache->show_subtrees(); } @@ -1035,7 +1033,7 @@ void MDBalancer::find_exports(CDir *dir, multimap smaller; double dir_pop = dir->pop_auth_subtree.meta_load(); - dout(7) << " find_exports in " << dir_pop << " " << *dir << " need " << need << " (" << needmin << " - " << needmax << ")" << dendl; + dout(7) << "in " << dir_pop << " " << *dir << " need " << need << " (" << needmin << " - " << needmax << ")" << dendl; double subdir_sum = 0; for (elist::iterator it = dir->pop_lru_subdirs.begin_use_current(); @@ -1163,7 +1161,7 @@ void MDBalancer::maybe_fragment(CDir *dir, bool hot) if (dir->should_split_fast()) { queue_split(dir, true); } else { - dout(10) << __func__ << ": fragment already enqueued to split: " + dout(10) << ": fragment already enqueued to split: " << *dir << dendl; } } @@ -1185,7 +1183,7 @@ void MDBalancer::hit_dir(CDir *dir, int type, int who, double amount) const bool hot = (v > g_conf()->mds_bal_split_rd && type == META_POP_IRD) || (v > g_conf()->mds_bal_split_wr && type == META_POP_IWR); - dout(20) << "hit_dir " << type << " pop is " << v << ", frag " << dir->get_frag() + dout(20) << type << " pop is " << v << ", frag " << dir->get_frag() << " size " << dir->get_frag_size() << " " << dir->pop_me << dendl; maybe_fragment(dir, hot); @@ -1205,7 +1203,7 @@ void MDBalancer::hit_dir(CDir *dir, int type, int who, double amount) //if (dir->ino() == inodeno_t(0x10000000002)) if (pop_sp > 0) { - dout(20) << "hit_dir " << type << " pop " << dir_pop << " spread " << pop_sp + dout(20) << type << " pop " << dir_pop << " spread " << pop_sp << " " << dir->pop_spread.last[0] << " " << dir->pop_spread.last[1] << " " << dir->pop_spread.last[2] @@ -1356,7 +1354,7 @@ int MDBalancer::dump_loads(Formatter *f) const if 
(mds->mdcache->get_root()) { mds->mdcache->get_root()->get_dirfrags(dfs); } else { - dout(5) << "dump_load no root" << dendl; + dout(10) << "no root" << dendl; } f->open_object_section("loads"); diff --git a/src/mds/Migrator.cc b/src/mds/Migrator.cc index 4de3f87d1118..874e56416aa6 100644 --- a/src/mds/Migrator.cc +++ b/src/mds/Migrator.cc @@ -64,7 +64,7 @@ #define dout_context g_ceph_context #define dout_subsys ceph_subsys_mds #undef dout_prefix -#define dout_prefix *_dout << "mds." << mds->get_nodeid() << ".migrator " +#define dout_prefix *_dout << "mds." << mds->get_nodeid() << ".mig " << __func__ << " " class MigratorContext : public MDSContext { @@ -169,7 +169,7 @@ public: void Migrator::export_empty_import(CDir *dir) { - dout(7) << "export_empty_import " << *dir << dendl; + dout(7) << *dir << dendl; ceph_assert(dir->is_subtree_root()); if (dir->inode->is_auth()) { @@ -258,7 +258,7 @@ void Migrator::find_stale_export_freeze() void Migrator::export_try_cancel(CDir *dir, bool notify_peer) { - dout(10) << "export_try_cancel " << *dir << dendl; + dout(10) << *dir << dendl; map::iterator it = export_state.find(dir); ceph_assert(it != export_state.end()); @@ -410,7 +410,7 @@ void Migrator::export_cancel_finish(export_state_iterator& it) void Migrator::handle_mds_failure_or_stop(mds_rank_t who) { - dout(5) << "handle_mds_failure_or_stop mds." 
<< who << dendl; + dout(5) << who << dendl; // check my exports @@ -595,7 +595,7 @@ void Migrator::handle_mds_failure_or_stop(mds_rank_t who) void Migrator::show_importing() { - dout(10) << "show_importing" << dendl; + dout(10) << dendl; for (map::iterator p = import_state.begin(); p != import_state.end(); ++p) { @@ -614,13 +614,12 @@ void Migrator::show_importing() void Migrator::show_exporting() { - dout(10) << "show_exporting" << dendl; - for (map::iterator p = export_state.begin(); - p != export_state.end(); - ++p) - dout(10) << " exporting to " << p->second.peer - << ": (" << p->second.state << ") " << get_export_statename(p->second.state) - << " " << p->first->dirfrag() << " " << *p->first << dendl; + dout(10) << dendl; + for (const auto& [dir, state] : export_state) { + dout(10) << " exporting to " << state.peer + << ": (" << state.state << ") " << get_export_statename(state.state) + << " " << dir->dirfrag() << " " << *dir << dendl; + } } @@ -687,7 +686,7 @@ void Migrator::audit() void Migrator::export_dir_nicely(CDir *dir, mds_rank_t dest) { // enqueue - dout(7) << "export_dir_nicely " << *dir << " to " << dest << dendl; + dout(7) << *dir << " to " << dest << dendl; export_queue.push_back(pair(dir->dirfrag(), dest)); maybe_do_queued_export(); @@ -715,7 +714,7 @@ void Migrator::maybe_do_queued_export() if (!dir) continue; if (!dir->is_auth()) continue; - dout(0) << "nicely exporting to mds." << dest << " " << *dir << dendl; + dout(7) << "nicely exporting to mds." 
<< dest << " " << *dir << dendl; export_dir(dir, dest); } @@ -787,56 +786,41 @@ bool Migrator::export_try_grab_locks(CDir *dir, MutationRef& mut) */ void Migrator::export_dir(CDir *dir, mds_rank_t dest) { - dout(7) << "export_dir " << *dir << " to " << dest << dendl; ceph_assert(dir->is_auth()); ceph_assert(dest != mds->get_nodeid()); + CDir* parent = dir->inode->get_projected_parent_dir(); if (!mds->is_stopping() && !dir->inode->is_exportable(dest)) { - dout(25) << "dir is export pinned" << dendl; + dout(7) << "Cannot export to mds." << dest << " " << *dir << ": dir is export pinned" << dendl; return; - } - - if (!(mds->is_active() || mds->is_stopping())) { - dout(7) << "i'm not active, no exports for now" << dendl; + } else if (!(mds->is_active() || mds->is_stopping())) { + dout(7) << "Cannot export to mds." << dest << " " << *dir << ": not active" << dendl; return; - } - if (mds->mdcache->is_readonly()) { - dout(7) << "read-only FS, no exports for now" << dendl; + } else if (mds->mdcache->is_readonly()) { + dout(7) << "Cannot export to mds." << dest << " " << *dir << ": read-only FS, no exports for now" << dendl; return; - } - if (!mds->mdsmap->is_active(dest)) { - dout(7) << "dest not active, no exports for now" << dendl; + } else if (!mds->mdsmap->is_active(dest)) { + dout(7) << "Cannot export to mds." << dest << " " << *dir << ": destination not active" << dendl; return; - } - if (mds->is_cluster_degraded()) { - dout(7) << "cluster degraded, no exports for now" << dendl; + } else if (mds->is_cluster_degraded()) { + dout(7) << "Cannot export to mds." << dest << " " << *dir << ": cluster degraded" << dendl; return; - } - if (dir->inode->is_system()) { - dout(7) << "i won't export system dirs (root, mdsdirs, stray, /.ceph, etc.)" << dendl; - //ceph_abort(); + } else if (dir->inode->is_system()) { + dout(7) << "Cannot export to mds." 
<< dest << " " << *dir << ": is a system directory" << dendl; return; - } - - CDir* parent_dir = dir->inode->get_projected_parent_dir(); - if (parent_dir && parent_dir->inode->is_stray()) { - if (parent_dir->get_parent_dir()->ino() != MDS_INO_MDSDIR(dest)) { - dout(7) << "i won't export anything in stray" << dendl; - return; - } - } - - if (dir->is_frozen() || - dir->is_freezing()) { - dout(7) << " can't export, freezing|frozen. wait for other exports to finish first." << dendl; + } else if (dir->is_frozen() || dir->is_freezing()) { + dout(7) << "Cannot export to mds." << dest << " " << *dir << ": is frozen" << dendl; return; - } - if (dir->state_test(CDir::STATE_EXPORTING)) { - dout(7) << "already exporting" << dendl; + } else if (dir->state_test(CDir::STATE_EXPORTING)) { + dout(7) << "Cannot export to mds." << dest << " " << *dir << ": already exporting" << dendl; + return; + } else if (parent && parent->inode->is_stray() + && parent->get_parent_dir()->ino() != MDS_INO_MDSDIR(dest)) { + dout(7) << "Cannot export to mds." << dest << " " << *dir << ": in stray directory" << dendl; return; } - if (g_conf()->mds_thrash_exports) { + if (unlikely(g_conf()->mds_thrash_exports)) { // create random subtree bound (which will not be exported) std::vector ls; for (auto p = dir->begin(); p != dir->end(); ++p) { @@ -859,11 +843,13 @@ void Migrator::export_dir(CDir *dir, mds_rank_t dest) ceph_assert(bd->is_auth()); dir->state_set(CDir::STATE_AUXSUBTREE); mds->mdcache->adjust_subtree_auth(dir, mds->get_nodeid()); - dout(0) << "export_dir: create aux subtree " << *bd << " under " << *dir << dendl; + dout(7) << "create aux subtree " << *bd << " under " << *dir << dendl; } } } + dout(4) << "Starting export to mds." 
<< dest << " " << *dir << dendl; + mds->hit_export_target(dest, -1); dir->auth_pin(this); @@ -1034,7 +1020,7 @@ public: void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count) { CDir *dir = mdr->more()->export_dir; - dout(7) << "dispatch_export_dir " << *mdr << " " << *dir << dendl; + dout(7) << *mdr << " " << *dir << dendl; map::iterator it = export_state.find(dir); if (it == export_state.end() || it->second.tid != mdr->reqid.tid) { @@ -1059,7 +1045,7 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count) if (!mds->is_export_target(dest)) { dout(7) << "dest is not yet an export target" << dendl; if (count > 3) { - dout(5) << "dest has not been added as export target after three MDSMap epochs, canceling export" << dendl; + dout(7) << "dest has not been added as export target after three MDSMap epochs, canceling export" << dendl; export_try_cancel(dir); return; } @@ -1226,7 +1212,7 @@ void Migrator::handle_export_discover_ack(const cref_t &m mds_rank_t dest(m->get_source().num()); ceph_assert(dir); - dout(7) << "export_discover_ack from " << m->get_source() + dout(7) << "from " << m->get_source() << " on " << *dir << dendl; mds->hit_export_target(dest, -1); @@ -1273,7 +1259,7 @@ public: void Migrator::export_sessions_flushed(CDir *dir, uint64_t tid) { - dout(7) << "export_sessions_flushed " << *dir << dendl; + dout(7) << *dir << dendl; map::iterator it = export_state.find(dir); if (it == export_state.end() || @@ -1298,7 +1284,7 @@ void Migrator::encode_export_prep_trace(bufferlist &final_bl, CDir *bound, { ENCODE_START(1, 1, final_bl); - dout(7) << __func__ << " started to encode dir " << *bound << dendl; + dout(7) << " started to encode dir " << *bound << dendl; CDir *cur = bound; bufferlist tracebl; char start = '-'; @@ -1344,7 +1330,7 @@ void Migrator::encode_export_prep_trace(bufferlist &final_bl, CDir *bound, void Migrator::export_frozen(CDir *dir, uint64_t tid) { - dout(7) << "export_frozen on " << *dir << dendl; + dout(7) << *dir << 
dendl; map::iterator it = export_state.find(dir); if (it == export_state.end() || it->second.tid != tid) { @@ -1481,7 +1467,7 @@ void Migrator::handle_export_prep_ack(const cref_t &m) mds_rank_t dest(m->get_source().num()); ceph_assert(dir); - dout(7) << "export_prep_ack " << *dir << dendl; + dout(7) << *dir << dendl; mds->hit_export_target(dest, -1); @@ -1556,7 +1542,7 @@ void Migrator::export_go(CDir *dir) { auto it = export_state.find(dir); ceph_assert(it != export_state.end()); - dout(7) << "export_go " << *dir << " to " << it->second.peer << dendl; + dout(7) << *dir << " to " << it->second.peer << dendl; // first sync log to flush out e.g. any cap imports mds->mdlog->wait_for_safe(new C_M_ExportGo(this, dir, it->second.tid)); @@ -1576,7 +1562,7 @@ void Migrator::export_go_synced(CDir *dir, uint64_t tid) ceph_assert(it->second.state == EXPORT_WARNING); mds_rank_t dest = it->second.peer; - dout(7) << "export_go_synced " << *dir << " to " << dest << dendl; + dout(7) << *dir << " to " << dest << dendl; cache->show_subtrees(); @@ -1637,7 +1623,7 @@ void Migrator::encode_export_inode(CInode *in, bufferlist& enc_state, map& exported_client_metadata_map) { ENCODE_START(1, 1, enc_state); - dout(7) << __func__ << " " << *in << dendl; + dout(7) << *in << dendl; ceph_assert(!in->is_replica(mds->get_nodeid())); encode(in->inode.ino, enc_state); @@ -1654,7 +1640,7 @@ void Migrator::encode_export_inode_caps(CInode *in, bool auth_cap, bufferlist& b map& exported_client_metadata_map) { ENCODE_START(1, 1, bl); - dout(20) << "encode_export_inode_caps " << *in << dendl; + dout(20) << *in << dendl; // encode caps map cap_map; in->export_client_caps(cap_map); @@ -1680,7 +1666,7 @@ void Migrator::encode_export_inode_caps(CInode *in, bool auth_cap, bufferlist& b void Migrator::finish_export_inode_caps(CInode *in, mds_rank_t peer, map& peer_imported) { - dout(20) << "finish_export_inode_caps " << *in << dendl; + dout(20) << *in << dendl; in->state_clear(CInode::STATE_EXPORTINGCAPS); 
in->put(CInode::PIN_EXPORTINGCAPS); @@ -1688,7 +1674,7 @@ void Migrator::finish_export_inode_caps(CInode *in, mds_rank_t peer, // tell (all) clients about migrating caps.. for (const auto &p : in->get_client_caps()) { const Capability *cap = &p.second; - dout(7) << "finish_export_inode_caps telling client." << p.first + dout(7) << p.first << " exported caps on " << *in << dendl; auto m = make_message(CEPH_CAP_OP_EXPORT, in->ino(), 0, cap->get_cap_id(), cap->get_mseq(), @@ -1707,7 +1693,7 @@ void Migrator::finish_export_inode(CInode *in, mds_rank_t peer, map& peer_imported, MDSContext::vec& finished) { - dout(12) << "finish_export_inode " << *in << dendl; + dout(12) << *in << dendl; // clean if (in->is_dirty()) @@ -1760,7 +1746,7 @@ void Migrator::encode_export_dir(bufferlist& exportbl, std::vector subdirs; ENCODE_START(1, 1, exportbl); - dout(7) << __func__ << " " << *dir << " " << dir->get_num_head_items() << " head items" << dendl; + dout(7) << *dir << " " << dir->get_num_head_items() << " head items" << dendl; ceph_assert(dir->get_projected_version() == dir->get_version()); @@ -1785,7 +1771,7 @@ void Migrator::encode_export_dir(bufferlist& exportbl, num_exported++; // -- dentry - dout(7) << __func__ << " exporting " << *dn << dendl; + dout(7) << " exporting " << *dn << dendl; // dn name encode(dn->get_name(), exportbl); @@ -1841,7 +1827,7 @@ void Migrator::finish_export_dir(CDir *dir, mds_rank_t peer, map >& peer_imported, MDSContext::vec& finished, int *num_dentries) { - dout(10) << "finish_export_dir " << *dir << dendl; + dout(10) << *dir << dendl; // release open_by dir->clear_replica_map(); @@ -1910,7 +1896,7 @@ void Migrator::handle_export_ack(const cref_t &m) ceph_assert(dir->is_frozen_tree_root()); // i'm exporting! // yay! 
- dout(7) << "handle_export_ack " << *dir << dendl; + dout(7) << *dir << dendl; mds->hit_export_target(dest, -1); @@ -1955,7 +1941,7 @@ void Migrator::handle_export_ack(const cref_t &m) void Migrator::export_notify_abort(CDir *dir, export_state_t& stat, set& bounds) { - dout(7) << "export_notify_abort " << *dir << dendl; + dout(7) << *dir << dendl; ceph_assert(stat.state == EXPORT_CANCELLING); @@ -1985,7 +1971,7 @@ void Migrator::export_notify_abort(CDir *dir, export_state_t& stat, set& */ void Migrator::export_reverse(CDir *dir, export_state_t& stat) { - dout(7) << "export_reverse " << *dir << dendl; + dout(7) << *dir << dendl; set to_eval; @@ -2063,7 +2049,7 @@ void Migrator::export_reverse(CDir *dir, export_state_t& stat) */ void Migrator::export_logged_finish(CDir *dir) { - dout(7) << "export_logged_finish " << *dir << dendl; + dout(7) << *dir << dendl; export_state_t& stat = export_state[dir]; @@ -2125,21 +2111,21 @@ void Migrator::handle_export_notify_ack(const cref_t &m) if (stat.state == EXPORT_WARNING && stat.warning_ack_waiting.erase(from)) { // exporting. process warning. - dout(7) << "handle_export_notify_ack from " << m->get_source() + dout(7) << "from " << m->get_source() << ": exporting, processing warning on " << *dir << dendl; if (stat.warning_ack_waiting.empty()) export_go(dir); // start export. } else if (stat.state == EXPORT_NOTIFYING && stat.notify_ack_waiting.erase(from)) { // exporting. process notify. 
- dout(7) << "handle_export_notify_ack from " << m->get_source() + dout(7) << "from " << m->get_source() << ": exporting, processing notify on " << *dir << dendl; if (stat.notify_ack_waiting.empty()) export_finish(dir); } else if (stat.state == EXPORT_CANCELLING && m->get_new_auth().second == CDIR_AUTH_UNKNOWN && // not warning ack stat.notify_ack_waiting.erase(from)) { - dout(7) << "handle_export_notify_ack from " << m->get_source() + dout(7) << "from " << m->get_source() << ": cancelling export, processing notify on " << *dir << dendl; if (stat.notify_ack_waiting.empty()) { export_cancel_finish(export_state_entry); @@ -2152,7 +2138,7 @@ void Migrator::handle_export_notify_ack(const cref_t &m) import_state_t& stat = import_state_entry->second; if (stat.state == IMPORT_ABORTING) { // reversing import - dout(7) << "handle_export_notify_ack from " << m->get_source() + dout(7) << "from " << m->get_source() << ": aborting import on " << *dir << dendl; ceph_assert(stat.bystanders.count(from)); stat.bystanders.erase(from); @@ -2165,7 +2151,7 @@ void Migrator::handle_export_notify_ack(const cref_t &m) void Migrator::export_finish(CDir *dir) { - dout(5) << "export_finish " << *dir << dendl; + dout(3) << *dir << dendl; assert (g_conf()->mds_kill_export_at != 12); map::iterator it = export_state.find(dir); @@ -2209,7 +2195,7 @@ void Migrator::export_finish(CDir *dir) // discard delayed expires cache->discard_delayed_expire(dir); - dout(7) << "export_finish unfreezing" << dendl; + dout(7) << "unfreezing" << dendl; // unfreeze tree, with possible subtree merge. 
// (we do this _after_ removing EXPORTBOUND pins, to allow merges) @@ -2287,7 +2273,7 @@ void Migrator::handle_export_discover(const cref_t &m, bool mds_rank_t from = m->get_source_mds(); ceph_assert(from != mds->get_nodeid()); - dout(7) << "handle_export_discover on " << m->get_path() << dendl; + dout(7) << m->get_path() << dendl; // note import state dirfrag_t df = m->get_dirfrag(); @@ -2321,7 +2307,7 @@ void Migrator::handle_export_discover(const cref_t &m, bool C_MDS_ExportDiscoverFactory cf(this, m); if (!mds->mdcache->is_open()) { - dout(5) << " waiting for root" << dendl; + dout(10) << " waiting for root" << dendl; mds->mdcache->wait_for_open(cf.build()); return; } @@ -2340,7 +2326,7 @@ void Migrator::handle_export_discover(const cref_t &m, bool &trace); if (r > 0) return; if (r < 0) { - dout(7) << "handle_export_discover failed to discover or not dir " << m->get_path() << ", NAK" << dendl; + dout(7) << "failed to discover or not dir " << m->get_path() << ", NAK" << dendl; ceph_abort(); // this shouldn't happen if the auth pins its path properly!!!! 
} @@ -2348,7 +2334,7 @@ void Migrator::handle_export_discover(const cref_t &m, bool } // yay - dout(7) << "handle_export_discover have " << df << " inode " << *in << dendl; + dout(7) << "have " << df << " inode " << *in << dendl; p_state->state = IMPORT_DISCOVERED; @@ -2384,7 +2370,7 @@ void Migrator::import_reverse_prepping(CDir *dir, import_state_t& stat) void Migrator::handle_export_cancel(const cref_t &m) { - dout(7) << "handle_export_cancel on " << m->get_dirfrag() << dendl; + dout(7) << "on " << m->get_dirfrag() << dendl; dirfrag_t df = m->get_dirfrag(); map::iterator it = import_state.find(df); if (it == import_state.end()) { @@ -2494,12 +2480,12 @@ void Migrator::handle_export_prep(const cref_t &m, bool did_assi ceph_assert(diri); auto p = m->basedir.cbegin(); cache->decode_replica_dir(dir, p, diri, oldauth, finished); - dout(7) << "handle_export_prep on " << *dir << " (first pass)" << dendl; + dout(7) << "on " << *dir << " (first pass)" << dendl; } else { if (it == import_state.end() || it->second.peer != oldauth || it->second.tid != m->get_tid()) { - dout(7) << "handle_export_prep obsolete message, dropping" << dendl; + dout(7) << "obsolete message, dropping" << dendl; return; } ceph_assert(it->second.state == IMPORT_PREPPING); @@ -2507,7 +2493,7 @@ void Migrator::handle_export_prep(const cref_t &m, bool did_assi dir = cache->get_dirfrag(m->get_dirfrag()); ceph_assert(dir); - dout(7) << "handle_export_prep on " << *dir << " (subsequent pass)" << dendl; + dout(7) << "on " << *dir << " (subsequent pass)" << dendl; diri = dir->get_inode(); } ceph_assert(dir->is_auth() == false); @@ -2675,7 +2661,7 @@ void Migrator::handle_export_dir(const cref_t &m) ceph_assert(dir); mds_rank_t oldauth = mds_rank_t(m->get_source().num()); - dout(7) << "handle_export_dir importing " << *dir << " from " << oldauth << dendl; + dout(7) << "importing " << *dir << " from " << oldauth << dendl; ceph_assert(!dir->is_auth()); ceph_assert(dir->freeze_tree_state); @@ -2742,7 +2728,7 @@ 
void Migrator::handle_export_dir(const cref_t &m) // adjust popularity mds->balancer->add_import(dir); - dout(7) << "handle_export_dir did " << *dir << dendl; + dout(7) << "did " << *dir << dendl; // note state it->second.state = IMPORT_LOGGINGSTART; @@ -2819,7 +2805,7 @@ public: */ void Migrator::import_reverse(CDir *dir) { - dout(7) << "import_reverse " << *dir << dendl; + dout(7) << *dir << dendl; import_state_t& stat = import_state[dir->dirfrag()]; stat.state = IMPORT_ABORTING; @@ -2940,7 +2926,7 @@ void Migrator::import_reverse(CDir *dir) void Migrator::import_notify_finish(CDir *dir, set& bounds) { - dout(7) << "import_notify_finish " << *dir << dendl; + dout(7) << *dir << dendl; import_state_t& stat = import_state[dir->dirfrag()]; for (set::iterator p = stat.bystanders.begin(); @@ -2957,7 +2943,7 @@ void Migrator::import_notify_finish(CDir *dir, set& bounds) void Migrator::import_notify_abort(CDir *dir, set& bounds) { - dout(7) << "import_notify_abort " << *dir << dendl; + dout(7) << *dir << dendl; import_state_t& stat = import_state[dir->dirfrag()]; for (set::iterator p = stat.bystanders.begin(); @@ -2986,7 +2972,7 @@ void Migrator::import_notify_abort(CDir *dir, set& bounds) void Migrator::import_reverse_unfreeze(CDir *dir) { - dout(7) << "import_reverse_unfreeze " << *dir << dendl; + dout(7) << *dir << dendl; ceph_assert(!dir->is_auth()); cache->discard_delayed_expire(dir); dir->unfreeze_tree(); @@ -2997,7 +2983,7 @@ void Migrator::import_reverse_unfreeze(CDir *dir) void Migrator::import_reverse_final(CDir *dir) { - dout(7) << "import_reverse_final " << *dir << dendl; + dout(7) << *dir << dendl; // clean up map::iterator it = import_state.find(dir->dirfrag()); @@ -3024,7 +3010,7 @@ void Migrator::import_reverse_final(CDir *dir) void Migrator::import_logged_start(dirfrag_t df, CDir *dir, mds_rank_t from, map >& imported_session_map) { - dout(7) << "import_logged " << *dir << dendl; + dout(7) << *dir << dendl; map::iterator it = 
import_state.find(dir->dirfrag()); if (it == import_state.end() || @@ -3072,7 +3058,7 @@ void Migrator::handle_export_finish(const cref_t &m) { CDir *dir = cache->get_dirfrag(m->get_dirfrag()); ceph_assert(dir); - dout(7) << "handle_export_finish on " << *dir << (m->is_last() ? " last" : "") << dendl; + dout(7) << *dir << (m->is_last() ? " last" : "") << dendl; map::iterator it = import_state.find(m->get_dirfrag()); ceph_assert(it != import_state.end()); @@ -3083,7 +3069,7 @@ void Migrator::handle_export_finish(const cref_t &m) void Migrator::import_finish(CDir *dir, bool notify, bool last) { - dout(7) << "import_finish on " << *dir << dendl; + dout(7) << *dir << dendl; map::iterator it = import_state.find(dir->dirfrag()); ceph_assert(it != import_state.end()); @@ -3199,7 +3185,7 @@ void Migrator::decode_import_inode(CDentry *dn, bufferlist::const_iterator& blp, CInode *in; bool added = false; DECODE_START(1, blp); - dout(15) << __func__ << " on " << *dn << dendl; + dout(15) << " on " << *dn << dendl; inodeno_t ino; snapid_t last; @@ -3294,7 +3280,7 @@ void Migrator::finish_import_inode_caps(CInode *in, mds_rank_t peer, bool auth_c map &import_map) { for (auto& it : export_map) { - dout(10) << "finish_import_inode_caps for client." << it.first << " on " << *in << dendl; + dout(10) << "for client." 
<< it.first << " on " << *in << dendl; auto p = session_map.find(it.first); if (p == session_map.end()) { @@ -3355,7 +3341,7 @@ void Migrator::decode_import_dir(bufferlist::const_iterator& blp, CDir *dir = diri->get_or_open_dirfrag(mds->mdcache, df.frag); ceph_assert(dir); - dout(7) << __func__ << " " << *dir << dendl; + dout(7) << *dir << dendl; if (!dir->freeze_tree_state) { ceph_assert(dir->get_version() == 0); @@ -3414,7 +3400,7 @@ void Migrator::decode_import_dir(bufferlist::const_iterator& blp, if (dn->lock.get_state() != LOCK_SYNC) mds->locker->try_eval(&dn->lock, NULL); - dout(15) << __func__ << " got " << *dn << dendl; + dout(15) << " got " << *dn << dendl; // points to... char icode; @@ -3457,7 +3443,7 @@ void Migrator::decode_import_dir(bufferlist::const_iterator& blp, dir->inode->maybe_export_pin(); - dout(7) << __func__ << " done " << *dir << dendl; + dout(7) << " done " << *dir << dendl; DECODE_FINISH(blp); } @@ -3480,14 +3466,14 @@ void Migrator::handle_export_notify(const cref_t &m) mds_authority_t new_auth = m->get_new_auth(); if (!dir) { - dout(7) << "handle_export_notify " << old_auth << " -> " << new_auth + dout(7) << old_auth << " -> " << new_auth << " on missing dir " << m->get_dirfrag() << dendl; } else if (dir->authority() != old_auth) { - dout(7) << "handle_export_notify old_auth was " << dir->authority() + dout(7) << "old_auth was " << dir->authority() << " != " << old_auth << " -> " << new_auth << " on " << *dir << dendl; } else { - dout(7) << "handle_export_notify " << old_auth << " -> " << new_auth + dout(7) << old_auth << " -> " << new_auth << " on " << *dir << dendl; // adjust auth set have; @@ -3503,7 +3489,7 @@ void Migrator::handle_export_notify(const cref_t &m) mds->send_message_mds(make_message(m->get_dirfrag(), m->get_tid(), m->get_new_auth()), from); } else { // aborted. no ack. 
- dout(7) << "handle_export_notify no ack requested" << dendl; + dout(7) << "no ack requested" << dendl; } } @@ -3511,7 +3497,7 @@ void Migrator::handle_export_notify(const cref_t &m) void Migrator::export_caps(CInode *in) { mds_rank_t dest = in->authority().first; - dout(7) << "export_caps to mds." << dest << " " << *in << dendl; + dout(7) << "to mds." << dest << " " << *in << dendl; ceph_assert(in->is_any_caps()); ceph_assert(!in->is_auth()); @@ -3533,7 +3519,7 @@ void Migrator::handle_export_caps_ack(const cref_t &ack) if (in) { ceph_assert(!in->is_auth()); - dout(10) << "handle_export_caps_ack " << *ack << " from " + dout(10) << *ack << " from " << ack->get_source() << " on " << *in << dendl; map imported_caps; @@ -3547,7 +3533,7 @@ void Migrator::handle_export_caps_ack(const cref_t &ack) if (!cap || cap->get_cap_id() != caps_ids.at(it.first)) continue; - dout(7) << __func__ << " telling client." << it.first + dout(7) << " telling client." << it.first << " exported caps on " << *in << dendl; auto m = make_message(CEPH_CAP_OP_EXPORT, in->ino(), 0, cap->get_cap_id(), cap->get_mseq(), @@ -3569,7 +3555,7 @@ void Migrator::handle_gather_caps(const cref_t &m) if (!in) return; - dout(10) << "handle_gather_caps " << *m << " from " << m->get_source() + dout(10) << *m << " from " << m->get_source() << " on " << *in << dendl; if (in->is_any_caps() && @@ -3594,7 +3580,7 @@ public: void Migrator::handle_export_caps(const cref_t &ex) { - dout(10) << "handle_export_caps " << *ex << " from " << ex->get_source() << dendl; + dout(10) << *ex << " from " << ex->get_source() << dendl; CInode *in = cache->get_inode(ex->ino); ceph_assert(in); @@ -3633,7 +3619,7 @@ void Migrator::logged_import_caps(CInode *in, map >& imported_session_map, map >& peer_exports) { - dout(10) << "logged_import_caps on " << *in << dendl; + dout(10) << *in << dendl; // see export_go() vs export_go_synced() ceph_assert(in->is_auth());