From: Kotresh HR Date: Thu, 27 Aug 2020 11:05:42 +0000 (+0530) Subject: mds: add performance counter for cap messages X-Git-Tag: v16.1.0~1190^2 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=46d400312e5b9ac178505bb8c587d9edc3506616;p=ceph.git mds: add performance counter for cap messages Signed-off-by: Kotresh HR Fixes: https://tracker.ceph.com/issues/47102 --- diff --git a/src/mds/Locker.cc b/src/mds/Locker.cc index 79aa4f11017e..4942c276bf27 100644 --- a/src/mds/Locker.cc +++ b/src/mds/Locker.cc @@ -2354,6 +2354,8 @@ int Locker::issue_caps(CInode *in, Capability *only_cap) dout(7) << " sending MClientCaps to client." << it->first << " seq " << seq << " re-issue " << ccap_string(pending) << dendl; + if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_grant); + auto m = make_message(CEPH_CAP_OP_GRANT, in->ino(), in->find_snaprealm()->inode->ino(), cap->get_cap_id(), cap->get_last_seq(), @@ -2393,10 +2395,13 @@ int Locker::issue_caps(CInode *in, Capability *only_cap) int op = (before & ~after) ? CEPH_CAP_OP_REVOKE : CEPH_CAP_OP_GRANT; if (op == CEPH_CAP_OP_REVOKE) { + if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_revoke); revoking_caps.push_back(&cap->item_revoking_caps); revoking_caps_by_client[cap->get_client()].push_back(&cap->item_client_revoking_caps); cap->set_last_revoke_stamp(ceph_clock_now()); cap->reset_num_revoke_warnings(); + } else { + if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_grant); } auto m = make_message(op, in->ino(), @@ -2421,6 +2426,7 @@ void Locker::issue_truncate(CInode *in) dout(7) << "issue_truncate on " << *in << dendl; for (auto &p : in->client_caps) { + if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_trunc); Capability *cap = &p.second; auto m = make_message(CEPH_CAP_OP_TRUNC, in->ino(), @@ -2648,6 +2654,8 @@ void Locker::handle_inode_file_caps(const cref_t &m) dout(7) << "handle_inode_file_caps replica mds." 
<< from << " wants caps " << ccap_string(m->get_caps()) << " on " << *in << dendl; + if (mds->logger) mds->logger->inc(l_mdss_handle_inode_file_caps); + in->set_mds_caps_wanted(from, m->get_caps()); try_eval(in, CEPH_CAP_LOCKS); @@ -2869,6 +2877,7 @@ void Locker::share_inode_max_size(CInode *in, Capability *only_cap) continue; if (cap->pending() & (CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER)) { dout(10) << "share_inode_max_size with client." << client << dendl; + if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_grant); cap->inc_last_seq(); auto m = make_message(CEPH_CAP_OP_GRANT, in->ino(), @@ -3077,14 +3086,21 @@ void Locker::handle_client_caps(const cref_t &m) return; } + if (mds->logger) mds->logger->inc(l_mdss_handle_client_caps); + if (dirty) { + if (mds->logger) mds->logger->inc(l_mdss_handle_client_caps_dirty); + } + if (m->get_client_tid() > 0 && session && session->have_completed_flush(m->get_client_tid())) { dout(7) << "handle_client_caps already flushed tid " << m->get_client_tid() << " for client." << client << dendl; ref_t ack; if (op == CEPH_CAP_OP_FLUSHSNAP) { + if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_flushsnap_ack); ack = make_message(CEPH_CAP_OP_FLUSHSNAP_ACK, m->get_ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier()); } else { + if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_flush_ack); ack = make_message(CEPH_CAP_OP_FLUSH_ACK, m->get_ino(), 0, m->get_cap_id(), m->get_seq(), m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier()); } ack->set_snap_follows(follows); @@ -3229,8 +3245,10 @@ void Locker::handle_client_caps(const cref_t &m) head_in->remove_need_snapflush(in, snap, client); } else { dout(7) << " not expecting flushsnap " << snap << " from client." 
<< client << " on " << *in << dendl; - if (ack) + if (ack) { + if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_flushsnap_ack); mds->send_message_client_counted(ack, m->get_connection()); + } } goto out; } @@ -3320,8 +3338,10 @@ void Locker::handle_client_caps(const cref_t &m) need_flush = _need_flush_mdlog(in, cap->wanted() & ~cap->pending()); } else { // no update, ack now. - if (ack) + if (ack) { + if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_flush_ack); mds->send_message_client_counted(ack, m->get_connection()); + } bool did_issue = eval(in, CEPH_CAP_LOCKS); if (!did_issue && (cap->wanted() & ~cap->pending())) @@ -3416,6 +3436,8 @@ void Locker::process_request_cap_release(MDRequestRef& mdr, client_t client, con return; } + if (mds->logger) mds->logger->inc(l_mdss_process_request_cap_release); + if (caps & ~cap->issued()) { dout(10) << " confirming not issued caps " << ccap_string(caps & ~cap->issued()) << dendl; caps &= cap->issued(); @@ -3499,8 +3521,12 @@ void Locker::_do_snap_update(CInode *in, snapid_t snap, int dirty, snapid_t foll // hmm, i guess snap was already deleted? just ack! dout(10) << " wow, the snap following " << follows << " was already deleted. nothing to record, just ack." 
<< dendl; - if (ack) + if (ack) { + if (ack->get_op() == CEPH_CAP_OP_FLUSHSNAP_ACK) { + if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_flushsnap_ack); + } mds->send_message_client_counted(ack, m->get_connection()); + } return; } @@ -3884,6 +3910,8 @@ void Locker::handle_client_cap_release(const cref_t &m) return; } + if (mds->logger) mds->logger->inc(l_mdss_handle_client_cap_release); + if (m->osd_epoch_barrier && !mds->objecter->have_map(m->osd_epoch_barrier)) { // Pause RADOS operations until we see the required epoch mds->objecter->set_epoch_barrier(m->osd_epoch_barrier); diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc index cf2fd084499c..6e8df3c84987 100644 --- a/src/mds/MDSRank.cc +++ b/src/mds/MDSRank.cc @@ -3255,6 +3255,28 @@ void MDSRank::create_logger() mds_plb.add_u64_counter(l_mds_imported_inodes, "imported_inodes", "Imported inodes", "imi", PerfCountersBuilder::PRIO_INTERESTING); + // caps msg stats + mds_plb.add_u64_counter(l_mdss_handle_client_caps, "handle_client_caps", + "Client caps msg", "hcc", PerfCountersBuilder::PRIO_INTERESTING); + mds_plb.add_u64_counter(l_mdss_handle_client_caps_dirty, "handle_client_caps_dirty", + "Client dirty caps msg", "hccd", PerfCountersBuilder::PRIO_INTERESTING); + mds_plb.add_u64_counter(l_mdss_handle_client_cap_release, "handle_client_cap_release", + "Client cap release msg", "hccr", PerfCountersBuilder::PRIO_INTERESTING); + mds_plb.add_u64_counter(l_mdss_process_request_cap_release, "process_request_cap_release", + "Process request cap release", "prcr", PerfCountersBuilder::PRIO_INTERESTING); + mds_plb.add_u64_counter(l_mdss_ceph_cap_op_revoke, "ceph_cap_op_revoke", + "Revoke caps", "crev", PerfCountersBuilder::PRIO_INTERESTING); + mds_plb.add_u64_counter(l_mdss_ceph_cap_op_grant, "ceph_cap_op_grant", + "Grant caps", "cgra", PerfCountersBuilder::PRIO_INTERESTING); + mds_plb.add_u64_counter(l_mdss_ceph_cap_op_trunc, "ceph_cap_op_trunc", + "caps truncate notify", "ctru", 
PerfCountersBuilder::PRIO_INTERESTING); + mds_plb.add_u64_counter(l_mdss_ceph_cap_op_flushsnap_ack, "ceph_cap_op_flushsnap_ack", + "caps flushsnap ack", "cfsa", PerfCountersBuilder::PRIO_INTERESTING); + mds_plb.add_u64_counter(l_mdss_ceph_cap_op_flush_ack, "ceph_cap_op_flush_ack", + "caps flush ack", "cfa", PerfCountersBuilder::PRIO_INTERESTING); + mds_plb.add_u64_counter(l_mdss_handle_inode_file_caps, "handle_inode_file_caps", + "Inter mds caps msg", "hifc", PerfCountersBuilder::PRIO_INTERESTING); + // useful dir/inode/subtree stats mds_plb.set_prio_default(PerfCountersBuilder::PRIO_USEFUL); mds_plb.add_u64(l_mds_root_rfiles, "root_rfiles", "root inode rfiles"); diff --git a/src/mds/MDSRank.h b/src/mds/MDSRank.h index 1af66e933b5e..69281881c460 100644 --- a/src/mds/MDSRank.h +++ b/src/mds/MDSRank.h @@ -84,6 +84,16 @@ enum { l_mds_root_rfiles, l_mds_root_rbytes, l_mds_root_rsnaps, + l_mdss_handle_inode_file_caps, + l_mdss_ceph_cap_op_revoke, + l_mdss_ceph_cap_op_grant, + l_mdss_ceph_cap_op_trunc, + l_mdss_ceph_cap_op_flushsnap_ack, + l_mdss_ceph_cap_op_flush_ack, + l_mdss_handle_client_caps, + l_mdss_handle_client_caps_dirty, + l_mdss_handle_client_cap_release, + l_mdss_process_request_cap_release, l_mds_last, };