dout(7) << " sending MClientCaps to client." << it->first
<< " seq " << seq << " re-issue " << ccap_string(pending) << dendl;
+ if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_grant);
+
auto m = make_message<MClientCaps>(CEPH_CAP_OP_GRANT, in->ino(),
in->find_snaprealm()->inode->ino(),
cap->get_cap_id(), cap->get_last_seq(),
int op = (before & ~after) ? CEPH_CAP_OP_REVOKE : CEPH_CAP_OP_GRANT;
if (op == CEPH_CAP_OP_REVOKE) {
+ if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_revoke);
revoking_caps.push_back(&cap->item_revoking_caps);
revoking_caps_by_client[cap->get_client()].push_back(&cap->item_client_revoking_caps);
cap->set_last_revoke_stamp(ceph_clock_now());
cap->reset_num_revoke_warnings();
+ } else {
+ if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_grant);
}
auto m = make_message<MClientCaps>(op, in->ino(),
dout(7) << "issue_truncate on " << *in << dendl;
for (auto &p : in->client_caps) {
+ if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_trunc);
Capability *cap = &p.second;
auto m = make_message<MClientCaps>(CEPH_CAP_OP_TRUNC,
in->ino(),
dout(7) << "handle_inode_file_caps replica mds." << from << " wants caps " << ccap_string(m->get_caps()) << " on " << *in << dendl;
+ if (mds->logger) mds->logger->inc(l_mdss_handle_inode_file_caps);
+
in->set_mds_caps_wanted(from, m->get_caps());
try_eval(in, CEPH_CAP_LOCKS);
continue;
if (cap->pending() & (CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER)) {
dout(10) << "share_inode_max_size with client." << client << dendl;
+ if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_grant);
cap->inc_last_seq();
auto m = make_message<MClientCaps>(CEPH_CAP_OP_GRANT,
in->ino(),
return;
}
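+ // count every client cap message handled here; messages carrying dirty
+ // caps to flush are also tracked by the dedicated "dirty" counter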
+ if (mds->logger) mds->logger->inc(l_mdss_handle_client_caps);
+ if (dirty) {
+ if (mds->logger) mds->logger->inc(l_mdss_handle_client_caps_dirty);
+ }
+
if (m->get_client_tid() > 0 && session &&
session->have_completed_flush(m->get_client_tid())) {
dout(7) << "handle_client_caps already flushed tid " << m->get_client_tid()
<< " for client." << client << dendl;
ref_t<MClientCaps> ack;
if (op == CEPH_CAP_OP_FLUSHSNAP) {
+ if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_flushsnap_ack);
ack = make_message<MClientCaps>(CEPH_CAP_OP_FLUSHSNAP_ACK, m->get_ino(), 0, 0, 0, 0, 0, dirty, 0, mds->get_osd_epoch_barrier());
} else {
+ if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_flush_ack);
ack = make_message<MClientCaps>(CEPH_CAP_OP_FLUSH_ACK, m->get_ino(), 0, m->get_cap_id(), m->get_seq(), m->get_caps(), 0, dirty, 0, mds->get_osd_epoch_barrier());
}
ack->set_snap_follows(follows);
head_in->remove_need_snapflush(in, snap, client);
} else {
dout(7) << " not expecting flushsnap " << snap << " from client." << client << " on " << *in << dendl;
- if (ack)
+ if (ack) {
+ if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_flushsnap_ack);
mds->send_message_client_counted(ack, m->get_connection());
+ }
}
goto out;
}
need_flush = _need_flush_mdlog(in, cap->wanted() & ~cap->pending());
} else {
// no update, ack now.
- if (ack)
+ if (ack) {
+ if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_flush_ack);
mds->send_message_client_counted(ack, m->get_connection());
+ }
bool did_issue = eval(in, CEPH_CAP_LOCKS);
if (!did_issue && (cap->wanted() & ~cap->pending()))
return;
}
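+ // count cap releases that arrive embedded in client requests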
+ if (mds->logger) mds->logger->inc(l_mdss_process_request_cap_release);
+
if (caps & ~cap->issued()) {
dout(10) << " confirming not issued caps " << ccap_string(caps & ~cap->issued()) << dendl;
caps &= cap->issued();
// hmm, i guess snap was already deleted? just ack!
dout(10) << " wow, the snap following " << follows
<< " was already deleted. nothing to record, just ack." << dendl;
- if (ack)
+ if (ack) {
+ if (ack->get_op() == CEPH_CAP_OP_FLUSHSNAP_ACK) {
+ if (mds->logger) mds->logger->inc(l_mdss_ceph_cap_op_flushsnap_ack);
+ }
mds->send_message_client_counted(ack, m->get_connection());
+ }
return;
}
return;
}
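+ // count standalone client cap-release messages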
+ if (mds->logger) mds->logger->inc(l_mdss_handle_client_cap_release);
+
if (m->osd_epoch_barrier && !mds->objecter->have_map(m->osd_epoch_barrier)) {
// Pause RADOS operations until we see the required epoch
mds->objecter->set_epoch_barrier(m->osd_epoch_barrier);
mds_plb.add_u64_counter(l_mds_imported_inodes, "imported_inodes", "Imported inodes",
"imi", PerfCountersBuilder::PRIO_INTERESTING);
+ // caps msg stats
+ mds_plb.add_u64_counter(l_mdss_handle_client_caps, "handle_client_caps",
+ "Client caps msg", "hcc", PerfCountersBuilder::PRIO_INTERESTING);
+ mds_plb.add_u64_counter(l_mdss_handle_client_caps_dirty, "handle_client_caps_dirty",
+ "Client dirty caps msg", "hccd", PerfCountersBuilder::PRIO_INTERESTING);
+ mds_plb.add_u64_counter(l_mdss_handle_client_cap_release, "handle_client_cap_release",
+ "Client cap release msg", "hccr", PerfCountersBuilder::PRIO_INTERESTING);
+ mds_plb.add_u64_counter(l_mdss_process_request_cap_release, "process_request_cap_release",
+ "Process request cap release", "prcr", PerfCountersBuilder::PRIO_INTERESTING);
+ mds_plb.add_u64_counter(l_mdss_ceph_cap_op_revoke, "ceph_cap_op_revoke",
+ "Revoke caps", "crev", PerfCountersBuilder::PRIO_INTERESTING);
+ mds_plb.add_u64_counter(l_mdss_ceph_cap_op_grant, "ceph_cap_op_grant",
+ "Grant caps", "cgra", PerfCountersBuilder::PRIO_INTERESTING);
+ mds_plb.add_u64_counter(l_mdss_ceph_cap_op_trunc, "ceph_cap_op_trunc",
+ "Caps truncate notify", "ctru", PerfCountersBuilder::PRIO_INTERESTING);
+ mds_plb.add_u64_counter(l_mdss_ceph_cap_op_flushsnap_ack, "ceph_cap_op_flushsnap_ack",
+ "Caps flushsnap ack", "cfsa", PerfCountersBuilder::PRIO_INTERESTING);
+ mds_plb.add_u64_counter(l_mdss_ceph_cap_op_flush_ack, "ceph_cap_op_flush_ack",
+ "Caps flush ack", "cfa", PerfCountersBuilder::PRIO_INTERESTING);
+ mds_plb.add_u64_counter(l_mdss_handle_inode_file_caps, "handle_inode_file_caps",
+ "Inter mds caps msg", "hifc", PerfCountersBuilder::PRIO_INTERESTING);
+
// useful dir/inode/subtree stats
mds_plb.set_prio_default(PerfCountersBuilder::PRIO_USEFUL);
mds_plb.add_u64(l_mds_root_rfiles, "root_rfiles", "root inode rfiles");