return f.get();
}
+// Implement 'get_perf_schema' for python mgr modules: serialize the declared
+// perf counter schema (description / nick / type bits) of the selected
+// daemons into a python object via PyFormatter.
+//
+// handle:   name of the calling python module (kept for API symmetry with
+//           the other get_* callbacks)
+// svc_type: daemon service type; "" is a wildcard matching all daemons.
+//           NOTE(review): taken by value while the sibling params are
+//           const& — presumably unintentional; if changed, the header
+//           declaration must be updated in the same commit.
+// svc_id:   daemon id; empty string is a wildcard within svc_type.
+// Returns a "perf_schema" object keyed by "<svc_type>.<svc_id>", each value
+// mapping counter type name -> {description, nick (if non-empty), type}.
+PyObject* PyModules::get_perf_schema_python(
+ const std::string &handle,
+ const std::string svc_type,
+ const std::string &svc_id)
+{
+ // Drop the GIL before blocking on the PyModules lock (never wait on
+ // 'lock' while holding the GIL), then re-take it: the PyFormatter calls
+ // below create python objects and therefore need the GIL held.
+ PyThreadState *tstate = PyEval_SaveThread();
+ Mutex::Locker l(lock);
+ PyEval_RestoreThread(tstate);
+
+ DaemonStateCollection states;
+
+ // Select daemons: empty svc_type -> everything; empty svc_id -> all
+ // daemons of that type; otherwise the single daemon, if it exists.
+ if (svc_type == "") {
+ states = daemon_state.get_all();
+ } else if (svc_id.empty()) {
+ states = daemon_state.get_by_service(svc_type);
+ } else {
+ auto key = DaemonKey(svc_type, svc_id);
+ // so that the below can be a loop in all cases
+ if (daemon_state.exists(key)) {
+ states[key] = daemon_state.get(key);
+ }
+ }
+
+ PyFormatter f;
+ f.open_object_section("perf_schema");
+
+ // FIXME: this is unsafe, I need to either be inside DaemonStateIndex's
+ // lock or put a lock on individual DaemonStates
+ if (!states.empty()) {
+ // NOTE(review): 'statepair' (and 'typestr' below) are copied on every
+ // iteration; 'const auto &' would avoid the per-element copies.
+ for (auto statepair : states) {
+ std::ostringstream daemon_name;
+ auto key = statepair.first;
+ auto state = statepair.second;
+ // Section name is "<svc_type>.<svc_id>", e.g. "osd.0".
+ daemon_name << key.first << "." << key.second;
+ f.open_object_section(daemon_name.str().c_str());
+
+ for (auto typestr : state->perf_counters.declared_types) {
+ f.open_object_section(typestr.c_str());
+ // NOTE(review): operator[] copies the schema entry and would
+ // default-insert on a miss; a find()-based const-ref lookup is
+ // cheaper — verify declared_types is always a subset of types.
+ auto type = state->perf_counters.types[typestr];
+ f.dump_string("description", type.description);
+ // nick is optional; omit the key entirely when empty.
+ if (!type.nick.empty()) {
+ f.dump_string("nick", type.nick);
+ }
+ f.dump_unsigned("type", type.type);
+ f.close_section();
+ }
+ f.close_section();
+ }
+ } else {
+ // No matching daemons: return an empty perf_schema object rather than
+ // raising. NOTE(review): stray ')' in the log text — no matching '('.
+ dout(4) << __func__ << ": No daemon state found for "
+ << svc_type << "." << svc_id << ")" << dendl;
+ }
+ f.close_section();
+ return f.get();
+}
+
PyObject *PyModules::get_context()
{
PyThreadState *tstate = PyEval_SaveThread();
const std::string &svc_name,
const std::string &svc_id,
const std::string &path);
+ // Fetch perf counter schema for the selected daemon(s); empty svc_type /
+ // svc_id act as wildcards. NOTE(review): svc_type is by value here while
+ // the neighbouring params are const& — keep in sync with the definition.
+ PyObject *get_perf_schema_python(
+ const std::string &handle,
+ const std::string svc_type,
+ const std::string &svc_id);
PyObject *get_context();
std::map<std::string, std::string> config_cache;
handle, svc_name, svc_id, counter_path);
}
+// Python-callable shim: unpack (handle, svc_type, svc_id) string args and
+// delegate to PyModules::get_perf_schema_python on the global handle.
+static PyObject*
+get_perf_schema(PyObject *self, PyObject *args)
+{
+ char *handle = nullptr;
+ char *type_str = nullptr;
+ char *svc_id = nullptr;
+ // "sss": three mandatory str arguments (None is rejected with TypeError);
+ // empty strings act as wildcards further down the call chain.
+ if (!PyArg_ParseTuple(args, "sss:get_perf_schema", &handle, &type_str,
+ &svc_id)) {
+ // Parse failure: exception already set, propagate by returning nullptr.
+ return nullptr;
+ }
+
+ return global_handle->get_perf_schema_python(handle, type_str, svc_id);
+}
+
PyMethodDef CephStateMethods[] = {
{"get", ceph_state_get, METH_VARARGS,
"Get a cluster object"},
"Set a configuration value"},
{"get_counter", get_counter, METH_VARARGS,
"Get a performance counter"},
+ {"get_perf_schema", get_perf_schema, METH_VARARGS,
+ "Get the performance counter schema"},
{"log", ceph_log, METH_VARARGS,
"Emit a (local) log message"},
{"get_version", ceph_get_version, METH_VARARGS,
"""
return ceph_state.get_server(self._handle, hostname)
+ def get_perf_schema(self, svc_type, svc_name):
+ """
+ Called by the plugin to fetch perf counter schema info.
+ svc_name can be an empty string, as can svc_type, in which
+ case they act as wildcards (the C extension requires str
+ arguments, so None is not accepted).
+
+ :param svc_type: daemon service type, e.g. "osd" ("" = all types)
+ :param svc_name: daemon id ("" = all daemons of svc_type)
+ :return: dict keyed by "<svc_type>.<svc_id>", each value mapping
+     counter type name to its schema dict (description, optional
+     nick, type bits)
+ """
+ return ceph_state.get_perf_schema(self._handle, svc_type, svc_name)
+
def get_counter(self, svc_type, svc_name, path):
"""
Called by the plugin to fetch data for a particular perf counter