#undef FLAG
#undef COMMAND
#undef COMMAND_WITH_FLAG
-MonCommand mon_commands[] = {
#define FLAG(f) (MonCommand::FLAG_##f)
#define COMMAND(parsesig, helptext, modulename, req_perms, avail) \
{parsesig, helptext, modulename, req_perms, avail, FLAG(NONE)},
#define COMMAND_WITH_FLAG(parsesig, helptext, modulename, req_perms, avail, flags) \
{parsesig, helptext, modulename, req_perms, avail, flags},
+MonCommand mon_commands[] = {
#include <mon/MonCommands.h>
+};
+MonCommand pgmonitor_commands[] = {
+#include <mon/PGMonitorCommands.h>
+};
#undef COMMAND
#undef COMMAND_WITH_FLAG
-};
void C_MonContext::finish(int r) {
}
MonCommand::encode_vector(local_mon_commands, local_mon_commands_bl);
+ local_upgrading_mon_commands = local_mon_commands;
+ for (unsigned i = 0; i < ARRAY_SIZE(pgmonitor_commands); ++i) {
+ local_upgrading_mon_commands.push_back(pgmonitor_commands[i]);
+ }
+ MonCommand::encode_vector(local_upgrading_mon_commands,
+ local_upgrading_mon_commands_bl);
+
// assume our commands until we have an election. this only means
// we won't reply with EINVAL before the election; any command that
// actually matters will wait until we have quorum etc and then
clog->info() << "mon." << name << "@" << rank
<< " won leader election with quorum " << quorum;
- set_leader_commands(get_local_commands());
+ set_leader_commands(get_local_commands(mon_features));
paxos->leader_init();
// NOTE: tell monmap monitor first. This is important for the
osdmon()->osdmap.require_osd_release < CEPH_RELEASE_LUMINOUS;
std::vector<MonCommand> commands;
- commands = static_cast<MgrMonitor*>(
+
+  // only include mgr commands once all mons are upgraded (and we've dropped
+ // the hard-coded PGMonitor commands)
+ if (quorum_mon_features.contains_all(ceph::features::mon::FEATURE_LUMINOUS)) {
+ commands = static_cast<MgrMonitor*>(
paxos_service[PAXOS_MGR])->get_command_descs();
+ }
for (auto& c : leader_mon_commands) {
commands.push_back(c);
}
}
// validate command is in our map & matches, or forward if it is allowed
- const MonCommand *mon_cmd = _get_moncommand(prefix, get_local_commands());
+ const MonCommand *mon_cmd = _get_moncommand(
+ prefix,
+ get_local_commands(quorum_mon_features));
if (!mon_cmd) {
mon_cmd = mgr_cmd;
}
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+/* no guard; may be included multiple times */
+
+COMMAND("pg stat", "show placement group status.",
+ "pg", "r", "cli,rest")
+COMMAND("pg getmap", "get binary pg map to -o/stdout", "pg", "r", "cli,rest")
+
+COMMAND("pg dump " \
+ "name=dumpcontents,type=CephChoices,strings=all|summary|sum|delta|pools|osds|pgs|pgs_brief,n=N,req=false", \
+ "show human-readable versions of pg map (only 'all' valid with plain)", "pg", "r", "cli,rest")
+COMMAND("pg dump_json " \
+ "name=dumpcontents,type=CephChoices,strings=all|summary|sum|pools|osds|pgs,n=N,req=false", \
+ "show human-readable version of pg map in json only",\
+ "pg", "r", "cli,rest")
+COMMAND("pg dump_pools_json", "show pg pools info in json only",\
+ "pg", "r", "cli,rest")
+
+COMMAND("pg ls-by-pool " \
+ "name=poolstr,type=CephString " \
+ "name=states,type=CephString,n=N,req=false", \
+ "list pg with pool = [poolname]", "pg", "r", "cli,rest")
+COMMAND("pg ls-by-primary " \
+ "name=osd,type=CephOsdName " \
+ "name=pool,type=CephInt,req=false " \
+ "name=states,type=CephString,n=N,req=false", \
+ "list pg with primary = [osd]", "pg", "r", "cli,rest")
+COMMAND("pg ls-by-osd " \
+ "name=osd,type=CephOsdName " \
+ "name=pool,type=CephInt,req=false " \
+ "name=states,type=CephString,n=N,req=false", \
+ "list pg on osd [osd]", "pg", "r", "cli,rest")
+COMMAND("pg ls " \
+ "name=pool,type=CephInt,req=false " \
+ "name=states,type=CephString,n=N,req=false", \
+ "list pg with specific pool, osd, state", "pg", "r", "cli,rest")
+COMMAND("pg dump_stuck " \
+ "name=stuckops,type=CephChoices,strings=inactive|unclean|stale|undersized|degraded,n=N,req=false " \
+ "name=threshold,type=CephInt,req=false",
+ "show information about stuck pgs",\
+ "pg", "r", "cli,rest")
+COMMAND("pg debug " \
+ "name=debugop,type=CephChoices,strings=unfound_objects_exist|degraded_pgs_exist", \
+ "show debug info about pgs", "pg", "r", "cli,rest")
+
+COMMAND("pg scrub name=pgid,type=CephPgid", "start scrub on <pgid>", \
+ "pg", "rw", "cli,rest")
+COMMAND("pg deep-scrub name=pgid,type=CephPgid", "start deep-scrub on <pgid>", \
+ "pg", "rw", "cli,rest")
+COMMAND("pg repair name=pgid,type=CephPgid", "start repair on <pgid>", \
+ "pg", "rw", "cli,rest")
+
+// stuff in osd namespace
+COMMAND("osd perf", \
+ "print dump of OSD perf summary stats", \
+ "osd", \
+ "r", \
+ "cli,rest")
+COMMAND("osd df " \
+ "name=output_method,type=CephChoices,strings=plain|tree,req=false", \
+ "show OSD utilization", "osd", "r", "cli,rest")
+COMMAND("osd blocked-by", \
+ "print histogram of which OSDs are blocking their peers", \
+ "osd", "r", "cli,rest")
+COMMAND("osd pool stats " \
+ "name=name,type=CephString,req=false",
+ "obtain stats from all pools, or from specified pool",
+ "osd", "r", "cli,rest")
+COMMAND("osd reweight-by-utilization " \
+ "name=oload,type=CephInt,req=false " \
+ "name=max_change,type=CephFloat,req=false " \
+ "name=max_osds,type=CephInt,req=false " \
+ "name=no_increasing,type=CephChoices,strings=--no-increasing,req=false",\
+ "reweight OSDs by utilization [overload-percentage-for-consideration, default 120]", \
+ "osd", "rw", "cli,rest")
+COMMAND("osd test-reweight-by-utilization " \
+ "name=oload,type=CephInt,req=false " \
+ "name=max_change,type=CephFloat,req=false " \
+ "name=max_osds,type=CephInt,req=false " \
+ "name=no_increasing,type=CephChoices,strings=--no-increasing,req=false",\
+ "dry run of reweight OSDs by utilization [overload-percentage-for-consideration, default 120]", \
+ "osd", "r", "cli,rest")
+COMMAND("osd reweight-by-pg " \
+ "name=oload,type=CephInt,req=false " \
+ "name=max_change,type=CephFloat,req=false " \
+ "name=max_osds,type=CephInt,req=false " \
+ "name=pools,type=CephPoolname,n=N,req=false", \
+ "reweight OSDs by PG distribution [overload-percentage-for-consideration, default 120]", \
+ "osd", "rw", "cli,rest")
+COMMAND("osd test-reweight-by-pg " \
+ "name=oload,type=CephInt,req=false " \
+ "name=max_change,type=CephFloat,req=false " \
+ "name=max_osds,type=CephInt,req=false " \
+ "name=pools,type=CephPoolname,n=N,req=false", \
+ "dry run of reweight OSDs by PG distribution [overload-percentage-for-consideration, default 120]", \
+ "osd", "r", "cli,rest")
+
+COMMAND("osd scrub " \
+ "name=who,type=CephString", \
+ "initiate scrub on osd <who>, or use <all|any|*> to scrub all", \
+ "osd", "rw", "cli,rest")
+COMMAND("osd deep-scrub " \
+ "name=who,type=CephString", \
+ "initiate deep scrub on osd <who>, or use <all|any|*> to deep scrub all", \
+ "osd", "rw", "cli,rest")
+COMMAND("osd repair " \
+ "name=who,type=CephString", \
+ "initiate repair on osd <who>, or use <all|any|*> to repair all", \
+ "osd", "rw", "cli,rest")
+
+COMMAND("pg force_create_pg name=pgid,type=CephPgid", \
+ "force creation of pg <pgid>", "pg", "rw", "cli,rest")
+COMMAND_WITH_FLAG("pg set_full_ratio name=ratio,type=CephFloat,range=0.0|1.0", \
+ "set ratio at which pgs are considered full", \
+ "pg", "rw", "cli,rest", FLAG(DEPRECATED))
+COMMAND_WITH_FLAG("pg set_nearfull_ratio " \
+ "name=ratio,type=CephFloat,range=0.0|1.0", \
+ "set ratio at which pgs are considered nearly full", \
+ "pg", "rw", "cli,rest", FLAG(DEPRECATED))