"set location for nvmeof gateway id for (pool, group)",
"mgr", "rw")
+// Forced inter-location failback: move ANA groups back to the recovered
+// 'locale' within (pool, group). Handled in NVMeofGwMon; the map layer
+// rejects it (-EINVAL) unless every GW at that locale is AVAILABLE and
+// in STANDBY for its own ANA group.
+COMMAND("nvme-gw start-failback"
+  " name=pool,type=CephString"
+  " name=group,type=CephString"
+  " name=locale,type=CephString",
+  "start failbacks for recovered location within (pool, group)",
+  "mgr", "rw")
+
// these are tell commands that were implemented as CLI commands in
// the broken pre-octopus way that we want to allow to work when a
}
}
+// Force an inter-location failback of every gateway whose configured
+// location matches 'location' within (pool, group).
+//
+// Two passes over the group's gateways:
+//   1. validation — every GW at 'location' must be AVAILABLE and in
+//      STANDBY for its own ANA group, otherwise the whole command is
+//      rejected with -EINVAL so we never start a partial failback;
+//   2. execution — trigger find_failback_gw() with
+//      force_inter_location=true for each matching GW.
+//
+// Returns 0 on success; sets propose_pending when a map change is needed.
+int NVMeofGwMap::cfg_start_inter_location_failback(
+  const NvmeGroupKey& group_key,
+  std::string &location, bool &propose_pending) {
+  auto& gws_states = created_gws[group_key];
+  // Pass 1: validate all gateways of the subsystem at this location.
+  // Take the state by const reference — copying it would duplicate the
+  // whole per-GW state (including the sm_state array) each iteration.
+  for (const auto& found_gw_state: gws_states) {
+    const auto& st = found_gw_state.second;
+    if (st.location == location) {
+      if (st.availability != gw_availability_t::GW_AVAILABLE ||
+          st.sm_state[st.ana_grp_id] != gw_states_per_group_t::GW_STANDBY_STATE) {
+        dout(4) << "command rejected found gw in state " << st.availability
+                << " ana grp state " << st.sm_state[st.ana_grp_id] << dendl;
+        return -EINVAL;
+      }
+    }
+  }
+  // Pass 2: all GWs at 'location' are eligible — start a forced
+  // inter-location failback for each of them.
+  for (const auto& found_gw_state: gws_states) {
+    if (found_gw_state.second.location == location) {
+      // copy the id: find_failback_gw() mutates GW states in this map
+      auto gw_id = found_gw_state.first;
+      find_failback_gw(gw_id, group_key, propose_pending, true);
+    }
+  }
+  return 0;
+}
+
void NVMeofGwMap::gw_performed_startup(const NvmeGwId &gw_id,
const NvmeGroupKey& group_key, bool &propose_pending)
{
}
void NVMeofGwMap::find_failback_gw(
- const NvmeGwId &gw_id, const NvmeGroupKey& group_key, bool &propose)
+ const NvmeGwId &gw_id, const NvmeGroupKey& group_key, bool &propose,
+ bool force_inter_location)
{
auto& gws_states = created_gws[group_key];
auto& gw_state = created_gws[group_key][gw_id];
bool do_failback = false;
- dout(10) << "Find failback GW for GW " << gw_id << dendl;
+ dout(10) << "Find failback GW for GW " << gw_id << "location "
+ << gw_state.location << dendl;
for (auto& gw_state_it: gws_states) {
auto& st = gw_state_it.second;
// some other gw owns or owned the desired ana-group
dout(10) << "Found Failback GW " << failback_gw_id
<< " that previously took over the ANAGRP "
<< gw_state.ana_grp_id << " of the available GW "
- << gw_id << dendl;
+ << gw_id << "location " << st.location << dendl;
+ if (st.location != gw_state.location && !force_inter_location ) {
+ //not allowed inter-location failbacks
+ dout(10) << "not allowed interlocation failbacks. GW "
+ << gw_id << dendl;
+ return;
+ }
st.sm_state[gw_state.ana_grp_id] =
gw_states_per_group_t::GW_WAIT_FAILBACK_PREPARED;
gw_admin_state_t state, bool &propose_pending, bool test = false);
int cfg_set_location(const NvmeGwId &gw_id, const NvmeGroupKey& group_key,
std::string &location, bool &propose_pending, bool test = false);
+  // Force a failback of all ANA groups back to the (recovered) GWs at
+  // 'location' within (pool, group). Returns 0 on success, -EINVAL if
+  // any GW at 'location' is not AVAILABLE / in STANDBY for its own ANA
+  // group. Sets propose_pending when a pending-map change is needed.
+  int cfg_start_inter_location_failback(const NvmeGroupKey& group_key,
+    std::string &location, bool &propose_pending);
void process_gw_map_ka(
const NvmeGwId &gw_id, const NvmeGroupKey& group_key,
epoch_t& last_osd_epoch, bool &propose_pending);
NvmeAnaGrpId grpid, bool &propose_pending);
void find_failback_gw(
const NvmeGwId &gw_id, const NvmeGroupKey& group_key,
- bool &propose_pending);
+ bool &propose_pending, bool force_inter_location = false);
void set_failover_gw_for_ANA_group(
const NvmeGwId &failed_gw_id, const NvmeGroupKey& group_key,
const NvmeGwId &gw_id, NvmeAnaGrpId groupid);
if (rc == 0 && propose == true) {
response = true;
}
- }
- else if (prefix == "nvme-gw set-locale") {
+ } else if (prefix == "nvme-gw set-locale") {
std::string id, pool, group, locale;
cmd_getval(cmdmap, "id", id);
if (rc == 0 && propose == true) {
response = true;
}
+  } else if (prefix == "nvme-gw start-failback") {
+    // Forced inter-location failback for all GWs at 'locale' in the group.
+    // Note: the COMMAND spec declares only pool/group/locale — the
+    // previously declared 'id' was never filled by cmd_getval and only
+    // logged an empty token, so it is dropped here.
+    std::string pool, group, locale;
+    bool propose = false;
+    cmd_getval(cmdmap, "pool", pool);
+    cmd_getval(cmdmap, "group", group);
+    cmd_getval(cmdmap, "locale", locale);
+    auto group_key = std::make_pair(pool, group);
+    dout(10) << "pool " << pool << " group " << group
+             << " locale " << locale << dendl;
+    rc = pending_map.cfg_start_inter_location_failback(group_key,
+        locale, propose);
+    // Match the sibling command branches: publish the pending map when
+    // the command succeeded and a map change was proposed.
+    if (rc == 0 && propose == true) {
+      response = true;
+    }
}
getline(sstrm, rs);
if (response == false) {