osd: EC error inject interfaces
author    Bill Scales <bill_scales@uk.ibm.com>
Tue, 8 Oct 2024 08:51:14 +0000 (08:51 +0000)
committer Jon Bailey <jonathan.bailey1@ibm.com>
Tue, 7 Jan 2025 11:40:33 +0000 (11:40 +0000)
Add error inject interfaces for EC reads and writes, invoked via the
ceph tell osd.<n> interface.

Signed-off-by: Bill Scales <bill_scales@uk.ibm.com>
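
The new commands can be driven through the ceph tell osd.<n> interface.
As a sketch (hypothetical OSD id, pool and object names; positional
arguments follow the registration order: pool, objname, shardid, then
optional type, when and duration):

    ceph tell osd.0 injectecreaderr mypool myobject 2 0 0 1
    ceph tell osd.0 injectecclearreaderr mypool myobject 2 0
    ceph tell osd.0 injectecwriteerr mypool myobject 2 1 0 1
    ceph tell osd.0 injectecclearwriteerr mypool myobject 2 1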
src/osd/ECBackend.cc
src/osd/ECCommon.cc
src/osd/ECCommon.h
src/osd/OSD.cc
src/osd/PGBackend.h
src/osd/PrimaryLogPG.cc
src/osd/PrimaryLogPG.h

index fa2570aba42afa62d25148387b2dab87e01708f6..8630b038812ee4583c34c4a9c343b719552696e0 100644 (file)
@@ -945,6 +945,10 @@ void ECBackend::handle_sub_write(
   }
   trace.event("handle_sub_write");
 
+  if (cct->_conf->bluestore_debug_inject_read_err &&
+      ec_inject_test_write_error3(op.soid)) {
+    ceph_abort_msg("Error inject - OSD down");
+  }
   if (!get_parent()->pgb_is_primary())
     get_parent()->update_stats(op.stats);
   ObjectStore::Transaction localt;
@@ -1191,6 +1195,15 @@ void ECBackend::handle_sub_write_reply(
     i->second->on_all_commit = 0;
     i->second->trace.event("ec write all committed");
   }
+  if (cct->_conf->bluestore_debug_inject_read_err &&
+      (i->second->pending_commit.size() == 1) &&
+      ec_inject_test_write_error2(i->second->hoid)) {
+    std::string cmd =
+      "{ \"prefix\": \"osd down\", \"ids\": [\"" + std::to_string( get_parent()->whoami() ) + "\"] }";
+    vector<std::string> vcmd{cmd};
+    dout(0) << __func__ << " Error inject - marking OSD down" << dendl;
+    get_parent()->start_mon_command(vcmd, {}, nullptr, nullptr, nullptr);
+  }
   rmw_pipeline.check_ops();
 }
 
@@ -1208,6 +1221,19 @@ void ECBackend::handle_sub_read_reply(
     return;
   }
   ReadOp &rop = iter->second;
+  if (cct->_conf->bluestore_debug_inject_read_err) {
+    for (auto i = op.buffers_read.begin();
+        i != op.buffers_read.end();) {
+      if (ec_inject_test_read_error0(ghobject_t(i->first, ghobject_t::NO_GEN, op.from.shard))) {
+       dout(0) << __func__ << " Error inject - EIO error for shard " << op.from.shard << dendl;
+       op.attrs_read.erase(i->first);
+       op.errors[i->first] = -EIO;
+       // erase() returns the next valid iterator; avoids use-after-erase
+       i = op.buffers_read.erase(i);
+      } else {
+       ++i;
+      }
+    }
+  }
   for (auto i = op.buffers_read.begin();
        i != op.buffers_read.end();
        ++i) {
index 609ac3141ae30f7f42400d80bbfe26370d13e033..59077547fcb9e44d694e78f52d92909b42e881ab 100644 (file)
@@ -226,8 +226,14 @@ void ECCommon::ReadPipeline::get_all_avail_shards(
        ++i) {
     dout(10) << __func__ << ": checking acting " << *i << dendl;
     const pg_missing_t &missing = get_parent()->get_shard_missing(*i);
-    if (error_shards.find(*i) != error_shards.end())
+    if (error_shards.contains(*i)) {
       continue;
+    }
+    if (cct->_conf->bluestore_debug_inject_read_err &&
+        ec_inject_test_read_error1(ghobject_t(hoid, ghobject_t::NO_GEN, i->shard))) {
+      dout(0) << __func__ << " Error inject - Missing shard " << i->shard << dendl;
+      continue;
+    }
     if (!missing.is_missing(hoid)) {
       ceph_assert(!have.count(i->shard));
       have.insert(i->shard);
@@ -912,6 +918,11 @@ bool ECCommon::RMWPipeline::try_reads_to_commit()
     if (*i == get_parent()->whoami_shard()) {
       should_write_local = true;
       local_write_op.claim(sop);
+    } else if (cct->_conf->bluestore_debug_inject_read_err &&
+              ec_inject_test_write_error1(ghobject_t(op->hoid,
+                ghobject_t::NO_GEN, i->shard))) {
+      dout(0) << " Error inject - Dropping write message to shard " <<
+       i->shard << dendl;
     } else {
       MOSDECSubOpWrite *r = new MOSDECSubOpWrite(sop);
       r->pgid = spg_t(get_parent()->primary_spg_t().pgid, i->shard);
@@ -1090,3 +1101,305 @@ ECUtil::HashInfoRef ECCommon::UnstableHashInfoRegistry::get_hash_info(
   }
   return ref;
 }
+
+// Error inject interfaces
+static ceph::recursive_mutex ec_inject_lock =
+  ceph::make_recursive_mutex("ECCommon::ec_inject_lock");
+static std::map<ghobject_t,std::pair<int64_t,int64_t>> ec_inject_read_failures0;
+static std::map<ghobject_t,std::pair<int64_t,int64_t>> ec_inject_read_failures1;
+static std::map<ghobject_t,std::pair<int64_t,int64_t>> ec_inject_write_failures0;
+static std::map<ghobject_t,std::pair<int64_t,int64_t>> ec_inject_write_failures1;
+static std::map<ghobject_t,std::pair<int64_t,int64_t>> ec_inject_write_failures2;
+static std::map<ghobject_t,std::pair<int64_t,int64_t>> ec_inject_write_failures3;
+static std::map<ghobject_t,shard_id_t> ec_inject_write_failures0_shard;
+static std::set<osd_reqid_t> ec_inject_write_failures0_reqid;
+
+/**
+ * Configure a read error inject that typically forces additional reads of
+ * shards in an EC pool to recover data using the redundancy. With multiple
+ * errors it is possible to force client reads to fail.
+ *
+ * Type 0 - Simulate a medium error. Fail a read with -EIO to force
+ * additional reads and a decode.
+ *
+ * Type 1 - Simulate a missing OSD. Don't even try to read a shard.
+ *
+ * @brief Set up a read error inject for an object in an EC pool.
+ * @param o Target object for the error inject.
+ * @param when Error inject starts after this many object store reads.
+ * @param duration Error inject affects this many object store reads.
+ * @param type Type of error inject 0 = EIO, 1 = missing shard.
+ * @return string Result of configuring the error inject.
+ */
+std::string ec_inject_read_error(const ghobject_t& o,
+                                const int64_t type,
+                                const int64_t when,
+                                const int64_t duration) {
+  std::lock_guard<ceph::recursive_mutex> l(ec_inject_lock);
+  ghobject_t os = o;
+  if (os.hobj.oid.name == "*") {
+    os.hobj.set_hash(0);
+  }
+  switch (type) {
+  case 0:
+    ec_inject_read_failures0[os] = std::pair(when, duration);
+    return "ok - read returns EIO";
+  case 1:
+    ec_inject_read_failures1[os] = std::pair(when, duration);
+    return "ok - read pretends shard is missing";
+  default:
+    break;
+  }
+  return "unrecognized error inject type";
+}
+
+/**
+ * Configure a write error inject that either fails an OSD or causes a
+ * client write operation to be rolled back.
+ *
+ * Type 0 - Tests rollback. Drop a write I/O to a shard, then simulate an OSD
+ * down to force rollback to occur, lastly fail the retried write from the
+ * client so the results of the rollback can be inspected.
+ *
+ * Type 1 - Drop a write I/O to a shard. Used on its own this will hang a
+ * write I/O.
+ *
+ * Type 2 - Simulate an OSD down (ceph osd down) to force a new epoch. Usually
+ * used together with type 1 to force a rollback.
+ *
+ * Type 3 - Abort when an OSD processes a write I/O to a shard. Typically the
+ * client write will be committed while the OSD is absent, which will result
+ * in recovery or backfill later when the OSD returns.
+ *
+ * @brief Set up a write error inject for an object in an EC pool.
+ * @param o Target object for the error inject.
+ * @param when Error inject starts after this many object store writes.
+ * @param duration Error inject affects this many object store writes.
+ * @param type Type of error inject 0 = rollback, 1 = drop write, 2 = OSD down, 3 = abort.
+ * @return string Result of configuring the error inject.
+ */
+std::string ec_inject_write_error(const ghobject_t& o,
+                                 const int64_t type,
+                                 const int64_t when,
+                                 const int64_t duration) {
+  std::lock_guard<ceph::recursive_mutex> l(ec_inject_lock);
+  std::map<ghobject_t,std::pair<int64_t,int64_t>> *failures;
+  ghobject_t os = o;
+  bool no_shard = true;
+  std::string result;
+  switch (type) {
+  case 0:
+    failures = &ec_inject_write_failures0;
+    result = "ok - drop write, sim OSD down and fail client retry with EINVAL";
+    break;
+  case 1:
+    failures = &ec_inject_write_failures1;
+    no_shard = false;
+    result = "ok - drop write to shard";
+    break;
+  case 2:
+    failures = &ec_inject_write_failures2;
+    result = "ok - inject OSD down";
+    break;
+  case 3:
+    if (duration != 1) {
+      return "duration must be 1";
+    }
+    failures = &ec_inject_write_failures3;
+    result = "ok - write abort OSDs";
+    break;
+  default:
+    return "unrecognized error inject type";
+  }
+  if (no_shard) {
+    os.set_shard(shard_id_t::NO_SHARD);
+  }
+  if (os.hobj.oid.name == "*") {
+    os.hobj.set_hash(0);
+  }
+  (*failures)[os] = std::pair(when, duration);
+  if (type == 0) {
+    ec_inject_write_failures0_shard[os] = o.shard_id;
+  }
+  return result;
+}
+
+/**
+ * @brief Clear a previously configured read error inject.
+ * @param o Target object for the error inject.
+ * @param type Type of error inject 0 = EIO, 1 = missing shard.
+ * @return string Indication of how many errors were cleared.
+ */
+std::string ec_inject_clear_read_error(const ghobject_t& o,
+                                      const int64_t type) {
+  std::lock_guard<ceph::recursive_mutex> l(ec_inject_lock);
+  std::map<ghobject_t,std::pair<int64_t,int64_t>> *failures;
+  ghobject_t os = o;
+  int64_t remaining = 0;
+  switch (type) {
+  case 0:
+    failures = &ec_inject_read_failures0;
+    break;
+  case 1:
+    failures = &ec_inject_read_failures1;
+    break;
+  default:
+    return "unrecognized error inject type";
+  }
+  if (os.hobj.oid.name == "*") {
+    os.hobj.set_hash(0);
+  }
+  auto it = failures->find(os);
+  if (it != failures->end()) {
+    remaining = it->second.second;
+    failures->erase(it);
+  }
+  if (remaining == 0) {
+    return "no outstanding error injects";
+  } else if (remaining == 1) {
+    return "ok - 1 inject cleared";
+  }
+  return "ok - " + std::to_string(remaining) + " injects cleared";
+}
+
+/**
+ * @brief Clear a previously configured write error inject.
+ * @param o Target object for the error inject.
+ * @param type Type of error inject 0 = rollback, 1 = drop write, 2 = OSD down, 3 = abort.
+ * @return string Indication of how many errors were cleared.
+ */
+std::string ec_inject_clear_write_error(const ghobject_t& o,
+                                       const int64_t type) {
+  std::lock_guard<ceph::recursive_mutex> l(ec_inject_lock);
+  std::map<ghobject_t,std::pair<int64_t,int64_t>> *failures;
+  ghobject_t os = o;
+  bool no_shard = true;
+  int64_t remaining = 0;
+  switch (type) {
+  case 0:
+    failures = &ec_inject_write_failures0;
+    break;
+  case 1:
+    failures = &ec_inject_write_failures1;
+    no_shard = false;
+    break;
+  case 2:
+    failures = &ec_inject_write_failures2;
+    break;
+  case 3:
+    failures = &ec_inject_write_failures3;
+    break;
+  default:
+    return "unrecognized error inject type";
+  }
+  if (no_shard) {
+    os.set_shard(shard_id_t::NO_SHARD);
+  }
+  if (os.hobj.oid.name == "*") {
+    os.hobj.set_hash(0);
+  }
+  auto it = failures->find(os);
+  if (it != failures->end()) {
+    remaining = it->second.second;
+    failures->erase(it);
+    if (type == 0) {
+      ec_inject_write_failures0_shard.erase(os);
+    }
+  }
+  if (remaining == 0) {
+    return "no outstanding error injects";
+  } else if (remaining == 1) {
+    return "ok - 1 inject cleared";
+  }
+  return "ok - " + std::to_string(remaining) + " injects cleared";
+}
+
+static bool ec_inject_test_error(const ghobject_t& o,
+  std::map<ghobject_t,std::pair<int64_t,int64_t>> *failures)
+{
+  std::lock_guard<ceph::recursive_mutex> l(ec_inject_lock);
+  auto it = failures->find(o);
+  if (it == failures->end()) {
+    ghobject_t os = o;
+    os.hobj.oid.name = "*";
+    os.hobj.set_hash(0);
+    it = failures->find(os);
+  }
+  if (it != failures->end()) {
+    auto && [when,duration] = it->second;
+    if (when > 0) {
+      when--;
+      return false;
+    }
+    if (--duration <= 0) {
+      failures->erase(it);
+    }
+    return true;
+  }
+  return false;
+}
+
+bool ec_inject_test_read_error0(const ghobject_t& o)
+{
+  return ec_inject_test_error(o, &ec_inject_read_failures0);
+}
+
+bool ec_inject_test_read_error1(const ghobject_t& o)
+{
+  return ec_inject_test_error(o, &ec_inject_read_failures1);
+}
+
+bool ec_inject_test_write_error0(const hobject_t& o,
+                                const osd_reqid_t& reqid) {
+  std::lock_guard<ceph::recursive_mutex> l(ec_inject_lock);
+  ghobject_t os = ghobject_t(o, ghobject_t::NO_GEN, shard_id_t::NO_SHARD);
+  if (ec_inject_write_failures0_reqid.count(reqid)) {
+    // Matched reqid of retried write - flag for failure
+    ec_inject_write_failures0_reqid.erase(reqid);
+    return true;
+  }
+  auto it = ec_inject_write_failures0.find(os);
+  if (it == ec_inject_write_failures0.end()) {
+    os.hobj.oid.name = "*";
+    os.hobj.set_hash(0);
+    it = ec_inject_write_failures0.find(os);
+  }
+  if (it != ec_inject_write_failures0.end()) {
+    auto && [when, duration] = it->second;
+    auto shard = ec_inject_write_failures0_shard.find(os)->second;
+    if (when > 0) {
+      when--;
+    } else {
+      if (--duration <= 0) {
+       ec_inject_write_failures0.erase(it);
+       ec_inject_write_failures0_shard.erase(os);
+      }
+      // Error inject triggered - save reqid
+      ec_inject_write_failures0_reqid.insert(reqid);
+      // Set up error inject to drop the write message to the requested shard
+      ec_inject_write_error(ghobject_t(o, ghobject_t::NO_GEN, shard), 1, 0, 1);
+    }
+  }
+  return false;
+}
+
+bool ec_inject_test_write_error1(const ghobject_t& o) {
+  bool rc = ec_inject_test_error(o, &ec_inject_write_failures1);
+  if (rc) {
+    // Set up error inject to generate OSD down
+    ec_inject_write_error(o, 2, 0, 1);
+  }
+  return rc;
+}
+
+bool ec_inject_test_write_error2(const hobject_t& o) {
+  return ec_inject_test_error(
+    ghobject_t(o, ghobject_t::NO_GEN, shard_id_t::NO_SHARD),
+    &ec_inject_write_failures2);
+}
+
+bool ec_inject_test_write_error3(const hobject_t& o) {
+  return ec_inject_test_error(
+    ghobject_t(o, ghobject_t::NO_GEN, shard_id_t::NO_SHARD),
+    &ec_inject_write_failures3);
+}
index 7ff9cae7646ab045a8111a6b1ba02e4e6dedeb2e..de4c11ad50f492902fce82bbce5514374d1a3931 100644 (file)
@@ -493,6 +493,7 @@ struct ECCommon {
       ); ///< @return error code, 0 on success
 
     void schedule_recovery_work();
+
   };
 
   /**
@@ -843,3 +844,15 @@ void ECCommon::ReadPipeline::filter_read_op(
     on_schedule_recovery(op);
   }
 }
+
+// Error inject interfaces
+std::string ec_inject_read_error(const ghobject_t& o, const int64_t type, const int64_t when, const int64_t duration);
+std::string ec_inject_write_error(const ghobject_t& o, const int64_t type, const int64_t when, const int64_t duration);
+std::string ec_inject_clear_read_error(const ghobject_t& o, const int64_t type);
+std::string ec_inject_clear_write_error(const ghobject_t& o, const int64_t type);
+bool ec_inject_test_read_error0(const ghobject_t& o);
+bool ec_inject_test_read_error1(const ghobject_t& o);
+bool ec_inject_test_write_error0(const hobject_t& o, const osd_reqid_t& reqid);
+bool ec_inject_test_write_error1(const ghobject_t& o);
+bool ec_inject_test_write_error2(const hobject_t& o);
+bool ec_inject_test_write_error3(const hobject_t& o);
index 5223eb283e93756d892ac0d9b81748e194f09eee..9c9e540cf616862c445e1b19a3562e207d857f37 100644 (file)
@@ -37,6 +37,7 @@
 #include "osd/PG.h"
 #include "osd/scrubber/scrub_machine.h"
 #include "osd/scrubber/pg_scrubber.h"
+#include "osd/ECCommon.h"
 
 #include "include/types.h"
 #include "include/compat.h"
@@ -4347,6 +4348,46 @@ void OSD::final_init()
     test_ops_hook,
     "inject metadata error to an object");
   ceph_assert(r == 0);
+  r = admin_socket->register_command(
+    "injectecreaderr " \
+    "name=pool,type=CephString " \
+    "name=objname,type=CephObjectname " \
+    "name=shardid,type=CephInt,req=true,range=0|255 " \
+    "name=type,type=CephInt,req=false " \
+    "name=when,type=CephInt,req=false " \
+    "name=duration,type=CephInt,req=false",
+    test_ops_hook,
+    "inject error for read of object in an EC pool");
+  ceph_assert(r == 0);
+  r = admin_socket->register_command(
+    "injectecclearreaderr " \
+    "name=pool,type=CephString " \
+    "name=objname,type=CephObjectname " \
+    "name=shardid,type=CephInt,req=true,range=0|255 " \
+    "name=type,type=CephInt,req=false",
+    test_ops_hook,
+    "clear read error injects for object in an EC pool");
+  ceph_assert(r == 0);
+  r = admin_socket->register_command(
+    "injectecwriteerr " \
+    "name=pool,type=CephString " \
+    "name=objname,type=CephObjectname " \
+    "name=shardid,type=CephInt,req=true,range=0|255 " \
+    "name=type,type=CephInt,req=false " \
+    "name=when,type=CephInt,req=false " \
+    "name=duration,type=CephInt,req=false",
+    test_ops_hook,
+    "inject error for write of object in an EC pool");
+  ceph_assert(r == 0);
+  r = admin_socket->register_command(
+    "injectecclearwriteerr " \
+    "name=pool,type=CephString " \
+    "name=objname,type=CephObjectname " \
+    "name=shardid,type=CephInt,req=true,range=0|255 " \
+    "name=type,type=CephInt,req=false",
+    test_ops_hook,
+    "clear write error inject for object in an EC pool");
+  ceph_assert(r == 0);
   r = admin_socket->register_command(
     "set_recovery_delay " \
     "name=utime,type=CephInt,req=false",
@@ -6487,8 +6528,10 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
   //directly request the osd make a change.
   if (command == "setomapval" || command == "rmomapkey" ||
       command == "setomapheader" || command == "getomap" ||
-      command == "truncobj" || command == "injectmdataerr" ||
-      command == "injectdataerr"
+      command == "truncobj" ||
+      command == "injectmdataerr" || command == "injectdataerr" ||
+      command == "injectecreaderr" || command == "injectecclearreaderr" ||
+      command == "injectecwriteerr" || command == "injectecclearwriteerr"
     ) {
     pg_t rawpg;
     int64_t pool;
@@ -6527,8 +6570,21 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
     ghobject_t gobj(obj, ghobject_t::NO_GEN, shard_id_t(uint8_t(shardid)));
     spg_t pgid(curmap->raw_pg_to_pg(rawpg), shard_id_t(shardid));
     if (curmap->pg_is_ec(rawpg)) {
-        if ((command != "injectdataerr") && (command != "injectmdataerr")) {
-            ss << "Must not call on ec pool, except injectdataerr or injectmdataerr";
+        if ((command != "injectdataerr") &&
+           (command != "injectmdataerr") &&
+           (command != "injectecreaderr") &&
+           (command != "injectecclearreaderr") &&
+           (command != "injectecwriteerr") &&
+           (command != "injectecclearwriteerr")) {
+            ss << "Must not call on ec pool";
+            return;
+        }
+    } else {
+        if ((command == "injectecreaderr") ||
+           (command == "injecteclearreaderr") ||
+           (command == "injectecwriteerr") ||
+           (command == "injecteclearwriteerr")) {
+            ss << "Only supported on ec pool";
             return;
         }
     }
@@ -6607,6 +6663,38 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
     } else if (command == "injectmdataerr") {
       store->inject_mdata_error(gobj);
       ss << "ok";
+    } else if (command == "injectecreaderr") {
+      if (service->cct->_conf->bluestore_debug_inject_read_err) {
+       int64_t type = cmd_getval_or<int64_t>(cmdmap, "type", 0);
+        int64_t when = cmd_getval_or<int64_t>(cmdmap, "when", 0);
+        int64_t duration = cmd_getval_or<int64_t>(cmdmap, "duration", 1);
+       ss << ec_inject_read_error(gobj, type, when, duration);
+      } else {
+       ss << "bluestore_debug_inject_read_err not enabled";
+      }
+    } else if (command == "injectecclearreaderr") {
+      if (service->cct->_conf->bluestore_debug_inject_read_err) {
+       int64_t type = cmd_getval_or<int64_t>(cmdmap, "type", 0);
+       ss << ec_inject_clear_read_error(gobj, type);
+      } else {
+       ss << "bluestore_debug_inject_read_err not enabled";
+      }
+    } else if (command == "injectecwriteerr") {
+      if (service->cct->_conf->bluestore_debug_inject_read_err) {
+       int64_t type = cmd_getval_or<int64_t>(cmdmap, "type", 0);
+       int64_t when = cmd_getval_or<int64_t>(cmdmap, "when", 0);
+        int64_t duration = cmd_getval_or<int64_t>(cmdmap, "duration", 1);
+       ss << ec_inject_write_error(gobj, type, when, duration);
+      } else {
+       ss << "bluestore_debug_inject_read_err not enabled";
+      }
+    } else if (command == "injectecclearwriteerr") {
+      if (service->cct->_conf->bluestore_debug_inject_read_err) {
+       int64_t type = cmd_getval_or<int64_t>(cmdmap, "type", 0);
+       ss << ec_inject_clear_write_error(gobj, type);
+      } else {
+       ss << "bluestore_debug_inject_read_err not enabled";
+      }
     }
     return;
   }
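
All four commands are gated on bluestore_debug_inject_read_err; with the
option off the hook replies "bluestore_debug_inject_read_err not
enabled". A sketch of enabling the gate before arming an inject
(hypothetical ids; if the option cannot be applied at runtime, set it in
ceph.conf and restart the OSD instead):

    ceph config set osd.0 bluestore_debug_inject_read_err true
    ceph tell osd.0 injectecreaderr mypool myobject 0 0 0 2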
index b87aa1da6771b72a685093d4a1780d0bb387f089..f5eb9ea951e99e62ec3b10d4f60c93d92cb9db5a 100644 (file)
@@ -290,6 +290,10 @@ typedef std::shared_ptr<const OSDMap> OSDMapRef;
        MessageRef, Connection *con) = 0;
      virtual void send_message_osd_cluster(
        Message *m, const ConnectionRef& con) = 0;
+     virtual void start_mon_command(
+       const std::vector<std::string>& cmd, const bufferlist& inbl,
+       bufferlist *outbl, std::string *outs,
+       Context *onfinish) = 0;
      virtual ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch) = 0;
      virtual entity_name_t get_cluster_msgr_name() = 0;
 
index 44f8e85b5ef6c34a4333a78f9d629c0ae81ecaed..aa0f84783cd96feb665c55c8667750afe5efa250 100644 (file)
@@ -2286,6 +2286,16 @@ void PrimaryLogPG::do_op(OpRequestRef& op)
     }
   }
 
+  if (cct->_conf->bluestore_debug_inject_read_err &&
+      op->may_write() &&
+      pool.info.is_erasure() &&
+      ec_inject_test_write_error0(m->get_hobj(), m->get_reqid())) {
+    // Fail retried write with error
+    dout(0) << __func__ << " Error inject - Fail retried write with EINVAL" << dendl;
+    osd->reply_op_error(op, -EINVAL);
+    return;
+  }
+
   ObjectContextRef obc;
   bool can_create = op->may_write();
   hobject_t missing_oid;
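
With a type 0 inject armed, the observable client behaviour is that the
retried write is failed with EINVAL rather than completing. As a sketch
(hypothetical names):

    rados -p mypool put myobject payload.bin   # expected: fails with EINVAL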
index f66b5c6e16aed2145a75fdd4fc0363fc654a09a8..bf55d5398218b5b46b0cfa53416dfbc5ca134e33 100644 (file)
@@ -622,6 +622,12 @@ public:
     Message *m, const ConnectionRef& con) override {
     osd->send_message_osd_cluster(m, con);
   }
+  void start_mon_command(
+    const std::vector<std::string>& cmd, const bufferlist& inbl,
+    bufferlist *outbl, std::string *outs,
+    Context *onfinish) override {
+    osd->monc->start_mon_command(cmd, inbl, outbl, outs, onfinish);
+  }
   ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch) override;
   entity_name_t get_cluster_msgr_name() override {
     return osd->get_cluster_msgr_name();
@@ -1993,6 +1999,7 @@ public:
 
 private:
   DynamicPerfStats m_dynamic_perf_stats;
+
 };
 
 inline ostream& operator<<(ostream& out, const PrimaryLogPG::RepGather& repop)
@@ -2021,5 +2028,4 @@ inline ostream& operator<<(ostream& out,
 void intrusive_ptr_add_ref(PrimaryLogPG::RepGather *repop);
 void intrusive_ptr_release(PrimaryLogPG::RepGather *repop);
 
-
 #endif