osd: Handle recovery read errors
author     David Zafman <dzafman@redhat.com>
           Fri, 20 May 2016 22:20:18 +0000 (15:20 -0700)
committer  David Zafman <dzafman@redhat.com>
           Fri, 28 Oct 2016 05:42:10 +0000 (22:42 -0700)
Previously, a recovery read that returned an error on an EC pool tripped
assert(res.r == 0) in OnRecoveryReadComplete::finish and aborted the OSD.
Handle the error instead: cancel the recovery op and report every shard
that returned an error through failed_push(), which now takes a list of
shards so that all failing locations can be removed at once.

Fixes: http://tracker.ceph.com/issues/13937
Signed-off-by: David Zafman <dzafman@redhat.com>
src/osd/ECBackend.cc
src/osd/ECBackend.h
src/osd/PGBackend.h
src/osd/ReplicatedBackend.cc
src/osd/ReplicatedPG.cc
src/osd/ReplicatedPG.h

diff --git a/src/osd/ECBackend.cc b/src/osd/ECBackend.cc
index a7303d4c98bc50620bd14459faa549eae4033e31..1bb865b0801b98dfae7557ea723bdedda0e929a6 100644
--- a/src/osd/ECBackend.cc
+++ b/src/osd/ECBackend.cc
@@ -188,6 +188,24 @@ PGBackend::RecoveryHandle *ECBackend::open_recovery_op()
   return new ECRecoveryHandle;
 }
 
+void ECBackend::_failed_push(const hobject_t &hoid,
+  pair<RecoveryMessages *, ECBackend::read_result_t &> &in)
+{
+  ECBackend::read_result_t &res = in.second;
+  dout(10) << __func__ << ": Read error " << hoid << " r="
+          << res.r << " errors=" << res.errors << dendl;
+  dout(10) << __func__ << ": canceling recovery op for obj " << hoid
+          << dendl;
+  assert(recovery_ops.count(hoid));
+  recovery_ops.erase(hoid);
+
+  list<pg_shard_t> fl;
+  for (auto&& i : res.errors) {
+    fl.push_back(i.first);
+  }
+  get_parent()->failed_push(fl, hoid);
+}
+
 struct OnRecoveryReadComplete :
   public GenContext<pair<RecoveryMessages*, ECBackend::read_result_t& > &> {
   ECBackend *pg;
@@ -197,9 +215,10 @@ struct OnRecoveryReadComplete :
     : pg(pg), hoid(hoid) {}
   void finish(pair<RecoveryMessages *, ECBackend::read_result_t &> &in) {
     ECBackend::read_result_t &res = in.second;
-    // FIXME???
-    assert(res.r == 0);
-    assert(res.errors.empty());
+    if (!(res.r == 0 && res.errors.empty())) {
+        pg->_failed_push(hoid, in);
+        return;
+    }
     assert(res.returned.size() == 1);
     pg->handle_recovery_read_complete(
       hoid,
@@ -1071,6 +1090,7 @@ void ECBackend::handle_sub_read_reply(
   unsigned is_complete = 0;
   // For redundant reads check for completion as each shard comes in,
   // or in a non-recovery read check for completion once all the shards read.
+  // TODO: It would be nice if recovery could send more reads too
   if (rop.do_redundant_reads || (!rop.for_recovery && rop.in_progress.empty())) {
     for (map<hobject_t, read_result_t>::const_iterator iter =
         rop.complete.begin();
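The new _failed_push() replaces the old FIXME asserts in
OnRecoveryReadComplete::finish: rather than aborting on a read error, the
recovery op is cancelled and every shard recorded in read_result_t::errors
is reported upward. A minimal standalone sketch of that routing follows;
ShardId and ReadResult are simplified stand-ins, not the real Ceph types.

    #include <iostream>
    #include <list>
    #include <map>

    using ShardId = int;

    struct ReadResult {
      int r = 0;                      // overall return code
      std::map<ShardId, int> errors;  // per-shard errno for failed reads
    };

    // Collect every shard that reported an error so the caller can
    // drop them all as candidate locations in one pass.
    std::list<ShardId> failing_shards(const ReadResult& res) {
      std::list<ShardId> fl;
      for (const auto& e : res.errors)
        fl.push_back(e.first);
      return fl;
    }

    void on_recovery_read_complete(const ReadResult& res) {
      if (res.r != 0 || !res.errors.empty()) {
        // Error path: cancel the op and report the failing shards
        // instead of asserting.
        for (ShardId s : failing_shards(res))
          std::cout << "dropping failed shard " << s << "\n";
        return;
      }
      // Success path: proceed with handle_recovery_read_complete(...).
    }

    int main() {
      ReadResult bad;
      bad.r = -5;          // EIO
      bad.errors[2] = -5;  // shard 2 failed its read
      on_recovery_read_complete(bad);
    }

Collecting the shards into a list before calling upward matches the widened
failed_push() interface introduced in PGBackend.h below.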
diff --git a/src/osd/ECBackend.h b/src/osd/ECBackend.h
index f9b48f659a8853c3a213c814f0683b2d8ed2975c..8b6535953ee955d70ce43efd19adb65d4fb015d8 100644
--- a/src/osd/ECBackend.h
+++ b/src/osd/ECBackend.h
@@ -504,6 +504,8 @@ public:
   uint64_t be_get_ondisk_size(uint64_t logical_size) {
     return sinfo.logical_to_next_chunk_offset(logical_size);
   }
+  void _failed_push(const hobject_t &hoid,
+    pair<RecoveryMessages *, ECBackend::read_result_t &> &in);
 };
 
 #endif
diff --git a/src/osd/PGBackend.h b/src/osd/PGBackend.h
index e749102f8bfb9c9a6795847928421b7b17c8b70f..b9b2e20cff02d4b15741525820de69c812b26064 100644
--- a/src/osd/PGBackend.h
+++ b/src/osd/PGBackend.h
@@ -97,7 +97,7 @@ typedef ceph::shared_ptr<const OSDMap> OSDMapRef;
        pg_shard_t peer,
        const hobject_t oid) = 0;
 
-     virtual void failed_push(pg_shard_t from, const hobject_t &soid) = 0;
+     virtual void failed_push(const list<pg_shard_t> &from, const hobject_t &soid) = 0;
      
      virtual void cancel_pull(const hobject_t &soid) = 0;
 
diff --git a/src/osd/ReplicatedBackend.cc b/src/osd/ReplicatedBackend.cc
index a090c2de5bde26ac7548312cfe8fd4dc72b865c4..2267f5ba7493e4a8674b12c459009c0edaae8b83 100644
--- a/src/osd/ReplicatedBackend.cc
+++ b/src/osd/ReplicatedBackend.cc
@@ -2426,7 +2426,8 @@ void ReplicatedBackend::sub_op_push(OpRequestRef op)
 
 void ReplicatedBackend::_failed_push(pg_shard_t from, const hobject_t &soid)
 {
-  get_parent()->failed_push(from, soid);
+  list<pg_shard_t> fl = { from };
+  get_parent()->failed_push(fl, soid);
   pull_from_peer[from].erase(soid);
   if (pull_from_peer[from].empty())
     pull_from_peer.erase(from);
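The replicated backend fails one pull peer at a time, so it adapts to the
list-taking failed_push() from the PGBackend.h hunk above by wrapping its
single peer in a one-element list. A compilable sketch of that adapter
pattern; pg_shard_t, hobject_t, and Listener here are toy stand-ins for
the real Ceph types.

    #include <iostream>
    #include <list>

    struct pg_shard_t { int osd = -1; };
    struct hobject_t { };

    // Plays the role of PGBackend::Listener: one callback now serves
    // both backends, with EC passing every failing shard at once.
    struct Listener {
      virtual void failed_push(const std::list<pg_shard_t>& from,
                               const hobject_t& soid) = 0;
      virtual ~Listener() = default;
    };

    struct PrintingListener : Listener {
      void failed_push(const std::list<pg_shard_t>& from,
                       const hobject_t&) override {
        for (const auto& s : from)
          std::cout << "push failed from osd." << s.osd << "\n";
      }
    };

    // A single peer failed: wrap it, as _failed_push() above does.
    void report_failed_pull(Listener* parent, pg_shard_t from,
                            const hobject_t& soid) {
      std::list<pg_shard_t> fl = { from };
      parent->failed_push(fl, soid);
    }

    int main() {
      PrintingListener l;
      report_failed_pull(&l, pg_shard_t{3}, hobject_t{});
    }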
diff --git a/src/osd/ReplicatedPG.cc b/src/osd/ReplicatedPG.cc
index a7c00091ad1d5c3297bf131f71ab568113115824..f64734658629122ed220583f4918995ff77c3a74 100644
--- a/src/osd/ReplicatedPG.cc
+++ b/src/osd/ReplicatedPG.cc
@@ -9824,12 +9824,12 @@ void ReplicatedPG::recover_got(hobject_t oid, eversion_t v)
   }
 }
 
-
-void ReplicatedPG::failed_push(pg_shard_t from, const hobject_t &soid)
+void ReplicatedPG::failed_push(const list<pg_shard_t> &from, const hobject_t &soid)
 {
   assert(recovering.count(soid));
   recovering.erase(soid);
-  missing_loc.remove_location(soid, from);
+  for (auto&& i : from)
+    missing_loc.remove_location(soid, i);
   dout(0) << __func__ << " " << soid << " from shard " << from
          << ", reps on " << missing_loc.get_locations(soid)
          << " unfound? " << missing_loc.is_unfound(soid) << dendl;
diff --git a/src/osd/ReplicatedPG.h b/src/osd/ReplicatedPG.h
index 96b07f52747f2a11a8180734b65d0cda96864d46..a3c9488ee83a5a495af05cd8f9b15e7c1105d0f9 100644
--- a/src/osd/ReplicatedPG.h
+++ b/src/osd/ReplicatedPG.h
@@ -257,7 +257,7 @@ public:
   void on_global_recover(
     const hobject_t &oid,
     const object_stat_sum_t &stat_diff) override;
-  void failed_push(pg_shard_t from, const hobject_t &soid) override;
+  void failed_push(const list<pg_shard_t> &from, const hobject_t &soid) override;
   void cancel_pull(const hobject_t &soid) override;
 
   template<class T> class BlessedGenContext;