ReplicatedBackend: clear pull source once we are done with it
author    Samuel Just <sjust@redhat.com>
          Fri, 24 Feb 2017 19:05:25 +0000 (11:05 -0800)
committer Sage Weil <sage@redhat.com>
          Wed, 8 Mar 2017 17:12:36 +0000 (12:12 -0500)
68defc2b0561414711d4dd0a76bc5d0f46f8a3f8 factored out the clear_pull
behavior, but we actually need to clear the pull source in
handle_pull_response (even though we don't clear the pulling entry
until the callback) so that check_recovery_sources does not clear the
pulling entry for a pull that has already completed.  This should
restore the clearing behavior to basically what it was in kraken.

Fixes: http://tracker.ceph.com/issues/19076
Signed-off-by: Samuel Just <sjust@redhat.com>
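
To see why the early clear matters, here is a hypothetical, simplified
model of the two maps involved (toy types and bodies; the real
ReplicatedBackend uses hobject_t, PullInfo, and GenContext completion
callbacks).  It shows how removing the source from pull_from_peer as
soon as the pull completes keeps check_recovery_sources from cancelling
a finished pull whose pulling entry is still waiting on its callback:

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>

    struct PullInfo {
      int from;           // peer we are pulling from
      std::string soid;   // object being pulled
    };

    std::map<std::string, PullInfo> pulling;              // object -> in-flight pull
    std::map<int, std::set<std::string>> pull_from_peer;  // peer -> objects pulled from it

    // Runs when the final pull response for an object arrives.  The
    // pulling entry itself is erased later, in the on-complete callback,
    // but the source is dropped here: otherwise a peer failure between
    // now and the callback would let check_recovery_sources() cancel a
    // finished pull.
    void handle_pull_response_complete(const std::string& soid) {
      auto piter = pulling.find(soid);
      assert(piter != pulling.end());
      int from = piter->second.from;
      pull_from_peer[from].erase(soid);  // i.e. clear_pull_from(piter)
      if (pull_from_peer[from].empty())
        pull_from_peer.erase(from);
      // the pulling entry is intentionally left in place for the callback
    }

    // Runs when peers go down; must not touch pulls that already completed.
    void check_recovery_sources(int failed_peer) {
      auto i = pull_from_peer.find(failed_peer);
      if (i == pull_from_peer.end())
        return;                 // completed pulls were already removed
      for (const auto& soid : i->second)
        pulling.erase(soid);    // cancel genuinely in-flight pulls
      pull_from_peer.erase(i);
    }

    int main() {
      pulling["obj1"] = {3, "obj1"};
      pull_from_peer[3].insert("obj1");
      handle_pull_response_complete("obj1");  // source cleared immediately
      check_recovery_sources(3);              // no-op: peer entry already gone
      assert(pulling.count("obj1") == 1);     // entry survives for the callback
    }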
src/osd/ReplicatedBackend.cc
src/osd/ReplicatedBackend.h

diff --git a/src/osd/ReplicatedBackend.cc b/src/osd/ReplicatedBackend.cc
index 1ae2d0d0de9ce1953edf3e664c1f4211c9872536..ac2edee74fe3919c88a23600cb6d2eb58ccaa035 100644
@@ -883,7 +883,7 @@ struct C_ReplicatedBackend_OnPullComplete : GenContext<ThreadPool::TPHandle&> {
       auto j = bc->pulling.find(i.hoid);
       assert(j != bc->pulling.end());
       ObjectContextRef obc = j->second.obc;
-      bc->clear_pull(j);
+      bc->clear_pull(j, false /* already did it */);
       if (!bc->start_pushes(i.hoid, obc, h)) {
        bc->get_parent()->on_global_recover(
          i.hoid, i.stat);
@@ -1813,11 +1813,12 @@ bool ReplicatedBackend::handle_pull_response(
   assert((data_included.empty() && data.length() == 0) ||
         (!data_included.empty() && data.length() > 0));
 
-  if (!pulling.count(hoid)) {
+  auto piter = pulling.find(hoid);
+  if (piter == pulling.end()) {
     return false;
   }
 
-  PullInfo &pi = pulling[hoid];
+  PullInfo &pi = piter->second;
   if (pi.recovery_info.size == (uint64_t(-1))) {
     pi.recovery_info.size = pop.recovery_info.size;
     pi.recovery_info.copy_subset.intersection_of(
@@ -1877,6 +1878,7 @@ bool ReplicatedBackend::handle_pull_response(
 
   if (complete) {
     pi.stat.num_objects_recovered++;
+    clear_pull_from(piter);
     to_continue->push_back({hoid, pi.stat});
     get_parent()->on_local_recover(
       hoid, pi.recovery_info, pi.obc, t);
@@ -2415,15 +2417,21 @@ void ReplicatedBackend::_failed_push(pg_shard_t from, const hobject_t &soid)
   clear_pull(pulling.find(soid));
 }
 
+void ReplicatedBackend::clear_pull_from(
+  map<hobject_t, PullInfo>::iterator piter)
+{
+  auto from = piter->second.from;
+  pull_from_peer[from].erase(piter->second.soid);
+  if (pull_from_peer[from].empty())
+    pull_from_peer.erase(from);
+}
+
 void ReplicatedBackend::clear_pull(
   map<hobject_t, PullInfo>::iterator piter,
   bool clear_pull_from_peer)
 {
-  auto from = piter->second.from;
   if (clear_pull_from_peer) {
-    pull_from_peer[from].erase(piter->second.soid);
-    if (pull_from_peer[from].empty())
-      pull_from_peer.erase(from);
+    clear_pull_from(piter);
   }
   get_parent()->release_locks(piter->second.lock_manager);
   pulling.erase(piter);
diff --git a/src/osd/ReplicatedBackend.h b/src/osd/ReplicatedBackend.h
index c4f40ae81c1a98946f20e1e79b0c22752810ee48..1df8e1493fb25c03e6b431bbc2a75d17d997edaf 100644
@@ -221,6 +221,8 @@ private:
   void clear_pull(
     map<hobject_t, PullInfo>::iterator piter,
     bool clear_pull_from_peer = true);
+  void clear_pull_from(
+    map<hobject_t, PullInfo>::iterator piter);
 
   void sub_op_push(OpRequestRef op);
   void sub_op_push_reply(OpRequestRef op);
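
The resulting call pattern, paraphrased from the diff above: failure
paths keep the old one-call behavior, while the completion path passes
false because handle_pull_response already removed the source.

    // completion path (C_ReplicatedBackend_OnPullComplete): source was
    // already cleared via clear_pull_from() in handle_pull_response()
    bc->clear_pull(j, false /* already did it */);

    // failure path (_failed_push): clear both maps in one call;
    // clear_pull_from_peer defaults to true
    clear_pull(pulling.find(soid));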