osd: For recovery get all possible shards to read on errors (17920/head)
author    David Zafman <dzafman@redhat.com>
          Fri, 22 Sep 2017 03:35:27 +0000 (20:35 -0700)
committer David Zafman <dzafman@redhat.com>
          Fri, 29 Sep 2017 06:31:25 +0000 (23:31 -0700)
Signed-off-by: David Zafman <dzafman@redhat.com>
src/osd/ECBackend.cc
src/osd/ECBackend.h

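The ECBackend.cc hunks below pull the shard-gathering loop out of get_min_avail_to_read_shards() into a new get_all_avail_shards() helper, which fills `have` (shard ids present) and `shards` (shard id to pg_shard_t), with for_recovery controlling whether sources beyond the acting set are considered. The following is a minimal standalone sketch of that idea; ShardMap, the `extra` map and the osd strings are hypothetical stand-ins for Ceph's pg_shard_t bookkeeping, not the real ECBackend API.

// Hypothetical, simplified model of the new get_all_avail_shards() helper --
// plain STL types stand in for Ceph's hobject_t / pg_shard_t / shard_id_t,
// so this compiles on its own but is not the real ECBackend code.
#include <iostream>
#include <map>
#include <set>
#include <string>

using ShardMap = std::map<int, std::string>;  // shard id -> OSD holding it

// Collect every shard known to hold the object.  The acting set is always
// consulted; when for_recovery is set, additional sources (modeled here as a
// single "extra" map) are consulted as well, which is what lets the error
// path reach shards outside the acting set.
void get_all_avail_shards(const ShardMap &acting,
                          const ShardMap &extra,
                          bool for_recovery,
                          std::set<int> &have,
                          ShardMap &shards) {
  for (const auto &[id, osd] : acting) {
    have.insert(id);
    shards.emplace(id, osd);
  }
  if (for_recovery) {
    for (const auto &[id, osd] : extra) {
      if (!shards.count(id)) {
        have.insert(id);
        shards.emplace(id, osd);
      }
    }
  }
}

int main() {
  ShardMap acting = {{0, "osd.0"}, {1, "osd.1"}};
  ShardMap extra  = {{2, "osd.7"}};  // e.g. a non-acting source
  std::set<int> have;
  ShardMap shards;
  get_all_avail_shards(acting, extra, /*for_recovery=*/true, have, shards);
  for (int id : have)
    std::cout << "shard " << id << " on " << shards[id] << "\n";
}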
diff --git a/src/osd/ECBackend.cc b/src/osd/ECBackend.cc
index caadeb92ba1c64d39f8b7128ed6d45401c92e05c..18add2370a89dbaa8298a72ac16b5ec7be9400a4 100644
@@ -1203,7 +1203,6 @@ void ECBackend::handle_sub_read_reply(
       set<int> want_to_read, dummy_minimum;
       get_want_to_read_shards(&want_to_read);
       int err;
-      // TODO: Should we include non-acting nodes here when for_recovery is set?
       if ((err = ec_impl->minimum_to_decode(want_to_read, have, &dummy_minimum)) < 0) {
        dout(20) << __func__ << " minimum_to_decode failed" << dendl;
         if (rop.in_progress.empty()) {
@@ -1485,19 +1484,12 @@ void ECBackend::call_write_ordered(std::function<void(void)> &&cb) {
   }
 }
 
-int ECBackend::get_min_avail_to_read_shards(
+void ECBackend::get_all_avail_shards(
   const hobject_t &hoid,
-  const set<int> &want,
-  bool for_recovery,
-  bool do_redundant_reads,
-  set<pg_shard_t> *to_read)
+  set<int> &have,
+  map<shard_id_t, pg_shard_t> &shards,
+  bool for_recovery)
 {
-  // Make sure we don't do redundant reads for recovery
-  assert(!for_recovery || !do_redundant_reads);
-
-  set<int> have;
-  map<shard_id_t, pg_shard_t> shards;
-
   for (set<pg_shard_t>::const_iterator i =
         get_parent()->get_acting_shards().begin();
        i != get_parent()->get_acting_shards().end();
@@ -1548,6 +1540,22 @@ int ECBackend::get_min_avail_to_read_shards(
       }
     }
   }
+}
+
+int ECBackend::get_min_avail_to_read_shards(
+  const hobject_t &hoid,
+  const set<int> &want,
+  bool for_recovery,
+  bool do_redundant_reads,
+  set<pg_shard_t> *to_read)
+{
+  // Make sure we don't do redundant reads for recovery
+  assert(!for_recovery || !do_redundant_reads);
+
+  set<int> have;
+  map<shard_id_t, pg_shard_t> shards;
+
+  get_all_avail_shards(hoid, have, shards, for_recovery);
 
   set<int> need;
   int r = ec_impl->minimum_to_decode(want, have, &need);
@@ -1573,30 +1581,18 @@ int ECBackend::get_min_avail_to_read_shards(
 int ECBackend::get_remaining_shards(
   const hobject_t &hoid,
   const set<int> &avail,
-  set<pg_shard_t> *to_read)
+  set<pg_shard_t> *to_read,
+  bool for_recovery)
 {
-  set<int> need;
-  map<shard_id_t, pg_shard_t> shards;
+  assert(to_read);
 
-  for (set<pg_shard_t>::const_iterator i =
-        get_parent()->get_acting_shards().begin();
-       i != get_parent()->get_acting_shards().end();
-       ++i) {
-    dout(10) << __func__ << ": checking acting " << *i << dendl;
-    const pg_missing_t &missing = get_parent()->get_shard_missing(*i);
-    if (!missing.is_missing(hoid)) {
-      assert(!need.count(i->shard));
-      need.insert(i->shard);
-      assert(!shards.count(i->shard));
-      shards.insert(make_pair(i->shard, *i));
-    }
-  }
+  set<int> have;
+  map<shard_id_t, pg_shard_t> shards;
 
-  if (!to_read)
-    return 0;
+  get_all_avail_shards(hoid, have, shards, for_recovery);
 
-  for (set<int>::iterator i = need.begin();
-       i != need.end();
+  for (set<int>::iterator i = have.begin();
+       i != have.end();
        ++i) {
     assert(shards.count(shard_id_t(*i)));
     if (avail.find(*i) == avail.end())
@@ -2317,7 +2313,7 @@ int ECBackend::send_all_remaining_reads(
     already_read.insert(i->shard);
   dout(10) << __func__ << " have/error shards=" << already_read << dendl;
   set<pg_shard_t> shards;
-  int r = get_remaining_shards(hoid, already_read, &shards);
+  int r = get_remaining_shards(hoid, already_read, &shards, rop.for_recovery);
   if (r)
     return r;
   if (shards.empty())
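With the helper in place, get_remaining_shards() no longer walks only the acting set: it takes everything get_all_avail_shards() reports as available and filters out the shards that were already read (or returned an error), and send_all_remaining_reads() forwards rop.for_recovery so the retry can reach any available shard during recovery. A rough sketch of that filtering step, again with simplified stand-in types rather than the real API:

// Hypothetical filter step modeled on the rewritten get_remaining_shards():
// take everything get_all_avail_shards() reported and drop the shards that
// were already read (or came back in error).
#include <iostream>
#include <map>
#include <set>
#include <string>

std::set<std::string> remaining_to_read(
    const std::set<int> &have,                 // all available shard ids
    const std::map<int, std::string> &shards,  // shard id -> OSD
    const std::set<int> &already_read) {       // read or errored shards
  std::set<std::string> to_read;
  for (int id : have) {
    if (!already_read.count(id))
      to_read.insert(shards.at(id));
  }
  return to_read;
}

int main() {
  std::set<int> have = {0, 1, 2};
  std::map<int, std::string> shards = {{0, "osd.0"}, {1, "osd.1"}, {2, "osd.7"}};
  std::set<int> already_read = {0, 1};  // shard 1, say, returned an error
  for (const auto &osd : remaining_to_read(have, shards, already_read))
    std::cout << "retry read from " << osd << "\n";  // prints osd.7
}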
diff --git a/src/osd/ECBackend.h b/src/osd/ECBackend.h
index 6d6d1f1e4cf7f75d96d7a3fbe3a14773b0731c75..a51ce01a1a450888e54856e7f36c62fc32d808f5 100644
@@ -320,6 +320,11 @@ private:
     const PushReplyOp &op,
     pg_shard_t from,
     RecoveryMessages *m);
+  void get_all_avail_shards(
+    const hobject_t &hoid,
+    set<int> &have,
+    map<shard_id_t, pg_shard_t> &shards,
+    bool for_recovery);
 
 public:
   /**
@@ -650,7 +655,8 @@ public:
   int get_remaining_shards(
     const hobject_t &hoid,
     const set<int> &avail,
-    set<pg_shard_t> *to_read);
+    set<pg_shard_t> *to_read,
+    bool for_recovery);
 
   int objects_get_attrs(
     const hobject_t &hoid,
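On the header side, get_all_avail_shards() is declared as a private helper while get_remaining_shards() remains public with the extra for_recovery parameter, so each caller now states explicitly whether non-acting shards may be used; within the backend that caller is send_all_remaining_reads(), which passes rop.for_recovery as shown in the last ECBackend.cc hunk.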