git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
Partial revert "osd/PG: drop scrub machinery to wait for last_updated_applied"
author    Sage Weil <sage@redhat.com>
          Fri, 16 Mar 2018 19:57:35 +0000 (14:57 -0500)
committer Sage Weil <sage@redhat.com>
          Fri, 16 Mar 2018 22:20:29 +0000 (17:20 -0500)
This reverts commit c489112a1dbcbb63024ba9c8c0abff6ef0c815d8 (well,
part of it).

We want to keep the scrub machinery that waits on an EC read/modify/write
until it has been queued with the ObjectStore, but we don't need the parts
that used to worry about queued-but-unapplied writes in FileStore not yet
being visible.

Fixes: http://tracker.ceph.com/issues/23339
Signed-off-by: Sage Weil <sage@redhat.com>
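
For orientation, here is a minimal, self-contained C++ sketch of the wait logic
this commit restores. It is not Ceph code: MiniPG, MiniScrubber, scrub_step()
and version_t are simplified stand-ins for PG, Scrubber, chunky_scrub() and
eversion_t, and only the three states touched by the diff are modeled. The
point is the ordering: the scrubber parks in WAIT_LAST_UPDATE until
last_update_applied has caught up with scrubber.subset_last_update, and only
then builds its scrub map.

// Simplified sketch, not the actual Ceph state machine.
#include <cstdint>
#include <iostream>

// Stand-in for eversion_t: a monotonically increasing log version.
using version_t = uint64_t;

enum class ScrubState { WAIT_PUSHES, WAIT_LAST_UPDATE, BUILD_MAP };

struct MiniScrubber {
  ScrubState state = ScrubState::WAIT_PUSHES;
  version_t subset_last_update = 0;  // last write covered by this chunk
};

struct MiniPG {
  int active_pushes = 0;
  version_t last_update_applied = 0;
  MiniScrubber scrubber;

  // One pass of the (much larger) chunky-scrub state machine.
  // Returns true if the scrub must block and wait to be requeued.
  bool scrub_step() {
    switch (scrubber.state) {
    case ScrubState::WAIT_PUSHES:
      if (active_pushes == 0) {
        scrubber.state = ScrubState::WAIT_LAST_UPDATE;
        return false;
      }
      return true;  // requeued when the last push completes

    case ScrubState::WAIT_LAST_UPDATE:
      if (last_update_applied < scrubber.subset_last_update)
        return true;  // requeued by op_applied()
      scrubber.state = ScrubState::BUILD_MAP;
      return false;

    case ScrubState::BUILD_MAP:
      // Every write in the chunk is now queued; safe to read the objects.
      std::cout << "building scrub map up to v"
                << scrubber.subset_last_update << "\n";
      return false;
    }
    return false;
  }
};

int main() {
  MiniPG pg;
  pg.scrubber.subset_last_update = 5;  // chunk covers writes up to v5
  pg.last_update_applied = 3;          // only v3 has been queued so far

  pg.scrub_step();                     // WAIT_PUSHES -> WAIT_LAST_UPDATE
  bool blocked = pg.scrub_step();      // 3 < 5: scrub parks itself
  std::cout << "blocked: " << std::boolalpha << blocked << "\n";

  pg.last_update_applied = 5;          // op_applied(5) would requeue here
  pg.scrub_step();                     // WAIT_LAST_UPDATE -> BUILD_MAP
  pg.scrub_step();                     // now safe to build the scrub map
  return 0;
}

In the real code the "park" is a requeue driven by op_applied() on the primary,
which is the PrimaryLogPG.cc hunk at the bottom of this diff.
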
src/osd/PG.cc
src/osd/PG.h
src/osd/PrimaryLogPG.cc

diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index 86d958b4799ed9460949df662ddb0895f706e26b..27bf647338981ae56150b9bd3e4f3dc19d717bae 100644
--- a/src/osd/PG.cc
+++ b/src/osd/PG.cc
@@ -4516,6 +4516,11 @@ void PG::scrub(epoch_t queued, ThreadPool::TPHandle &handle)
  *           |              |   |
  *  _________v__________    |   |
  * |                    |   |   |
+ * |  WAIT_LAST_UPDATE  |   |   |
+ * |____________________|   |   |
+ *           |              |   |
+ *  _________v__________    |   |
+ * |                    |   |   |
  * |      BUILD_MAP     |   |   |
  * |____________________|   |   |
  *           |              |   |
@@ -4735,16 +4740,29 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle)
         break;
 
       case PG::Scrubber::WAIT_PUSHES:
-        if (active_pushes > 0) {
+        if (active_pushes == 0) {
+          scrubber.state = PG::Scrubber::WAIT_LAST_UPDATE;
+        } else {
           dout(15) << "wait for pushes to apply" << dendl;
           done = true;
+        }
+        break;
+
+      case PG::Scrubber::WAIT_LAST_UPDATE:
+        if (last_update_applied < scrubber.subset_last_update) {
+          // will be requeued by op_applied
+          dout(15) << "wait for EC read/modify/writes to queue" << dendl;
+          done = true;
          break;
        }
-       scrubber.primary_scrubmap_pos.reset();
+
        scrubber.state = PG::Scrubber::BUILD_MAP;
+       scrubber.primary_scrubmap_pos.reset();
         break;
 
       case PG::Scrubber::BUILD_MAP:
+        assert(last_update_applied >= scrubber.subset_last_update);
+
         // build my own scrub map
        if (scrub_preempted) {
          dout(10) << __func__ << " preempted" << dendl;
@@ -4799,6 +4817,7 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle)
         break;
 
       case PG::Scrubber::COMPARE_MAPS:
+        assert(last_update_applied >= scrubber.subset_last_update);
         assert(scrubber.waiting_on_whom.empty());
 
         scrub_compare_maps();
@@ -5760,6 +5779,8 @@ ostream& operator<<(ostream& out, const PG& pg)
   if (pg.is_peered()) {
     if (pg.last_update_ondisk != pg.info.last_update)
       out << " luod=" << pg.last_update_ondisk;
+    if (pg.last_update_applied != pg.info.last_update)
+      out << " lua=" << pg.last_update_applied;
   }
 
   if (pg.recovery_ops_active)
diff --git a/src/osd/PG.h b/src/osd/PG.h
index e275f3bd8fa5f2680a14f5ac80028352e9a2db3e..c2fe21f3dcc8ed1adcfbcbc956045d865aa4be8d 100644
--- a/src/osd/PG.h
+++ b/src/osd/PG.h
@@ -1485,6 +1485,7 @@ public:
       INACTIVE,
       NEW_CHUNK,
       WAIT_PUSHES,
+      WAIT_LAST_UPDATE,
       BUILD_MAP,
       BUILD_MAP_DONE,
       WAIT_REPLICAS,
@@ -1521,6 +1522,7 @@ public:
         case INACTIVE: ret = "INACTIVE"; break;
         case NEW_CHUNK: ret = "NEW_CHUNK"; break;
         case WAIT_PUSHES: ret = "WAIT_PUSHES"; break;
+        case WAIT_LAST_UPDATE: ret = "WAIT_LAST_UPDATE"; break;
         case BUILD_MAP: ret = "BUILD_MAP"; break;
         case BUILD_MAP_DONE: ret = "BUILD_MAP_DONE"; break;
         case WAIT_REPLICAS: ret = "WAIT_REPLICAS"; break;
diff --git a/src/osd/PrimaryLogPG.cc b/src/osd/PrimaryLogPG.cc
index b9f41c71bf911c1acaf9de4e039e70aa5df6e724..e32df9eb32d571001cb4c2cffe40d0ee57f355ed 100644
--- a/src/osd/PrimaryLogPG.cc
+++ b/src/osd/PrimaryLogPG.cc
@@ -9932,6 +9932,15 @@ void PrimaryLogPG::op_applied(const eversion_t &applied_version)
   dout(10) << "op_applied version " << applied_version << dendl;
   assert(applied_version <= info.last_update);
   last_update_applied = applied_version;
+  if (is_primary()) {
+    if (scrubber.active) {
+      if (last_update_applied >= scrubber.subset_last_update) {
+       requeue_scrub(ops_blocked_by_scrub());
+      }
+    } else {
+      assert(scrubber.start == scrubber.end);
+    }
+  }
 }
 
 void PrimaryLogPG::eval_repop(RepGather *repop)
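
And the other half of the handshake, again as a hedged, simplified sketch
rather than the actual PrimaryLogPG code: op_applied() advances
last_update_applied and, when an active scrub's chunk has been fully queued
with the ObjectStore, kicks the scrubber so it can leave WAIT_LAST_UPDATE.
MiniPrimary, its plain bool/uint64_t fields and the requeue_scrub callback are
invented stand-ins for the real PG/PrimaryLogPG members.

// Simplified sketch, not the actual PrimaryLogPG code.
#include <cassert>
#include <cstdint>
#include <functional>
#include <iostream>

using version_t = uint64_t;

struct MiniPrimary {
  bool is_primary = true;
  bool scrub_active = false;
  version_t scrub_subset_last_update = 0;
  version_t last_update_applied = 0;
  version_t info_last_update = 0;

  // Stand-in for PG::requeue_scrub(): re-schedules the scrub state machine.
  std::function<void()> requeue_scrub = [] {
    std::cout << "scrub requeued\n";
  };

  void op_applied(version_t applied_version) {
    assert(applied_version <= info_last_update);
    last_update_applied = applied_version;
    if (is_primary && scrub_active &&
        last_update_applied >= scrub_subset_last_update) {
      // The chunk the scrubber is waiting on is now fully queued with the
      // ObjectStore; wake the scrub so it can enter BUILD_MAP.
      requeue_scrub();
    }
  }
};

int main() {
  MiniPrimary pg;
  pg.info_last_update = 7;
  pg.scrub_active = true;
  pg.scrub_subset_last_update = 5;

  pg.op_applied(4);  // still behind the chunk boundary: no requeue
  pg.op_applied(5);  // reaches subset_last_update: prints "scrub requeued"
  return 0;
}
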