git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
Revert "ReplicatedPG: block scrub on blocked object contexts"
author: Samuel Just <sam.just@inktank.com>
Fri, 2 May 2014 23:21:26 +0000 (16:21 -0700)
committer: Samuel Just <sam.just@inktank.com>
Thu, 8 May 2014 01:10:49 +0000 (18:10 -0700)
This reverts commit e66f2e36c06ca00c1147f922d3513f56b122a5c0.
Reviewed-by: Sage Weil <sage@inktank.com>
0f3235d46c8fd6c537bd4aa8a3faec6c00f311a8 is the firefly commit
corresponding to e66f2e36c06ca00c1147f922d3513f56b122a5c0.

(cherry picked from commit 84728058dbb91b8ed062240b3373b18078f0c9ca)

src/osd/PG.cc
src/osd/PG.h
src/osd/ReplicatedPG.cc
src/osd/ReplicatedPG.h
src/osd/osd_types.h

index 2c86f3ba2d2511875be56256b2f2dedbe98f2da2..6deb0998b296658762ce7eedfdace7829c5a5d0a 100644 (file)
@@ -3880,7 +3880,6 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle)
         scrubber.received_maps.clear();
 
         {
-         hobject_t end;
 
           // get the start and end of our scrub chunk
           //
@@ -3899,11 +3898,11 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle)
              cct->_conf->osd_scrub_chunk_max,
              0,
              &objects,
-             &end);
+             &scrubber.end);
             assert(ret >= 0);
 
             // in case we don't find a boundary: start again at the end
-            start = end;
+            start = scrubber.end;
 
             // special case: reached end of file store, implicitly a boundary
             if (objects.empty()) {
@@ -3911,25 +3910,19 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle)
             }
 
             // search backward from the end looking for a boundary
-            objects.push_back(end);
+            objects.push_back(scrubber.end);
             while (!boundary_found && objects.size() > 1) {
               hobject_t end = objects.back().get_boundary();
               objects.pop_back();
 
               if (objects.back().get_filestore_key() != end.get_filestore_key()) {
-                end = end;
+                scrubber.end = end;
                 boundary_found = true;
               }
             }
           }
-
-         if (!_range_available_for_scrub(scrubber.start, end)) {
-           // we'll be requeued by whatever made us unavailable for scrub
-           done = true;
-           break;
-         }
-         scrubber.end = end;
         }
+
         scrubber.block_writes = true;
 
         // walk the log to find the latest update that affects our chunk
index e9f3981f412d5ca91689ef27f6cdd90702eff280..1fce2979c4eab56badf186514cad968d0a5be5d7 100644 (file)
@@ -1118,13 +1118,6 @@ public:
   void build_scrub_map(ScrubMap &map, ThreadPool::TPHandle &handle);
   void build_inc_scrub_map(
     ScrubMap &map, eversion_t v, ThreadPool::TPHandle &handle);
-  /**
-   * returns true if [begin, end) is good to scrub at this time
-   * a false return value obliges the implementer to requeue scrub when the
-   * condition preventing scrub clears
-   */
-  virtual bool _range_available_for_scrub(
-    const hobject_t &begin, const hobject_t &end) = 0;
   virtual void _scrub(ScrubMap &map) { }
   virtual void _scrub_clear_state() { }
   virtual void _scrub_finish() { }
index 2a20fb84f9e8daa175ddbad5160010d156f27054..94eec05d44458eb5f9b2a5fac7f46f4d0398a4b7 100644 (file)
@@ -7439,9 +7439,6 @@ void ReplicatedPG::kick_object_context_blocked(ObjectContextRef obc)
   dout(10) << __func__ << " " << soid << " requeuing " << ls.size() << " requests" << dendl;
   requeue_ops(ls);
   waiting_for_blocked_object.erase(p);
-
-  if (obc->requeue_scrub_on_unblock)
-    osd->queue_for_scrub(this);
 }
 
 SnapSetContext *ReplicatedPG::create_snapset_context(const hobject_t& oid)
@@ -11583,23 +11580,6 @@ void ReplicatedPG::agent_estimate_atime_temp(const hobject_t& oid,
 // SCRUB
 
 
-bool ReplicatedPG::_range_available_for_scrub(
-  const hobject_t &begin, const hobject_t &end)
-{
-  pair<hobject_t, ObjectContextRef> next;
-  next.second = object_contexts.lookup(begin);
-  next.first = begin;
-  bool more = true;
-  while (more && next.first < end) {
-    if (next.second && next.second->is_blocked()) {
-      next.second->requeue_scrub_on_unblock = true;
-      return true;
-    }
-    more = object_contexts.get_next(next.first, &next);
-  }
-  return false;
-}
-
 void ReplicatedPG::_scrub(ScrubMap& scrubmap)
 {
   dout(10) << "_scrub" << dendl;
index 562cb069270685a5f592c948c252da453a7dd970..3ea47218aadaed8ddbff1d831eb4c6c182b8835d 100644 (file)
@@ -1243,8 +1243,6 @@ protected:
   friend struct C_Flush;
 
   // -- scrub --
-  virtual bool _range_available_for_scrub(
-    const hobject_t &begin, const hobject_t &end);
   virtual void _scrub(ScrubMap& map);
   virtual void _scrub_clear_state();
   virtual void _scrub_finish();
index 328962001ccaca6f65f5fad459ce30ed5fd29a66..092d6ccbf6fadb62272a68c682509fe155849973 100644 (file)
@@ -2690,7 +2690,6 @@ public:
   // set if writes for this object are blocked on another objects recovery
   ObjectContextRef blocked_by;      // object blocking our writes
   set<ObjectContextRef> blocking;   // objects whose writes we block
-  bool requeue_scrub_on_unblock;    // true if we need to requeue scrub on unblock
 
   // any entity in obs.oi.watchers MUST be in either watchers or unconnected_watchers.
   map<pair<uint64_t, entity_name_t>, WatchRef> watchers;
@@ -2863,7 +2862,7 @@ public:
       destructor_callback(0),
       lock("ReplicatedPG::ObjectContext::lock"),
       unstable_writes(0), readers(0), writers_waiting(0), readers_waiting(0),
-      blocked(false), requeue_scrub_on_unblock(false) {}
+      blocked(false) {}
 
   ~ObjectContext() {
     assert(rwstate.empty());