git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
Revert "Merge pull request #17893 from dzafman/wip-19140-19224" (branch: 22920/head)
author: Sage Weil <sage@redhat.com>
Fri, 6 Jul 2018 21:58:50 +0000 (16:58 -0500)
committer: Sage Weil <sage@redhat.com>
Fri, 6 Jul 2018 21:58:50 +0000 (16:58 -0500)
This reverts commit b5f71c68608f1c0debae7efd39c5f12787776a21, reversing
changes made to 93d589af3f245cfe45a366ce4ae8473d62685ca0.

This resulted in several failures due to the objecter sending out of order
reads vs writes after going from full -> non-full.  Not clear why.

Signed-off-by: Sage Weil <sage@redhat.com>
qa/tasks/ceph_manager.py
qa/tasks/thrashosds.py
src/osdc/Objecter.cc
src/osdc/Objecter.h

index 62488dde361ebc29dfa1c780d9d6741885d7a8f6..73638ca1caa56a643c7c54d983d83a77450f6615 100644 (file)
@@ -114,7 +114,6 @@ class Thrasher:
         self.minin = self.config.get("min_in", 3)
         self.chance_move_pg = self.config.get('chance_move_pg', 1.0)
         self.sighup_delay = self.config.get('sighup_delay')
-        self.chance_thrash_cluster_full = self.config.get('chance_thrash_cluster_full', .05)
 
         num_osds = self.in_osds + self.out_osds
         self.max_pgs = self.config.get("max_pgs_per_pool_osd", 1200) * num_osds
@@ -457,16 +456,6 @@ class Thrasher:
         self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity',
                                           str(osd), str(pa))
 
-    def thrash_cluster_full(self):
-        """
-        Set and unset cluster full condition
-        """
-        self.log('Setting full ratio to .001')
-        self.ceph_manager.raw_cluster_cmd('pg', 'set_full_ratio', '.001')
-        time.sleep(1)
-        self.log('Setting full ratio back to .95')
-        self.ceph_manager.raw_cluster_cmd('pg', 'set_full_ratio', '.95')
-
     def all_up(self):
         """
         Make sure all osds are up and not out.
@@ -670,8 +659,6 @@ class Thrasher:
                         chance_test_min_size,))
         actions.append((self.test_backfill_full,
                         chance_test_backfill_full,))
-        if self.chance_thrash_cluster_full > 0:
-            actions.append((self.thrash_cluster_full, self.chance_thrash_cluster_full,))
         for key in ['heartbeat_inject_failure', 'filestore_inject_stall']:
             for scenario in [
                 (lambda:
index 0624503e34fbc3f78f16a5fc21170cbeb77eaac5..fb1defbb7bf1b3ce66b21c36bdee0f29182f9755 100644 (file)
@@ -102,8 +102,6 @@ def task(ctx, config):
     ceph_objectstore_tool: (true) whether to export/import a pg while an osd is down
     chance_move_pg: (1.0) chance of moving a pg if more than 1 osd is down (default 100%)
 
-    chance_thrash_cluster_full: .05
-
     example:
 
     tasks:
index a4b6a8f4607aa46180c34012b644e4c297c6c7c3..e9bf5686c767ec04f20ab2533d6b169d1191dc9c 100644 (file)
@@ -1040,7 +1040,8 @@ void Objecter::_scan_requests(OSDSession *s,
     int r = _calc_target(&op->target, &op->last_force_resend);
     switch (r) {
     case RECALC_OP_TARGET_NO_ACTION:
-      if (!force_resend && !(force_resend_writes && op->respects_full()))
+      if (!force_resend &&
+         (!force_resend_writes || !(op->target.flags & CEPH_OSD_FLAG_WRITE)))
        break;
       // -- fall-thru --
     case RECALC_OP_TARGET_NEED_RESEND:
@@ -2315,7 +2316,9 @@ void Objecter::_op_submit(Op *op, shunique_lock& sul, ceph_tid_t *ptid)
                   << dendl;
     op->target.paused = true;
     _maybe_request_map();
-  } else if (op->respects_full() &&
+  } else if ((op->target.flags & CEPH_OSD_FLAG_WRITE) &&
+            !(op->target.flags & (CEPH_OSD_FLAG_FULL_TRY |
+                                  CEPH_OSD_FLAG_FULL_FORCE)) &&
             (_osdmap_full_flag() ||
              _osdmap_pool_full(op->target.base_oloc.pool))) {
     ldout(cct, 0) << " FULL, paused modify " << op << " tid "
index 4fa60f584c54482becc9e829abedaf6cd28aefe7..0d12ff8e9dd9202653e2ba28f8609986e6002171 100644 (file)
@@ -1337,12 +1337,6 @@ public:
       return tid < other.tid;
     }
 
-    bool respects_full() const {
-      return
-       (target.flags & (CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_RWORDERED)) &&
-       !(target.flags & (CEPH_OSD_FLAG_FULL_TRY | CEPH_OSD_FLAG_FULL_FORCE));
-    }
-
   private:
     ~Op() {
       while (!out_handler.empty()) {