git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
Revert "osd: new pool settings: scrub intervals" 6084/head
author: Sage Weil <sage@redhat.com>
Fri, 25 Sep 2015 18:01:19 +0000 (14:01 -0400)
committer: Sage Weil <sage@redhat.com>
Fri, 25 Sep 2015 18:01:19 +0000 (14:01 -0400)
This reverts commit 30810da4b573b3c7e679bc66eaa29b70246538cb.

After some discussion we have decided it is better to build a generic
dictionary in pg_pool_t to store infrequently used per-pool properties.

Signed-off-by: Sage Weil <sage@redhat.com>
doc/rados/operations/pools.rst
qa/workunits/cephtool/test.sh
src/mon/MonCommands.h
src/mon/OSDMonitor.cc
src/osd/OSD.cc
src/osd/OSD.h
src/osd/PG.cc
src/osd/osd_types.cc
src/osd/osd_types.h
src/test/mon/misc.sh
src/test/pybind/test_ceph_argparse.py

index a6d08af0cb6d62ae2b0b5b4b5edfefd9cb121207..80b56d4b6d68427a6d79b75e14607a6abac38ae0 100644 (file)
@@ -423,33 +423,6 @@ You may set values for the following keys:
 :Example: ``1800`` 30min
 
 
-``scrub_min_interval``
-
-:Description: The maximum interval in seconds for pool scrubbing when
-              load is low. If it is 0, the value osd_scrub_min_interval
-              from config is used.
-
-:Type: Double
-:Default: ``0``
-
-``scrub_max_interval``
-
-:Description: The maximum interval in seconds for pool scrubbing
-              irrespective of cluster load. If it is 0, the value
-              osd_scrub_max_interval from config is used.
-
-:Type: Double
-:Default: ``0``
-
-
-``deep_scrub_interval``
-
-:Description: The interval in seconds for pool “deep” scrubbing. If it
-              is 0, the value osd_deep_scrub_interval from config is used.
-
-:Type: Double
-:Default: ``0``
-
 
 Get Pool Values
 ===============
index d01b3495072d15dbdedf83c031781e9ffce9616c..157eb612873e78575d60ba3f018c595e044f07db 100755 (executable)
@@ -1368,21 +1368,6 @@ function test_mon_osd_pool_set()
       expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
   done
 
-  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
-  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
-  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
-  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 0'
-
-  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
-  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
-  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
-  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 0'
-
-  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
-  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
-  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
-  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 0'
-
   ceph osd pool set $TEST_POOL_GETSET nopgchange 1
   expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
   expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
index 2f74768cc8ed6b4bf826483aca717a78068b06c4..0286b8319dd1e85f69a5c50725d27034789e3f5c 100644 (file)
@@ -674,11 +674,11 @@ COMMAND("osd pool rename " \
        "rename <srcpool> to <destpool>", "osd", "rw", "cli,rest")
 COMMAND("osd pool get " \
        "name=pool,type=CephPoolname " \
-       "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read|scrub_min_interval|scrub_max_interval|deep_scrub_interval", \
+       "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read", \
        "get pool parameter <var>", "osd", "r", "cli,rest")
 COMMAND("osd pool set " \
        "name=pool,type=CephPoolname " \
-       "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|debug_fake_ec_pool|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|min_read_recency_for_promote|min_write_recency_for_promote|fast_read|scrub_min_interval|scrub_max_interval|deep_scrub_interval " \
+       "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|debug_fake_ec_pool|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|min_read_recency_for_promote|min_write_recency_for_promote|fast_read " \
        "name=val,type=CephString " \
        "name=force,type=CephChoices,strings=--yes-i-really-mean-it,req=false", \
        "set pool parameter <var> to <val>", "osd", "rw", "cli,rest")
index 3d255468b636603f7de55e8593a1823445785ed4..f7dca78b54018b93a748be8ac066aef28bee0202 100644 (file)
@@ -2885,8 +2885,7 @@ namespace {
     CACHE_TARGET_FULL_RATIO,
     CACHE_MIN_FLUSH_AGE, CACHE_MIN_EVICT_AGE,
     ERASURE_CODE_PROFILE, MIN_READ_RECENCY_FOR_PROMOTE,
-    MIN_WRITE_RECENCY_FOR_PROMOTE, FAST_READ,
-    SCRUB_MIN_INTERVAL, SCRUB_MAX_INTERVAL, DEEP_SCRUB_INTERVAL};
+    MIN_WRITE_RECENCY_FOR_PROMOTE, FAST_READ};
 
   std::set<osd_pool_get_choices>
     subtract_second_from_first(const std::set<osd_pool_get_choices>& first,
@@ -3360,9 +3359,7 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
       ("erasure_code_profile", ERASURE_CODE_PROFILE)
       ("min_read_recency_for_promote", MIN_READ_RECENCY_FOR_PROMOTE)
       ("min_write_recency_for_promote", MIN_WRITE_RECENCY_FOR_PROMOTE)
-      ("fast_read", FAST_READ)("scrub_min_interval", SCRUB_MIN_INTERVAL)
-      ("scrub_max_interval", SCRUB_MAX_INTERVAL)
-      ("deep_scrub_interval", DEEP_SCRUB_INTERVAL);
+      ("fast_read", FAST_READ);
 
     typedef std::set<osd_pool_get_choices> choices_set_t;
 
@@ -3533,15 +3530,6 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
           case FAST_READ:
             f->dump_int("fast_read", p->fast_read);
             break;
-         case SCRUB_MIN_INTERVAL:
-            f->dump_int("scrub_min_interval", p->scrub_min_interval);
-            break;
-         case SCRUB_MAX_INTERVAL:
-            f->dump_int("scrub_max_interval", p->scrub_max_interval);
-            break;
-         case DEEP_SCRUB_INTERVAL:
-            f->dump_int("deep_scrub_interval", p->deep_scrub_interval);
-            break;
        }
        f->close_section();
        f->flush(rdata);
@@ -3655,15 +3643,6 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
           case FAST_READ:
             ss << "fast_read: " << p->fast_read << "\n";
             break;
-         case SCRUB_MIN_INTERVAL:
-            ss << "scrub_min_interval: " << p->scrub_min_interval << "\n";
-            break;
-         case SCRUB_MAX_INTERVAL:
-            ss << "scrub_max_interval: " << p->scrub_max_interval << "\n";
-            break;
-         case DEEP_SCRUB_INTERVAL:
-            ss << "deep_scrub_interval: " << p->deep_scrub_interval << "\n";
-            break;
        }
        rdata.append(ss.str());
        ss.str("");
@@ -5047,24 +5026,6 @@ int OSDMonitor::prepare_command_pool_set(map<string,cmd_vartype> &cmdmap,
     } else if (val == "false" || (interr.empty() && n == 0)) {
       p.fast_read = false;
     }
-  } else if (var == "scrub_min_interval") {
-    if (floaterr.length()) {
-      ss << "error parsing floating point value '" << val << "': " << floaterr;
-      return -EINVAL;
-    }
-    p.scrub_min_interval = f;
-  } else if (var == "scrub_max_interval") {
-    if (floaterr.length()) {
-      ss << "error parsing floating point value '" << val << "': " << floaterr;
-      return -EINVAL;
-    }
-    p.scrub_max_interval = f;
-  } else if (var == "deep_scrub_interval") {
-    if (floaterr.length()) {
-      ss << "error parsing floating point value '" << val << "': " << floaterr;
-      return -EINVAL;
-    }
-    p.deep_scrub_interval = f;
   } else {
     ss << "unrecognized variable '" << var << "'";
     return -EINVAL;
index f5f68476308a2af8a24616a398e3b2ce225e7f85..a6529d15322805313941c5ec047e0b4a5299e5d9 100644 (file)
@@ -5934,23 +5934,19 @@ bool OSD::scrub_random_backoff()
   return false;
 }
 
-OSDService::ScrubJob::ScrubJob(const spg_t& pg, const utime_t& timestamp,
-                              double pool_scrub_min_interval,
-                              double pool_scrub_max_interval, bool must)
+OSDService::ScrubJob::ScrubJob(const spg_t& pg, const utime_t& timestamp, bool must)
   : pgid(pg),
     sched_time(timestamp),
     deadline(timestamp)
 {
   // if not explicitly requested, postpone the scrub with a random delay
   if (!must) {
-    sched_time += pool_scrub_min_interval > 0 ? pool_scrub_min_interval :
-      g_conf->osd_scrub_min_interval;
+    sched_time += g_conf->osd_scrub_min_interval;
     if (g_conf->osd_scrub_interval_randomize_ratio > 0) {
       sched_time += rand() % (int)(g_conf->osd_scrub_min_interval *
                                   g_conf->osd_scrub_interval_randomize_ratio);
     }
-    deadline += pool_scrub_max_interval > 0 ? pool_scrub_max_interval :
-      g_conf->osd_scrub_max_interval;
+    deadline += g_conf->osd_scrub_max_interval;
   }
 }
 
index 539114861f3e79242134623dcc41843a32b985fa..f59fb5efced67ba54b763a1b206637ec712090da 100644 (file)
@@ -589,19 +589,15 @@ public:
     /// the hard upper bound of scrub time
     utime_t deadline;
     ScrubJob() {}
-    explicit ScrubJob(const spg_t& pg, const utime_t& timestamp,
-                     double pool_scrub_min_interval = 0,
-                     double pool_scrub_max_interval = 0, bool must = true);
+    explicit ScrubJob(const spg_t& pg, const utime_t& timestamp, bool must = true);
     /// order the jobs by sched_time
     bool operator<(const ScrubJob& rhs) const;
   };
   set<ScrubJob> sched_scrub_pg;
 
   /// @returns the scrub_reg_stamp used for unregister the scrub job
-  utime_t reg_pg_scrub(spg_t pgid, utime_t t, double pool_scrub_min_interval,
-                      double pool_scrub_max_interval, bool must) {
-    ScrubJob scrub(pgid, t, pool_scrub_min_interval, pool_scrub_max_interval,
-                  must);
+  utime_t reg_pg_scrub(spg_t pgid, utime_t t, bool must) {
+    ScrubJob scrub(pgid, t, must);
     Mutex::Locker l(sched_scrub_lock);
     sched_scrub_pg.insert(scrub);
     return scrub.sched_time;
index f217eb1faa643fd95bf84cac64d40de50bb14c0c..ada7c81dc21155a2ec96e4c3d9a6e8b882983e8f 100644 (file)
@@ -3149,10 +3149,8 @@ bool PG::sched_scrub()
     return false;
   }
 
-  double deep_scrub_interval = pool.info.deep_scrub_interval > 0 ?
-    pool.info.deep_scrub_interval : cct->_conf->osd_deep_scrub_interval;
-  bool time_for_deep = ceph_clock_now(cct) >
-    info.history.last_deep_scrub_stamp + deep_scrub_interval;
+  bool time_for_deep = (ceph_clock_now(cct) >
+    info.history.last_deep_scrub_stamp + cct->_conf->osd_deep_scrub_interval);
 
   //NODEEP_SCRUB so ignore time initiated deep-scrub
   if (osd->osd->get_osdmap()->test_flag(CEPH_OSDMAP_NODEEP_SCRUB) ||
@@ -3219,8 +3217,6 @@ void PG::reg_next_scrub()
   // later on.
   scrubber.scrub_reg_stamp = osd->reg_pg_scrub(info.pgid,
                                               reg_stamp,
-                                              pool.info.scrub_min_interval,
-                                              pool.info.scrub_max_interval,
                                               scrubber.must_scrub);
 }
 
index 3d7e0a5da5a3b53aaf370ffb3bfc7aa17e56f17f..a675d5640759a3c974ce71fdc811545f59661680 100644 (file)
@@ -948,9 +948,6 @@ void pg_pool_t::dump(Formatter *f) const
   f->dump_unsigned("stripe_width", get_stripe_width());
   f->dump_unsigned("expected_num_objects", expected_num_objects);
   f->dump_bool("fast_read", fast_read);
-  f->dump_float("scrub_min_interval", scrub_min_interval);
-  f->dump_float("scrub_max_interval", scrub_max_interval);
-  f->dump_float("deep_scrub_interval", deep_scrub_interval);
 }
 
 void pg_pool_t::convert_to_pg_shards(const vector<int> &from, set<pg_shard_t>* to) const {
@@ -1260,7 +1257,7 @@ void pg_pool_t::encode(bufferlist& bl, uint64_t features) const
     return;
   }
 
-  ENCODE_START(23, 5, bl);
+  ENCODE_START(22, 5, bl);
   ::encode(type, bl);
   ::encode(size, bl);
   ::encode(crush_ruleset, bl);
@@ -1306,9 +1303,6 @@ void pg_pool_t::encode(bufferlist& bl, uint64_t features) const
   ::encode(min_write_recency_for_promote, bl);
   ::encode(use_gmt_hitset, bl);
   ::encode(fast_read, bl);
-  ::encode(scrub_min_interval, bl);
-  ::encode(scrub_max_interval, bl);
-  ::encode(deep_scrub_interval, bl);
   ENCODE_FINISH(bl);
 }
 
@@ -1446,15 +1440,6 @@ void pg_pool_t::decode(bufferlist::iterator& bl)
   } else {
     fast_read = false;
   }
-  if (struct_v >= 23) {
-    ::decode(scrub_min_interval, bl);
-    ::decode(scrub_max_interval, bl);
-    ::decode(deep_scrub_interval, bl);
-  } else {
-    scrub_min_interval = 0;
-    scrub_max_interval = 0;
-    deep_scrub_interval = 0;
-  }
   DECODE_FINISH(bl);
   calc_pg_masks();
 }
@@ -1566,12 +1551,6 @@ ostream& operator<<(ostream& out, const pg_pool_t& p)
     out << " expected_num_objects " << p.expected_num_objects;
   if (p.fast_read)
     out << " fast_read " << p.fast_read;
-  if (p.scrub_min_interval > 0)
-    out << " scrub_min_interval " << p.scrub_min_interval;
-  if (p.scrub_max_interval > 0)
-    out << " scrub_max_interval " << p.scrub_max_interval;
-  if (p.deep_scrub_interval > 0)
-    out << " deep_scrub_interval " << p.deep_scrub_interval;
   return out;
 }
 
index c0616ebfcec02ee81809c0aaa4f5b4eda55544cf..10c38da351fcc3881422890a2df8f41910f1168a 100644 (file)
@@ -1129,10 +1129,6 @@ public:
                                  ///< user does not specify any expected value
   bool fast_read;            ///< whether turn on fast read on the pool or not
 
-  double scrub_min_interval;  //< scrub min interval
-  double scrub_max_interval;  //< scrub max interval
-  double deep_scrub_interval; //< deep-scrub interval
-
   pg_pool_t()
     : flags(0), type(0), size(0), min_size(0),
       crush_ruleset(0), object_hash(0),
@@ -1160,10 +1156,7 @@ public:
       min_write_recency_for_promote(0),
       stripe_width(0),
       expected_num_objects(0),
-      fast_read(false),
-      scrub_min_interval(0),
-      scrub_max_interval(0),
-      deep_scrub_interval(0)
+      fast_read(false)
   { }
 
   void dump(Formatter *f) const;
index 0c35f74b424f7ffa00e40ff6a0ff796267313ae1..d2e5dbd51c9ad5acc6c9f53cba3a90715df37559 100755 (executable)
@@ -62,16 +62,6 @@ function TEST_osd_pool_get_set() {
        ./ceph osd pool set $TEST_POOL $flag 0 || return 1
     done
 
-    ./ceph osd pool set $TEST_POOL scrub_min_interval 123456 || return 1
-    ./ceph osd dump | grep 'pool ' | grep 'scrub_min_interval 123456' || return 1
-    ./ceph osd pool set $TEST_POOL scrub_min_interval 0 || return 1
-    ./ceph osd pool set $TEST_POOL scrub_max_interval 123456 || return 1
-    ./ceph osd dump | grep 'pool ' | grep 'scrub_max_interval 123456' || return 1
-    ./ceph osd pool set $TEST_POOL scrub_max_interval 0 || return 1
-    ./ceph osd pool set $TEST_POOL deep_scrub_interval 123456 || return 1
-    ./ceph osd dump | grep 'pool ' | grep 'deep_scrub_interval 123456' || return 1
-    ./ceph osd pool set $TEST_POOL deep_scrub_interval 0 || return 1
-
     local size=$(./ceph osd pool get $TEST_POOL size|awk '{print $2}')
     local min_size=$(./ceph osd pool get $TEST_POOL min_size|awk '{print $2}')
     #replicated pool size restrict in 1 and 10
index 655b44f4413f9b3544079e8b7cdc5abab528cd50..b0f608dd199578facac316bf83b389ef3c67f2af 100755 (executable)
@@ -1035,9 +1035,7 @@ class TestOSD(TestArgparse):
 
     def test_pool_get(self):
         for var in ('size', 'min_size', 'crash_replay_interval',
-                    'pg_num', 'pgp_num', 'crush_ruleset', 'auid', 'fast_read',
-                    'scrub_min_interval', 'scrub_max_interval',
-                    'deep_scrub_interval'):
+                    'pg_num', 'pgp_num', 'crush_ruleset', 'auid', 'fast_read'):
             self.assert_valid_command(['osd', 'pool', 'get', 'poolname', var])
         assert_equal({}, validate_command(sigdict, ['osd', 'pool']))
         assert_equal({}, validate_command(sigdict, ['osd', 'pool',
@@ -1054,9 +1052,7 @@ class TestOSD(TestArgparse):
     def test_pool_set(self):
         for var in ('size', 'min_size', 'crash_replay_interval',
                     'pg_num', 'pgp_num', 'crush_ruleset',
-                    'hashpspool', 'auid', 'fast_read',
-                    'scrub_min_interval', 'scrub_max_interval',
-                    'deep_scrub_interval'):
+                    'hashpspool', 'auid', 'fast_read'):
             self.assert_valid_command(['osd', 'pool',
                                        'set', 'poolname', var, 'value'])
         assert_equal({}, validate_command(sigdict, ['osd', 'pool',