From 216eef53e924c1d474e31659fc632c8fe60e7c26 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Fri, 25 Sep 2015 14:01:19 -0400
Subject: [PATCH] Revert "osd: new pool settings: scrub intervals"

This reverts commit 30810da4b573b3c7e679bc66eaa29b70246538cb.

After some discussion we have decided it is better to build a generic
dictionary in pg_pool_t to store infrequently used per-pool properties.

Signed-off-by: Sage Weil
---
 doc/rados/operations/pools.rst        | 27 -----------------
 qa/workunits/cephtool/test.sh         | 15 ----------
 src/mon/MonCommands.h                 |  4 +--
 src/mon/OSDMonitor.cc                 | 43 ++-------------------------
 src/osd/OSD.cc                        | 10 ++-----
 src/osd/OSD.h                         | 10 ++-----
 src/osd/PG.cc                         |  8 ++---
 src/osd/osd_types.cc                  | 23 +-------------
 src/osd/osd_types.h                   |  9 +-----
 src/test/mon/misc.sh                  | 10 -------
 src/test/pybind/test_ceph_argparse.py |  8 ++---
 11 files changed, 16 insertions(+), 151 deletions(-)

diff --git a/doc/rados/operations/pools.rst b/doc/rados/operations/pools.rst
index a6d08af0cb6d6..80b56d4b6d684 100644
--- a/doc/rados/operations/pools.rst
+++ b/doc/rados/operations/pools.rst
@@ -423,33 +423,6 @@ You may set values for the following keys:
 
 :Example: ``1800`` 30min
 
-``scrub_min_interval``
-
-:Description: The maximum interval in seconds for pool scrubbing when
-              load is low. If it is 0, the value osd_scrub_min_interval
-              from config is used.
-
-:Type: Double
-:Default: ``0``
-
-``scrub_max_interval``
-
-:Description: The maximum interval in seconds for pool scrubbing
-              irrespective of cluster load. If it is 0, the value
-              osd_scrub_max_interval from config is used.
-
-:Type: Double
-:Default: ``0``
-
-
-``deep_scrub_interval``
-
-:Description: The interval in seconds for pool "deep" scrubbing. If it
-              is 0, the value osd_deep_scrub_interval from config is used.
-
-:Type: Double
-:Default: ``0``
-
 
 Get Pool Values
 ===============
diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index d01b3495072d1..157eb612873e7 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -1368,21 +1368,6 @@ function test_mon_osd_pool_set()
     expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
   done
 
-  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
-  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
-  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
-  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 0'
-
-  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
-  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
-  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
-  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 0'
-
-  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
-  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
-  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
-  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 0'
-
   ceph osd pool set $TEST_POOL_GETSET nopgchange 1
   expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
   expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
diff --git a/src/mon/MonCommands.h b/src/mon/MonCommands.h
index 2f74768cc8ed6..0286b8319dd1e 100644
--- a/src/mon/MonCommands.h
+++ b/src/mon/MonCommands.h
@@ -674,11 +674,11 @@ COMMAND("osd pool rename " \
 	"rename <srcpool> to <destpool>", "osd", "rw", "cli,rest")
 COMMAND("osd pool get " \
 	"name=pool,type=CephPoolname " \
-	"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read|scrub_min_interval|scrub_max_interval|deep_scrub_interval", \
+	"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read", \
 	"get pool parameter <var>", "osd", "r", "cli,rest")
 COMMAND("osd pool set " \
 	"name=pool,type=CephPoolname " \
-	"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|debug_fake_ec_pool|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|min_read_recency_for_promote|min_write_recency_for_promote|fast_read|scrub_min_interval|scrub_max_interval|deep_scrub_interval " \
+	"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|debug_fake_ec_pool|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|min_read_recency_for_promote|min_write_recency_for_promote|fast_read " \
 	"name=val,type=CephString " \
 	"name=force,type=CephChoices,strings=--yes-i-really-mean-it,req=false", \
 	"set pool parameter <var> to <val>", "osd", "rw", "cli,rest")
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 3d255468b6366..f7dca78b54018 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -2885,8 +2885,7 @@ namespace {
     CACHE_TARGET_FULL_RATIO,
     CACHE_MIN_FLUSH_AGE, CACHE_MIN_EVICT_AGE,
     ERASURE_CODE_PROFILE, MIN_READ_RECENCY_FOR_PROMOTE,
-    MIN_WRITE_RECENCY_FOR_PROMOTE, FAST_READ,
-    SCRUB_MIN_INTERVAL, SCRUB_MAX_INTERVAL, DEEP_SCRUB_INTERVAL};
+    MIN_WRITE_RECENCY_FOR_PROMOTE, FAST_READ};
 
   std::set<osd_pool_get_choices>
     subtract_second_from_first(const std::set<osd_pool_get_choices>& first,
@@ -3360,9 +3359,7 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
       ("erasure_code_profile", ERASURE_CODE_PROFILE)
       ("min_read_recency_for_promote", MIN_READ_RECENCY_FOR_PROMOTE)
       ("min_write_recency_for_promote", MIN_WRITE_RECENCY_FOR_PROMOTE)
-      ("fast_read", FAST_READ)("scrub_min_interval", SCRUB_MIN_INTERVAL)
-      ("scrub_max_interval", SCRUB_MAX_INTERVAL)
-      ("deep_scrub_interval", DEEP_SCRUB_INTERVAL);
+      ("fast_read", FAST_READ);
 
     typedef std::set<osd_pool_get_choices> choices_set_t;
 
@@ -3533,15 +3530,6 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
         case FAST_READ:
           f->dump_int("fast_read", p->fast_read);
           break;
-        case SCRUB_MIN_INTERVAL:
-          f->dump_int("scrub_min_interval", p->scrub_min_interval);
-          break;
-        case SCRUB_MAX_INTERVAL:
-          f->dump_int("scrub_max_interval", p->scrub_max_interval);
-          break;
-        case DEEP_SCRUB_INTERVAL:
-          f->dump_int("deep_scrub_interval", p->deep_scrub_interval);
-          break;
       }
       f->close_section();
       f->flush(rdata);
@@ -3655,15 +3643,6 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
         case FAST_READ:
          ss << "fast_read: " << p->fast_read << "\n";
          break;
-        case SCRUB_MIN_INTERVAL:
-         ss << "scrub_min_interval: " << p->scrub_min_interval << "\n";
-         break;
-        case SCRUB_MAX_INTERVAL:
-         ss << "scrub_max_interval: " << p->scrub_max_interval << "\n";
-         break;
-        case DEEP_SCRUB_INTERVAL:
-         ss << "deep_scrub_interval: " << p->deep_scrub_interval << "\n";
-         break;
       }
       rdata.append(ss.str());
      ss.str("");
@@ -5047,24 +5026,6 @@ int OSDMonitor::prepare_command_pool_set(map<string,cmd_vartype> &cmdmap,
     } else if (val == "false" || (interr.empty() && n == 0)) {
       p.fast_read = false;
     }
-  } else if (var == "scrub_min_interval") {
-    if (floaterr.length()) {
-      ss << "error parsing floating point value '" << val << "': " << floaterr;
-      return -EINVAL;
-    }
-    p.scrub_min_interval = f;
-  } else if (var == "scrub_max_interval") {
-    if (floaterr.length()) {
-      ss << "error parsing floating point value '" << val << "': " << floaterr;
-      return -EINVAL;
-    }
-    p.scrub_max_interval = f;
-  } else if (var == "deep_scrub_interval") {
-    if (floaterr.length()) {
-      ss << "error parsing floating point value '" << val << "': " << floaterr;
-      return -EINVAL;
-    }
-    p.deep_scrub_interval = f;
   } else {
     ss << "unrecognized variable '" << var << "'";
     return -EINVAL;
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index f5f68476308a2..a6529d1532280 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -5934,23 +5934,19 @@ bool OSD::scrub_random_backoff()
   return false;
 }
 
-OSDService::ScrubJob::ScrubJob(const spg_t& pg, const utime_t& timestamp,
-                               double pool_scrub_min_interval,
-                               double pool_scrub_max_interval, bool must)
+OSDService::ScrubJob::ScrubJob(const spg_t& pg, const utime_t& timestamp, bool must)
   : pgid(pg),
     sched_time(timestamp),
     deadline(timestamp)
 {
   // if not explicitly requested, postpone the scrub with a random delay
   if (!must) {
-    sched_time += pool_scrub_min_interval > 0 ? pool_scrub_min_interval :
-      g_conf->osd_scrub_min_interval;
+    sched_time += g_conf->osd_scrub_min_interval;
     if (g_conf->osd_scrub_interval_randomize_ratio > 0) {
       sched_time += rand() % (int)(g_conf->osd_scrub_min_interval *
                                    g_conf->osd_scrub_interval_randomize_ratio);
     }
-    deadline += pool_scrub_max_interval > 0 ? pool_scrub_max_interval :
-      g_conf->osd_scrub_max_interval;
+    deadline += g_conf->osd_scrub_max_interval;
   }
 }
 
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index 539114861f3e7..f59fb5efced67 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -589,19 +589,15 @@ public:
     /// the hard upper bound of scrub time
     utime_t deadline;
     ScrubJob() {}
-    explicit ScrubJob(const spg_t& pg, const utime_t& timestamp,
-                      double pool_scrub_min_interval = 0,
-                      double pool_scrub_max_interval = 0, bool must = true);
+    explicit ScrubJob(const spg_t& pg, const utime_t& timestamp, bool must = true);
     /// order the jobs by sched_time
     bool operator<(const ScrubJob& rhs) const;
   };
   set<ScrubJob> sched_scrub_pg;
 
   /// @returns the scrub_reg_stamp used for unregister the scrub job
-  utime_t reg_pg_scrub(spg_t pgid, utime_t t, double pool_scrub_min_interval,
-                       double pool_scrub_max_interval, bool must) {
-    ScrubJob scrub(pgid, t, pool_scrub_min_interval, pool_scrub_max_interval,
-                   must);
+  utime_t reg_pg_scrub(spg_t pgid, utime_t t, bool must) {
+    ScrubJob scrub(pgid, t, must);
     Mutex::Locker l(sched_scrub_lock);
     sched_scrub_pg.insert(scrub);
     return scrub.sched_time;
diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index f217eb1faa643..ada7c81dc2115 100644
--- a/src/osd/PG.cc
+++ b/src/osd/PG.cc
@@ -3149,10 +3149,8 @@ bool PG::sched_scrub()
     return false;
   }
 
-  double deep_scrub_interval = pool.info.deep_scrub_interval > 0 ?
-    pool.info.deep_scrub_interval : cct->_conf->osd_deep_scrub_interval;
-  bool time_for_deep = ceph_clock_now(cct) >
-    info.history.last_deep_scrub_stamp + deep_scrub_interval;
+  bool time_for_deep = (ceph_clock_now(cct) >
+    info.history.last_deep_scrub_stamp + cct->_conf->osd_deep_scrub_interval);
 
   //NODEEP_SCRUB so ignore time initiated deep-scrub
   if (osd->osd->get_osdmap()->test_flag(CEPH_OSDMAP_NODEEP_SCRUB) ||
@@ -3219,8 +3217,6 @@ void PG::reg_next_scrub()
   // later on.
   scrubber.scrub_reg_stamp = osd->reg_pg_scrub(info.pgid, reg_stamp,
-                                               pool.info.scrub_min_interval,
-                                               pool.info.scrub_max_interval,
                                                scrubber.must_scrub);
 }
 
diff --git a/src/osd/osd_types.cc b/src/osd/osd_types.cc
index 3d7e0a5da5a3b..a675d5640759a 100644
--- a/src/osd/osd_types.cc
+++ b/src/osd/osd_types.cc
@@ -948,9 +948,6 @@ void pg_pool_t::dump(Formatter *f) const
   f->dump_unsigned("stripe_width", get_stripe_width());
   f->dump_unsigned("expected_num_objects", expected_num_objects);
   f->dump_bool("fast_read", fast_read);
-  f->dump_float("scrub_min_interval", scrub_min_interval);
-  f->dump_float("scrub_max_interval", scrub_max_interval);
-  f->dump_float("deep_scrub_interval", deep_scrub_interval);
 }
 
 void pg_pool_t::convert_to_pg_shards(const vector<int> &from, set<pg_shard_t>* to) const {
@@ -1260,7 +1257,7 @@ void pg_pool_t::encode(bufferlist& bl, uint64_t features) const
     return;
   }
 
-  ENCODE_START(23, 5, bl);
+  ENCODE_START(22, 5, bl);
   ::encode(type, bl);
   ::encode(size, bl);
   ::encode(crush_ruleset, bl);
@@ -1306,9 +1303,6 @@ void pg_pool_t::encode(bufferlist& bl, uint64_t features) const
   ::encode(min_write_recency_for_promote, bl);
   ::encode(use_gmt_hitset, bl);
   ::encode(fast_read, bl);
-  ::encode(scrub_min_interval, bl);
-  ::encode(scrub_max_interval, bl);
-  ::encode(deep_scrub_interval, bl);
   ENCODE_FINISH(bl);
 }
 
@@ -1446,15 +1440,6 @@ void pg_pool_t::decode(bufferlist::iterator& bl)
   } else {
     fast_read = false;
   }
-  if (struct_v >= 23) {
-    ::decode(scrub_min_interval, bl);
-    ::decode(scrub_max_interval, bl);
-    ::decode(deep_scrub_interval, bl);
-  } else {
-    scrub_min_interval = 0;
-    scrub_max_interval = 0;
-    deep_scrub_interval = 0;
-  }
   DECODE_FINISH(bl);
   calc_pg_masks();
 }
@@ -1566,12 +1551,6 @@ ostream& operator<<(ostream& out, const pg_pool_t& p)
     out << " expected_num_objects " << p.expected_num_objects;
   if (p.fast_read)
     out << " fast_read " << p.fast_read;
-  if (p.scrub_min_interval > 0)
-    out << " scrub_min_interval " << p.scrub_min_interval;
-  if (p.scrub_max_interval > 0)
-    out << " scrub_max_interval " << p.scrub_max_interval;
-  if (p.deep_scrub_interval > 0)
-    out << " deep_scrub_interval " << p.deep_scrub_interval;
   return out;
 }
 
diff --git a/src/osd/osd_types.h b/src/osd/osd_types.h
index c0616ebfcec02..10c38da351fcc 100644
--- a/src/osd/osd_types.h
+++ b/src/osd/osd_types.h
@@ -1129,10 +1129,6 @@ public:
                                  ///< user does not specify any expected value
   bool fast_read; ///< whether turn on fast read on the pool or not
 
-  double scrub_min_interval; //< scrub min interval
-  double scrub_max_interval; //< scrub max interval
-  double deep_scrub_interval; //< deep-scrub interval
-
   pg_pool_t()
     : flags(0), type(0), size(0), min_size(0),
       crush_ruleset(0), object_hash(0),
@@ -1160,10 +1156,7 @@ public:
       min_write_recency_for_promote(0),
       stripe_width(0),
       expected_num_objects(0),
-      fast_read(false),
-      scrub_min_interval(0),
-      scrub_max_interval(0),
-      deep_scrub_interval(0)
+      fast_read(false)
   { }
 
   void dump(Formatter *f) const;
diff --git a/src/test/mon/misc.sh b/src/test/mon/misc.sh
index 0c35f74b424f7..d2e5dbd51c9ad 100755
--- a/src/test/mon/misc.sh
+++ b/src/test/mon/misc.sh
@@ -62,16 +62,6 @@ function TEST_osd_pool_get_set() {
         ./ceph osd pool set $TEST_POOL $flag 0 || return 1
     done
 
-    ./ceph osd pool set $TEST_POOL scrub_min_interval 123456 || return 1
-    ./ceph osd dump | grep 'pool ' | grep 'scrub_min_interval 123456' || return 1
-    ./ceph osd pool set $TEST_POOL scrub_min_interval 0 || return 1
-    ./ceph osd pool set $TEST_POOL scrub_max_interval 123456 || return 1
-    ./ceph osd dump | grep 'pool ' | grep 'scrub_max_interval 123456' || return 1
-    ./ceph osd pool set $TEST_POOL scrub_max_interval 0 || return 1
-    ./ceph osd pool set $TEST_POOL deep_scrub_interval 123456 || return 1
-    ./ceph osd dump | grep 'pool ' | grep 'deep_scrub_interval 123456' || return 1
-    ./ceph osd pool set $TEST_POOL deep_scrub_interval 0 || return 1
-
     local size=$(./ceph osd pool get $TEST_POOL size|awk '{print $2}')
     local min_size=$(./ceph osd pool get $TEST_POOL min_size|awk '{print $2}')
     #replicated pool size restrict in 1 and 10
diff --git a/src/test/pybind/test_ceph_argparse.py b/src/test/pybind/test_ceph_argparse.py
index 655b44f4413f9..b0f608dd19957 100755
--- a/src/test/pybind/test_ceph_argparse.py
+++ b/src/test/pybind/test_ceph_argparse.py
@@ -1035,9 +1035,7 @@ class TestOSD(TestArgparse):
 
     def test_pool_get(self):
         for var in ('size', 'min_size', 'crash_replay_interval',
-                    'pg_num', 'pgp_num', 'crush_ruleset', 'auid', 'fast_read',
-                    'scrub_min_interval', 'scrub_max_interval',
-                    'deep_scrub_interval'):
+                    'pg_num', 'pgp_num', 'crush_ruleset', 'auid', 'fast_read'):
             self.assert_valid_command(['osd', 'pool', 'get', 'poolname', var])
         assert_equal({}, validate_command(sigdict, ['osd', 'pool']))
         assert_equal({}, validate_command(sigdict, ['osd', 'pool',
@@ -1054,9 +1052,7 @@ class TestOSD(TestArgparse):
     def test_pool_set(self):
         for var in ('size', 'min_size', 'crash_replay_interval',
                     'pg_num', 'pgp_num', 'crush_ruleset',
-                    'hashpspool', 'auid', 'fast_read',
-                    'scrub_min_interval', 'scrub_max_interval',
-                    'deep_scrub_interval'):
+                    'hashpspool', 'auid', 'fast_read'):
             self.assert_valid_command(['osd', 'pool',
                                        'set', 'poolname', var, 'value'])
         assert_equal({}, validate_command(sigdict, ['osd', 'pool',
-- 
2.39.5
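
Note on the follow-up direction mentioned in the commit message: rather than adding a dedicated pg_pool_t field (and an encode/decode version bump) for every rarely used tunable such as the per-pool scrub intervals, the stated plan is a generic dictionary of infrequently used per-pool properties. The C++ sketch below only illustrates that idea under that assumption; it is not code from this patch or from Ceph, and the names pool_opts_sketch, opts, set, and get_double are invented here for illustration.

// Illustrative sketch only -- not code from this patch or from Ceph.
// A generic, string-keyed dictionary of infrequently used per-pool
// properties: a new tunable becomes a new entry instead of a new
// pg_pool_t field plus an encoding version bump.
#include <cstdlib>
#include <map>
#include <string>

struct pool_opts_sketch {
  // key -> stringified value, e.g. "scrub_min_interval" -> "123456"
  std::map<std::string, std::string> opts;

  void set(const std::string& key, const std::string& val) {
    opts[key] = val;
  }

  // Parse the stored value as a double; fall back to a caller-supplied
  // default (for example, the cluster-wide config value) when unset.
  double get_double(const std::string& key, double fallback) const {
    std::map<std::string, std::string>::const_iterator it = opts.find(key);
    return it == opts.end() ? fallback : std::atof(it->second.c_str());
  }
};

// Usage idea, mirroring the per-pool-then-config fallback removed by this
// revert (g_conf->osd_scrub_min_interval is the existing global default):
//   double min_interval =
//     pool.opts.get_double("scrub_min_interval", g_conf->osd_scrub_min_interval);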