osd: make osd replay interval a per-pool property
author Sage Weil <sage@newdream.net>
Sun, 23 Oct 2011 22:32:58 +0000 (15:32 -0700)
committer Sage Weil <sage@newdream.net>
Sun, 23 Oct 2011 23:26:48 +0000 (16:26 -0700)
Change the config value so that it only controls the interval set when the
data pool is first created (presumably during mkfs).  Start the replay
interval based on the pool property.

Signed-off-by: Sage Weil <sage@newdream.net>
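
In outline, the replay window now lives on the pool rather than in a runtime
config option: OSDMap::build_simple() seeds crash_replay_interval from
osd_default_data_pool_replay_window for the data pool only, and PG::activate()
consults the pool property instead of g_conf->osd_replay_window.  The
following minimal standalone C++ sketch illustrates that shape; Config,
PoolInfo, build_pools() and activate() are simplified stand-ins for
illustration, not the actual Ceph classes.

// Minimal sketch of a per-pool crash replay interval (illustrative names,
// not the real OSDMap/PG classes).
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct Config {
  // Consulted only when the data pool is created (e.g. at mkfs time).
  int osd_default_data_pool_replay_window = 45;
};

struct PoolInfo {
  std::string name;
  uint32_t crash_replay_interval = 0;  // seconds; 0 disables replay
};

// Roughly analogous to OSDMap::build_simple(): seed the data pool's
// interval from the config default; other pools keep 0.
std::map<int, PoolInfo> build_pools(const Config& conf) {
  std::map<int, PoolInfo> pools;
  pools[0] = {"data", static_cast<uint32_t>(conf.osd_default_data_pool_replay_window)};
  pools[1] = {"metadata", 0};
  return pools;
}

// Roughly analogous to PG::activate(): whether a PG enters replay now
// depends only on the pool property, not on a global runtime option.
void activate(const PoolInfo& pool, bool may_need_replay, uint64_t now) {
  if (pool.crash_replay_interval > 0 && may_need_replay) {
    uint64_t replay_until = now + pool.crash_replay_interval;
    std::cout << pool.name << ": replaying until t=" << replay_until << "\n";
  } else {
    std::cout << pool.name << ": no replay window, activating immediately\n";
  }
}

int main() {
  Config conf;
  for (const auto& p : build_pools(conf))
    activate(p.second, /*may_need_replay=*/true, /*now=*/1000);
  return 0;
}

The design point, per the commit message, is that the config option is
consulted only once, at pool creation; afterwards the interval travels with
the pool in the OSDMap, so every OSD sees the same per-pool value.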
src/common/config_opts.h
src/osd/OSDMap.cc
src/osd/PG.cc

diff --git a/src/common/config_opts.h b/src/common/config_opts.h
index 250cdb44e4850c7232b52503debaa55aabfa769a..ab526415982074a079a6544e4d528eefb333bfdf 100644
@@ -262,7 +262,7 @@ OPTION(osd_mon_report_interval_max, OPT_INT, 120)
 OPTION(osd_mon_report_interval_min, OPT_INT, 5)  // pg stats, failures, up_thru, boot.
 OPTION(osd_min_down_reporters, OPT_INT, 1)   // number of OSDs who need to report a down OSD for it to count
 OPTION(osd_min_down_reports, OPT_INT, 3)     // number of times a down OSD must be reported for it to count
-OPTION(osd_replay_window, OPT_INT, 45)
+OPTION(osd_default_data_pool_replay_window, OPT_INT, 45)
 OPTION(osd_preserve_trimmed_log, OPT_BOOL, true)
 OPTION(osd_auto_mark_unfound_lost, OPT_BOOL, false)
 OPTION(osd_recovery_delay_start, OPT_FLOAT, 15)
diff --git a/src/osd/OSDMap.cc b/src/osd/OSDMap.cc
index 6ce35f3c366ad4923296b5892e5f59446a60135e..1a3020aa1a4ea5c0760d83152704736abd4db0a1 100644
@@ -906,6 +906,8 @@ void OSDMap::build_simple(CephContext *cct, epoch_t e, ceph_fsid_t &fsid,
     pools[pool].lpg_num = lpg_bits ? (1 << (lpg_bits-1)) : 0;
     pools[pool].lpgp_num = lpg_bits ? (1 << (lpg_bits-1)) : 0;
     pools[pool].last_change = epoch;
+    if (p->first == CEPH_DATA_RULE)
+      pools[pool].crash_replay_interval = cct->_conf->osd_default_data_pool_replay_window;
     pool_name[pool] = p->second;
   }
 
diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index eed29b966cf61f4ec8f27983d4287492fa86eb79..47714e5c8968667170c20203b4375ae2698152a8 100644
@@ -1331,20 +1331,16 @@ void PG::activate(ObjectStore::Transaction& t, list<Context*>& tfin,
   assert(!is_active());
 
   // -- crash recovery?
-  if (may_need_replay(osd->osdmap)) {
-    if (g_conf->osd_replay_window > 0) {
-      replay_until = ceph_clock_now(g_ceph_context);
-      replay_until += g_conf->osd_replay_window;
-      dout(10) << "crashed, allowing op replay for " << g_conf->osd_replay_window
-              << " until " << replay_until << dendl;
-      state_set(PG_STATE_REPLAY);
-      osd->replay_queue_lock.Lock();
-      osd->replay_queue.push_back(pair<pg_t,utime_t>(info.pgid, replay_until));
-      osd->replay_queue_lock.Unlock();
-    } else {
-      dout(10) << "crashed, but osd_replay_window=0.  skipping replay." << dendl;
-      state_clear(PG_STATE_REPLAY);
-    }
+  if (pool->info.crash_replay_interval > 0 &&
+      may_need_replay(osd->osdmap)) {
+    replay_until = ceph_clock_now(g_ceph_context);
+    replay_until += pool->info.crash_replay_interval;
+    dout(10) << "activate starting replay interval for " << pool->info.crash_replay_interval
+            << " until " << replay_until << dendl;
+    state_set(PG_STATE_REPLAY);
+    osd->replay_queue_lock.Lock();
+    osd->replay_queue.push_back(pair<pg_t,utime_t>(info.pgid, replay_until));
+    osd->replay_queue_lock.Unlock();
   }
 
   // twiddle pg state