]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
common,mds,mgr,mon,osd: store event only if it's added
authorKefu Chai <kchai@redhat.com>
Thu, 13 Jul 2017 06:49:48 +0000 (14:49 +0800)
committerJohn Spray <john.spray@redhat.com>
Wed, 1 Nov 2017 23:03:26 +0000 (23:03 +0000)
otherwise
* we will try to cancel it even if it's never been added
* we will keep a dangling pointer around, which is, well,
  scary.
* static analyzer will yell at us:
  Memory - illegal accesses  (USE_AFTER_FREE)

Signed-off-by: Kefu Chai <kchai@redhat.com>
(cherry picked from commit 2449b3a5c365987746ada095fde30e3dc63ee0c7)

28 files changed:
src/client/Client.cc
src/client/Client.h
src/common/Timer.cc
src/common/Timer.h
src/journal/JournalMetadata.cc
src/journal/ObjectPlayer.cc
src/journal/ObjectPlayer.h
src/journal/ObjectRecorder.cc
src/journal/ObjectRecorder.h
src/mds/Beacon.cc
src/mds/Beacon.h
src/mds/MDSDaemon.cc
src/mds/MDSDaemon.h
src/mgr/MgrClient.cc
src/mon/Elector.cc
src/mon/MgrMonitor.cc
src/mon/Monitor.cc
src/mon/Paxos.cc
src/mon/PaxosService.cc
src/osd/PrimaryLogPG.h
src/osd/Watch.cc
src/test/perf_local.cc
src/test/rbd_mirror/mock/MockSafeTimer.h
src/test/rbd_mirror/test_mock_ImageReplayer.cc
src/test/rbd_mirror/test_mock_InstanceReplayer.cc
src/test/rbd_mirror/test_mock_PoolWatcher.cc
src/tools/rbd_mirror/PoolWatcher.cc
src/tools/rbd_mirror/image_sync/ImageCopyRequest.cc

index 29a9c49f79faf26e56c5f496734d58fa52dc712d..1d9277a61b6ecfb1d5c01f83d2aedc4418600e3f 100644 (file)
@@ -5944,19 +5944,6 @@ void Client::unmount()
   ldout(cct, 2) << "unmounted." << dendl;
 }
 
-
-
-class C_C_Tick : public Context {
-  Client *client;
-public:
-  explicit C_C_Tick(Client *c) : client(c) {}
-  void finish(int r) override {
-    // Called back via Timer, which takes client_lock for us
-    assert(client->client_lock.is_locked_by_me());
-    client->tick();
-  }
-};
-
 void Client::flush_cap_releases()
 {
   // send any cap releases
@@ -5985,9 +5972,13 @@ void Client::tick()
   }
 
   ldout(cct, 21) << "tick" << dendl;
-  tick_event = new C_C_Tick(this);
-  timer.add_event_after(cct->_conf->client_tick_interval, tick_event);
-
+  tick_event = timer.add_event_after(
+    cct->_conf->client_tick_interval,
+    new FunctionContext([this](int) {
+       // Called back via Timer, which takes client_lock for us
+       assert(client_lock.is_locked_by_me());
+       tick();
+      }));
   utime_t now = ceph_clock_now();
 
   if (!mounted && !mds_requests.empty()) {
index e89a25440506ac0be1dd550cc011d7932251f9f5..16aef0312c15e8cb00632e01bc1b05be1a684d6a 100644 (file)
@@ -498,7 +498,6 @@ protected:
   friend class C_Client_CacheInvalidate;  // calls ino_invalidate_cb
   friend class C_Client_DentryInvalidate;  // calls dentry_invalidate_cb
   friend class C_Block_Sync; // Calls block map and protected helpers
-  friend class C_C_Tick; // Asserts on client_lock
   friend class C_Client_RequestInterrupt;
   friend class C_Client_Remount;
   friend void intrusive_ptr_release(Inode *in);
index f211a6f8ff8c2171f4921f326ac451e5c82cc283..45305f553fa6c80ed45e23fd94a36ff598b68444 100644 (file)
@@ -114,7 +114,7 @@ void SafeTimer::timer_thread()
   lock.Unlock();
 }
 
-bool SafeTimer::add_event_after(double seconds, Context *callback)
+Context* SafeTimer::add_event_after(double seconds, Context *callback)
 {
   assert(lock.is_locked());
 
@@ -123,14 +123,14 @@ bool SafeTimer::add_event_after(double seconds, Context *callback)
   return add_event_at(when, callback);
 }
 
-bool SafeTimer::add_event_at(utime_t when, Context *callback)
+Context* SafeTimer::add_event_at(utime_t when, Context *callback)
 {
   assert(lock.is_locked());
   ldout(cct,10) << __func__ << " " << when << " -> " << callback << dendl;
   if (stopping) {
     ldout(cct,5) << __func__ << " already shutdown, event not added" << dendl;
     delete callback;
-    return false;
+    return nullptr;
   }
   scheduled_map_t::value_type s_val(when, callback);
   scheduled_map_t::iterator i = schedule.insert(s_val);
@@ -145,7 +145,7 @@ bool SafeTimer::add_event_at(utime_t when, Context *callback)
    * adjust our timeout. */
   if (i == schedule.begin())
     cond.Signal();
-  return true;
+  return callback;
 }
 
 bool SafeTimer::cancel_event(Context *callback)
index 861b239ca32e12637f7fa8b1bfc1aa472ce23416..8fd478a9934999d4bbee05ef8bf26a44f1113178 100644 (file)
@@ -70,8 +70,8 @@ public:
 
   /* Schedule an event in the future
    * Call with the event_lock LOCKED */
-  bool add_event_after(double seconds, Context *callback);
-  bool add_event_at(utime_t when, Context *callback);
+  Context* add_event_after(double seconds, Context *callback);
+  Context* add_event_at(utime_t when, Context *callback);
 
   /* Cancel an event.
    * Call with the event_lock LOCKED
index 3d6fcfb2eca4e1091380b17f9cfe048cf9cada7f..4073216bcdfb2eb561cf03fe77f0d58d62a38dc8 100644 (file)
@@ -802,9 +802,9 @@ void JournalMetadata::schedule_commit_task() {
   assert(m_lock.is_locked());
   assert(m_commit_position_ctx != nullptr);
   if (m_commit_position_task_ctx == NULL) {
-    m_commit_position_task_ctx = new C_CommitPositionTask(this);
-    m_timer->add_event_after(m_settings.commit_interval,
-                             m_commit_position_task_ctx);
+    m_commit_position_task_ctx =
+      m_timer->add_event_after(m_settings.commit_interval,
+                              new C_CommitPositionTask(this));
   }
 }
 
index 92dd702615bcd018e749bb5e56fdce688bc98018..8292ebb1abfcb5801fed45ee62df2eceb87fd1c4 100644 (file)
@@ -234,9 +234,12 @@ void ObjectPlayer::schedule_watch() {
   }
 
   ldout(m_cct, 20) << __func__ << ": " << m_oid << " scheduling watch" << dendl;
-  assert(m_watch_task == NULL);
-  m_watch_task = new C_WatchTask(this);
-  m_timer.add_event_after(m_watch_interval, m_watch_task);
+  assert(m_watch_task == nullptr);
+  m_watch_task = m_timer.add_event_after(
+    m_watch_interval,
+    new FunctionContext([this](int) {
+       handle_watch_task();
+      }));
 }
 
 bool ObjectPlayer::cancel_watch() {
@@ -301,10 +304,6 @@ void ObjectPlayer::C_Fetch::finish(int r) {
   on_finish->complete(r);
 }
 
-void ObjectPlayer::C_WatchTask::finish(int r) {
-  object_player->handle_watch_task();
-}
-
 void ObjectPlayer::C_WatchFetch::finish(int r) {
   object_player->handle_watch_fetched(r);
 }
index 3d495ba7ff7ae0336f4717cdba7752b088a65e1b..a3cbe807332f32182bfca354de051e18e00db344 100644 (file)
@@ -90,12 +90,6 @@ private:
     }
     void finish(int r) override;
   };
-  struct C_WatchTask : public Context {
-    ObjectPlayerPtr object_player;
-    C_WatchTask(ObjectPlayer *o) : object_player(o) {
-    }
-    void finish(int r) override;
-  };
   struct C_WatchFetch : public Context {
     ObjectPlayerPtr object_player;
     C_WatchFetch(ObjectPlayer *o) : object_player(o) {
index a2faeae8aa60e1aadd7e17a1491d09b081d2e330..a87c31ddb29788d40060fdab17ce954807456220 100644 (file)
@@ -28,7 +28,7 @@ ObjectRecorder::ObjectRecorder(librados::IoCtx &ioctx, const std::string &oid,
     m_timer_lock(timer_lock), m_handler(handler), m_order(order),
     m_soft_max_size(1 << m_order), m_flush_interval(flush_interval),
     m_flush_bytes(flush_bytes), m_flush_age(flush_age), m_flush_handler(this),
-    m_append_task(NULL), m_lock(lock), m_append_tid(0), m_pending_bytes(0),
+    m_lock(lock), m_append_tid(0), m_pending_bytes(0),
     m_size(0), m_overflowed(false), m_object_closed(false),
     m_in_flight_flushes(false), m_aio_scheduled(false) {
   m_ioctx.dup(ioctx);
@@ -194,9 +194,11 @@ void ObjectRecorder::cancel_append_task() {
 
 void ObjectRecorder::schedule_append_task() {
   Mutex::Locker locker(m_timer_lock);
-  if (m_append_task == NULL && m_flush_age > 0) {
-    m_append_task = new C_AppendTask(this);
-    m_timer.add_event_after(m_flush_age, m_append_task);
+  if (m_append_task == nullptr && m_flush_age > 0) {
+    m_append_task = m_timer.add_event_after(
+      m_flush_age, new FunctionContext([this](int) {
+         handle_append_task();
+       }));
   }
 }
 
index aad46690134e94e50e38c60b3ecfb9bb1efce914..22a46697c522d8442e598b724581550b143df5cb 100644 (file)
@@ -90,14 +90,6 @@ private:
       object_recorder->flush(future);
     }
   };
-  struct C_AppendTask : public Context {
-    ObjectRecorder *object_recorder;
-    C_AppendTask(ObjectRecorder *o) : object_recorder(o) {
-    }
-    void finish(int r) override {
-      object_recorder->handle_append_task();
-    }
-  };
   struct C_AppendFlush : public Context {
     ObjectRecorder *object_recorder;
     uint64_t tid;
@@ -132,7 +124,7 @@ private:
 
   FlushHandler m_flush_handler;
 
-  C_AppendTask *m_append_task;
+  Context *m_append_task = nullptr;
 
   mutable std::shared_ptr<Mutex> m_lock;
   AppendBuffers m_append_buffers;
index 10ee8c242a0ba3ab0137d99c9e9df2f40ee34de1..e6bf3930e913281097b74993697f96173be6c10b 100644 (file)
 #define dout_prefix *_dout << "mds.beacon." << name << ' '
 
 
-class Beacon::C_MDS_BeaconSender : public Context {
-public:
-  explicit C_MDS_BeaconSender(Beacon *beacon_) : beacon(beacon_) {}
-  void finish(int r) override {
-    assert(beacon->lock.is_locked_by_me());
-    beacon->sender = NULL;
-    beacon->_send();
-  }
-private:
-  Beacon *beacon;
-};
-
 Beacon::Beacon(CephContext *cct_, MonClient *monc_, std::string name_) :
   Dispatcher(cct_), lock("Beacon"), monc(monc_), timer(g_ceph_context, lock),
   name(name_), standby_for_rank(MDS_RANK_NONE),
@@ -53,7 +41,6 @@ Beacon::Beacon(CephContext *cct_, MonClient *monc_, std::string name_) :
   awaiting_seq(-1)
 {
   last_seq = 0;
-  sender = NULL;
   was_laggy = false;
 
   epoch = 0;
@@ -192,8 +179,13 @@ void Beacon::_send()
   if (sender) {
     timer.cancel_event(sender);
   }
-  sender = new C_MDS_BeaconSender(this);
-  timer.add_event_after(g_conf->mds_beacon_interval, sender);
+  sender = timer.add_event_after(
+    g_conf->mds_beacon_interval,
+    new FunctionContext([this](int) {
+       assert(lock.is_locked_by_me());
+       sender = nullptr;
+       _send();
+      }));
 
   if (!cct->get_heartbeat_map()->is_healthy()) {
     /* If anything isn't progressing, let avoid sending a beacon so that
index 571f7f5599564859437732df165ef768bb42a520..201804def072f593bc3dafaac4ede6b7aff9e4bc 100644 (file)
@@ -102,8 +102,7 @@ private:
   MDSHealth health;
 
   // Ticker
-  class C_MDS_BeaconSender;
-  C_MDS_BeaconSender *sender;
+  Context *sender = nullptr;
 
   version_t awaiting_seq;
   Cond waiting_cond;
index 087c995a830df2ae31de81c35f682119c892a934..4c30b6747136bb4ecfd9355a630fe7e30d662aff 100644 (file)
 #undef dout_prefix
 #define dout_prefix *_dout << "mds." << name << ' '
 
-
-class MDSDaemon::C_MDS_Tick : public Context {
-  protected:
-    MDSDaemon *mds_daemon;
-public:
-  explicit C_MDS_Tick(MDSDaemon *m) : mds_daemon(m) {}
-  void finish(int r) override {
-    assert(mds_daemon->mds_lock.is_locked_by_me());
-    mds_daemon->tick();
-  }
-};
-
 // cons/des
 MDSDaemon::MDSDaemon(const std::string &n, Messenger *m, MonClient *mc) :
   Dispatcher(m->cct),
@@ -102,7 +90,6 @@ MDSDaemon::MDSDaemon(const std::string &n, Messenger *m, MonClient *mc) :
   mgrc(m->cct, m),
   log_client(m->cct, messenger, &mc->monmap, LogClient::NO_FLAGS),
   mds_rank(NULL),
-  tick_event(0),
   asok_hook(NULL)
 {
   orig_argc = 0;
@@ -545,8 +532,12 @@ void MDSDaemon::reset_tick()
   if (tick_event) timer.cancel_event(tick_event);
 
   // schedule
-  tick_event = new C_MDS_Tick(this);
-  timer.add_event_after(g_conf->mds_tick_interval, tick_event);
+  tick_event = timer.add_event_after(
+    g_conf->mds_tick_interval,
+    new FunctionContext([this](int) {
+       assert(mds_lock.is_locked_by_me());
+       tick();
+      }));
 }
 
 void MDSDaemon::tick()
index 0c7a1a7378a3b89ffd86c943b57f3bad5132d335..0e3bbaf26398f9e7d6ff197e961fee3da8acbe88 100644 (file)
@@ -87,8 +87,7 @@ class MDSDaemon : public Dispatcher, public md_config_obs_t {
                                  const std::set <std::string> &changed) override;
  protected:
   // tick and other timer fun
-  class C_MDS_Tick;
-  C_MDS_Tick *tick_event;
+  Context *tick_event = nullptr;
   void     reset_tick();
 
   void wait_for_omap_osds();
index 0e4b01931ef2e84ec9c4c36f5ea58d6feafa8419..b9085665868826a498179f60c6bf400413482182 100644 (file)
@@ -114,11 +114,12 @@ void MgrClient::reconnect()
     when += cct->_conf->mgr_connect_retry_interval;
     if (now < when) {
       if (!connect_retry_callback) {
-       connect_retry_callback = new FunctionContext([this](int r){
-           connect_retry_callback = nullptr;
-           reconnect();
-         });
-       timer.add_event_at(when, connect_retry_callback);
+       connect_retry_callback = timer.add_event_at(
+         when,
+         new FunctionContext([this](int r){
+             connect_retry_callback = nullptr;
+             reconnect();
+           }));
       }
       ldout(cct, 4) << "waiting to retry connect until " << when << dendl;
       return;
index b7fde85528deff25ee46b52e537e213fa055c245..f69bcf16d5a6395b4eed8550012099a28124a834 100644 (file)
@@ -159,11 +159,11 @@ void Elector::reset_timer(double plus)
    * as far as we know, we may even be dead); so, just propose ourselves as the
    * Leader.
    */
-  expire_event = new C_MonContext(mon, [this](int) {
-      expire();
-    });
-  mon->timer.add_event_after(g_conf->mon_election_timeout + plus,
-                            expire_event);
+  expire_event = mon->timer.add_event_after(
+    g_conf->mon_election_timeout + plus,
+    new C_MonContext(mon, [this](int) {
+       expire();
+      }));
 }
 
 
index f11859971c620d7077187a2fd2fe39aba879a8e4..01d4899f6f60e69ff758fca324edd94769c41de3 100644 (file)
@@ -464,10 +464,11 @@ void MgrMonitor::send_digests()
     sub->session->con->send_message(mdigest);
   }
 
-  digest_event = new C_MonContext(mon, [this](int){
+  digest_event = mon->timer.add_event_after(
+    g_conf->mon_mgr_digest_period,
+    new C_MonContext(mon, [this](int) {
       send_digests();
-  });
-  mon->timer.add_event_after(g_conf->mon_mgr_digest_period, digest_event);
+  }));
 }
 
 void MgrMonitor::cancel_timer()
index 86c4006ae7ba3f77867d0f143f3955994a37b19b..cb76c70f54d5c07d4cd6d704979e6bfc5aa9f256 100644 (file)
@@ -1250,10 +1250,11 @@ void Monitor::sync_reset_timeout()
   dout(10) << __func__ << dendl;
   if (sync_timeout_event)
     timer.cancel_event(sync_timeout_event);
-  sync_timeout_event = new C_MonContext(this, [this](int) {
-      sync_timeout();
-    });
-  timer.add_event_after(g_conf->mon_sync_timeout, sync_timeout_event);
+  sync_timeout_event = timer.add_event_after(
+    g_conf->mon_sync_timeout,
+    new C_MonContext(this, [this](int) {
+       sync_timeout();
+      }));
 }
 
 void Monitor::sync_finish(version_t last_committed)
@@ -1596,8 +1597,12 @@ void Monitor::reset_probe_timeout()
       probe_timeout(r);
     });
   double t = g_conf->mon_probe_timeout;
-  timer.add_event_after(t, probe_timeout_event);
-  dout(10) << "reset_probe_timeout " << probe_timeout_event << " after " << t << " seconds" << dendl;
+  if (timer.add_event_after(t, probe_timeout_event)) {
+    dout(10) << "reset_probe_timeout " << probe_timeout_event
+            << " after " << t << " seconds" << dendl;
+  } else {
+    probe_timeout_event = nullptr;
+  }
 }
 
 void Monitor::probe_timeout(int r)
@@ -2299,14 +2304,14 @@ void Monitor::health_tick_start()
   dout(15) << __func__ << dendl;
 
   health_tick_stop();
-  health_tick_event = new C_MonContext(this, [this](int r) {
-      if (r < 0)
-        return;
-      do_health_to_clog();
-      health_tick_start();
-    });
-  timer.add_event_after(cct->_conf->mon_health_to_clog_tick_interval,
-                        health_tick_event);
+  health_tick_event = timer.add_event_after(
+    cct->_conf->mon_health_to_clog_tick_interval,
+    new C_MonContext(this, [this](int r) {
+       if (r < 0)
+         return;
+       do_health_to_clog();
+       health_tick_start();
+      }));
 }
 
 void Monitor::health_tick_stop()
@@ -2353,7 +2358,9 @@ void Monitor::health_interval_start()
         return;
       do_health_to_clog_interval();
     });
-  timer.add_event_at(next, health_interval_event);
+  if (!timer.add_event_at(next, health_interval_event)) {
+    health_interval_event = nullptr;
+  }
 }
 
 void Monitor::health_interval_stop()
@@ -4591,10 +4598,11 @@ void Monitor::timecheck_reset_event()
            << " rounds_since_clean " << timecheck_rounds_since_clean
            << dendl;
 
-  timecheck_event = new C_MonContext(this, [this](int) {
-      timecheck_start_round();
-    });
-  timer.add_event_after(delay, timecheck_event);
+  timecheck_event = timer.add_event_after(
+    delay,
+    new C_MonContext(this, [this](int) {
+       timecheck_start_round();
+      }));
 }
 
 void Monitor::timecheck_check_skews()
@@ -5440,10 +5448,11 @@ void Monitor::scrub_event_start()
     return;
   }
 
-  scrub_event = new C_MonContext(this, [this](int) {
+  scrub_event = timer.add_event_after(
+    cct->_conf->mon_scrub_interval,
+    new C_MonContext(this, [this](int) {
       scrub_start();
-    });
-  timer.add_event_after(cct->_conf->mon_scrub_interval, scrub_event);
+      }));
 }
 
 void Monitor::scrub_event_cancel()
@@ -5467,11 +5476,11 @@ void Monitor::scrub_reset_timeout()
 {
   dout(15) << __func__ << " reset timeout event" << dendl;
   scrub_cancel_timeout();
-
-  scrub_timeout_event = new C_MonContext(this, [this](int) {
+  scrub_timeout_event = timer.add_event_after(
+    g_conf->mon_scrub_timeout,
+    new C_MonContext(this, [this](int) {
       scrub_timeout();
-    });
-  timer.add_event_after(g_conf->mon_scrub_timeout, scrub_timeout_event);
+    }));
 }
 
 /************ TICK ***************/
index 31d3cfdb51f13b42993d48685fd40369413386fb..e92438769f088a910be5e7c6097e8a19e31caad3 100644 (file)
@@ -200,14 +200,14 @@ void Paxos::collect(version_t oldpn)
   }
 
   // set timeout event
-  collect_timeout_event = new C_MonContext(mon, [this](int r) {
+  collect_timeout_event = mon->timer.add_event_after(
+    g_conf->mon_accept_timeout_factor *
+    g_conf->mon_lease,
+    new C_MonContext(mon, [this](int r) {
        if (r == -ECANCELED)
          return;
        collect_timeout();
-    });
-  mon->timer.add_event_after(g_conf->mon_accept_timeout_factor *
-                            g_conf->mon_lease,
-                            collect_timeout_event);
+    }));
 }
 
 
@@ -692,14 +692,13 @@ void Paxos::begin(bufferlist& v)
   }
 
   // set timeout event
-  accept_timeout_event = new C_MonContext(mon, [this](int r) {
-      if (r == -ECANCELED)
-       return;
-      accept_timeout();
-    });
-  mon->timer.add_event_after(g_conf->mon_accept_timeout_factor *
-                            g_conf->mon_lease,
-                            accept_timeout_event);
+  accept_timeout_event = mon->timer.add_event_after(
+    g_conf->mon_accept_timeout_factor * g_conf->mon_lease,
+    new C_MonContext(mon, [this](int r) {
+       if (r == -ECANCELED)
+         return;
+       accept_timeout();
+      }));
 }
 
 // peon
@@ -997,26 +996,25 @@ void Paxos::extend_lease()
   // set timeout event.
   //  if old timeout is still in place, leave it.
   if (!lease_ack_timeout_event) {
-    lease_ack_timeout_event = new C_MonContext(mon, [this](int r) {
-       if (r == -ECANCELED)
-         return;
-       lease_ack_timeout();
-      });
-    mon->timer.add_event_after(g_conf->mon_lease_ack_timeout_factor *
-                              g_conf->mon_lease,
-                              lease_ack_timeout_event);
+    lease_ack_timeout_event = mon->timer.add_event_after(
+      g_conf->mon_lease_ack_timeout_factor * g_conf->mon_lease,
+      new C_MonContext(mon, [this](int r) {
+         if (r == -ECANCELED)
+           return;
+         lease_ack_timeout();
+       }));
   }
 
   // set renew event
-  lease_renew_event = new C_MonContext(mon, [this](int r) {
-      if (r == -ECANCELED)
-       return;
-      lease_renew_timeout();
-    });
   utime_t at = lease_expire;
   at -= g_conf->mon_lease;
   at += g_conf->mon_lease_renew_interval_factor * g_conf->mon_lease;
-  mon->timer.add_event_at(at, lease_renew_event);
+  lease_renew_event = mon->timer.add_event_at(
+    at, new C_MonContext(mon, [this](int r) {
+       if (r == -ECANCELED)
+         return;
+       lease_renew_timeout();
+    }));
 }
 
 void Paxos::warn_on_future_time(utime_t t, entity_name_t from)
@@ -1200,14 +1198,13 @@ void Paxos::reset_lease_timeout()
   dout(20) << "reset_lease_timeout - setting timeout event" << dendl;
   if (lease_timeout_event)
     mon->timer.cancel_event(lease_timeout_event);
-  lease_timeout_event = new C_MonContext(mon, [this](int r) {
-      if (r == -ECANCELED)
-       return;
-      lease_timeout();
-    });
-  mon->timer.add_event_after(g_conf->mon_lease_ack_timeout_factor *
-                            g_conf->mon_lease,
-                            lease_timeout_event);
+  lease_timeout_event = mon->timer.add_event_after(
+    g_conf->mon_lease_ack_timeout_factor * g_conf->mon_lease,
+    new C_MonContext(mon, [this](int r) {
+       if (r == -ECANCELED)
+         return;
+       lease_timeout();
+      }));
 }
 
 void Paxos::lease_timeout()
index dcd83506cebbc8a569cee63604613bb39586b6e0..de732c322301956af95f9e7276495fb842f2467b 100644 (file)
@@ -117,7 +117,7 @@ bool PaxosService::dispatch(MonOpRequestRef op)
        * Callback class used to propose the pending value once the proposal_timer
        * fires up.
        */
-    proposal_timer = new C_MonContext(mon, [this](int r) {
+    auto do_propose = new C_MonContext(mon, [this](int r) {
         proposal_timer = 0;
         if (r >= 0) {
           propose_pending();
@@ -127,9 +127,9 @@ bool PaxosService::dispatch(MonOpRequestRef op)
           assert(0 == "bad return value for proposal_timer");
         }
     });
-    dout(10) << " setting proposal_timer " << proposal_timer
+    dout(10) << " setting proposal_timer " << do_propose
              << " with delay of " << delay << dendl;
-    mon->timer.add_event_after(delay, proposal_timer);
+    proposal_timer = mon->timer.add_event_after(delay, do_propose);
   } else {
     dout(10) << " proposal_timer already set" << dendl;
   }
index a4d34d17141d29b52d4d458e3f5ad78afbc579c6..df2a45f5877bbff4625124f7e0a5bbbba3f09e89 100644 (file)
@@ -1551,10 +1551,10 @@ private:
       };
       auto *pg = context< SnapTrimmer >().pg;
       if (pg->cct->_conf->osd_snap_trim_sleep > 0) {
-       wakeup = new OnTimer{pg, pg->get_osdmap()->get_epoch()};
        Mutex::Locker l(pg->osd->snap_sleep_lock);
-       pg->osd->snap_sleep_timer.add_event_after(
-         pg->cct->_conf->osd_snap_trim_sleep, wakeup);
+       wakeup = pg->osd->snap_sleep_timer.add_event_after(
+         pg->cct->_conf->osd_snap_trim_sleep,
+         new OnTimer{pg, pg->get_osdmap()->get_epoch()});
       } else {
        post_event(SnapTrimTimerReady());
       }
index df92bb7712c451e9718877722309fd0b631fbada..7ff9f99b2bfab5929a52b7409279d20cccb5d09d 100644 (file)
@@ -124,9 +124,9 @@ void Notify::register_cb()
   {
     osd->watch_lock.Lock();
     cb = new NotifyTimeoutCB(self.lock());
-    osd->watch_timer.add_event_after(
-      timeout,
-      cb);
+    if (!osd->watch_timer.add_event_after(timeout, cb)) {
+      cb = nullptr;
+    }
     osd->watch_lock.Unlock();
   }
 }
@@ -333,9 +333,9 @@ void Watch::register_cb()
     dout(15) << "registering callback, timeout: " << timeout << dendl;
   }
   cb = new HandleWatchTimeout(self.lock());
-  osd->watch_timer.add_event_after(
-    timeout,
-    cb);
+  if (!osd->watch_timer.add_event_after(timeout, cb)) {
+    cb = nullptr;
+  }
 }
 
 void Watch::unregister_cb()
index 98cccd87bbd58a013bfb472807dacd74acec51ad..c3b9f7cccc8a1f4111703b21e4a4c774c10d67b2 100644 (file)
@@ -785,8 +785,9 @@ double perf_timer()
   uint64_t start = Cycles::rdtsc();
   Mutex::Locker l(lock);
   for (int i = 0; i < count; i++) {
-    timer.add_event_after(12345, c[i]);
-    timer.cancel_event(c[i]);
+    if (timer.add_event_after(12345, c[i])) {
+      timer.cancel_event(c[i]);
+    }
   }
   uint64_t stop = Cycles::rdtsc();
   delete[] c;
index 3de5fbcdbf5cc4e24a5c83fc2fba6bfc63a1300b..32d58471d4a4b0aefbe4a94f84a78988e031b0ff 100644 (file)
@@ -9,7 +9,7 @@
 struct Context;
 
 struct MockSafeTimer {
-  MOCK_METHOD2(add_event_after, void(double, Context*));
+  MOCK_METHOD2(add_event_after, Context*(double, Context*));
   MOCK_METHOD1(cancel_event, bool(Context *));
 };
 
index 7a0bb6706f64393596a75baff2e60b00e728ecf6..9e2006c9d5190a250dd6243d6e0e8ab1097332f2 100644 (file)
@@ -105,6 +105,7 @@ using ::testing::InSequence;
 using ::testing::Invoke;
 using ::testing::MatcherCast;
 using ::testing::Return;
+using ::testing::ReturnArg;
 using ::testing::SetArgPointee;
 using ::testing::WithArg;
 
@@ -356,9 +357,10 @@ public:
   void expect_add_event_after_repeatedly(MockThreads &mock_threads) {
     EXPECT_CALL(*mock_threads.timer, add_event_after(_, _))
       .WillRepeatedly(
-        Invoke([this](double seconds, Context *ctx) {
-          m_threads->timer->add_event_after(seconds, ctx);
-        }));
+        DoAll(Invoke([this](double seconds, Context *ctx) {
+                      m_threads->timer->add_event_after(seconds, ctx);
+                    }),
+         ReturnArg<1>()));
     EXPECT_CALL(*mock_threads.timer, cancel_event(_))
       .WillRepeatedly(
         Invoke([this](Context *ctx) {
index 1903c55f2c9032b84c585614bf534155cea12c4c..02bc0886df514655f92a5834f9d5ea3f16e07438 100644 (file)
@@ -121,9 +121,11 @@ namespace rbd {
 namespace mirror {
 
 using ::testing::_;
+using ::testing::DoAll;
 using ::testing::InSequence;
 using ::testing::Invoke;
 using ::testing::Return;
+using ::testing::ReturnArg;
 using ::testing::ReturnRef;
 using ::testing::WithArg;
 
@@ -146,8 +148,8 @@ public:
   void expect_add_event_after(MockThreads &mock_threads,
                               Context** timer_ctx = nullptr) {
     EXPECT_CALL(*mock_threads.timer, add_event_after(_, _))
-      .WillOnce(WithArg<1>(
-        Invoke([this, &mock_threads, timer_ctx](Context *ctx) {
+      .WillOnce(DoAll(
+        WithArg<1>(Invoke([this, &mock_threads, timer_ctx](Context *ctx) {
           assert(mock_threads.timer_lock.is_locked());
           if (timer_ctx != nullptr) {
             *timer_ctx = ctx;
@@ -159,7 +161,8 @@ public:
                 ctx->complete(0);
               }), 0);
           }
-        })));
+        })),
+        ReturnArg<1>()));
   }
 
   void expect_cancel_event(MockThreads &mock_threads, bool canceled) {
index 1b7877434ad96bb24a59fbd45bc5d0dd0d8bb8b1..4c7463d660c552df8e20ad29371c031f5035149e 100644 (file)
@@ -145,6 +145,7 @@ using ::testing::DoAll;
 using ::testing::InSequence;
 using ::testing::Invoke;
 using ::testing::Return;
+using ::testing::ReturnArg;
 using ::testing::StrEq;
 using ::testing::WithArg;
 using ::testing::WithoutArgs;
@@ -238,13 +239,15 @@ public:
 
   void expect_timer_add_event(MockThreads &mock_threads) {
     EXPECT_CALL(*mock_threads.timer, add_event_after(_, _))
-      .WillOnce(WithArg<1>(Invoke([this](Context *ctx) {
-          auto wrapped_ctx = new FunctionContext([this, ctx](int r) {
-              Mutex::Locker timer_locker(m_threads->timer_lock);
-              ctx->complete(r);
-            });
-          m_threads->work_queue->queue(wrapped_ctx, 0);
-        })));
+      .WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) {
+                        auto wrapped_ctx =
+                         new FunctionContext([this, ctx](int r) {
+                             Mutex::Locker timer_locker(m_threads->timer_lock);
+                             ctx->complete(r);
+                           });
+                       m_threads->work_queue->queue(wrapped_ctx, 0);
+                      })),
+                      ReturnArg<1>()));
   }
 
   int when_shut_down(MockPoolWatcher &mock_pool_watcher) {
index 18c6df3840fb948d863d1277db66d7baddd455bb..8d60aa4f47a8c6a440a3b89545381c57a8bbaae3 100644 (file)
@@ -362,10 +362,11 @@ void PoolWatcher<I>::schedule_refresh_images(double interval) {
   }
 
   m_image_ids_invalid = true;
-  m_timer_ctx = new FunctionContext([this](int r) {
-      process_refresh_images();
-    });
-  m_threads->timer->add_event_after(interval, m_timer_ctx);
+  m_timer_ctx = m_threads->timer->add_event_after(
+    interval,
+    new FunctionContext([this](int r) {
+       process_refresh_images();
+      }));
 }
 
 template <typename I>
index 6278d01015558868bf4dad1ba1b3cc98428cdee1..6768caa005bd49570e1dc2eb87fb1d9177e2e54a 100644 (file)
@@ -161,8 +161,9 @@ void ImageCopyRequest<I>::send_object_copies() {
   {
     Mutex::Locker timer_locker(*m_timer_lock);
     if (m_update_sync_ctx) {
-      m_timer->add_event_after(m_update_sync_point_interval,
-                               m_update_sync_ctx);
+      m_update_sync_ctx = m_timer->add_event_after(
+        m_update_sync_point_interval,
+       m_update_sync_ctx);
     }
   }