git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
rgw: keep status and history in coroutine
author: Yehuda Sadeh <yehuda@redhat.com>
Mon, 23 Nov 2015 22:28:49 +0000 (14:28 -0800)
committer: Yehuda Sadeh <yehuda@redhat.com>
Fri, 12 Feb 2016 00:13:38 +0000 (16:13 -0800)
will replace the report container stuff

Signed-off-by: Yehuda Sadeh <yehuda@redhat.com>
src/rgw/rgw_coroutine.cc
src/rgw/rgw_coroutine.h
src/rgw/rgw_sync.cc

index 2db4a04e2038d707f098818885962623c6d40196..957ab57f8ae37b0e247763bd017556a17b3322cd 100644 (file)
@@ -104,6 +104,24 @@ int RGWCoroutine::io_block(int ret) {
   return ret;
 }
 
+// Dump one archived status entry as JSON: {"timestamp": ..., "status": ...}.
+void RGWCoroutine::StatusItem::dump(Formatter *f) const {
+  ::encode_json("timestamp", timestamp, f);
+  ::encode_json("status", status, f);
+}
+
+// Replace the coroutine's current status string, archiving the previous
+// one.  The outgoing (timestamp, status) pair is appended to `history`
+// (bounded by max_history; the oldest entry is dropped first), then the
+// timestamp is refreshed and the new status installed.  The whole update
+// runs under the Status write lock.
+void RGWCoroutine::Status::set_status(const string& s)
+{
+  RWLock::WLocker l(lock);
+  // Skip archiving on the very first call: timestamp is still zero,
+  // meaning no previous status was ever set.
+  if (!timestamp.is_zero()) {
+    history.push_back(StatusItem(timestamp, status));
+  }
+  if (history.size() > (size_t)max_history) {
+    history.pop_front();
+  }
+  timestamp = ceph_clock_now(cct);
+  status = s;
+}
+
 RGWCoroutinesStack::RGWCoroutinesStack(CephContext *_cct, RGWCoroutinesManager *_ops_mgr, RGWCoroutine *start) : cct(_cct), ops_mgr(_ops_mgr),
                                                                                                          done_flag(false), error_flag(false), blocked_flag(false),
                                                                                                          sleep_flag(false), interval_wait_flag(false), is_scheduled(false), is_waiting_for_child(false),
@@ -677,6 +695,9 @@ void RGWCoroutine::wakeup()
 }
 
 void RGWCoroutine::dump(Formatter *f) const {
+  // Emit the optional human-readable description first, if one was set
+  // via set_description().
+  if (!description.empty()) {
+    encode_json("description", description, f);
+  }
   encode_json("type", to_str(), f);
   if (!spawned.entries.empty()) {
     f->open_array_section("spawned");
@@ -687,6 +708,16 @@ void RGWCoroutine::dump(Formatter *f) const {
     }
     f->close_section();
   }
+  // NOTE(review): status.history / status.status / status.timestamp are
+  // read here without taking status.lock, so a concurrent set_status()
+  // could race with this dump — confirm dump() only runs from a context
+  // that cannot race with the coroutine's own set_status() calls.
+  if (!status.history.empty()) {
+    encode_json("history", status.history, f);
+  }
+
+  // Current status is emitted as a nested object alongside the history.
+  if (!status.status.empty()) {
+    f->open_object_section("status");
+    encode_json("status", status.status, f);
+    encode_json("timestamp", status.timestamp, f);
+    f->close_section();
+  }
 }
 
 int RGWSimpleCoroutine::operate()
index 396b7fd21fb025bd06ed04b338cd561ed9b9e16f..632092e53a92235c1ddb4b35ab00cc05ac8120a2 100644 (file)
@@ -131,6 +131,34 @@ struct rgw_spawned_stacks {
 class RGWCoroutine : public RefCountedObject, public boost::asio::coroutine {
   friend class RGWCoroutinesStack;
 
+  // One archived status entry: a status string plus the time at which it
+  // became current.
+  struct StatusItem {
+    utime_t timestamp;
+    string status;
+
+    // NOTE(review): `t` is never modified here — could be const utime_t&.
+    StatusItem(utime_t& t, const string& s) : timestamp(t), status(s) {}
+
+    void dump(Formatter *f) const;
+  };
+
+#define MAX_COROUTINE_HISTORY 10
+
+  // Current status of the coroutine plus a bounded history of previous
+  // statuses; updates are serialized by the internal rwlock.
+  struct Status {
+    CephContext *cct;
+    RWLock lock;
+    int max_history; // cap on history entries (MAX_COROUTINE_HISTORY)
+
+    utime_t timestamp; // when `status` was last set; zero until the first set_status()
+    string status;
+
+    Status(CephContext *_cct) : cct(_cct), lock("RGWCoroutine::Status::lock"), max_history(MAX_COROUTINE_HISTORY) {}
+
+    deque<StatusItem> history;
+
+    // Archive the current status (if any) and install `status` as the new
+    // current one.
+    void set_status(const string& status);
+  } status;
+
+  string description; // optional human-readable description, emitted by dump()
+
 protected:
   bool _yield_ret;
   boost::asio::coroutine drain_cr;
@@ -160,8 +188,15 @@ protected:
   void set_io_blocked(bool flag);
   int io_block(int ret = 0);
 
+  // Set the free-form description included in dump() output.
+  void set_description(const string& s) {
+    description = s;
+  }
+  // Convenience wrapper: update the coroutine's current status, archiving
+  // the previous one in status.history.
+  void set_status(const string& s) {
+    status.set_status(s);
+  }
+
 public:
-  RGWCoroutine(CephContext *_cct) : _yield_ret(false), cct(_cct), stack(NULL), retcode(0), state(RGWCoroutine_Run) {}
+  RGWCoroutine(CephContext *_cct) : status(_cct), _yield_ret(false), cct(_cct), stack(NULL), retcode(0), state(RGWCoroutine_Run) {}
   virtual ~RGWCoroutine() {}
 
   virtual int operate() = 0;
index 9b2ad076ba43f727eca05ed77f309ae8372e5de5..0b962d808d3d11364fd80855ca7d8e664938b07b 100644 (file)
@@ -556,7 +556,7 @@ public:
     int ret;
     reenter(this) {
       yield {
-        report->set_status("acquiring sync lock");
+        set_status("acquiring sync lock");
        uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
         string lock_name = "sync_lock";
         RGWRados *store = sync_env->store;
@@ -568,26 +568,26 @@ public:
       while (!lease_cr->is_locked()) {
         if (lease_cr->is_done()) {
           ldout(cct, 0) << "ERROR: lease cr failed, done early " << dendl;
-          report->set_status("lease lock failed, early abort");
+          set_status("lease lock failed, early abort");
           return set_cr_error(lease_cr->get_ret_status());
         }
         set_sleeping(true);
         yield;
       }
       yield {
-        report->set_status("writing sync status");
+        set_status("writing sync status");
         RGWRados *store = sync_env->store;
         call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store, store->get_zone_params().log_pool,
                                 sync_env->status_oid(), status));
       }
 
       if (retcode < 0) {
-        report->set_status("failed to write sync status");
+        set_status("failed to write sync status");
         ldout(cct, 0) << "ERROR: failed to write sync status, retcode=" << retcode << dendl;
         return set_cr_error(retcode);
       }
       /* fetch current position in logs */
-      report->set_status("fetching remote log position");
+      set_status("fetching remote log position");
       yield {
         for (int i = 0; i < (int)status.num_shards; i++) {
           spawn(new RGWReadRemoteMDLogShardInfoCR(sync_env->store, sync_env->http_manager, sync_env->async_rados, i, &shards_info[i]), false);
@@ -597,7 +597,7 @@ public:
       drain_all_but(1); /* the lease cr still needs to run */
 
       yield {
-        report->set_status("updating sync status");
+        set_status("updating sync status");
         for (int i = 0; i < (int)status.num_shards; i++) {
          rgw_meta_sync_marker marker;
           RGWMetadataLogInfo& info = shards_info[i];
@@ -609,13 +609,13 @@ public:
         }
       }
       yield {
-        report->set_status("changing sync state: build full sync maps");
+        set_status("changing sync state: build full sync maps");
        status.state = rgw_meta_sync_info::StateBuildingFullSyncMaps;
         RGWRados *store = sync_env->store;
         call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store, store->get_zone_params().log_pool,
                                 sync_env->status_oid(), status));
       }
-      report->set_status("drop lock lease");
+      set_status("drop lock lease");
       yield lease_cr->go_down();
       while (collect(&ret)) {
        if (ret < 0) {
@@ -746,7 +746,7 @@ public:
 
     reenter(this) {
       yield {
-        report->set_status(string("acquiring lock (") + sync_env->status_oid() + ")");
+        set_status(string("acquiring lock (") + sync_env->status_oid() + ")");
        uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
         string lock_name = "sync_lock";
        lease_cr = new RGWContinuousLeaseCR(sync_env->async_rados, sync_env->store, sync_env->store->get_zone_params().log_pool, sync_env->status_oid(),
@@ -757,7 +757,7 @@ public:
       while (!lease_cr->is_locked()) {
         if (lease_cr->is_done()) {
           ldout(cct, 0) << "ERROR: lease cr failed, done early " << dendl;
-          report->set_status("failed acquiring lock");
+          set_status("failed acquiring lock");
           return set_cr_error(lease_cr->get_ret_status());
         }
         set_sleeping(true);
@@ -1250,7 +1250,7 @@ public:
 #define OMAP_GET_MAX_ENTRIES 100
     int max_entries = OMAP_GET_MAX_ENTRIES;
     reenter(&full_cr) {
-      report->set_status("full_sync");
+      set_status("full_sync");
       oid = full_sync_index_shard_oid(shard_id);
       can_adjust_marker = true;
       /* grab lock */
@@ -1374,7 +1374,7 @@ public:
 
   int incremental_sync() {
     reenter(&incremental_cr) {
-      report->set_status("incremental_sync");
+      set_status("incremental_sync");
       can_adjust_marker = true;
       /* grab lock */
       if (!lease_cr) { /* could have had  a lease_cr lock from previous state */