git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
rgw: data sync error handling (for sync init)
author: Yehuda Sadeh <yehuda@redhat.com>
Tue, 16 Feb 2016 22:06:49 +0000 (14:06 -0800)
committer: Yehuda Sadeh <yehuda@redhat.com>
Thu, 18 Feb 2016 23:18:03 +0000 (15:18 -0800)
Signed-off-by: Yehuda Sadeh <yehuda@redhat.com>
src/rgw/rgw_coroutine.cc
src/rgw/rgw_cr_rados.cc
src/rgw/rgw_cr_rados.h
src/rgw/rgw_data_sync.cc

index b01be51fce883e6c5e2f0728e4b90f26ab74b72a..ba36756501c330f27f10fa5b7cd7d10e534fb368 100644 (file)
@@ -373,7 +373,6 @@ void RGWCoroutinesManager::report_error(RGWCoroutinesStack *op)
   if (err.empty()) {
     return;
   }
-#warning need to have error logging infrastructure that logs on backend
   lderr(cct) << "ERROR: failed operation: " << op->error_str() << dendl;
 }
 
index 2f8cca870a03837649cd0e8c11a647e0ee1f736e..0b5243f7dd3375b9045996ff77b8f4d056e51433 100644 (file)
@@ -388,6 +388,7 @@ int RGWOmapAppend::operate() {
         }
       }
       if (get_ret_status() < 0) {
+        ldout(cct, 0) << "ERROR: failed to store entries in omap" << dendl;
         return set_state(RGWCoroutine_Error);
       }
     }
index 97fe612b22b61b608370b50cf9747988a77e71d4..e2692741d2abc1aaf5dc66e6f14a22ba08a61ba1 100644 (file)
@@ -563,7 +563,7 @@ public:
   bool finish() {
     bool success = true;
     for (vector<RGWOmapAppend *>::iterator iter = shards.begin(); iter != shards.end(); ++iter) {
-      success &= (*iter)->finish();
+      success &= ((*iter)->finish() && (!(*iter)->is_error()));
     }
     return success;
   }
index 9927670e39eda95066adcc5c893d340ca02e30fe..d348a4850ef0a346a169e2638ce3841740f956fa 100644 (file)
@@ -447,6 +447,7 @@ class RGWListBucketIndexesCR : public RGWCoroutine {
   int num_shards;
 
   int req_ret;
+  int ret;
 
   list<string> result;
   list<string>::iterator iter;
@@ -467,7 +468,7 @@ public:
   RGWListBucketIndexesCR(RGWDataSyncEnv *_sync_env,
                          rgw_data_sync_status *_sync_status) : RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
                                                       store(sync_env->store), sync_status(_sync_status),
-                                                     req_ret(0), entries_index(NULL), i(0), failed(false) {
+                                                     req_ret(0), ret(0), entries_index(NULL), i(0), failed(false) {
     oid_prefix = datalog_sync_full_sync_index_prefix + "." + sync_env->source_zone; 
     path = "/admin/metadata/bucket.instance";
     num_shards = sync_status->sync_info.num_shards;
@@ -504,7 +505,6 @@ public:
         }
 
         num_shards = meta_info.data.get_bucket_info().num_shards;
-#warning error handling of shards
         if (num_shards > 0) {
           for (i = 0; i < num_shards; i++) {
             char buf[16];
@@ -529,15 +529,22 @@ public:
           spawn(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store, store->get_zone_params().log_pool,
                                                                 RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id), marker), true);
         }
+      } else {
+          yield call(sync_env->error_logger->log_error_cr(sync_env->conn->get_remote_id(), "data.init", "",
+                                                          EIO, string("failed to build bucket instances map")));
       }
-      int ret;
       while (collect(&ret)) {
        if (ret < 0) {
-         return set_state(RGWCoroutine_Error);
+          yield call(sync_env->error_logger->log_error_cr(sync_env->conn->get_remote_id(), "data.init", "",
+                                                          -ret, string("failed to store sync status: ") + cpp_strerror(-ret)));
+         req_ret = ret;
        }
         yield;
       }
       drain_all();
+      if (req_ret < 0) {
+        yield return set_cr_error(req_ret);
+      }
       yield return set_cr_done();
     }
     return 0;