osd/ReplicatedBackend: encode log_entries only once for multiple MOSDRepOps 20201/head
author    Jianpeng Ma <jianpeng.ma@intel.com>    Fri, 2 Feb 2018 16:35:15 +0000 (00:35 +0800)
committer Jianpeng Ma <jianpeng.ma@intel.com>    Fri, 2 Feb 2018 16:35:15 +0000 (00:35 +0800)
Encoding log_entries produces an identical buffer for every MOSDRepOp sent to the
replicas, and the encode itself is a heavy operation, so do it once in issue_op()
and reuse the result for each subop.

Signed-off-by: Jianpeng Ma <jianpeng.ma@intel.com>
src/osd/ReplicatedBackend.cc
src/osd/ReplicatedBackend.h
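
The pattern is easiest to see in isolation. Below is a minimal, self-contained C++
sketch of the same idea: serialize the shared log entries once and hand the same
buffer to every peer, instead of re-encoding per subop. LogEntry, encode_entries,
and replica_osds are hypothetical stand-ins for illustration only; they are not
Ceph's real pg_log_entry_t, encode(), or acting/backfill shard set.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for a log entry; only here to keep the sketch
// self-contained.
struct LogEntry {
  uint64_t version;
  std::string oid;
};

// Hypothetical encoder standing in for Ceph's encode(log_entries, bl):
// appends a trivial serialization of every entry to the buffer.
static void encode_entries(const std::vector<LogEntry>& entries,
                           std::string& buf) {
  for (const auto& e : entries) {
    buf += std::to_string(e.version);
    buf += ':';
    buf += e.oid;
    buf += ';';
  }
}

int main() {
  std::vector<LogEntry> log_entries = {{1, "obj_a"}, {2, "obj_b"}};
  std::vector<int> replica_osds = {3, 7, 9};   // peers other than ourselves

  // Before the change: one encode per replica, i.e. N identical encodes.
  for (int osd : replica_osds) {
    std::string logbl;
    encode_entries(log_entries, logbl);        // repeated, identical work
    std::cout << "send to osd." << osd << " payload=" << logbl << "\n";
  }

  // After the change: encode exactly once, reuse the buffer for every subop.
  std::string logs;
  encode_entries(log_entries, logs);           // done once
  for (int osd : replica_osds) {
    const std::string& logbl = logs;           // each subop just reuses the bytes
    std::cout << "send to osd." << osd << " payload=" << logbl << "\n";
  }
  return 0;
}

In the actual patch below, the once-encoded bufferlist ("logs") is built in
issue_op(), and generate_subop() now takes that bufferlist and assigns it to
wr->logbl instead of re-encoding the vector for every peer.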

diff --git a/src/osd/ReplicatedBackend.cc b/src/osd/ReplicatedBackend.cc
index 10a7aadaf692fb13f3490c023aef661d28895064..6f0fcf4f5ca3a911baa5cb8ea265955000cc467c 100644
@@ -961,7 +961,7 @@ Message * ReplicatedBackend::generate_subop(
   eversion_t pg_roll_forward_to,
   hobject_t new_temp_oid,
   hobject_t discard_temp_oid,
-  const vector<pg_log_entry_t> &log_entries,
+  const bufferlist &log_entries,
   boost::optional<pg_hit_set_history_t> &hset_hist,
   ObjectStore::Transaction &op_t,
   pg_shard_t peer,
@@ -991,7 +991,7 @@ Message * ReplicatedBackend::generate_subop(
     wr->get_header().data_off = op_t.get_data_alignment();
   }
 
-  encode(log_entries, wr->logbl);
+  wr->logbl = log_entries;
 
   if (pinfo.is_incomplete())
     wr->pg_stats = pinfo.stats;  // reflects backfill progress
@@ -1021,44 +1021,44 @@ void ReplicatedBackend::issue_op(
   InProgressOp *op,
   ObjectStore::Transaction &op_t)
 {
-  if (op->op) {
-    op->op->pg_trace.event("issue replication ops");
-    if (parent->get_actingbackfill_shards().size() > 1) {
+  if (parent->get_actingbackfill_shards().size() > 1) {
+    if (op->op) {
+      op->op->pg_trace.event("issue replication ops");
       ostringstream ss;
       set<pg_shard_t> replicas = parent->get_actingbackfill_shards();
       replicas.erase(parent->whoami_shard());
       ss << "waiting for subops from " << replicas;
       op->op->mark_sub_op_sent(ss.str());
     }
-  }
 
-  for (set<pg_shard_t>::const_iterator i =
-        parent->get_actingbackfill_shards().begin();
-       i != parent->get_actingbackfill_shards().end();
-       ++i) {
-    if (*i == parent->whoami_shard()) continue;
-    pg_shard_t peer = *i;
-    const pg_info_t &pinfo = parent->get_shard_info().find(peer)->second;
-
-    Message *wr;
-    wr = generate_subop(
-      soid,
-      at_version,
-      tid,
-      reqid,
-      pg_trim_to,
-      pg_roll_forward_to,
-      new_temp_oid,
-      discard_temp_oid,
-      log_entries,
-      hset_hist,
-      op_t,
-      peer,
-      pinfo);
-    if (op->op && op->op->pg_trace)
-      wr->trace.init("replicated op", nullptr, &op->op->pg_trace);
-    get_parent()->send_message_osd_cluster(
-      peer.osd, wr, get_osdmap()->get_epoch());
+    // avoid doing the same work in generate_subop
+    bufferlist logs;
+    encode(log_entries, logs);
+
+    for (const auto& shard : get_parent()->get_actingbackfill_shards()) {
+      if (shard == parent->whoami_shard()) continue;
+      const pg_info_t &pinfo = parent->get_shard_info().find(shard)->second;
+
+      Message *wr;
+      wr = generate_subop(
+         soid,
+         at_version,
+         tid,
+         reqid,
+         pg_trim_to,
+         pg_roll_forward_to,
+         new_temp_oid,
+         discard_temp_oid,
+         logs,
+         hset_hist,
+         op_t,
+         shard,
+         pinfo);
+      if (op->op && op->op->pg_trace)
+       wr->trace.init("replicated op", nullptr, &op->op->pg_trace);
+      get_parent()->send_message_osd_cluster(
+         shard.osd, wr, get_osdmap()->get_epoch());
+    }
   }
 }
 
diff --git a/src/osd/ReplicatedBackend.h b/src/osd/ReplicatedBackend.h
index 7c29bba725d1ddc755ed74e410f5211d944605ce..f2a648793273da372827fcd3ed71bf43629a56ff 100644
@@ -383,7 +383,7 @@ private:
     eversion_t pg_roll_forward_to,
     hobject_t new_temp_oid,
     hobject_t discard_temp_oid,
-    const vector<pg_log_entry_t> &log_entries,
+    const bufferlist &log_entries,
     boost::optional<pg_hit_set_history_t> &hset_history,
     ObjectStore::Transaction &op_t,
     pg_shard_t peer,