git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
os: Transaction uses append_hole() to minimize bl::_buffers inflation. 24459/head
authorRadoslaw Zarzynski <rzarzyns@redhat.com>
Sun, 7 Oct 2018 21:56:17 +0000 (23:56 +0200)
committerRadoslaw Zarzynski <rzarzyns@redhat.com>
Wed, 31 Oct 2018 22:50:10 +0000 (23:50 +0100)
This is useful because the previously used `::append(const ptr&)` variant
does not take any measures (such as coalescing adjacent bptrs) to prevent
inflation of a bufferlist's internal structures. That is, each call
resulted in a dynamic node allocation for the `_buffers` list.
A secondary effect of this behavior is increased iteration cost, driven
by the large number of elements likely scattered across memory.

Signed-off-by: Radoslaw Zarzynski <rzarzyns@redhat.com>
src/include/buffer.h
src/os/ObjectStore.h

index 60a5cca99520e0866729127b6b2eec5b5d0788e6..0faa880adf95e09f059dbfdbdcc8154c9ecf44a5 100644 (file)
@@ -651,6 +651,9 @@ namespace buffer CEPH_BUFFER_API {
        memcpy(pos, src, len);
        pos += len;
       }
+      char* c_str() {
+        return pos;
+      }
     };
     // The contiguous_filler is supposed to be not costlier than a single
     // pointer. Keep it dumb, please.
index 86d49894d030407a2d053b17ea4bd79d1daea8db..3c2ced9af05152c825f64d8e40962dc43a29a234 100644 (file)
@@ -446,8 +446,6 @@ public:
     bufferlist data_bl;
     bufferlist op_bl;
 
-    bufferptr op_ptr;
-
     list<Context *> on_applied;
     list<Context *> on_commit;
     list<Context *> on_applied_sync;
@@ -472,7 +470,6 @@ public:
       object_id(other.object_id),
       data_bl(std::move(other.data_bl)),
       op_bl(std::move(other.op_bl)),
-      op_ptr(std::move(other.op_ptr)),
       on_applied(std::move(other.on_applied)),
       on_commit(std::move(other.on_commit)),
       on_applied_sync(std::move(other.on_applied_sync)) {
@@ -488,7 +485,6 @@ public:
       object_id = other.object_id;
       data_bl = std::move(other.data_bl);
       op_bl = std::move(other.op_bl);
-      op_ptr = std::move(other.op_ptr);
       on_applied = std::move(other.on_applied);
       on_commit = std::move(other.on_commit);
       on_applied_sync = std::move(other.on_applied_sync);
@@ -747,10 +743,12 @@ public:
 
       //the other.op_bl SHOULD NOT be changes during append operation,
       //we use additional bufferlist to avoid this problem
-      bufferptr other_op_bl_ptr(other.op_bl.length());
-      other.op_bl.copy(0, other.op_bl.length(), other_op_bl_ptr.c_str());
       bufferlist other_op_bl;
-      other_op_bl.append(other_op_bl_ptr);
+      {
+        bufferptr other_op_bl_ptr(other.op_bl.length());
+        other.op_bl.copy(0, other.op_bl.length(), other_op_bl_ptr.c_str());
+        other_op_bl.append(std::move(other_op_bl_ptr));
+      }
 
       //update other_op_bl with cm & om
       //When the other is appended to current transaction, all coll_index and
@@ -963,15 +961,12 @@ private:
      * form of seat belts for the decoder.
      */
     Op* _get_next_op() {
-      if (op_ptr.length() == 0 || op_ptr.offset() >= op_ptr.length()) {
-        op_ptr = bufferptr(sizeof(Op) * OPS_PER_PTR);
+      if (op_bl.get_append_buffer_unused_tail_length() < sizeof(Op)) {
+        op_bl.reserve(sizeof(Op) * OPS_PER_PTR);
       }
-      bufferptr ptr(op_ptr, 0, sizeof(Op));
-      op_bl.append(ptr);
-
-      op_ptr.set_offset(op_ptr.offset() + sizeof(Op));
-
-      char* p = ptr.c_str();
+      // append_hole ensures bptr merging. Even huge number of ops
+      // shouldn't result in overpopulating bl::_buffers.
+      char* const p = op_bl.append_hole(sizeof(Op)).c_str();
       memset(p, 0, sizeof(Op));
       return reinterpret_cast<Op*>(p);
     }