git-server-git.apps.pok.os.sepia.ceph.com Git - rocksdb.git/commitdiff
Fix hang with large write batches and column families.
author: Venkatesh Radhakrishnan <rven@fb.com>
Fri, 1 May 2015 22:41:50 +0000 (15:41 -0700)
committer: Igor Canadi <icanadi@fb.com>
Fri, 1 May 2015 23:12:48 +0000 (16:12 -0700)
Summary:
This diff fixes a hang reported by a Github user.
https://github.com/facebook/rocksdb/issues/595#issuecomment-96983273
Multiple large write batches with column families cause a hang.
The issue was caused by not doing flushes/compaction when the
write controller was stopped.

Test Plan: Create a DBTest from the user's test case

Reviewers: igor

Reviewed By: igor

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D37929

db/db_impl.cc
db/db_test.cc

index 6de8535762c170cda2135041ae7c727a2f72521b..547cfe997f8a9e159248891fca4500e5966e0671 100644 (file)
@@ -3131,6 +3131,11 @@ Status DBImpl::Write(const WriteOptions& write_options, WriteBatch* my_batch) {
 
   if (UNLIKELY(status.ok()) &&
       (write_controller_.IsStopped() || write_controller_.GetDelay() > 0)) {
+    // If writer is stopped, we need to get it going,
+    // so schedule flushes/compactions
+    if (context.schedule_bg_work_) {
+      MaybeScheduleFlushOrCompaction();
+    }
     status = DelayWrite(expiration_time);
   }
 
index 3c013e756926b8886bdc17ef2f26dc0a16f3220d..8dd11b0d9d4fa048c0df21d42eaa798a81bf6b0c 100644 (file)
@@ -11982,6 +11982,38 @@ TEST_F(DBTest, HugeNumberOfLevels) {
   ASSERT_OK(db_->CompactRange(nullptr, nullptr));
 }
 
+// Github issue #595
+// Large write batch with column families
+TEST_F(DBTest, LargeBatchWithColumnFamilies) {
+  Options options;
+  options.env = env_;
+  options = CurrentOptions(options);
+  options.write_buffer_size = 100000;  // Small write buffer
+  CreateAndReopenWithCF({"pikachu"}, options);
+  int64_t j = 0;
+  for (int i = 0; i < 5; i++) {
+    for (int pass = 1; pass <= 3; pass++) {
+      WriteBatch batch;
+      size_t write_size = 1024 * 1024 * (5 + i);
+      fprintf(stderr, "prepare: %ld MB, pass:%d\n", (write_size / 1024 / 1024),
+              pass);
+      for (;;) {
+        std::string data(3000, j++ % 127 + 20);
+        data += std::to_string(j);
+        batch.Put(handles_[0], Slice(data), Slice(data));
+        if (batch.GetDataSize() > write_size) {
+          break;
+        }
+      }
+      fprintf(stderr, "write: %ld MB\n", (batch.GetDataSize() / 1024 / 1024));
+      ASSERT_OK(dbfull()->Write(WriteOptions(), &batch));
+      fprintf(stderr, "done\n");
+    }
+  }
+  // make sure we can re-open it.
+  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
+}
+
 }  // namespace rocksdb
 
 int main(int argc, char** argv) {