From cfcc54add7a20265f9d1cdeb804145c753c3394c Mon Sep 17 00:00:00 2001
From: Adam Kupczyk
Date: Fri, 24 Apr 2020 14:10:30 +0200
Subject: [PATCH] kv/RocksDBStore: Improved debug output quality during resharding.

Signed-off-by: Adam Kupczyk
---
 src/kv/RocksDBStore.cc | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/src/kv/RocksDBStore.cc b/src/kv/RocksDBStore.cc
index e61c90f0452..6279034159f 100644
--- a/src/kv/RocksDBStore.cc
+++ b/src/kv/RocksDBStore.cc
@@ -2930,6 +2930,8 @@ int RocksDBStore::reshard(const std::string& new_sharding)
   size_t keys_in_batch = 0;
   size_t bytes_per_iterator = 0;
   size_t keys_per_iterator = 0;
+  size_t keys_processed = 0;
+  size_t keys_moved = 0;
 
   rocksdb::WriteBatch* bat = nullptr;
 
@@ -2938,12 +2940,12 @@ int RocksDBStore::reshard(const std::string& new_sharding)
   size_t unittest_command = unittest_str ? atoi(unittest_str) : 0;
 
   auto flush_batch = [&]() {
-    dout(10) << "flushing batch" << dendl;
+    dout(10) << "flushing batch, " << keys_in_batch << " keys, for "
+             << bytes_in_batch << " bytes" << dendl;
     rocksdb::WriteOptions woptions;
     woptions.sync = true;
     rocksdb::Status s = db->Write(woptions, bat);
     ceph_assert(s.ok());
-    dout(25) << "processed " << keys_in_batch << " keys, for " << bytes_in_batch << " bytes" << dendl;
     bytes_in_batch = 0;
     keys_in_batch = 0;
     delete bat;
@@ -2955,7 +2957,7 @@ int RocksDBStore::reshard(const std::string& new_sharding)
                          const std::string& fixed_prefix)
   {
     int r = 0;
-    dout(10) << " column=" << (void*)handle << " prefix=" << fixed_prefix << dendl;
+    dout(5) << " column=" << (void*)handle << " prefix=" << fixed_prefix << dendl;
     rocksdb::Iterator* it;
     it = db->NewIterator(rocksdb::ReadOptions(), handle);
     ceph_assert(it);
@@ -2968,7 +2970,7 @@ int RocksDBStore::reshard(const std::string& new_sharding)
       //check if need to refresh iterator
       if (bytes_per_iterator >= 10000000 ||
           keys_per_iterator >= 10000) {
-        dout(10) << "refreshing iterator" << dendl;
+        dout(8) << "refreshing iterator" << dendl;
        bytes_per_iterator = 0;
        keys_per_iterator = 0;
        std::string raw_key_str = raw_key.ToString();
@@ -2987,6 +2989,10 @@ int RocksDBStore::reshard(const std::string& new_sharding)
        prefix = fixed_prefix;
        key = raw_key.ToString();
       }
+      keys_processed++;
+      if ((keys_processed % 10000) == 0) {
+        dout(10) << "processed " << keys_processed << " keys, moved " << keys_moved << dendl;
+      }
       std::string new_raw_key;
       rocksdb::ColumnFamilyHandle* new_handle = get_cf_handle(prefix, key);
       if (new_handle == nullptr) {
@@ -3005,11 +3011,11 @@ int RocksDBStore::reshard(const std::string& new_sharding)
       dout(25) << "moving " << (void*)handle << "/" << pretty_binary_string(raw_key.ToString())
                << " to " << (void*)new_handle << "/" << pretty_binary_string(new_raw_key)
                << " size " << value.size() << dendl;
-
+      keys_moved++;
       bytes_in_batch += new_raw_key.size() * 2 + value.size();
-      keys_in_batch ++;
+      keys_in_batch++;
       bytes_per_iterator += new_raw_key.size() * 2 + value.size();
-      keys_per_iterator ++;
+      keys_per_iterator++;
 
       //check if need to write batch
       if (bytes_in_batch >= 1000000 ||
-- 
2.39.5