Summary:
Move uncommon code paths in RangeDelAggregator::ShouldDelete() and IterKey::EnlargeBufferIfNeeded() to separate functions, so that the inlined structure can be better optimized.
Optimize these because they show up in CPU profiling, though minimally. The performance impact is really hard to measure. I ran db_bench with the readseq benchmark against an in-memory DB many times. The variation is big, but it seems to show a 1% improvement.
Closes https://github.com/facebook/rocksdb/pull/2877
Differential Revision:
D5828123
Pulled By: siying
fbshipit-source-id:
41a49e229f91e9f8409f85cc6f0dc70e31334e4b
end_ = dst;
}
+void IterKey::EnlargeBuffer(size_t key_size) {
+ // If size is smaller than buffer size, continue using current buffer,
+ // or the static allocated one, as default
+ assert(key_size > buf_size_);
+ // Need to enlarge the buffer.
+ ResetBuffer();
+ buf_ = new char[key_size];
+ buf_size_ = key_size;
+}
} // namespace rocksdb
// If size is smaller than buffer size, continue using current buffer,
// or the static allocated one, as default
if (key_size > buf_size_) {
- // Need to enlarge the buffer.
- ResetBuffer();
- buf_ = new char[key_size];
- buf_size_ = key_size;
+ EnlargeBuffer(key_size);
}
}
+ void EnlargeBuffer(size_t key_size);
+
// No copying allowed
IterKey(const IterKey&) = delete;
void operator=(const IterKey&) = delete;
rep_->pinned_iters_mgr_.StartPinning();
}
-bool RangeDelAggregator::ShouldDelete(
+bool RangeDelAggregator::ShouldDeleteImpl(
const Slice& internal_key, RangeDelAggregator::RangePositioningMode mode) {
- if (rep_ == nullptr) {
- return false;
- }
+ assert(rep_ != nullptr);
ParsedInternalKey parsed;
if (!ParseInternalKey(internal_key, &parsed)) {
assert(false);
return ShouldDelete(parsed, mode);
}
-bool RangeDelAggregator::ShouldDelete(
+bool RangeDelAggregator::ShouldDeleteImpl(
const ParsedInternalKey& parsed,
RangeDelAggregator::RangePositioningMode mode) {
assert(IsValueType(parsed.type));
- if (rep_ == nullptr) {
- return false;
- }
+ assert(rep_ != nullptr);
auto& positional_tombstone_map = GetPositionalTombstoneMap(parsed.sequence);
const auto& tombstone_map = positional_tombstone_map.raw_map;
if (tombstone_map.empty()) {
// the deletion whose interval contains this key. Otherwise, its
// value must be kFullScan indicating linear scan from beginning..
bool ShouldDelete(const ParsedInternalKey& parsed,
- RangePositioningMode mode = kFullScan);
+ RangePositioningMode mode = kFullScan) {
+ if (rep_ == nullptr) {
+ return false;
+ }
+ return ShouldDeleteImpl(parsed, mode);
+ }
bool ShouldDelete(const Slice& internal_key,
- RangePositioningMode mode = kFullScan);
+ RangePositioningMode mode = kFullScan) {
+ if (rep_ == nullptr) {
+ return false;
+ }
+ return ShouldDeleteImpl(internal_key, mode);
+ }
+ bool ShouldDeleteImpl(const ParsedInternalKey& parsed,
+ RangePositioningMode mode = kFullScan);
+ bool ShouldDeleteImpl(const Slice& internal_key,
+ RangePositioningMode mode = kFullScan);
+
bool ShouldAddTombstones(bool bottommost_level = false);
// Adds tombstones to the tombstone aggregation structure maintained by this