Two small refactorings for better inlining

Summary:
Move the uncommon code paths in RangeDelAggregator::ShouldDelete() and IterKey::EnlargeBufferIfNeeded() into separate functions, so that the code that remains inlined can be better optimized.

These places show up in CPU profiling, though only minimally. The performance gain is hard to measure: I ran db_bench with the readseq benchmark against an in-memory DB many times; the variation is large, but it seems to show about a 1% improvement.
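
Both changes follow the same pattern: keep the hot path small enough that the compiler inlines it at every call site, and push the cold path behind an out-of-line call. A minimal sketch of that pattern, using a hypothetical SmallBuffer class rather than the actual RocksDB IterKey:

#include <cassert>
#include <cstddef>

// Hypothetical illustration of cold-path extraction; not RocksDB code.
class SmallBuffer {
 public:
  SmallBuffer() : buf_(space_), buf_size_(sizeof(space_)) {}
  ~SmallBuffer() { ResetBuffer(); }

  // Hot path: short enough that callers are likely to get it inlined.
  // The rare reallocation is pushed into EnlargeBuffer(), which is
  // declared here but defined out of line.
  void EnlargeBufferIfNeeded(size_t key_size) {
    if (key_size > buf_size_) {
      EnlargeBuffer(key_size);  // cold path, kept out of the inlined body
    }
  }

 private:
  void EnlargeBuffer(size_t key_size);  // defined below (normally in a .cc file)

  void ResetBuffer() {
    if (buf_ != space_) {
      delete[] buf_;
    }
    buf_ = space_;
    buf_size_ = sizeof(space_);
  }

  char* buf_;
  size_t buf_size_;
  char space_[32];
};

// Out-of-line definition: only reached when the buffer really must grow.
void SmallBuffer::EnlargeBuffer(size_t key_size) {
  assert(key_size > buf_size_);
  ResetBuffer();
  buf_ = new char[key_size];
  buf_size_ = key_size;
}

The RangeDelAggregator change is the same idea: the cheap rep_ == nullptr check stays in the inline ShouldDelete() wrappers in the header, while the heavier tombstone lookup lives in the out-of-line ShouldDeleteImpl().
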
Closes https://github.com/facebook/rocksdb/pull/2877

Differential Revision: D5828123

Pulled By: siying

fbshipit-source-id: 41a49e229f91e9f8409f85cc6f0dc70e31334e4b
Author: Siying Dong (committed by Facebook Github Bot)
parent ffac68367f
commit 885b1c682e
Changed files:
  db/dbformat.cc (9 changed lines)
  db/dbformat.h (7 changed lines)
  db/range_del_aggregator.cc (12 changed lines)
  db/range_del_aggregator.h (19 changed lines)

db/dbformat.cc
@@ -174,4 +174,13 @@ LookupKey::LookupKey(const Slice& _user_key, SequenceNumber s) {
   end_ = dst;
 }
 
+void IterKey::EnlargeBuffer(size_t key_size) {
+  // If size is smaller than buffer size, continue using current buffer,
+  // or the static allocated one, as default
+  assert(key_size > buf_size_);
+  // Need to enlarge the buffer.
+  ResetBuffer();
+  buf_ = new char[key_size];
+  buf_size_ = key_size;
+}
 }  // namespace rocksdb

db/dbformat.h
@@ -504,13 +504,12 @@ class IterKey {
     // If size is smaller than buffer size, continue using current buffer,
     // or the static allocated one, as default
     if (key_size > buf_size_) {
-      // Need to enlarge the buffer.
-      ResetBuffer();
-      buf_ = new char[key_size];
-      buf_size_ = key_size;
+      EnlargeBuffer(key_size);
     }
   }
 
+  void EnlargeBuffer(size_t key_size);
+
   // No copying allowed
   IterKey(const IterKey&) = delete;
   void operator=(const IterKey&) = delete;

db/range_del_aggregator.cc
@@ -43,11 +43,9 @@ void RangeDelAggregator::InitRep(const std::vector<SequenceNumber>& snapshots) {
   rep_->pinned_iters_mgr_.StartPinning();
 }
 
-bool RangeDelAggregator::ShouldDelete(
+bool RangeDelAggregator::ShouldDeleteImpl(
     const Slice& internal_key, RangeDelAggregator::RangePositioningMode mode) {
-  if (rep_ == nullptr) {
-    return false;
-  }
+  assert(rep_ != nullptr);
   ParsedInternalKey parsed;
   if (!ParseInternalKey(internal_key, &parsed)) {
     assert(false);
@@ -55,13 +53,11 @@ bool RangeDelAggregator::ShouldDelete(
   return ShouldDelete(parsed, mode);
 }
 
-bool RangeDelAggregator::ShouldDelete(
+bool RangeDelAggregator::ShouldDeleteImpl(
     const ParsedInternalKey& parsed,
     RangeDelAggregator::RangePositioningMode mode) {
   assert(IsValueType(parsed.type));
-  if (rep_ == nullptr) {
-    return false;
-  }
+  assert(rep_ != nullptr);
   auto& positional_tombstone_map = GetPositionalTombstoneMap(parsed.sequence);
   const auto& tombstone_map = positional_tombstone_map.raw_map;
   if (tombstone_map.empty()) {

db/range_del_aggregator.h
@@ -74,9 +74,24 @@ class RangeDelAggregator {
   // the deletion whose interval contains this key. Otherwise, its
   // value must be kFullScan indicating linear scan from beginning..
   bool ShouldDelete(const ParsedInternalKey& parsed,
-                    RangePositioningMode mode = kFullScan);
+                    RangePositioningMode mode = kFullScan) {
+    if (rep_ == nullptr) {
+      return false;
+    }
+    return ShouldDeleteImpl(parsed, mode);
+  }
   bool ShouldDelete(const Slice& internal_key,
-                    RangePositioningMode mode = kFullScan);
+                    RangePositioningMode mode = kFullScan) {
+    if (rep_ == nullptr) {
+      return false;
+    }
+    return ShouldDeleteImpl(internal_key, mode);
+  }
+  bool ShouldDeleteImpl(const ParsedInternalKey& parsed,
+                        RangePositioningMode mode = kFullScan);
+  bool ShouldDeleteImpl(const Slice& internal_key,
+                        RangePositioningMode mode = kFullScan);
+
   bool ShouldAddTombstones(bool bottommost_level = false);
 
   // Adds tombstones to the tombstone aggregation structure maintained by this
