refactor TableCache Get/NewIterator for single exit points

Summary:
These functions were too complicated to modify safely because they had exit points everywhere, so I refactored them to use single exit points.

Note: please review urgently — this is a prerequisite for fixing the 5.0 performance regression.
Closes https://github.com/facebook/rocksdb/pull/1534

Differential Revision: D4198972

Pulled By: ajkr

fbshipit-source-id: 04ebfb7
main
Andrew Kryczka 9 years ago committed by Facebook Github Bot
parent f39452e81f
commit 635a7bd1ad
  1. 78
      db/table_cache.cc

@ -174,27 +174,24 @@ InternalIterator* TableCache::NewIterator(
bool skip_filters, int level) { bool skip_filters, int level) {
PERF_TIMER_GUARD(new_table_iterator_nanos); PERF_TIMER_GUARD(new_table_iterator_nanos);
Status s;
if (range_del_agg != nullptr && !options.ignore_range_deletions) { if (range_del_agg != nullptr && !options.ignore_range_deletions) {
std::unique_ptr<InternalIterator> range_del_iter(NewRangeDeletionIterator( std::unique_ptr<InternalIterator> range_del_iter(NewRangeDeletionIterator(
options, icomparator, fd, file_read_hist, skip_filters, level)); options, icomparator, fd, file_read_hist, skip_filters, level));
Status s = range_del_iter->status(); s = range_del_iter->status();
if (s.ok()) { if (s.ok()) {
s = range_del_agg->AddTombstones(std::move(range_del_iter)); s = range_del_agg->AddTombstones(std::move(range_del_iter));
} }
if (!s.ok()) {
return NewErrorInternalIterator(s, arena);
}
} }
bool create_new_table_reader = false;
TableReader* table_reader = nullptr;
Cache::Handle* handle = nullptr;
if (s.ok()) {
if (table_reader_ptr != nullptr) { if (table_reader_ptr != nullptr) {
*table_reader_ptr = nullptr; *table_reader_ptr = nullptr;
} }
TableReader* table_reader = nullptr;
Cache::Handle* handle = nullptr;
size_t readahead = 0; size_t readahead = 0;
bool create_new_table_reader = false;
if (for_compaction) { if (for_compaction) {
if (ioptions_.new_table_reader_for_compaction_inputs) { if (ioptions_.new_table_reader_for_compaction_inputs) {
readahead = ioptions_.compaction_readahead_size; readahead = ioptions_.compaction_readahead_size;
@ -207,28 +204,27 @@ InternalIterator* TableCache::NewIterator(
if (create_new_table_reader) { if (create_new_table_reader) {
unique_ptr<TableReader> table_reader_unique_ptr; unique_ptr<TableReader> table_reader_unique_ptr;
Status s = GetTableReader( s = GetTableReader(
env_options, icomparator, fd, true /* sequential_mode */, readahead, env_options, icomparator, fd, true /* sequential_mode */, readahead,
!for_compaction /* record stats */, nullptr, &table_reader_unique_ptr, !for_compaction /* record stats */, nullptr, &table_reader_unique_ptr,
false /* skip_filters */, level); false /* skip_filters */, level);
if (!s.ok()) { if (s.ok()) {
return NewErrorInternalIterator(s, arena);
}
table_reader = table_reader_unique_ptr.release(); table_reader = table_reader_unique_ptr.release();
}
} else { } else {
table_reader = fd.table_reader; table_reader = fd.table_reader;
if (table_reader == nullptr) { if (table_reader == nullptr) {
Status s = FindTable(env_options, icomparator, fd, &handle, s = FindTable(env_options, icomparator, fd, &handle,
options.read_tier == kBlockCacheTier /* no_io */, options.read_tier == kBlockCacheTier /* no_io */,
!for_compaction /* record read_stats */, !for_compaction /* record read_stats */, file_read_hist,
file_read_hist, skip_filters, level); skip_filters, level);
if (!s.ok()) { if (s.ok()) {
return NewErrorInternalIterator(s, arena);
}
table_reader = GetTableReaderFromHandle(handle); table_reader = GetTableReaderFromHandle(handle);
} }
} }
}
}
if (s.ok()) {
InternalIterator* result = InternalIterator* result =
table_reader->NewIterator(options, arena, skip_filters); table_reader->NewIterator(options, arena, skip_filters);
if (create_new_table_reader) { if (create_new_table_reader) {
@ -245,6 +241,11 @@ InternalIterator* TableCache::NewIterator(
*table_reader_ptr = table_reader; *table_reader_ptr = table_reader;
} }
return result; return result;
}
if (handle != nullptr) {
ReleaseHandle(handle);
}
return NewErrorInternalIterator(s);
} }
InternalIterator* TableCache::NewRangeDeletionIterator( InternalIterator* TableCache::NewRangeDeletionIterator(
@ -281,28 +282,26 @@ Status TableCache::Get(const ReadOptions& options,
const FileDescriptor& fd, const Slice& k, const FileDescriptor& fd, const Slice& k,
GetContext* get_context, HistogramImpl* file_read_hist, GetContext* get_context, HistogramImpl* file_read_hist,
bool skip_filters, int level) { bool skip_filters, int level) {
Status s;
if (get_context->range_del_agg() != nullptr && if (get_context->range_del_agg() != nullptr &&
!options.ignore_range_deletions) { !options.ignore_range_deletions) {
std::unique_ptr<InternalIterator> range_del_iter(NewRangeDeletionIterator( std::unique_ptr<InternalIterator> range_del_iter(NewRangeDeletionIterator(
options, internal_comparator, fd, file_read_hist, skip_filters, level)); options, internal_comparator, fd, file_read_hist, skip_filters, level));
Status s = range_del_iter->status(); s = range_del_iter->status();
if (s.ok()) { if (s.ok()) {
s = get_context->range_del_agg()->AddTombstones( s = get_context->range_del_agg()->AddTombstones(
std::move(range_del_iter)); std::move(range_del_iter));
} }
if (!s.ok()) {
return s;
}
} }
TableReader* t = fd.table_reader; TableReader* t = fd.table_reader;
Status s;
Cache::Handle* handle = nullptr; Cache::Handle* handle = nullptr;
std::string* row_cache_entry = nullptr; std::string* row_cache_entry = nullptr;
bool done = false;
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE
IterKey row_cache_key; IterKey row_cache_key;
std::string row_cache_entry_buffer; std::string row_cache_entry_buffer;
if (s.ok()) {
// Check row cache if enabled. Since row cache does not currently store // Check row cache if enabled. Since row cache does not currently store
// sequence numbers, we cannot use it if we need to fetch the sequence. // sequence numbers, we cannot use it if we need to fetch the sequence.
if (ioptions_.row_cache && !get_context->NeedToReadSequence()) { if (ioptions_.row_cache && !get_context->NeedToReadSequence()) {
@ -324,21 +323,23 @@ Status TableCache::Get(const ReadOptions& options,
row_cache_key.TrimAppend(row_cache_key.Size(), user_key.data(), row_cache_key.TrimAppend(row_cache_key.Size(), user_key.data(),
user_key.size()); user_key.size());
if (auto row_handle = ioptions_.row_cache->Lookup(row_cache_key.GetKey())) { if (auto row_handle =
ioptions_.row_cache->Lookup(row_cache_key.GetKey())) {
auto found_row_cache_entry = static_cast<const std::string*>( auto found_row_cache_entry = static_cast<const std::string*>(
ioptions_.row_cache->Value(row_handle)); ioptions_.row_cache->Value(row_handle));
replayGetContextLog(*found_row_cache_entry, user_key, get_context); replayGetContextLog(*found_row_cache_entry, user_key, get_context);
ioptions_.row_cache->Release(row_handle); ioptions_.row_cache->Release(row_handle);
RecordTick(ioptions_.statistics, ROW_CACHE_HIT); RecordTick(ioptions_.statistics, ROW_CACHE_HIT);
return Status::OK(); done = true;
} } else {
// Not found, setting up the replay log. // Not found, setting up the replay log.
RecordTick(ioptions_.statistics, ROW_CACHE_MISS); RecordTick(ioptions_.statistics, ROW_CACHE_MISS);
row_cache_entry = &row_cache_entry_buffer; row_cache_entry = &row_cache_entry_buffer;
} }
}
}
#endif // ROCKSDB_LITE #endif // ROCKSDB_LITE
if (!done && s.ok()) {
if (!t) { if (!t) {
s = FindTable(env_options_, internal_comparator, fd, &handle, s = FindTable(env_options_, internal_comparator, fd, &handle,
options.read_tier == kBlockCacheTier /* no_io */, options.read_tier == kBlockCacheTier /* no_io */,
@ -352,18 +353,16 @@ Status TableCache::Get(const ReadOptions& options,
get_context->SetReplayLog(row_cache_entry); // nullptr if no cache. get_context->SetReplayLog(row_cache_entry); // nullptr if no cache.
s = t->Get(options, k, get_context, skip_filters); s = t->Get(options, k, get_context, skip_filters);
get_context->SetReplayLog(nullptr); get_context->SetReplayLog(nullptr);
if (handle != nullptr) {
ReleaseHandle(handle);
}
} else if (options.read_tier == kBlockCacheTier && s.IsIncomplete()) { } else if (options.read_tier == kBlockCacheTier && s.IsIncomplete()) {
// Couldn't find Table in cache but treat as kFound if no_io set // Couldn't find Table in cache but treat as kFound if no_io set
get_context->MarkKeyMayExist(); get_context->MarkKeyMayExist();
return Status::OK(); s = Status::OK();
done = true;
}
} }
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE
// Put the replay log in row cache only if something was found. // Put the replay log in row cache only if something was found.
if (s.ok() && row_cache_entry && !row_cache_entry->empty()) { if (!done && s.ok() && row_cache_entry && !row_cache_entry->empty()) {
size_t charge = size_t charge =
row_cache_key.Size() + row_cache_entry->size() + sizeof(std::string); row_cache_key.Size() + row_cache_entry->size() + sizeof(std::string);
void* row_ptr = new std::string(std::move(*row_cache_entry)); void* row_ptr = new std::string(std::move(*row_cache_entry));
@ -372,6 +371,9 @@ Status TableCache::Get(const ReadOptions& options,
} }
#endif // ROCKSDB_LITE #endif // ROCKSDB_LITE
if (handle != nullptr) {
ReleaseHandle(handle);
}
return s; return s;
} }

Loading…
Cancel
Save