Fix FilterBench when RTTI=0 (#6732)

Summary:
The dynamic_cast in the filter benchmark causes release-mode builds to fail because they are compiled without RTTI. Replace it with static_cast_with_check.

Signed-off-by: Derrick Pallas <derrick@pallas.us>

Addition by peterd: remove the unnecessary second template argument from all static_cast_with_check call sites.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6732

Reviewed By: ltamasi

Differential Revision: D21304260

Pulled By: pdillinger

fbshipit-source-id: 6e8eb437c4ca5a16dbbfa4053d67c4ad55f1608c
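
Note on the helper: static_cast_with_check (declared in util/cast_util.h) downcasts with a plain static_cast in release builds and, in debug builds where RTTI is available, double-checks the result against dynamic_cast. The sketch below is only an illustrative approximation of that shape, not the exact RocksDB definition; the RTTI feature-test macros and the demo DB/DBImpl types are assumptions for the example. Because the source type is deduced from the argument, call sites only need to name the destination type, which is what the "remove unnecessary 2nd template arg" cleanup relies on.

    #include <cassert>

    // Illustrative approximation of a checked downcast helper (see
    // util/cast_util.h for the real one). Release builds get a plain
    // static_cast; debug builds additionally verify the cast with
    // dynamic_cast, but only when RTTI is enabled, so the helper still
    // compiles under -fno-rtti.
    template <class To, class From>
    inline To* static_cast_with_check(From* from) {
      To* to = static_cast<To*>(from);
    #if !defined(NDEBUG) && (defined(__GXX_RTTI) || defined(_CPPRTTI))
      assert(to == dynamic_cast<To*>(from));  // debug-only sanity check
    #endif
      return to;
    }

    // Hypothetical types, just to show a call site: only the destination
    // type is spelled out; the source type is deduced.
    struct DB { virtual ~DB() {} };
    struct DBImpl : DB { void Flush() {} };

    int main() {
      DBImpl impl;
      DB* db = &impl;
      static_cast_with_check<DBImpl>(db)->Flush();
      return 0;
    }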
26 changed files (lines changed in parentheses):

  1. db/convenience.cc (4)
  2. db/db_impl/db_impl.cc (4)
  3. db/db_impl/db_impl_compaction_flush.cc (5)
  4. db/table_cache.cc (3)
  5. db/wal_manager.cc (6)
  6. db/write_batch.cc (2)
  7. env/mock_env.cc (3)
  8. monitoring/histogram.cc (3)
  9. monitoring/histogram_windowing.cc (4)
  10. options/cf_options.cc (4)
  11. tools/db_bench_tool.cc (7)
  12. tools/ldb_cmd.cc (6)
  13. util/filter_bench.cc (5)
  14. utilities/blob_db/blob_db_impl.cc (2)
  15. utilities/transactions/optimistic_transaction.cc (6)
  16. utilities/transactions/pessimistic_transaction.cc (5)
  17. utilities/transactions/pessimistic_transaction_db.cc (9)
  18. utilities/transactions/pessimistic_transaction_db.h (3)
  19. utilities/transactions/transaction_base.cc (2)
  20. utilities/transactions/transaction_lock_mgr.cc (3)
  21. utilities/transactions/write_prepared_txn.cc (3)
  22. utilities/transactions/write_prepared_txn_db.cc (17)
  23. utilities/transactions/write_prepared_txn_db.h (7)
  24. utilities/transactions/write_unprepared_txn.cc (6)
  25. utilities/transactions/write_unprepared_txn_db.cc (8)
  26. utilities/write_batch_with_index/write_batch_with_index.cc (12)

@@ -14,7 +14,7 @@
 namespace ROCKSDB_NAMESPACE {
 void CancelAllBackgroundWork(DB* db, bool wait) {
-  (static_cast_with_check<DBImpl, DB>(db->GetRootDB()))
+  (static_cast_with_check<DBImpl>(db->GetRootDB()))
       ->CancelAllBackgroundWork(wait);
 }
@@ -28,7 +28,7 @@ Status DeleteFilesInRange(DB* db, ColumnFamilyHandle* column_family,
 Status DeleteFilesInRanges(DB* db, ColumnFamilyHandle* column_family,
                            const RangePtr* ranges, size_t n,
                            bool include_end) {
-  return (static_cast_with_check<DBImpl, DB>(db->GetRootDB()))
+  return (static_cast_with_check<DBImpl>(db->GetRootDB()))
       ->DeleteFilesInRanges(column_family, ranges, n, include_end);
 }

@@ -870,9 +870,7 @@ void DBImpl::DumpStats() {
 Status DBImpl::TablesRangeTombstoneSummary(ColumnFamilyHandle* column_family,
                                            int max_entries_to_print,
                                            std::string* out_str) {
-  auto* cfh =
-      static_cast_with_check<ColumnFamilyHandleImpl, ColumnFamilyHandle>(
-          column_family);
+  auto* cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
   ColumnFamilyData* cfd = cfh->cfd();
   SuperVersion* super_version = cfd->GetReferencedSuperVersion(this);

@@ -2143,8 +2143,7 @@ void DBImpl::BGWorkFlush(void* arg) {
   IOSTATS_SET_THREAD_POOL_ID(fta.thread_pri_);
   TEST_SYNC_POINT("DBImpl::BGWorkFlush");
-  static_cast_with_check<DBImpl, DB>(fta.db_)->BackgroundCallFlush(
-      fta.thread_pri_);
+  static_cast_with_check<DBImpl>(fta.db_)->BackgroundCallFlush(fta.thread_pri_);
   TEST_SYNC_POINT("DBImpl::BGWorkFlush:done");
 }
@@ -2155,7 +2154,7 @@ void DBImpl::BGWorkCompaction(void* arg) {
   TEST_SYNC_POINT("DBImpl::BGWorkCompaction");
   auto prepicked_compaction =
       static_cast<PrepickedCompaction*>(ca.prepicked_compaction);
-  static_cast_with_check<DBImpl, DB>(ca.db)->BackgroundCallCompaction(
+  static_cast_with_check<DBImpl>(ca.db)->BackgroundCallCompaction(
       prepicked_compaction, Env::Priority::LOW);
   delete prepicked_compaction;
 }

@@ -314,8 +314,7 @@ void TableCache::CreateRowCacheKeyPrefix(const ReadOptions& options,
   // Maybe we can include the whole file ifsnapshot == fd.largest_seqno.
   if (options.snapshot != nullptr &&
       (get_context->has_callback() ||
-       static_cast_with_check<const SnapshotImpl, const Snapshot>(
-           options.snapshot)
+       static_cast_with_check<const SnapshotImpl>(options.snapshot)
            ->GetSequenceNumber() <= fd.largest_seqno)) {
     // We should consider to use options.snapshot->GetSequenceNumber()
     // instead of GetInternalKeySeqno(k), which will make the code

@@ -334,10 +334,8 @@ Status WalManager::GetSortedWalsOfType(const std::string& path,
   std::sort(
       log_files.begin(), log_files.end(),
       [](const std::unique_ptr<LogFile>& a, const std::unique_ptr<LogFile>& b) {
-        LogFileImpl* a_impl =
-            static_cast_with_check<LogFileImpl, LogFile>(a.get());
-        LogFileImpl* b_impl =
-            static_cast_with_check<LogFileImpl, LogFile>(b.get());
+        LogFileImpl* a_impl = static_cast_with_check<LogFileImpl>(a.get());
+        LogFileImpl* b_impl = static_cast_with_check<LogFileImpl>(b.get());
         return *a_impl < *b_impl;
       });
   return status;

@@ -1281,7 +1281,7 @@ class MemTableInserter : public WriteBatch::Handler {
         ignore_missing_column_families_(ignore_missing_column_families),
         recovering_log_number_(recovering_log_number),
         log_number_ref_(0),
-        db_(static_cast_with_check<DBImpl, DB>(db)),
+        db_(static_cast_with_check<DBImpl>(db)),
         concurrent_memtable_writes_(concurrent_memtable_writes),
         post_info_created_(false),
         has_valid_writes_(has_valid_writes),


@@ -701,8 +701,7 @@ Status MockEnv::LockFile(const std::string& fname, FileLock** flock) {
 }
 Status MockEnv::UnlockFile(FileLock* flock) {
-  std::string fn =
-      static_cast_with_check<MockEnvFileLock, FileLock>(flock)->FileName();
+  std::string fn = static_cast_with_check<MockEnvFileLock>(flock)->FileName();
   {
     MutexLock lock(&mutex_);
     if (file_map_.find(fn) != file_map_.end()) {

@@ -251,8 +251,7 @@ void HistogramImpl::Add(uint64_t value) {
 void HistogramImpl::Merge(const Histogram& other) {
   if (strcmp(Name(), other.Name()) == 0) {
-    Merge(
-        *static_cast_with_check<const HistogramImpl, const Histogram>(&other));
+    Merge(*static_cast_with_check<const HistogramImpl>(&other));
   }
 }

@@ -65,9 +65,7 @@ void HistogramWindowingImpl::Add(uint64_t value){
 void HistogramWindowingImpl::Merge(const Histogram& other) {
   if (strcmp(Name(), other.Name()) == 0) {
-    Merge(
-        *static_cast_with_check<const HistogramWindowingImpl, const Histogram>(
-            &other));
+    Merge(*static_cast_with_check<const HistogramWindowingImpl>(&other));
   }
 }

@@ -413,7 +413,7 @@ std::unordered_map<std::string, OptionTypeInfo>
               reinterpret_cast<std::shared_ptr<TableFactory>*>(addr);
           BlockBasedTableOptions table_opts, base_opts;
           BlockBasedTableFactory* block_based_table_factory =
-              static_cast_with_check<BlockBasedTableFactory, TableFactory>(
+              static_cast_with_check<BlockBasedTableFactory>(
                   old_table_factory->get());
           if (block_based_table_factory != nullptr) {
             base_opts = block_based_table_factory->table_options();
@@ -437,7 +437,7 @@ std::unordered_map<std::string, OptionTypeInfo>
               reinterpret_cast<std::shared_ptr<TableFactory>*>(addr);
           PlainTableOptions table_opts, base_opts;
           PlainTableFactory* plain_table_factory =
-              static_cast_with_check<PlainTableFactory, TableFactory>(
+              static_cast_with_check<PlainTableFactory>(
                   old_table_factory->get());
           if (plain_table_factory != nullptr) {
             base_opts = plain_table_factory->table_options();

@@ -3287,10 +3287,9 @@ class Benchmark {
       fprintf(stdout, "STATISTICS:\n%s\n", dbstats->ToString().c_str());
     }
     if (FLAGS_simcache_size >= 0) {
-      fprintf(stdout, "SIMULATOR CACHE STATISTICS:\n%s\n",
-              static_cast_with_check<SimCache, Cache>(cache_.get())
-                  ->ToString()
-                  .c_str());
+      fprintf(
+          stdout, "SIMULATOR CACHE STATISTICS:\n%s\n",
+          static_cast_with_check<SimCache>(cache_.get())->ToString().c_str());
     }
 #ifndef ROCKSDB_LITE

@@ -1748,7 +1748,7 @@ void DBDumperCommand::DoDumpCommand() {
     if (max_keys == 0)
       break;
     if (is_db_ttl_) {
-      TtlIterator* it_ttl = static_cast_with_check<TtlIterator, Iterator>(iter);
+      TtlIterator* it_ttl = static_cast_with_check<TtlIterator>(iter);
       rawtime = it_ttl->ttl_timestamp();
      if (rawtime < ttl_start || rawtime >= ttl_end) {
        continue;
@@ -2575,7 +2575,7 @@ void ScanCommand::DoCommand() {
        it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_);
        it->Next()) {
     if (is_db_ttl_) {
-      TtlIterator* it_ttl = static_cast_with_check<TtlIterator, Iterator>(it);
+      TtlIterator* it_ttl = static_cast_with_check<TtlIterator>(it);
       int rawtime = it_ttl->ttl_timestamp();
       if (rawtime < ttl_start || rawtime >= ttl_end) {
         continue;
@@ -3416,7 +3416,7 @@ void ListFileRangeDeletesCommand::DoCommand() {
     return;
   }
-  DBImpl* db_impl = static_cast_with_check<DBImpl, DB>(db_->GetRootDB());
+  DBImpl* db_impl = static_cast_with_check<DBImpl>(db_->GetRootDB());
   std::string out_str;

@@ -23,6 +23,7 @@ int main() {
 #include "table/block_based/full_filter_block.h"
 #include "table/block_based/mock_block_based_table.h"
 #include "table/plain/plain_table_bloom.h"
+#include "util/cast_util.h"
 #include "util/gflags_compat.h"
 #include "util/hash.h"
 #include "util/random.h"
@@ -131,6 +132,7 @@ using ROCKSDB_NAMESPACE::ParsedFullFilterBlock;
 using ROCKSDB_NAMESPACE::PlainTableBloomV1;
 using ROCKSDB_NAMESPACE::Random32;
 using ROCKSDB_NAMESPACE::Slice;
+using ROCKSDB_NAMESPACE::static_cast_with_check;
 using ROCKSDB_NAMESPACE::StderrLogger;
 using ROCKSDB_NAMESPACE::mock::MockBlockBasedTableTester;
@@ -378,7 +380,8 @@ void FilterBench::Go() {
       info.filter_ = info.plain_table_bloom_->GetRawData();
     } else {
       if (!builder) {
-        builder.reset(&dynamic_cast<BuiltinFilterBitsBuilder &>(*GetBuilder()));
+        builder.reset(
+            static_cast_with_check<BuiltinFilterBitsBuilder>(GetBuilder()));
       }
       for (uint32_t i = 0; i < keys_to_add; ++i) {
         builder->AddKey(kms_[0].Get(filter_id, i));

@@ -209,7 +209,7 @@ Status BlobDBImpl::Open(std::vector<ColumnFamilyHandle*>* handles) {
   if (!s.ok()) {
     return s;
   }
-  db_impl_ = static_cast_with_check<DBImpl, DB>(db_->GetRootDB());
+  db_impl_ = static_cast_with_check<DBImpl>(db_->GetRootDB());
   // Initialize SST file <-> oldest blob file mapping if garbage collection
   // is enabled.

@@ -76,7 +76,7 @@ Status OptimisticTransaction::CommitWithSerialValidate() {
   // check whether this transaction is safe to be committed.
   OptimisticTransactionCallback callback(this);
-  DBImpl* db_impl = static_cast_with_check<DBImpl, DB>(db_->GetRootDB());
+  DBImpl* db_impl = static_cast_with_check<DBImpl>(db_->GetRootDB());
   Status s = db_impl->WriteWithCallback(
       write_options_, GetWriteBatch()->GetWriteBatch(), &callback);
@@ -92,7 +92,7 @@ Status OptimisticTransaction::CommitWithParallelValidate() {
   auto txn_db_impl = static_cast_with_check<OptimisticTransactionDBImpl,
                                             OptimisticTransactionDB>(txn_db_);
   assert(txn_db_impl);
-  DBImpl* db_impl = static_cast_with_check<DBImpl, DB>(db_->GetRootDB());
+  DBImpl* db_impl = static_cast_with_check<DBImpl>(db_->GetRootDB());
   assert(db_impl);
   const size_t space = txn_db_impl->GetLockBucketsSize();
   std::set<size_t> lk_idxes;
@@ -168,7 +168,7 @@ Status OptimisticTransaction::TryLock(ColumnFamilyHandle* column_family,
 Status OptimisticTransaction::CheckTransactionForConflicts(DB* db) {
   Status result;
-  auto db_impl = static_cast_with_check<DBImpl, DB>(db);
+  auto db_impl = static_cast_with_check<DBImpl>(db);
   // Since we are on the write thread and do not want to block other writers,
   // we will do a cache-only conflict check. This can result in TryAgain

@@ -48,9 +48,8 @@ PessimisticTransaction::PessimisticTransaction(
       deadlock_detect_(false),
       deadlock_detect_depth_(0),
       skip_concurrency_control_(false) {
-  txn_db_impl_ =
-      static_cast_with_check<PessimisticTransactionDB, TransactionDB>(txn_db);
-  db_impl_ = static_cast_with_check<DBImpl, DB>(db_);
+  txn_db_impl_ = static_cast_with_check<PessimisticTransactionDB>(txn_db);
+  db_impl_ = static_cast_with_check<DBImpl>(db_);
   if (init) {
     Initialize(txn_options);
   }

@@ -29,7 +29,7 @@ namespace ROCKSDB_NAMESPACE {
 PessimisticTransactionDB::PessimisticTransactionDB(
     DB* db, const TransactionDBOptions& txn_db_options)
     : TransactionDB(db),
-      db_impl_(static_cast_with_check<DBImpl, DB>(db)),
+      db_impl_(static_cast_with_check<DBImpl>(db)),
       txn_db_options_(txn_db_options),
       lock_mgr_(this, txn_db_options_.num_stripes, txn_db_options.max_num_locks,
                 txn_db_options_.max_num_deadlocks,
@@ -60,7 +60,7 @@ PessimisticTransactionDB::PessimisticTransactionDB(
 PessimisticTransactionDB::PessimisticTransactionDB(
     StackableDB* db, const TransactionDBOptions& txn_db_options)
     : TransactionDB(db),
-      db_impl_(static_cast_with_check<DBImpl, DB>(db->GetRootDB())),
+      db_impl_(static_cast_with_check<DBImpl>(db->GetRootDB())),
       txn_db_options_(txn_db_options),
       lock_mgr_(this, txn_db_options_.num_stripes, txn_db_options.max_num_locks,
                 txn_db_options_.max_num_deadlocks,
@@ -113,7 +113,7 @@ Status PessimisticTransactionDB::Initialize(
   Status s = EnableAutoCompaction(compaction_enabled_cf_handles);
   // create 'real' transactions from recovered shell transactions
-  auto dbimpl = static_cast_with_check<DBImpl, DB>(GetRootDB());
+  auto dbimpl = static_cast_with_check<DBImpl>(GetRootDB());
   assert(dbimpl != nullptr);
   auto rtrxs = dbimpl->recovered_transactions();
@@ -569,8 +569,7 @@ bool PessimisticTransactionDB::TryStealingExpiredTransactionLocks(
 void PessimisticTransactionDB::ReinitializeTransaction(
     Transaction* txn, const WriteOptions& write_options,
     const TransactionOptions& txn_options) {
-  auto txn_impl =
-      static_cast_with_check<PessimisticTransaction, Transaction>(txn);
+  auto txn_impl = static_cast_with_check<PessimisticTransaction>(txn);
   txn_impl->Reinitialize(this, write_options, txn_options);
 }

@@ -75,8 +75,7 @@ class PessimisticTransactionDB : public TransactionDB {
     Transaction* txn = BeginInternalTransaction(opts);
     txn->DisableIndexing();
-    auto txn_impl =
-        static_cast_with_check<PessimisticTransaction, Transaction>(txn);
+    auto txn_impl = static_cast_with_check<PessimisticTransaction>(txn);
     // Since commitBatch sorts the keys before locking, concurrent Write()
     // operations will not cause a deadlock.

@@ -22,7 +22,7 @@ namespace ROCKSDB_NAMESPACE {
 TransactionBaseImpl::TransactionBaseImpl(DB* db,
                                          const WriteOptions& write_options)
     : db_(db),
-      dbimpl_(static_cast_with_check<DBImpl, DB>(db)),
+      dbimpl_(static_cast_with_check<DBImpl>(db)),
       write_options_(write_options),
       cmp_(GetColumnFamilyUserComparator(db->DefaultColumnFamily())),
       start_time_(db_->GetEnv()->NowMicros()),

@@ -166,8 +166,7 @@ TransactionLockMgr::TransactionLockMgr(
       dlock_buffer_(max_num_deadlocks),
       mutex_factory_(mutex_factory) {
   assert(txn_db);
-  txn_db_impl_ =
-      static_cast_with_check<PessimisticTransactionDB, TransactionDB>(txn_db);
+  txn_db_impl_ = static_cast_with_check<PessimisticTransactionDB>(txn_db);
 }
 TransactionLockMgr::~TransactionLockMgr() {}

@@ -430,8 +430,7 @@ Status WritePreparedTxn::ValidateSnapshot(ColumnFamilyHandle* column_family,
   assert(snapshot_);
   SequenceNumber min_uncommitted =
-      static_cast_with_check<const SnapshotImpl, const Snapshot>(
-          snapshot_.get())
+      static_cast_with_check<const SnapshotImpl>(snapshot_.get())
          ->min_uncommitted_;
   SequenceNumber snap_seq = snapshot_->GetSequenceNumber();
   // tracked_at_seq is either max or the last snapshot with which this key was

@@ -30,7 +30,7 @@ namespace ROCKSDB_NAMESPACE {
 Status WritePreparedTxnDB::Initialize(
     const std::vector<size_t>& compaction_enabled_cf_indices,
     const std::vector<ColumnFamilyHandle*>& handles) {
-  auto dbimpl = static_cast_with_check<DBImpl, DB>(GetRootDB());
+  auto dbimpl = static_cast_with_check<DBImpl>(GetRootDB());
   assert(dbimpl != nullptr);
   auto rtxns = dbimpl->recovered_transactions();
   std::map<SequenceNumber, SequenceNumber> ordered_seq_cnt;
@@ -328,8 +328,7 @@ Iterator* WritePreparedTxnDB::NewIterator(const ReadOptions& options,
   if (options.snapshot != nullptr) {
     snapshot_seq = options.snapshot->GetSequenceNumber();
     min_uncommitted =
-        static_cast_with_check<const SnapshotImpl, const Snapshot>(
-            options.snapshot)
+        static_cast_with_check<const SnapshotImpl>(options.snapshot)
            ->min_uncommitted_;
   } else {
     auto* snapshot = GetSnapshot();
@@ -337,8 +336,7 @@ Iterator* WritePreparedTxnDB::NewIterator(const ReadOptions& options,
     // are not deleted.
     snapshot_seq = snapshot->GetSequenceNumber();
     min_uncommitted =
-        static_cast_with_check<const SnapshotImpl, const Snapshot>(snapshot)
-            ->min_uncommitted_;
+        static_cast_with_check<const SnapshotImpl>(snapshot)->min_uncommitted_;
     own_snapshot = std::make_shared<ManagedSnapshot>(db_impl_, snapshot);
   }
   assert(snapshot_seq != kMaxSequenceNumber);
@@ -363,9 +361,9 @@ Status WritePreparedTxnDB::NewIterators(
   SequenceNumber min_uncommitted = 0;
   if (options.snapshot != nullptr) {
     snapshot_seq = options.snapshot->GetSequenceNumber();
-    min_uncommitted = static_cast_with_check<const SnapshotImpl, const Snapshot>(
-                          options.snapshot)
-                          ->min_uncommitted_;
+    min_uncommitted =
+        static_cast_with_check<const SnapshotImpl>(options.snapshot)
+            ->min_uncommitted_;
   } else {
     auto* snapshot = GetSnapshot();
     // We take a snapshot to make sure that the related data in the commit map
@@ -373,8 +371,7 @@ Status WritePreparedTxnDB::NewIterators(
     snapshot_seq = snapshot->GetSequenceNumber();
     own_snapshot = std::make_shared<ManagedSnapshot>(db_impl_, snapshot);
     min_uncommitted =
-        static_cast_with_check<const SnapshotImpl, const Snapshot>(snapshot)
-            ->min_uncommitted_;
+        static_cast_with_check<const SnapshotImpl>(snapshot)->min_uncommitted_;
   }
   iterators->clear();
   iterators->reserve(column_families.size());

@@ -1078,10 +1078,9 @@ SnapshotBackup WritePreparedTxnDB::AssignMinMaxSeqs(const Snapshot* snapshot,
                                                     SequenceNumber* min,
                                                     SequenceNumber* max) {
   if (snapshot != nullptr) {
-    *min = static_cast_with_check<const SnapshotImpl, const Snapshot>(snapshot)
-               ->min_uncommitted_;
-    *max = static_cast_with_check<const SnapshotImpl, const Snapshot>(snapshot)
-               ->number_;
+    *min =
+        static_cast_with_check<const SnapshotImpl>(snapshot)->min_uncommitted_;
+    *max = static_cast_with_check<const SnapshotImpl>(snapshot)->number_;
     return kBackedByDBSnapshot;
   } else {
     *min = SmallestUnCommittedSeq();

@@ -847,8 +847,7 @@ Status WriteUnpreparedTxn::RollbackToSavePointInternal() {
   ReadOptions roptions;
   roptions.snapshot = top.snapshot_->snapshot();
   SequenceNumber min_uncommitted =
-      static_cast_with_check<const SnapshotImpl, const Snapshot>(
-          roptions.snapshot)
+      static_cast_with_check<const SnapshotImpl>(roptions.snapshot)
          ->min_uncommitted_;
   SequenceNumber snap_seq = roptions.snapshot->GetSequenceNumber();
   WriteUnpreparedTxnReadCallback callback(wupt_db_, snap_seq, min_uncommitted,
@@ -976,8 +975,7 @@ Status WriteUnpreparedTxn::ValidateSnapshot(ColumnFamilyHandle* column_family,
   assert(snapshot_);
   SequenceNumber min_uncommitted =
-      static_cast_with_check<const SnapshotImpl, const Snapshot>(
-          snapshot_.get())
+      static_cast_with_check<const SnapshotImpl>(snapshot_.get())
          ->min_uncommitted_;
   SequenceNumber snap_seq = snapshot_->GetSequenceNumber();
   // tracked_at_seq is either max or the last snapshot with which this key was

@@ -193,7 +193,7 @@ Status WriteUnpreparedTxnDB::Initialize(
     const std::vector<size_t>& compaction_enabled_cf_indices,
     const std::vector<ColumnFamilyHandle*>& handles) {
   // TODO(lth): Reduce code duplication in this function.
-  auto dbimpl = static_cast_with_check<DBImpl, DB>(GetRootDB());
+  auto dbimpl = static_cast_with_check<DBImpl>(GetRootDB());
   assert(dbimpl != nullptr);
   db_impl_->SetSnapshotChecker(new WritePreparedSnapshotChecker(this));
@@ -268,8 +268,7 @@ Status WriteUnpreparedTxnDB::Initialize(
     Transaction* real_trx = BeginTransaction(w_options, t_options, nullptr);
     assert(real_trx);
-    auto wupt =
-        static_cast_with_check<WriteUnpreparedTxn, Transaction>(real_trx);
+    auto wupt = static_cast_with_check<WriteUnpreparedTxn>(real_trx);
     wupt->recovered_txn_ = true;
     real_trx->SetLogNumber(first_log_number);
@@ -451,8 +450,7 @@ Iterator* WriteUnpreparedTxnDB::NewIterator(const ReadOptions& options,
     return nullptr;
   }
   min_uncommitted =
-      static_cast_with_check<const SnapshotImpl, const Snapshot>(snapshot)
-          ->min_uncommitted_;
+      static_cast_with_check<const SnapshotImpl>(snapshot)->min_uncommitted_;
   auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
   auto* state =

@@ -846,8 +846,7 @@ Status WriteBatchWithIndex::GetFromBatchAndDB(
   Status s;
   MergeContext merge_context;
   const ImmutableDBOptions& immuable_db_options =
-      static_cast_with_check<DBImpl, DB>(db->GetRootDB())
-          ->immutable_db_options();
+      static_cast_with_check<DBImpl>(db->GetRootDB())->immutable_db_options();
   // Since the lifetime of the WriteBatch is the same as that of the transaction
   // we cannot pin it as otherwise the returned value will not be available
@@ -887,7 +886,7 @@ Status WriteBatchWithIndex::GetFromBatchAndDB(
       get_impl_options.column_family = column_family;
       get_impl_options.value = pinnable_val;
       get_impl_options.callback = callback;
-      s = static_cast_with_check<DBImpl, DB>(db->GetRootDB())
+      s = static_cast_with_check<DBImpl>(db->GetRootDB())
              ->GetImpl(read_options, key, get_impl_options);
     }
@@ -938,8 +937,7 @@ void WriteBatchWithIndex::MultiGetFromBatchAndDB(
     const size_t num_keys, const Slice* keys, PinnableSlice* values,
     Status* statuses, bool sorted_input, ReadCallback* callback) {
   const ImmutableDBOptions& immuable_db_options =
-      static_cast_with_check<DBImpl, DB>(db->GetRootDB())
-          ->immutable_db_options();
+      static_cast_with_check<DBImpl>(db->GetRootDB())->immutable_db_options();
   autovector<KeyContext, MultiGetContext::MAX_BATCH_SIZE> key_context;
   autovector<KeyContext*, MultiGetContext::MAX_BATCH_SIZE> sorted_keys;
@@ -992,9 +990,9 @@ void WriteBatchWithIndex::MultiGetFromBatchAndDB(
   }
   // Did not find key in batch OR could not resolve Merges. Try DB.
-  static_cast_with_check<DBImpl, DB>(db->GetRootDB())
+  static_cast_with_check<DBImpl>(db->GetRootDB())
       ->PrepareMultiGetKeys(key_context.size(), sorted_input, &sorted_keys);
-  static_cast_with_check<DBImpl, DB>(db->GetRootDB())
+  static_cast_with_check<DBImpl>(db->GetRootDB())
      ->MultiGetWithCallback(read_options, column_family, callback,
                             &sorted_keys);
