Revert "comment out unused parameters"

Summary:
This reverts the previous commit 1d7048c598, "comment out unused parameters", which broke the build.

Did a `git revert 1d7048c`.
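For context, the change being reverted replaced the names of unused parameters with block comments so that unused-parameter warnings are not emitted. A minimal sketch of the two spellings (hypothetical names, not code from this diff):

```cpp
// Named parameters: self-documenting, but may trigger -Wunused-parameter
// (and break -Werror builds) when the body does not use them.
int CompareNamed(const char* left, const char* right) {
  return 0;  // left/right intentionally unused in this stub
}

// Commented-out names, the style the reverted commit introduced: the
// parameters are anonymous, so no unused-parameter warning is emitted.
int CompareAnonymous(const char* /*left*/, const char* /*right*/) {
  return 0;
}
```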
Closes https://github.com/facebook/rocksdb/pull/2627

Differential Revision: D5476473

Pulled By: sagar0

fbshipit-source-id: 4756ff5c0dfc88c17eceb00e02c36176de728d06
Branch: main
Author: Sagar Vemuri, committed by Facebook Github Bot
Parent: 1d7048c598
Commit: 72502cf227
Changed files (lines changed per file; some files are not shown because too many files changed in this diff):

cache/cache_test.cc | 6
cache/clock_cache.cc | 2
cache/sharded_cache.cc | 2
db/builder.cc | 8
db/c.cc | 16
db/column_family_test.cc | 51
db/compact_files_test.cc | 9
db/compacted_db_impl.h | 45
db/compaction_iterator.cc | 2
db/compaction_iterator.h | 2
db/compaction_iterator_test.cc | 29
db/compaction_job_stats_test.cc | 10
db/compaction_picker.cc | 9
db/compaction_picker.h | 26
db/compaction_picker_test.cc | 4
db/comparator_db_test.cc | 18
db/db_block_cache_test.cc | 2
db/db_bloom_filter_test.cc | 4
db/db_compaction_filter_test.cc | 48
db/db_compaction_test.cc | 46
db/db_dynamic_level_test.cc | 4
db/db_flush_test.cc | 4
db/db_impl.cc | 27
db/db_impl_compaction_flush.cc | 2
db/db_impl_readonly.cc | 2
db/db_impl_readonly.h | 59
db/db_impl_write.cc | 2
db/db_iter_test.cc | 6
db/db_iterator_test.cc | 6
db/db_memtable_test.cc | 2
db/db_properties_test.cc | 12
db/db_sst_test.cc | 13
db/db_tailing_iter_test.cc | 6
db/db_test.cc | 163
db/db_test2.cc | 23
db/db_test_util.cc | 11
db/db_test_util.h | 2
db/db_universal_compaction_test.cc | 50
db/deletefile_test.cc | 2
db/external_sst_file_test.cc | 15
db/fault_injection_test.cc | 4
db/file_indexer_test.cc | 6
db/forward_iterator.cc | 2
db/forward_iterator.h | 2
db/internal_stats.cc | 117
db/listener_test.cc | 17
db/malloc_stats.cc | 2
db/manual_compaction_test.cc | 6
db/memtable_list.cc | 4
db/merge_test.cc | 2
db/plain_table_db_test.cc | 2
db/prefix_test.cc | 6
db/table_cache.cc | 4
db/table_properties_collector.cc | 4
db/table_properties_collector.h | 2
db/table_properties_collector_test.cc | 19
db/version_builder.cc | 2
db/version_edit.cc | 2
db/version_set.cc | 6
db/version_set.h | 2
db/version_set_test.cc | 4
db/wal_manager_test.cc | 2
db/write_batch.cc | 6
db/write_batch_test.cc | 32
db/write_callback_test.cc | 6
db/write_thread.cc | 3
env/env_encryption.cc | 14
env/env_hdfs.cc | 10
env/env_test.cc | 21
env/io_posix.cc | 4
env/io_posix.h | 2
env/mock_env.cc | 14
hdfs/env_hdfs.h | 101
include/rocksdb/cache.h | 3
include/rocksdb/compaction_filter.h | 12
include/rocksdb/db.h | 2
include/rocksdb/env.h | 51
include/rocksdb/filter_policy.h | 5
include/rocksdb/iterator.h | 2
include/rocksdb/listener.h | 4
include/rocksdb/memtablerep.h | 8
include/rocksdb/merge_operator.h | 15
include/rocksdb/rate_limiter.h | 2
include/rocksdb/slice.h | 2
include/rocksdb/slice_transform.h | 4
include/rocksdb/statistics.h | 2
include/rocksdb/utilities/geo_db.h | 2
include/rocksdb/utilities/optimistic_transaction_db.h | 2
include/rocksdb/utilities/transaction.h | 6
include/rocksdb/wal_filter.h | 18
include/rocksdb/write_batch.h | 11
memtable/hash_cuckoo_rep.cc | 6
memtable/hash_linklist_rep.cc | 14
memtable/hash_skiplist_rep.cc | 14
memtable/skiplistrep.cc | 2
memtable/vectorrep.cc | 6
options/options_helper.cc | 2
options/options_parser.cc | 2
port/port_posix.cc | 2
port/stack_trace.cc | 2

diff --git a/cache/cache_test.cc b/cache/cache_test.cc
@@ -40,9 +40,9 @@ static int DecodeValue(void* v) {
 const std::string kLRU = "lru";
 const std::string kClock = "clock";
-void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}
-void eraseDeleter(const Slice& /*key*/, void* value) {
+void dumbDeleter(const Slice& key, void* value) {}
+void eraseDeleter(const Slice& key, void* value) {
 Cache* cache = reinterpret_cast<Cache*>(value);
 cache->Erase("foo");
 }
@@ -470,7 +470,7 @@ class Value {
 };
 namespace {
-void deleter(const Slice& /*key*/, void* value) {
+void deleter(const Slice& key, void* value) {
 delete static_cast<Value *>(value);
 }
 } // namespace

diff --git a/cache/clock_cache.cc b/cache/clock_cache.cc
@@ -581,7 +581,7 @@ Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
 size_t charge,
 void (*deleter)(const Slice& key, void* value),
 Cache::Handle** out_handle,
-Cache::Priority /*priority*/) {
+Cache::Priority priority) {
 CleanupContext context;
 HashTable::accessor accessor;
 char* key_data = new char[key.size()];

diff --git a/cache/sharded_cache.cc b/cache/sharded_cache.cc
@@ -53,7 +53,7 @@ Status ShardedCache::Insert(const Slice& key, void* value, size_t charge,
 ->Insert(key, hash, value, charge, deleter, handle, priority);
 }
-Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* /*stats*/) {
+Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* stats) {
 uint32_t hash = HashSlice(key);
 return GetShard(Shard(hash))->Lookup(key, hash);
 }

diff --git a/db/builder.cc b/db/builder.cc
@@ -61,10 +61,10 @@ TableBuilder* NewTableBuilder(
 Status BuildTable(
 const std::string& dbname, Env* env, const ImmutableCFOptions& ioptions,
-const MutableCFOptions& /*mutable_cf_options*/,
-const EnvOptions& env_options, TableCache* table_cache,
-InternalIterator* iter, std::unique_ptr<InternalIterator> range_del_iter,
-FileMetaData* meta, const InternalKeyComparator& internal_comparator,
+const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
+TableCache* table_cache, InternalIterator* iter,
+std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta,
+const InternalKeyComparator& internal_comparator,
 const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
 int_tbl_prop_collector_factories,
 uint32_t column_family_id, const std::string& column_family_name,

diff --git a/db/c.cc b/db/c.cc
@@ -240,7 +240,7 @@ struct rocksdb_comparator_t : public Comparator {
 // No-ops since the C binding does not support key shortening methods.
 virtual void FindShortestSeparator(std::string*,
 const Slice&) const override {}
-virtual void FindShortSuccessor(std::string* /*key*/) const override {}
+virtual void FindShortSuccessor(std::string* key) const override {}
 };
 struct rocksdb_filterpolicy_t : public FilterPolicy {
@@ -355,7 +355,7 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
 virtual bool PartialMergeMulti(const Slice& key,
 const std::deque<Slice>& operand_list,
 std::string* new_value,
-Logger* /*logger*/) const override {
+Logger* logger) const override {
 size_t operand_count = operand_list.size();
 std::vector<const char*> operand_pointers(operand_count);
 std::vector<size_t> operand_sizes(operand_count);
@@ -2106,8 +2106,8 @@ void rocksdb_options_set_level0_stop_writes_trigger(
 opt->rep.level0_stop_writes_trigger = n;
 }
-void rocksdb_options_set_max_mem_compaction_level(rocksdb_options_t* /*opt*/,
-int /*n*/) {}
+void rocksdb_options_set_max_mem_compaction_level(rocksdb_options_t* opt,
+int n) {}
 void rocksdb_options_set_wal_recovery_mode(rocksdb_options_t* opt,int mode) {
 opt->rep.wal_recovery_mode = static_cast<WALRecoveryMode>(mode);
@@ -2171,8 +2171,8 @@ void rocksdb_options_set_manifest_preallocation_size(
 }
 // noop
-void rocksdb_options_set_purge_redundant_kvs_while_flush(
-rocksdb_options_t* /*opt*/, unsigned char /*v*/) {}
+void rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t* opt,
+unsigned char v) {}
 void rocksdb_options_set_use_direct_reads(rocksdb_options_t* opt,
 unsigned char v) {
@@ -2332,7 +2332,7 @@ void rocksdb_options_set_table_cache_numshardbits(
 }
 void rocksdb_options_set_table_cache_remove_scan_count_limit(
-rocksdb_options_t* /*opt*/, int /*v*/) {
+rocksdb_options_t* opt, int v) {
 // this option is deprecated
 }
@@ -2836,7 +2836,7 @@ rocksdb_sstfilewriter_t* rocksdb_sstfilewriter_create(
 rocksdb_sstfilewriter_t* rocksdb_sstfilewriter_create_with_comparator(
 const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options,
-const rocksdb_comparator_t* /*comparator*/) {
+const rocksdb_comparator_t* comparator) {
 rocksdb_sstfilewriter_t* writer = new rocksdb_sstfilewriter_t;
 writer->rep = new SstFileWriter(env->rep, io_options->rep);
 return writer;

diff --git a/db/column_family_test.cc b/db/column_family_test.cc
@@ -1168,14 +1168,13 @@ TEST_F(ColumnFamilyTest, MemtableNotSupportSnapshot) {
 #endif // !ROCKSDB_LITE
 class TestComparator : public Comparator {
-int Compare(const rocksdb::Slice& /*a*/,
-const rocksdb::Slice& /*b*/) const override {
+int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override {
 return 0;
 }
 const char* Name() const override { return "Test"; }
-void FindShortestSeparator(std::string* /*start*/,
-const rocksdb::Slice& /*limit*/) const override {}
-void FindShortSuccessor(std::string* /*key*/) const override {}
+void FindShortestSeparator(std::string* start,
+const rocksdb::Slice& limit) const override {}
+void FindShortSuccessor(std::string* key) const override {}
 };
 static TestComparator third_comparator;
@@ -1347,7 +1346,7 @@ TEST_F(ColumnFamilyTest, MultipleManualCompactions) {
 {"ColumnFamilyTest::MultiManual:2", "ColumnFamilyTest::MultiManual:5"},
 {"ColumnFamilyTest::MultiManual:2", "ColumnFamilyTest::MultiManual:3"}});
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
+"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
 if (cf_1_1) {
 TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:4");
 cf_1_1 = false;
@@ -1440,7 +1439,7 @@ TEST_F(ColumnFamilyTest, AutomaticAndManualCompactions) {
 {"ColumnFamilyTest::AutoManual:2", "ColumnFamilyTest::AutoManual:5"},
 {"ColumnFamilyTest::AutoManual:2", "ColumnFamilyTest::AutoManual:3"}});
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
+"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
 if (cf_1_1) {
 cf_1_1 = false;
 TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:4");
@@ -1541,7 +1540,7 @@ TEST_F(ColumnFamilyTest, ManualAndAutomaticCompactions) {
 {"ColumnFamilyTest::ManualAuto:5", "ColumnFamilyTest::ManualAuto:2"},
 {"ColumnFamilyTest::ManualAuto:2", "ColumnFamilyTest::ManualAuto:3"}});
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
+"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
 if (cf_1_1) {
 TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4");
 cf_1_1 = false;
@@ -1634,7 +1633,7 @@ TEST_F(ColumnFamilyTest, SameCFManualManualCompactions) {
 {"ColumnFamilyTest::ManualManual:1",
 "ColumnFamilyTest::ManualManual:3"}});
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
+"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
 if (cf_1_1) {
 TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:4");
 cf_1_1 = false;
@@ -1732,7 +1731,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactions) {
 {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:2"},
 {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:3"}});
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
+"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
 if (cf_1_1) {
 TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4");
 cf_1_1 = false;
@@ -1824,7 +1823,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) {
 "ColumnFamilyTest::ManualAuto:3"},
 {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:3"}});
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
+"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
 if (cf_1_1) {
 TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4");
 cf_1_1 = false;
@@ -1927,7 +1926,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticConflict) {
 {"ColumnFamilyTest::ManualAutoCon:1",
 "ColumnFamilyTest::ManualAutoCon:3"}});
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
+"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
 if (cf_1_1) {
 TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:4");
 cf_1_1 = false;
@@ -2031,7 +2030,7 @@ TEST_F(ColumnFamilyTest, SameCFAutomaticManualCompactions) {
 {"CompactionPicker::CompactRange:Conflict",
 "ColumnFamilyTest::AutoManual:3"}});
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
+"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
 if (cf_1_1) {
 TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:4");
 cf_1_1 = false;
@@ -2477,21 +2476,21 @@ TEST_F(ColumnFamilyTest, CreateAndDropRace) {
 auto main_thread_id = std::this_thread::get_id();
-rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"PersistRocksDBOptions:start", [&](void* /*arg*/) {
+rocksdb::SyncPoint::GetInstance()->SetCallBack("PersistRocksDBOptions:start",
+[&](void* arg) {
 auto current_thread_id = std::this_thread::get_id();
 // If it's the main thread hitting this sync-point, then it
 // will be blocked until some other thread update the test_stage.
 if (main_thread_id == current_thread_id) {
 test_stage = kMainThreadStartPersistingOptionsFile;
 while (test_stage < kChildThreadFinishDroppingColumnFamily) {
 Env::Default()->SleepForMicroseconds(100);
 }
 }
 });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"WriteThread::EnterUnbatched:Wait", [&](void* /*arg*/) {
+"WriteThread::EnterUnbatched:Wait", [&](void* arg) {
 // This means a thread doing DropColumnFamily() is waiting for
 // other thread to finish persisting options.
 // In such case, we update the test_stage to unblock the main thread.

diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc
@@ -37,7 +37,8 @@ class FlushedFileCollector : public EventListener {
 FlushedFileCollector() {}
 ~FlushedFileCollector() {}
-virtual void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
+virtual void OnFlushCompleted(
+DB* db, const FlushJobInfo& info) override {
 std::lock_guard<std::mutex> lock(mutex_);
 flushed_files_.push_back(info.file_path);
 }
@@ -256,9 +257,9 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {
 TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
 class FilterWithGet : public CompactionFilter {
 public:
-virtual bool Filter(int /*level*/, const Slice& /*key*/,
-const Slice& /*value*/, std::string* /*new_value*/,
-bool* /*value_changed*/) const override {
+virtual bool Filter(int level, const Slice& key, const Slice& value,
+std::string* new_value,
+bool* value_changed) const override {
 if (db_ == nullptr) {
 return true;
 }

diff --git a/db/compacted_db_impl.h b/db/compacted_db_impl.h
@@ -32,56 +32,55 @@ class CompactedDBImpl : public DBImpl {
 override;
 using DBImpl::Put;
-virtual Status Put(const WriteOptions& /*options*/,
-ColumnFamilyHandle* /*column_family*/,
-const Slice& /*key*/, const Slice& /*value*/) override {
+virtual Status Put(const WriteOptions& options,
+ColumnFamilyHandle* column_family, const Slice& key,
+const Slice& value) override {
 return Status::NotSupported("Not supported in compacted db mode.");
 }
 using DBImpl::Merge;
-virtual Status Merge(const WriteOptions& /*options*/,
-ColumnFamilyHandle* /*column_family*/,
-const Slice& /*key*/, const Slice& /*value*/) override {
+virtual Status Merge(const WriteOptions& options,
+ColumnFamilyHandle* column_family, const Slice& key,
+const Slice& value) override {
 return Status::NotSupported("Not supported in compacted db mode.");
 }
 using DBImpl::Delete;
-virtual Status Delete(const WriteOptions& /*options*/,
-ColumnFamilyHandle* /*column_family*/,
-const Slice& /*key*/) override {
+virtual Status Delete(const WriteOptions& options,
+ColumnFamilyHandle* column_family,
+const Slice& key) override {
 return Status::NotSupported("Not supported in compacted db mode.");
 }
-virtual Status Write(const WriteOptions& /*options*/,
-WriteBatch* /*updates*/) override {
+virtual Status Write(const WriteOptions& options,
+WriteBatch* updates) override {
 return Status::NotSupported("Not supported in compacted db mode.");
 }
 using DBImpl::CompactRange;
-virtual Status CompactRange(const CompactRangeOptions& /*options*/,
-ColumnFamilyHandle* /*column_family*/,
-const Slice* /*begin*/,
-const Slice* /*end*/) override {
+virtual Status CompactRange(const CompactRangeOptions& options,
+ColumnFamilyHandle* column_family,
+const Slice* begin, const Slice* end) override {
 return Status::NotSupported("Not supported in compacted db mode.");
 }
 virtual Status DisableFileDeletions() override {
 return Status::NotSupported("Not supported in compacted db mode.");
 }
-virtual Status EnableFileDeletions(bool /*force*/) override {
+virtual Status EnableFileDeletions(bool force) override {
 return Status::NotSupported("Not supported in compacted db mode.");
 }
 virtual Status GetLiveFiles(std::vector<std::string>&,
-uint64_t* /*manifest_file_size*/,
-bool /*flush_memtable*/ = true) override {
+uint64_t* manifest_file_size,
+bool flush_memtable = true) override {
 return Status::NotSupported("Not supported in compacted db mode.");
 }
 using DBImpl::Flush;
-virtual Status Flush(const FlushOptions& /*options*/,
-ColumnFamilyHandle* /*column_family*/) override {
+virtual Status Flush(const FlushOptions& options,
+ColumnFamilyHandle* column_family) override {
 return Status::NotSupported("Not supported in compacted db mode.");
 }
 using DB::IngestExternalFile;
 virtual Status IngestExternalFile(
-ColumnFamilyHandle* /*column_family*/,
-const std::vector<std::string>& /*external_files*/,
-const IngestExternalFileOptions& /*ingestion_options*/) override {
+ColumnFamilyHandle* column_family,
+const std::vector<std::string>& external_files,
+const IngestExternalFileOptions& ingestion_options) override {
 return Status::NotSupported("Not supported in compacted db mode.");
 }

diff --git a/db/compaction_iterator.cc b/db/compaction_iterator.cc
@@ -50,7 +50,7 @@ CompactionIterator::CompactionIterator(
 CompactionIterator::CompactionIterator(
 InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
-SequenceNumber /*last_sequence*/, std::vector<SequenceNumber>* snapshots,
+SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
 SequenceNumber earliest_write_conflict_snapshot, Env* env,
 bool expect_valid_internal_key, RangeDelAggregator* range_del_agg,
 std::unique_ptr<CompactionProxy> compaction,

diff --git a/db/compaction_iterator.h b/db/compaction_iterator.h
@@ -31,7 +31,7 @@ class CompactionIterator {
 : compaction_(compaction) {}
 virtual ~CompactionProxy() = default;
-virtual int level(size_t /*compaction_input_level*/ = 0) const {
+virtual int level(size_t compaction_input_level = 0) const {
 return compaction_->level();
 }
 virtual bool KeyNotExistsBeyondOutputLevel(

diff --git a/db/compaction_iterator_test.cc b/db/compaction_iterator_test.cc
@@ -17,15 +17,15 @@ namespace rocksdb {
 // Expects no merging attempts.
 class NoMergingMergeOp : public MergeOperator {
 public:
-bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
-MergeOperationOutput* /*merge_out*/) const override {
+bool FullMergeV2(const MergeOperationInput& merge_in,
+MergeOperationOutput* merge_out) const override {
 ADD_FAILURE();
 return false;
 }
-bool PartialMergeMulti(const Slice& /*key*/,
-const std::deque<Slice>& /*operand_list*/,
-std::string* /*new_value*/,
-Logger* /*logger*/) const override {
+bool PartialMergeMulti(const Slice& key,
+const std::deque<Slice>& operand_list,
+std::string* new_value,
+Logger* logger) const override {
 ADD_FAILURE();
 return false;
 }
@@ -39,10 +39,9 @@ class NoMergingMergeOp : public MergeOperator {
 // Always returns Decition::kRemove.
 class StallingFilter : public CompactionFilter {
 public:
-virtual Decision FilterV2(int /*level*/, const Slice& key, ValueType /*t*/,
-const Slice& /*existing_value*/,
-std::string* /*new_value*/,
-std::string* /*skip_until*/) const override {
+virtual Decision FilterV2(int level, const Slice& key, ValueType t,
+const Slice& existing_value, std::string* new_value,
+std::string* skip_until) const override {
 int k = std::atoi(key.ToString().c_str());
 last_seen.store(k);
 while (k >= stall_at.load()) {
@@ -113,7 +112,7 @@ class LoggingForwardVectorIterator : public InternalIterator {
 keys_.begin();
 }
-virtual void SeekForPrev(const Slice& /*target*/) override { assert(false); }
+virtual void SeekForPrev(const Slice& target) override { assert(false); }
 virtual void Next() override {
 assert(Valid());
@@ -145,9 +144,9 @@ class FakeCompaction : public CompactionIterator::CompactionProxy {
 public:
 FakeCompaction() = default;
-virtual int level(size_t /*compaction_input_level*/) const { return 0; }
+virtual int level(size_t compaction_input_level) const { return 0; }
 virtual bool KeyNotExistsBeyondOutputLevel(
-const Slice& /*user_key*/, std::vector<size_t>* /*level_ptrs*/) const {
+const Slice& user_key, std::vector<size_t>* level_ptrs) const {
 return key_not_exists_beyond_output_level;
 }
 virtual bool bottommost_level() const { return false; }
@@ -277,9 +276,9 @@ TEST_F(CompactionIteratorTest, RangeDeletionWithSnapshots) {
 TEST_F(CompactionIteratorTest, CompactionFilterSkipUntil) {
 class Filter : public CompactionFilter {
-virtual Decision FilterV2(int /*level*/, const Slice& key, ValueType t,
+virtual Decision FilterV2(int level, const Slice& key, ValueType t,
 const Slice& existing_value,
-std::string* /*new_value*/,
+std::string* new_value,
 std::string* skip_until) const override {
 std::string k = key.ToString();
 std::string v = existing_value.ToString();

diff --git a/db/compaction_job_stats_test.cc b/db/compaction_job_stats_test.cc
@@ -426,7 +426,7 @@ class CompactionJobStatsChecker : public EventListener {
 // Once a compaction completed, this function will verify the returned
 // CompactionJobInfo with the oldest CompactionJobInfo added earlier
 // in "expected_stats_" which has not yet being used for verification.
-virtual void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) {
+virtual void OnCompactionCompleted(DB *db, const CompactionJobInfo& ci) {
 if (verify_next_comp_io_stats_) {
 ASSERT_GT(ci.stats.file_write_nanos, 0);
 ASSERT_GT(ci.stats.file_range_sync_nanos, 0);
@@ -806,7 +806,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
 stats_checker->set_verify_next_comp_io_stats(true);
 std::atomic<bool> first_prepare_write(true);
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"WritableFileWriter::Append:BeforePrepareWrite", [&](void* /*arg*/) {
+"WritableFileWriter::Append:BeforePrepareWrite", [&](void* arg) {
 if (first_prepare_write.load()) {
 options.env->SleepForMicroseconds(3);
 first_prepare_write.store(false);
@@ -815,7 +815,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
 std::atomic<bool> first_flush(true);
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"WritableFileWriter::Flush:BeforeAppend", [&](void* /*arg*/) {
+"WritableFileWriter::Flush:BeforeAppend", [&](void* arg) {
 if (first_flush.load()) {
 options.env->SleepForMicroseconds(3);
 first_flush.store(false);
@@ -824,7 +824,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
 std::atomic<bool> first_sync(true);
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"WritableFileWriter::SyncInternal:0", [&](void* /*arg*/) {
+"WritableFileWriter::SyncInternal:0", [&](void* arg) {
 if (first_sync.load()) {
 options.env->SleepForMicroseconds(3);
 first_sync.store(false);
@@ -833,7 +833,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
 std::atomic<bool> first_range_sync(true);
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"WritableFileWriter::RangeSync:0", [&](void* /*arg*/) {
+"WritableFileWriter::RangeSync:0", [&](void* arg) {
 if (first_range_sync.load()) {
 options.env->SleepForMicroseconds(3);
 first_range_sync.store(false);

diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc
@@ -199,7 +199,7 @@ void CompactionPicker::GetRange(const std::vector<CompactionInputFiles>& inputs,
 assert(initialized);
 }
-bool CompactionPicker::ExpandInputsToCleanCut(const std::string& /*cf_name*/,
+bool CompactionPicker::ExpandInputsToCleanCut(const std::string& cf_name,
 VersionStorageInfo* vstorage,
 CompactionInputFiles* inputs) {
 // This isn't good compaction
@@ -318,7 +318,7 @@ Compaction* CompactionPicker::CompactFiles(
 Status CompactionPicker::GetCompactionInputsFromFileNumbers(
 std::vector<CompactionInputFiles>* input_files,
 std::unordered_set<uint64_t>* input_set, const VersionStorageInfo* vstorage,
-const CompactionOptions& /*compact_options*/) const {
+const CompactionOptions& compact_options) const {
 if (input_set->size() == 0U) {
 return Status::InvalidArgument(
 "Compaction must include at least one file.");
@@ -1581,9 +1581,8 @@ Compaction* FIFOCompactionPicker::PickCompaction(
 Compaction* FIFOCompactionPicker::CompactRange(
 const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
 VersionStorageInfo* vstorage, int input_level, int output_level,
-uint32_t /*output_path_id*/, const InternalKey* /*begin*/,
-const InternalKey* /*end*/, InternalKey** compaction_end,
-bool* /*manual_conflict*/) {
+uint32_t output_path_id, const InternalKey* begin, const InternalKey* end,
+InternalKey** compaction_end, bool* manual_conflict) {
 assert(input_level == 0);
 assert(output_level == 0);
 *compaction_end = nullptr;

diff --git a/db/compaction_picker.h b/db/compaction_picker.h
@@ -263,29 +263,27 @@ class NullCompactionPicker : public CompactionPicker {
 virtual ~NullCompactionPicker() {}
 // Always return "nullptr"
-Compaction* PickCompaction(const std::string& /*cf_name*/,
-const MutableCFOptions& /*mutable_cf_options*/,
-VersionStorageInfo* /*vstorage*/,
-LogBuffer* /*log_buffer*/) override {
+Compaction* PickCompaction(const std::string& cf_name,
+const MutableCFOptions& mutable_cf_options,
+VersionStorageInfo* vstorage,
+LogBuffer* log_buffer) override {
 return nullptr;
 }
 // Always return "nullptr"
-Compaction* CompactRange(const std::string& /*cf_name*/,
-const MutableCFOptions& /*mutable_cf_options*/,
-VersionStorageInfo* /*vstorage*/,
-int /*input_level*/, int /*output_level*/,
-uint32_t /*output_path_id*/,
-const InternalKey* /*begin*/,
-const InternalKey* /*end*/,
-InternalKey** /*compaction_end*/,
-bool* /*manual_conflict*/) override {
+Compaction* CompactRange(const std::string& cf_name,
+const MutableCFOptions& mutable_cf_options,
+VersionStorageInfo* vstorage, int input_level,
+int output_level, uint32_t output_path_id,
+const InternalKey* begin, const InternalKey* end,
+InternalKey** compaction_end,
+bool* manual_conflict) override {
 return nullptr;
 }
 // Always returns false.
 virtual bool NeedsCompaction(
-const VersionStorageInfo* /*vstorage*/) const override {
+const VersionStorageInfo* vstorage) const override {
 return false;
 }
 };

diff --git a/db/compaction_picker_test.cc b/db/compaction_picker_test.cc
@@ -20,9 +20,7 @@ namespace rocksdb {
 class CountingLogger : public Logger {
 public:
 using Logger::Logv;
-virtual void Logv(const char* /*format*/, va_list /*ap*/) override {
-log_count++;
-}
+virtual void Logv(const char* format, va_list ap) override { log_count++; }
 size_t log_count;
 };

diff --git a/db/comparator_db_test.cc b/db/comparator_db_test.cc
@@ -188,10 +188,10 @@ class DoubleComparator : public Comparator {
 return -1;
 }
 }
-virtual void FindShortestSeparator(std::string* /*start*/,
-const Slice& /*limit*/) const override {}
-virtual void FindShortSuccessor(std::string* /*key*/) const override {}
+virtual void FindShortestSeparator(std::string* start,
+const Slice& limit) const override {}
+virtual void FindShortSuccessor(std::string* key) const override {}
 };
 class HashComparator : public Comparator {
@@ -211,10 +211,10 @@ class HashComparator : public Comparator {
 return -1;
 }
 }
-virtual void FindShortestSeparator(std::string* /*start*/,
-const Slice& /*limit*/) const override {}
-virtual void FindShortSuccessor(std::string* /*key*/) const override {}
+virtual void FindShortestSeparator(std::string* start,
+const Slice& limit) const override {}
+virtual void FindShortSuccessor(std::string* key) const override {}
 };
 class TwoStrComparator : public Comparator {
@@ -243,10 +243,10 @@ class TwoStrComparator : public Comparator {
 }
 return a2.compare(b2);
 }
-virtual void FindShortestSeparator(std::string* /*start*/,
-const Slice& /*limit*/) const override {}
-virtual void FindShortSuccessor(std::string* /*key*/) const override {}
+virtual void FindShortestSeparator(std::string* start,
+const Slice& limit) const override {}
+virtual void FindShortSuccessor(std::string* key) const override {}
 };
 } // namespace

diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc
@@ -47,7 +47,7 @@ class DBBlockCacheTest : public DBTestBase {
 return options;
 }
-void InitTable(const Options& /*options*/) {
+void InitTable(const Options& options) {
 std::string value(kValueSize, 'a');
 for (size_t i = 0; i < kNumBlocks; i++) {
 ASSERT_OK(Put(ToString(i), value.c_str()));

diff --git a/db/db_bloom_filter_test.cc b/db/db_bloom_filter_test.cc
@@ -1057,10 +1057,10 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) {
 int32_t non_trivial_move = 0;
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:TrivialMove",
-[&](void* /*arg*/) { trivial_move++; });
+[&](void* arg) { trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:NonTrivial",
-[&](void* /*arg*/) { non_trivial_move++; });
+[&](void* arg) { non_trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 CompactRangeOptions compact_options;

diff --git a/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc
@@ -26,9 +26,9 @@ class DBTestCompactionFilter : public DBTestBase {
 class KeepFilter : public CompactionFilter {
 public:
-virtual bool Filter(int /*level*/, const Slice& /*key*/,
-const Slice& /*value*/, std::string* /*new_value*/,
-bool* /*value_changed*/) const override {
+virtual bool Filter(int level, const Slice& key, const Slice& value,
+std::string* new_value, bool* value_changed) const
+override {
 cfilter_count++;
 return false;
 }
@@ -38,9 +38,9 @@ class KeepFilter : public CompactionFilter {
 class DeleteFilter : public CompactionFilter {
 public:
-virtual bool Filter(int /*level*/, const Slice& /*key*/,
-const Slice& /*value*/, std::string* /*new_value*/,
-bool* /*value_changed*/) const override {
+virtual bool Filter(int level, const Slice& key, const Slice& value,
+std::string* new_value, bool* value_changed) const
+override {
 cfilter_count++;
 return true;
 }
@@ -50,9 +50,9 @@ class DeleteFilter : public CompactionFilter {
 class DeleteISFilter : public CompactionFilter {
 public:
-virtual bool Filter(int /*level*/, const Slice& key, const Slice& /*value*/,
-std::string* /*new_value*/,
-bool* /*value_changed*/) const override {
+virtual bool Filter(int level, const Slice& key, const Slice& value,
+std::string* new_value,
+bool* value_changed) const override {
 cfilter_count++;
 int i = std::stoi(key.ToString());
 if (i > 5 && i <= 105) {
@@ -70,10 +70,8 @@ class DeleteISFilter : public CompactionFilter {
 // zero-padded to length 10.
 class SkipEvenFilter : public CompactionFilter {
 public:
-virtual Decision FilterV2(int /*level*/, const Slice& key,
-ValueType /*value_type*/,
-const Slice& /*existing_value*/,
-std::string* /*new_value*/,
+virtual Decision FilterV2(int level, const Slice& key, ValueType value_type,
+const Slice& existing_value, std::string* new_value,
 std::string* skip_until) const override {
 cfilter_count++;
 int i = std::stoi(key.ToString());
@@ -95,9 +93,9 @@ class SkipEvenFilter : public CompactionFilter {
 class DelayFilter : public CompactionFilter {
 public:
 explicit DelayFilter(DBTestBase* d) : db_test(d) {}
-virtual bool Filter(int /*level*/, const Slice& /*key*/,
-const Slice& /*value*/, std::string* /*new_value*/,
-bool* /*value_changed*/) const override {
+virtual bool Filter(int level, const Slice& key, const Slice& value,
+std::string* new_value,
+bool* value_changed) const override {
 db_test->env_->addon_time_.fetch_add(1000);
 return true;
 }
@@ -112,9 +110,9 @@ class ConditionalFilter : public CompactionFilter {
 public:
 explicit ConditionalFilter(const std::string* filtered_value)
 : filtered_value_(filtered_value) {}
-virtual bool Filter(int /*level*/, const Slice& /*key*/, const Slice& value,
-std::string* /*new_value*/,
-bool* /*value_changed*/) const override {
+virtual bool Filter(int level, const Slice& key, const Slice& value,
+std::string* new_value,
+bool* value_changed) const override {
 return value.ToString() == *filtered_value_;
 }
@@ -128,9 +126,9 @@ class ChangeFilter : public CompactionFilter {
 public:
 explicit ChangeFilter() {}
-virtual bool Filter(int /*level*/, const Slice& /*key*/,
-const Slice& /*value*/, std::string* new_value,
-bool* value_changed) const override {
+virtual bool Filter(int level, const Slice& key, const Slice& value,
+std::string* new_value, bool* value_changed) const
+override {
 assert(new_value != nullptr);
 *new_value = NEW_VALUE;
 *value_changed = true;
@@ -219,7 +217,7 @@ class DelayFilterFactory : public CompactionFilterFactory {
 public:
 explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
 virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-const CompactionFilter::Context& /*context*/) override {
+const CompactionFilter::Context& context) override {
 return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
 }
@@ -235,7 +233,7 @@ class ConditionalFilterFactory : public CompactionFilterFactory {
 : filtered_value_(filtered_value.ToString()) {}
 virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-const CompactionFilter::Context& /*context*/) override {
+const CompactionFilter::Context& context) override {
 return std::unique_ptr<CompactionFilter>(
 new ConditionalFilter(&filtered_value_));
 }
@@ -253,7 +251,7 @@ class ChangeFilterFactory : public CompactionFilterFactory {
 explicit ChangeFilterFactory() {}
 virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-const CompactionFilter::Context& /*context*/) override {
+const CompactionFilter::Context& context) override {
 return std::unique_ptr<CompactionFilter>(new ChangeFilter());
 }

diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc
@@ -53,7 +53,7 @@ class FlushedFileCollector : public EventListener {
 FlushedFileCollector() {}
 ~FlushedFileCollector() {}
-virtual void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
+virtual void OnFlushCompleted(DB* db, const FlushJobInfo& info) override {
 std::lock_guard<std::mutex> lock(mutex_);
 flushed_files_.push_back(info.file_path);
 }
@@ -282,7 +282,7 @@ TEST_F(DBCompactionTest, TestTableReaderForCompaction) {
 });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "TableCache::GetTableReader:0",
-[&](void* /*arg*/) { num_new_table_reader++; });
+[&](void* arg) { num_new_table_reader++; });
 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 for (int k = 0; k < options.level0_file_num_compaction_trigger; ++k) {
@@ -838,7 +838,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) {
 int32_t trivial_move = 0;
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:TrivialMove",
-[&](void* /*arg*/) { trivial_move++; });
+[&](void* arg) { trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 Options options = CurrentOptions();
@@ -895,10 +895,10 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
 int32_t non_trivial_move = 0;
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:TrivialMove",
-[&](void* /*arg*/) { trivial_move++; });
+[&](void* arg) { trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:NonTrivial",
-[&](void* /*arg*/) { non_trivial_move++; });
+[&](void* arg) { non_trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 Options options = CurrentOptions();
@@ -994,10 +994,10 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveTargetLevel) {
 int32_t non_trivial_move = 0;
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:TrivialMove",
-[&](void* /*arg*/) { trivial_move++; });
+[&](void* arg) { trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:NonTrivial",
-[&](void* /*arg*/) { non_trivial_move++; });
+[&](void* arg) { non_trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 Options options = CurrentOptions();
@@ -1053,10 +1053,10 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) {
 int32_t non_trivial_move = 0;
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:TrivialMove",
-[&](void* /*arg*/) { trivial_move++; });
+[&](void* arg) { trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:NonTrivial",
-[&](void* /*arg*/) { non_trivial_move++; });
+[&](void* arg) { non_trivial_move++; });
 bool first = true;
 // Purpose of dependencies:
 // 4 -> 1: ensure the order of two non-trivial compactions
@@ -1067,7 +1067,7 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) {
 {"DBCompaction::ManualPartial:5", "DBCompaction::ManualPartial:2"},
 {"DBCompaction::ManualPartial:5", "DBCompaction::ManualPartial:3"}});
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
+"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
 if (first) {
 first = false;
 TEST_SYNC_POINT("DBCompaction::ManualPartial:4");
@@ -1198,17 +1198,17 @@ TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) {
 int32_t non_trivial_move = 0;
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:TrivialMove",
-[&](void* /*arg*/) { trivial_move++; });
+[&](void* arg) { trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:NonTrivial",
-[&](void* /*arg*/) { non_trivial_move++; });
+[&](void* arg) { non_trivial_move++; });
 bool first = true;
 bool second = true;
 rocksdb::SyncPoint::GetInstance()->LoadDependency(
 {{"DBCompaction::PartialFill:4", "DBCompaction::PartialFill:1"},
 {"DBCompaction::PartialFill:2", "DBCompaction::PartialFill:3"}});
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
+"DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
 if (first) {
 TEST_SYNC_POINT("DBCompaction::PartialFill:4");
 first = false;
@@ -1444,10 +1444,10 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveToLastLevelWithFiles) {
 int32_t non_trivial_move = 0;
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:TrivialMove",
-[&](void* /*arg*/) { trivial_move++; });
+[&](void* arg) { trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:NonTrivial",
-[&](void* /*arg*/) { non_trivial_move++; });
+[&](void* arg) { non_trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 Options options = CurrentOptions();
@@ -2325,16 +2325,16 @@ TEST_P(DBCompactionTestWithParam, CompressLevelCompaction) {
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "Compaction::InputCompressionMatchesOutput:Matches",
-[&](void* /*arg*/) { matches++; });
+[&](void* arg) { matches++; });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "Compaction::InputCompressionMatchesOutput:DidntMatch",
-[&](void* /*arg*/) { didnt_match++; });
+[&](void* arg) { didnt_match++; });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:NonTrivial",
-[&](void* /*arg*/) { non_trivial++; });
+[&](void* arg) { non_trivial++; });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:TrivialMove",
-[&](void* /*arg*/) { trivial_move++; });
+[&](void* arg) { trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 Reopen(options);
@@ -2496,10 +2496,10 @@ TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) {
 int32_t non_trivial_move = 0;
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:TrivialMove",
-[&](void* /*arg*/) { trivial_move++; });
+[&](void* arg) { trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:NonTrivial",
-[&](void* /*arg*/) { non_trivial_move++; });
+[&](void* arg) { non_trivial_move++; });
 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 Options options = CurrentOptions();
@@ -2656,7 +2656,9 @@ TEST_P(DBCompactionDirectIOTest, DirectIO) {
 });
 if (options.use_direct_io_for_flush_and_compaction) {
 SyncPoint::GetInstance()->SetCallBack(
-"SanitizeOptions:direct_io", [&](void* /*arg*/) { readahead = true; });
+"SanitizeOptions:direct_io", [&](void* arg) {
+readahead = true;
+});
 }
 SyncPoint::GetInstance()->EnableProcessing();
 CreateAndReopenWithCF({"pikachu"}, options);

diff --git a/db/db_dynamic_level_test.cc b/db/db_dynamic_level_test.cc
@@ -194,7 +194,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
 // Hold compaction jobs to make sure
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "CompactionJob::Run():Start",
-[&](void* /*arg*/) { env_->SleepForMicroseconds(100000); });
+[&](void* arg) { env_->SleepForMicroseconds(100000); });
 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 ASSERT_OK(dbfull()->SetOptions({
 {"disable_auto_compactions", "true"},
@@ -378,7 +378,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) {
 int non_trivial = 0;
 rocksdb::SyncPoint::GetInstance()->SetCallBack(
 "DBImpl::BackgroundCompaction:NonTrivial",
-[&](void* /*arg*/) { non_trivial++; });
+[&](void* arg) { non_trivial++; });
 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 Random rnd(301);

diff --git a/db/db_flush_test.cc b/db/db_flush_test.cc
@@ -101,7 +101,7 @@ TEST_F(DBFlushTest, FlushInLowPriThreadPool) {
 std::thread::id tid;
 int num_flushes = 0, num_compactions = 0;
 SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BGWorkFlush", [&](void* /*arg*/) {
+"DBImpl::BGWorkFlush", [&](void* arg) {
 if (tid == std::thread::id()) {
 tid = std::this_thread::get_id();
 } else {
@@ -110,7 +110,7 @@ TEST_F(DBFlushTest, FlushInLowPriThreadPool) {
 ++num_flushes;
 });
 SyncPoint::GetInstance()->SetCallBack(
-"DBImpl::BGWorkCompaction", [&](void* /*arg*/) {
+"DBImpl::BGWorkCompaction", [&](void* arg) {
 ASSERT_EQ(tid, std::this_thread::get_id());
 ++num_compactions;
 });

@@ -595,9 +595,8 @@ Status DBImpl::SetDBOptions(
  }
  // return the same level if it cannot be moved
- int DBImpl::FindMinimumEmptyLevelFitting(
- ColumnFamilyData* cfd, const MutableCFOptions& /*mutable_cf_options*/,
- int level) {
+ int DBImpl::FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd,
+ const MutableCFOptions& mutable_cf_options, int level) {
  mutex_.AssertHeld();
  const auto* vstorage = cfd->current()->storage_info();
  int minimum_level = level;
@@ -807,7 +806,7 @@ struct IterState {
  bool background_purge;
  };
- static void CleanupIteratorState(void* arg1, void* /*arg2*/) {
+ static void CleanupIteratorState(void* arg1, void* arg2) {
  IterState* state = reinterpret_cast<IterState*>(arg1);
  if (state->super_version->Unref()) {
@@ -2191,31 +2190,31 @@ Status DBImpl::GetDbIdentity(std::string& identity) const {
  }
  // Default implementation -- returns not supported status
- Status DB::CreateColumnFamily(const ColumnFamilyOptions& /*cf_options*/,
- const std::string& /*column_family_name*/,
- ColumnFamilyHandle** /*handle*/) {
+ Status DB::CreateColumnFamily(const ColumnFamilyOptions& cf_options,
+ const std::string& column_family_name,
+ ColumnFamilyHandle** handle) {
  return Status::NotSupported("");
  }
  Status DB::CreateColumnFamilies(
- const ColumnFamilyOptions& /*cf_options*/,
- const std::vector<std::string>& /*column_family_names*/,
- std::vector<ColumnFamilyHandle*>* /*handles*/) {
+ const ColumnFamilyOptions& cf_options,
+ const std::vector<std::string>& column_family_names,
+ std::vector<ColumnFamilyHandle*>* handles) {
  return Status::NotSupported("");
  }
  Status DB::CreateColumnFamilies(
- const std::vector<ColumnFamilyDescriptor>& /*column_families*/,
- std::vector<ColumnFamilyHandle*>* /*handles*/) {
+ const std::vector<ColumnFamilyDescriptor>& column_families,
+ std::vector<ColumnFamilyHandle*>* handles) {
  return Status::NotSupported("");
  }
- Status DB::DropColumnFamily(ColumnFamilyHandle* /*column_family*/) {
+ Status DB::DropColumnFamily(ColumnFamilyHandle* column_family) {
  return Status::NotSupported("");
  }
  Status DB::DropColumnFamilies(
- const std::vector<ColumnFamilyHandle*>& /*column_families*/) {
+ const std::vector<ColumnFamilyHandle*>& column_families) {
  return Status::NotSupported("");
  }

@@ -779,7 +779,7 @@ int DBImpl::NumberLevels(ColumnFamilyHandle* column_family) {
  return cfh->cfd()->NumberLevels();
  }
- int DBImpl::MaxMemCompactionLevel(ColumnFamilyHandle* /*column_family*/) {
+ int DBImpl::MaxMemCompactionLevel(ColumnFamilyHandle* column_family) {
  return 0;
  }

@@ -105,7 +105,7 @@ Status DBImplReadOnly::NewIterators(
  }
  Status DB::OpenForReadOnly(const Options& options, const std::string& dbname,
- DB** dbptr, bool /*error_if_log_file_exist*/) {
+ DB** dbptr, bool error_if_log_file_exist) {
  *dbptr = nullptr;
  // Try to first open DB as fully compacted DB

@@ -36,47 +36,46 @@ class DBImplReadOnly : public DBImpl {
  std::vector<Iterator*>* iterators) override;
  using DBImpl::Put;
- virtual Status Put(const WriteOptions& /*options*/,
- ColumnFamilyHandle* /*column_family*/,
- const Slice& /*key*/, const Slice& /*value*/) override {
+ virtual Status Put(const WriteOptions& options,
+ ColumnFamilyHandle* column_family, const Slice& key,
+ const Slice& value) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
  using DBImpl::Merge;
- virtual Status Merge(const WriteOptions& /*options*/,
- ColumnFamilyHandle* /*column_family*/,
- const Slice& /*key*/, const Slice& /*value*/) override {
+ virtual Status Merge(const WriteOptions& options,
+ ColumnFamilyHandle* column_family, const Slice& key,
+ const Slice& value) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
  using DBImpl::Delete;
- virtual Status Delete(const WriteOptions& /*options*/,
- ColumnFamilyHandle* /*column_family*/,
- const Slice& /*key*/) override {
+ virtual Status Delete(const WriteOptions& options,
+ ColumnFamilyHandle* column_family,
+ const Slice& key) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
  using DBImpl::SingleDelete;
- virtual Status SingleDelete(const WriteOptions& /*options*/,
- ColumnFamilyHandle* /*column_family*/,
- const Slice& /*key*/) override {
+ virtual Status SingleDelete(const WriteOptions& options,
+ ColumnFamilyHandle* column_family,
+ const Slice& key) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
- virtual Status Write(const WriteOptions& /*options*/,
- WriteBatch* /*updates*/) override {
+ virtual Status Write(const WriteOptions& options,
+ WriteBatch* updates) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
  using DBImpl::CompactRange;
- virtual Status CompactRange(const CompactRangeOptions& /*options*/,
- ColumnFamilyHandle* /*column_family*/,
- const Slice* /*begin*/,
- const Slice* /*end*/) override {
+ virtual Status CompactRange(const CompactRangeOptions& options,
+ ColumnFamilyHandle* column_family,
+ const Slice* begin, const Slice* end) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
  using DBImpl::CompactFiles;
  virtual Status CompactFiles(
- const CompactionOptions& /*compact_options*/,
- ColumnFamilyHandle* /*column_family*/,
- const std::vector<std::string>& /*input_file_names*/,
- const int /*output_level*/, const int /*output_path_id*/ = -1) override {
+ const CompactionOptions& compact_options,
+ ColumnFamilyHandle* column_family,
+ const std::vector<std::string>& input_file_names,
+ const int output_level, const int output_path_id = -1) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
@@ -84,18 +83,18 @@ class DBImplReadOnly : public DBImpl {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
- virtual Status EnableFileDeletions(bool /*force*/) override {
+ virtual Status EnableFileDeletions(bool force) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
  virtual Status GetLiveFiles(std::vector<std::string>&,
- uint64_t* /*manifest_file_size*/,
- bool /*flush_memtable*/ = true) override {
+ uint64_t* manifest_file_size,
+ bool flush_memtable = true) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
  using DBImpl::Flush;
- virtual Status Flush(const FlushOptions& /*options*/,
- ColumnFamilyHandle* /*column_family*/) override {
+ virtual Status Flush(const FlushOptions& options,
+ ColumnFamilyHandle* column_family) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
@@ -106,9 +105,9 @@ class DBImplReadOnly : public DBImpl {
  using DB::IngestExternalFile;
  virtual Status IngestExternalFile(
- ColumnFamilyHandle* /*column_family*/,
- const std::vector<std::string>& /*external_files*/,
- const IngestExternalFileOptions& /*ingestion_options*/) override {
+ ColumnFamilyHandle* column_family,
+ const std::vector<std::string>& external_files,
+ const IngestExternalFileOptions& ingestion_options) override {
  return Status::NotSupported("Not supported operation in read only mode.");
  }
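Every DBImplReadOnly override above follows the same stub pattern: the write paths are compiled in, but each one fails fast with a NotSupported status, so only the parameter names change in this revert. A compact sketch of that pattern under assumed, simplified types (Status and Store here are hypothetical stand-ins, not the RocksDB classes):

    #include <iostream>
    #include <string>

    // Minimal stand-in for rocksdb::Status.
    struct Status {
      bool ok;
      std::string msg;
      static Status OK() { return {true, ""}; }
      static Status NotSupported(const std::string& m) { return {false, m}; }
    };

    class Store {
     public:
      virtual ~Store() = default;
      virtual Status Get(const std::string& key, std::string* value) = 0;
      virtual Status Put(const std::string& key, const std::string& value) = 0;
    };

    class ReadOnlyStore : public Store {
     public:
      Status Get(const std::string& /*key*/, std::string* value) override {
        *value = "stub";
        return Status::OK();
      }
      // Every mutating entry point fails fast, mirroring DBImplReadOnly.
      Status Put(const std::string& /*key*/,
                 const std::string& /*value*/) override {
        return Status::NotSupported("Not supported operation in read only mode.");
      }
    };

    int main() {
      ReadOnlyStore store;
      std::string v;
      std::cout << store.Get("k", &v).ok << " " << store.Put("k", "v").msg
                << std::endl;
      return 0;
    }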

@@ -1002,7 +1002,7 @@ Status DBImpl::ScheduleFlushes(WriteContext* context) {
  }
  #ifndef ROCKSDB_LITE
- void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* /*cfd*/,
+ void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* cfd,
  const MemTableInfo& mem_table_info) {
  if (immutable_db_options_.listeners.size() == 0U) {
  return;

@@ -2459,7 +2459,7 @@ TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace1) {
  // and before an SeekToLast() is called.
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "MergeIterator::Prev:BeforeSeekToLast",
- [&](void* /*arg*/) { internal_iter2_->Add("z", kTypeValue, "7", 12u); });
+ [&](void* arg) { internal_iter2_->Add("z", kTypeValue, "7", 12u); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  db_iter_->Prev();
@@ -2494,7 +2494,7 @@ TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace2) {
  // mem table after MergeIterator::Prev() realized the mem tableiterator is at
  // its end and before an SeekToLast() is called.
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "MergeIterator::Prev:BeforeSeekToLast", [&](void* /*arg*/) {
+ "MergeIterator::Prev:BeforeSeekToLast", [&](void* arg) {
  internal_iter2_->Add("z", kTypeValue, "7", 12u);
  internal_iter2_->Add("z", kTypeValue, "7", 11u);
  });
@@ -2532,7 +2532,7 @@ TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace3) {
  // mem table after MergeIterator::Prev() realized the mem table iterator is at
  // its end and before an SeekToLast() is called.
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "MergeIterator::Prev:BeforeSeekToLast", [&](void* /*arg*/) {
+ "MergeIterator::Prev:BeforeSeekToLast", [&](void* arg) {
  internal_iter2_->Add("z", kTypeValue, "7", 16u, true);
  internal_iter2_->Add("z", kTypeValue, "7", 15u, true);
  internal_iter2_->Add("z", kTypeValue, "7", 14u, true);

@@ -24,7 +24,7 @@ class DBIteratorTest : public DBTestBase {
  class FlushBlockEveryKeyPolicy : public FlushBlockPolicy {
  public:
- virtual bool Update(const Slice& /*key*/, const Slice& /*value*/) override {
+ virtual bool Update(const Slice& key, const Slice& value) override {
  if (!start_) {
  start_ = true;
  return false;
@@ -44,8 +44,8 @@ class FlushBlockEveryKeyPolicyFactory : public FlushBlockPolicyFactory {
  }
  FlushBlockPolicy* NewFlushBlockPolicy(
- const BlockBasedTableOptions& /*table_options*/,
- const BlockBuilder& /*data_block_builder*/) const override {
+ const BlockBasedTableOptions& table_options,
+ const BlockBuilder& data_block_builder) const override {
  return new FlushBlockEveryKeyPolicy;
  }
  };

@@ -121,7 +121,7 @@ class TestPrefixExtractor : public SliceTransform {
  return separator(key) != nullptr;
  }
- virtual bool InRange(const Slice& /*key*/) const override { return false; }
+ virtual bool InRange(const Slice& key) const override { return false; }
  private:
  const char* separator(const Slice& key) const {

@@ -985,9 +985,8 @@ class CountingUserTblPropCollector : public TablePropertiesCollector {
  return Status::OK();
  }
- Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
- EntryType /*type*/, SequenceNumber /*seq*/,
- uint64_t /*file_size*/) override {
+ Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
+ SequenceNumber seq, uint64_t file_size) override {
  ++count_;
  return Status::OK();
  }
@@ -1028,9 +1027,8 @@ class CountingDeleteTabPropCollector : public TablePropertiesCollector {
  public:
  const char* Name() const override { return "CountingDeleteTabPropCollector"; }
- Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
- EntryType type, SequenceNumber /*seq*/,
- uint64_t /*file_size*/) override {
+ Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
+ SequenceNumber seq, uint64_t file_size) override {
  if (type == kEntryDelete) {
  num_deletes_++;
  }
@@ -1057,7 +1055,7 @@ class CountingDeleteTabPropCollectorFactory
  : public TablePropertiesCollectorFactory {
  public:
  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
- TablePropertiesCollectorFactory::Context /*context*/) override {
+ TablePropertiesCollectorFactory::Context context) override {
  return new CountingDeleteTabPropCollector();
  }
  const char* Name() const override {

@@ -231,12 +231,11 @@ TEST_F(DBSSTTest, DBWithSstFileManager) {
  int files_deleted = 0;
  int files_moved = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "SstFileManagerImpl::OnAddFile", [&](void* /*arg*/) { files_added++; });
+ "SstFileManagerImpl::OnAddFile", [&](void* arg) { files_added++; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "SstFileManagerImpl::OnDeleteFile",
- [&](void* /*arg*/) { files_deleted++; });
+ "SstFileManagerImpl::OnDeleteFile", [&](void* arg) { files_deleted++; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "SstFileManagerImpl::OnMoveFile", [&](void* /*arg*/) { files_moved++; });
+ "SstFileManagerImpl::OnMoveFile", [&](void* arg) { files_moved++; });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  Options options = CurrentOptions();
@@ -386,7 +385,7 @@ TEST_F(DBSSTTest, DeleteSchedulerMultipleDBPaths) {
  int bg_delete_file = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DeleteScheduler::DeleteTrashFile:DeleteFile",
- [&](void* /*arg*/) { bg_delete_file++; });
+ [&](void* arg) { bg_delete_file++; });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  Options options = CurrentOptions();
@@ -454,7 +453,7 @@ TEST_F(DBSSTTest, DestroyDBWithRateLimitedDelete) {
  int bg_delete_file = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DeleteScheduler::DeleteTrashFile:DeleteFile",
- [&](void* /*arg*/) { bg_delete_file++; });
+ [&](void* arg) { bg_delete_file++; });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  Status s;
@@ -547,7 +546,7 @@ TEST_F(DBSSTTest, DBWithMaxSpaceAllowedRandomized) {
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "CompactionJob::FinishCompactionOutputFile:MaxAllowedSpaceReached",
- [&](void* /*arg*/) {
+ [&](void* arg) {
  bg_error_set = true;
  GetAllSSTFiles(&total_sst_files_size);
  reached_max_space_on_compaction++;

@@ -157,10 +157,10 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
  });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "ForwardIterator::RenewIterators:Null",
- [&](void* /*arg*/) { file_iters_renewed_null = true; });
+ [&](void* arg) { file_iters_renewed_null = true; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "ForwardIterator::RenewIterators:Copy",
- [&](void* /*arg*/) { file_iters_renewed_copy = true; });
+ [&](void* arg) { file_iters_renewed_copy = true; });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  const int num_records = 1000;
  for (int i = 1; i < num_records; ++i) {
@@ -415,7 +415,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) {
  int immutable_seeks = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "ForwardIterator::SeekInternal:Immutable",
- [&](void* /*arg*/) { ++immutable_seeks; });
+ [&](void* arg) { ++immutable_seeks; });
  // Seek to 13. This should not require any immutable seeks.
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

@@ -231,11 +231,11 @@ TEST_F(DBTest, SkipDelay) {
  std::atomic<int> sleep_count(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::DelayWrite:Sleep",
- [&](void* /*arg*/) { sleep_count.fetch_add(1); });
+ [&](void* arg) { sleep_count.fetch_add(1); });
  std::atomic<int> wait_count(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::DelayWrite:Wait",
- [&](void* /*arg*/) { wait_count.fetch_add(1); });
+ [&](void* arg) { wait_count.fetch_add(1); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  WriteOptions wo;
@@ -715,9 +715,9 @@ TEST_F(DBTest, FlushSchedule) {
  namespace {
  class KeepFilter : public CompactionFilter {
  public:
- virtual bool Filter(int /*level*/, const Slice& /*key*/,
- const Slice& /*value*/, std::string* /*new_value*/,
- bool* /*value_changed*/) const override {
+ virtual bool Filter(int level, const Slice& key, const Slice& value,
+ std::string* new_value,
+ bool* value_changed) const override {
  return false;
  }
@@ -747,9 +747,9 @@ class KeepFilterFactory : public CompactionFilterFactory {
  class DelayFilter : public CompactionFilter {
  public:
  explicit DelayFilter(DBTestBase* d) : db_test(d) {}
- virtual bool Filter(int /*level*/, const Slice& /*key*/,
- const Slice& /*value*/, std::string* /*new_value*/,
- bool* /*value_changed*/) const override {
+ virtual bool Filter(int level, const Slice& key, const Slice& value,
+ std::string* new_value,
+ bool* value_changed) const override {
  db_test->env_->addon_time_.fetch_add(1000);
  return true;
  }
@@ -764,7 +764,7 @@ class DelayFilterFactory : public CompactionFilterFactory {
  public:
  explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
- const CompactionFilter::Context& /*context*/) override {
+ const CompactionFilter::Context& context) override {
  return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
  }
@@ -2210,17 +2210,17 @@ class ModelDB : public DB {
  return Write(o, &batch);
  }
  using DB::Get;
- virtual Status Get(const ReadOptions& /*options*/, ColumnFamilyHandle* /*cf*/,
- const Slice& key, PinnableSlice* /*value*/) override {
+ virtual Status Get(const ReadOptions& options, ColumnFamilyHandle* cf,
+ const Slice& key, PinnableSlice* value) override {
  return Status::NotSupported(key);
  }
  using DB::MultiGet;
  virtual std::vector<Status> MultiGet(
- const ReadOptions& /*options*/,
- const std::vector<ColumnFamilyHandle*>& /*column_family*/,
+ const ReadOptions& options,
+ const std::vector<ColumnFamilyHandle*>& column_family,
  const std::vector<Slice>& keys,
- std::vector<std::string>* /*values*/) override {
+ std::vector<std::string>* values) override {
  std::vector<Status> s(keys.size(),
  Status::NotSupported("Not implemented."));
  return s;
@@ -2229,30 +2229,30 @@ class ModelDB : public DB {
  #ifndef ROCKSDB_LITE
  using DB::IngestExternalFile;
  virtual Status IngestExternalFile(
- ColumnFamilyHandle* /*column_family*/,
- const std::vector<std::string>& /*external_files*/,
- const IngestExternalFileOptions& /*options*/) override {
+ ColumnFamilyHandle* column_family,
+ const std::vector<std::string>& external_files,
+ const IngestExternalFileOptions& options) override {
  return Status::NotSupported("Not implemented.");
  }
  using DB::GetPropertiesOfAllTables;
  virtual Status GetPropertiesOfAllTables(
- ColumnFamilyHandle* /*column_family*/,
- TablePropertiesCollection* /*props*/) override {
+ ColumnFamilyHandle* column_family,
+ TablePropertiesCollection* props) override {
  return Status();
  }
  virtual Status GetPropertiesOfTablesInRange(
- ColumnFamilyHandle* /*column_family*/, const Range* /*range*/,
- std::size_t /*n*/, TablePropertiesCollection* /*props*/) override {
+ ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
+ TablePropertiesCollection* props) override {
  return Status();
  }
  #endif // ROCKSDB_LITE
  using DB::KeyMayExist;
- virtual bool KeyMayExist(const ReadOptions& /*options*/,
- ColumnFamilyHandle* /*column_family*/,
- const Slice& /*key*/, std::string* /*value*/,
+ virtual bool KeyMayExist(const ReadOptions& options,
+ ColumnFamilyHandle* column_family, const Slice& key,
+ std::string* value,
  bool* value_found = nullptr) override {
  if (value_found != nullptr) {
  *value_found = false;
@@ -2260,9 +2260,8 @@ class ModelDB : public DB {
  return true; // Not Supported directly
  }
  using DB::NewIterator;
- virtual Iterator* NewIterator(
- const ReadOptions& options,
- ColumnFamilyHandle* /*column_family*/) override {
+ virtual Iterator* NewIterator(const ReadOptions& options,
+ ColumnFamilyHandle* column_family) override {
  if (options.snapshot == nullptr) {
  KVMap* saved = new KVMap;
  *saved = map_;
@@ -2274,9 +2273,9 @@ class ModelDB : public DB {
  }
  }
  virtual Status NewIterators(
- const ReadOptions& /*options*/,
- const std::vector<ColumnFamilyHandle*>& /*column_family*/,
- std::vector<Iterator*>* /*iterators*/) override {
+ const ReadOptions& options,
+ const std::vector<ColumnFamilyHandle*>& column_family,
+ std::vector<Iterator*>* iterators) override {
  return Status::NotSupported("Not supported yet");
  }
  virtual const Snapshot* GetSnapshot() override {
@@ -2289,7 +2288,7 @@ class ModelDB : public DB {
  delete reinterpret_cast<const ModelSnapshot*>(snapshot);
  }
- virtual Status Write(const WriteOptions& /*options*/,
+ virtual Status Write(const WriteOptions& options,
  WriteBatch* batch) override {
  class Handler : public WriteBatch::Handler {
  public:
@@ -2297,8 +2296,7 @@ class ModelDB : public DB {
  virtual void Put(const Slice& key, const Slice& value) override {
  (*map_)[key.ToString()] = value.ToString();
  }
- virtual void Merge(const Slice& /*key*/,
- const Slice& /*value*/) override {
+ virtual void Merge(const Slice& key, const Slice& value) override {
  // ignore merge for now
  // (*map_)[key.ToString()] = value.ToString();
  }
@@ -2312,65 +2310,62 @@ class ModelDB : public DB {
  }
  using DB::GetProperty;
- virtual bool GetProperty(ColumnFamilyHandle* /*column_family*/,
- const Slice& /*property*/,
- std::string* /*value*/) override {
+ virtual bool GetProperty(ColumnFamilyHandle* column_family,
+ const Slice& property, std::string* value) override {
  return false;
  }
  using DB::GetIntProperty;
- virtual bool GetIntProperty(ColumnFamilyHandle* /*column_family*/,
- const Slice& /*property*/,
- uint64_t* /*value*/) override {
+ virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
+ const Slice& property, uint64_t* value) override {
  return false;
  }
  using DB::GetMapProperty;
- virtual bool GetMapProperty(
- ColumnFamilyHandle* /*column_family*/, const Slice& /*property*/,
- std::map<std::string, double>* /*value*/) override {
+ virtual bool GetMapProperty(ColumnFamilyHandle* column_family,
+ const Slice& property,
+ std::map<std::string, double>* value) override {
  return false;
  }
  using DB::GetAggregatedIntProperty;
- virtual bool GetAggregatedIntProperty(const Slice& /*property*/,
- uint64_t* /*value*/) override {
+ virtual bool GetAggregatedIntProperty(const Slice& property,
+ uint64_t* value) override {
  return false;
  }
  using DB::GetApproximateSizes;
- virtual void GetApproximateSizes(ColumnFamilyHandle* /*column_family*/,
- const Range* /*range*/, int n,
- uint64_t* sizes,
- uint8_t /*include_flags*/
+ virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
+ const Range* range, int n, uint64_t* sizes,
+ uint8_t include_flags
  = INCLUDE_FILES) override {
  for (int i = 0; i < n; i++) {
  sizes[i] = 0;
  }
  }
  using DB::GetApproximateMemTableStats;
- virtual void GetApproximateMemTableStats(
- ColumnFamilyHandle* /*column_family*/, const Range& /*range*/,
- uint64_t* const count, uint64_t* const size) override {
+ virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
+ const Range& range,
+ uint64_t* const count,
+ uint64_t* const size) override {
  *count = 0;
  *size = 0;
  }
  using DB::CompactRange;
- virtual Status CompactRange(const CompactRangeOptions& /*options*/,
- ColumnFamilyHandle* /*column_family*/,
- const Slice* /*start*/,
- const Slice* /*end*/) override {
+ virtual Status CompactRange(const CompactRangeOptions& options,
+ ColumnFamilyHandle* column_family,
+ const Slice* start, const Slice* end) override {
  return Status::NotSupported("Not supported operation.");
  }
  virtual Status SetDBOptions(
- const std::unordered_map<std::string, std::string>& /*new_options*/)
+ const std::unordered_map<std::string, std::string>& new_options)
  override {
  return Status::NotSupported("Not supported operation.");
  }
  using DB::CompactFiles;
- virtual Status CompactFiles(
- const CompactionOptions& /*compact_options*/,
- ColumnFamilyHandle* /*column_family*/,
- const std::vector<std::string>& /*input_file_names*/,
- const int /*output_level*/, const int /*output_path_id*/ = -1) override {
+ virtual Status CompactFiles(const CompactionOptions& compact_options,
+ ColumnFamilyHandle* column_family,
+ const std::vector<std::string>& input_file_names,
+ const int output_level,
+ const int output_path_id = -1) override {
  return Status::NotSupported("Not supported operation.");
  }
@@ -2383,25 +2378,24 @@ class ModelDB : public DB {
  }
  Status EnableAutoCompaction(
- const std::vector<ColumnFamilyHandle*>& /*column_family_handles*/)
- override {
+ const std::vector<ColumnFamilyHandle*>& column_family_handles) override {
  return Status::NotSupported("Not supported operation.");
  }
  using DB::NumberLevels;
- virtual int NumberLevels(ColumnFamilyHandle* /*column_family*/) override {
+ virtual int NumberLevels(ColumnFamilyHandle* column_family) override {
  return 1;
  }
  using DB::MaxMemCompactionLevel;
  virtual int MaxMemCompactionLevel(
- ColumnFamilyHandle* /*column_family*/) override {
+ ColumnFamilyHandle* column_family) override {
  return 1;
  }
  using DB::Level0StopWriteTrigger;
  virtual int Level0StopWriteTrigger(
- ColumnFamilyHandle* /*column_family*/) override {
+ ColumnFamilyHandle* column_family) override {
  return -1;
  }
@@ -2410,8 +2404,7 @@ class ModelDB : public DB {
  virtual Env* GetEnv() const override { return nullptr; }
  using DB::GetOptions;
- virtual Options GetOptions(
- ColumnFamilyHandle* /*column_family*/) const override {
+ virtual Options GetOptions(ColumnFamilyHandle* column_family) const override {
  return options_;
  }
@@ -2419,8 +2412,8 @@ class ModelDB : public DB {
  virtual DBOptions GetDBOptions() const override { return options_; }
  using DB::Flush;
- virtual Status Flush(const rocksdb::FlushOptions& /*options*/,
- ColumnFamilyHandle* /*column_family*/) override {
+ virtual Status Flush(const rocksdb::FlushOptions& options,
+ ColumnFamilyHandle* column_family) override {
  Status ret;
  return ret;
  }
@@ -2430,35 +2423,33 @@ class ModelDB : public DB {
  #ifndef ROCKSDB_LITE
  virtual Status DisableFileDeletions() override { return Status::OK(); }
- virtual Status EnableFileDeletions(bool /*force*/) override {
+ virtual Status EnableFileDeletions(bool force) override {
  return Status::OK();
  }
- virtual Status GetLiveFiles(std::vector<std::string>&, uint64_t* /*size*/,
- bool /*flush_memtable*/ = true) override {
+ virtual Status GetLiveFiles(std::vector<std::string>&, uint64_t* size,
+ bool flush_memtable = true) override {
  return Status::OK();
  }
- virtual Status GetSortedWalFiles(VectorLogPtr& /*files*/) override {
+ virtual Status GetSortedWalFiles(VectorLogPtr& files) override {
  return Status::OK();
  }
- virtual Status DeleteFile(std::string /*name*/) override {
- return Status::OK();
- }
+ virtual Status DeleteFile(std::string name) override { return Status::OK(); }
  virtual Status GetUpdatesSince(
  rocksdb::SequenceNumber, unique_ptr<rocksdb::TransactionLogIterator>*,
- const TransactionLogIterator::ReadOptions& /*read_options*/ =
+ const TransactionLogIterator::ReadOptions& read_options =
  TransactionLogIterator::ReadOptions()) override {
  return Status::NotSupported("Not supported in Model DB");
  }
  virtual void GetColumnFamilyMetaData(
- ColumnFamilyHandle* /*column_family*/,
- ColumnFamilyMetaData* /*metadata*/) override {}
+ ColumnFamilyHandle* column_family,
+ ColumnFamilyMetaData* metadata) override {}
  #endif // ROCKSDB_LITE
- virtual Status GetDbIdentity(std::string& /*identity*/) const override {
+ virtual Status GetDbIdentity(std::string& identity) const override {
  return Status::OK();
  }
@@ -3331,7 +3322,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::DelayWrite:Wait",
- [&](void* /*arg*/) { sleeping_task_low.WakeUp(); });
+ [&](void* arg) { sleeping_task_low.WakeUp(); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  while (!sleeping_task_low.WokenUp() && count < 256) {
@@ -4539,7 +4530,7 @@ class DelayedMergeOperator : public MergeOperator {
  public:
  explicit DelayedMergeOperator(DBTest* d) : db_test_(d) {}
- virtual bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
+ virtual bool FullMergeV2(const MergeOperationInput& merge_in,
  MergeOperationOutput* merge_out) const override {
  db_test_->env_->addon_time_.fetch_add(1000);
  merge_out->new_value = "";
@@ -4890,7 +4881,7 @@ TEST_F(DBTest, AutomaticConflictsWithManualCompaction) {
  std::atomic<int> callback_count(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::BackgroundCompaction()::Conflict",
- [&](void* /*arg*/) { callback_count.fetch_add(1); });
+ [&](void* arg) { callback_count.fetch_add(1); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  CompactRangeOptions croptions;
  croptions.exclusive_manual_compaction = false;
@@ -5089,7 +5080,7 @@ TEST_F(DBTest, HardLimit) {
  std::atomic<int> callback_count(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack("DBImpl::DelayWrite:Wait",
- [&](void* /*arg*/) {
+ [&](void* arg) {
  callback_count.fetch_add(1);
  sleeping_task_low.WakeUp();
  });
@@ -5182,7 +5173,7 @@ TEST_F(DBTest, SoftLimit) {
  // Only allow one compactin going through.
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "BackgroundCallCompaction:0", [&](void* /*arg*/) {
+ "BackgroundCallCompaction:0", [&](void* arg) {
  // Schedule a sleeping task.
  sleeping_task_low.Reset();
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,

@@ -497,9 +497,9 @@ TEST_F(DBTest2, WalFilterTest) {
  apply_option_at_record_index_(apply_option_for_record_index),
  current_record_index_(0) {}
- virtual WalProcessingOption LogRecord(
- const WriteBatch& /*batch*/, WriteBatch* /*new_batch*/,
- bool* /*batch_changed*/) const override {
+ virtual WalProcessingOption LogRecord(const WriteBatch& batch,
+ WriteBatch* new_batch,
+ bool* batch_changed) const override {
  WalFilter::WalProcessingOption option_to_return;
  if (current_record_index_ == apply_option_at_record_index_) {
@@ -873,10 +873,11 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) {
  cf_name_id_map_ = cf_name_id_map;
  }
- virtual WalProcessingOption LogRecordFound(
- unsigned long long log_number, const std::string& /*log_file_name*/,
- const WriteBatch& batch, WriteBatch* /*new_batch*/,
- bool* /*batch_changed*/) override {
+ virtual WalProcessingOption LogRecordFound(unsigned long long log_number,
+ const std::string& log_file_name,
+ const WriteBatch& batch,
+ WriteBatch* new_batch,
+ bool* batch_changed) override {
  class LogRecordBatchHandler : public WriteBatch::Handler {
  private:
  const std::map<uint32_t, uint64_t> & cf_log_number_map_;
@@ -1211,7 +1212,7 @@ class CompactionStallTestListener : public EventListener {
  public:
  CompactionStallTestListener() : compacted_files_cnt_(0) {}
- void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) override {
+ void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override {
  ASSERT_EQ(ci.cf_name, "default");
  ASSERT_EQ(ci.base_input_level, 0);
  ASSERT_EQ(ci.compaction_reason, CompactionReason::kLevelL0FilesNum);
@@ -1672,7 +1673,7 @@ TEST_F(DBTest2, SyncPointMarker) {
  std::atomic<int> sync_point_called(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBTest2::MarkedPoint",
- [&](void* /*arg*/) { sync_point_called.fetch_add(1); });
+ [&](void* arg) { sync_point_called.fetch_add(1); });
  // The first dependency enforces Marker can be loaded before MarkedPoint.
  // The second checks that thread 1's MarkedPoint should be disabled here.
@@ -1941,7 +1942,7 @@ TEST_F(DBTest2, AutomaticCompactionOverlapManualCompaction) {
  // can fit in L2, these 2 files will be moved to L2 and overlap with
  // the running compaction and break the LSM consistency.
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "CompactionJob::Run():Start", [&](void* /*arg*/) {
+ "CompactionJob::Run():Start", [&](void* arg) {
  ASSERT_OK(
  dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"},
  {"max_bytes_for_level_base", "1"}}));
@@ -2007,7 +2008,7 @@ TEST_F(DBTest2, ManualCompactionOverlapManualCompaction) {
  // the running compaction and break the LSM consistency.
  std::atomic<bool> flag(false);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "CompactionJob::Run():Start", [&](void* /*arg*/) {
+ "CompactionJob::Run():Start", [&](void* arg) {
  if (flag.exchange(true)) {
  // We want to make sure to call this callback only once
  return;

@@ -1127,18 +1127,17 @@ UpdateStatus DBTestBase::updateInPlaceSmallerVarintSize(char* prevValue,
  }
  }
- UpdateStatus DBTestBase::updateInPlaceLargerSize(char* /*prevValue*/,
- uint32_t* /*prevSize*/,
+ UpdateStatus DBTestBase::updateInPlaceLargerSize(char* prevValue,
+ uint32_t* prevSize,
  Slice delta,
  std::string* newValue) {
  *newValue = std::string(delta.size(), 'c');
  return UpdateStatus::UPDATED;
  }
- UpdateStatus DBTestBase::updateInPlaceNoAction(char* /*prevValue*/,
- uint32_t* /*prevSize*/,
- Slice /*delta*/,
- std::string* /*newValue*/) {
+ UpdateStatus DBTestBase::updateInPlaceNoAction(char* prevValue,
+ uint32_t* prevSize, Slice delta,
+ std::string* newValue) {
  return UpdateStatus::UPDATE_FAILED;
  }

@@ -187,7 +187,7 @@ class SpecialSkipListFactory : public MemTableRepFactory {
  using MemTableRepFactory::CreateMemTableRep;
  virtual MemTableRep* CreateMemTableRep(
  const MemTableRep::KeyComparator& compare, Allocator* allocator,
- const SliceTransform* transform, Logger* /*logger*/) override {
+ const SliceTransform* transform, Logger* logger) override {
  return new SpecialMemTableRep(
  allocator, factory_.CreateMemTableRep(compare, allocator, transform, 0),
  num_entries_flush_);

@@ -56,9 +56,9 @@ void VerifyCompactionResult(
  class KeepFilter : public CompactionFilter {
  public:
- virtual bool Filter(int /*level*/, const Slice& /*key*/,
- const Slice& /*value*/, std::string* /*new_value*/,
- bool* /*value_changed*/) const override {
+ virtual bool Filter(int level, const Slice& key, const Slice& value,
+ std::string* new_value, bool* value_changed) const
+ override {
  return false;
  }
@@ -88,9 +88,9 @@ class KeepFilterFactory : public CompactionFilterFactory {
  class DelayFilter : public CompactionFilter {
  public:
  explicit DelayFilter(DBTestBase* d) : db_test(d) {}
- virtual bool Filter(int /*level*/, const Slice& /*key*/,
- const Slice& /*value*/, std::string* /*new_value*/,
- bool* /*value_changed*/) const override {
+ virtual bool Filter(int level, const Slice& key, const Slice& value,
+ std::string* new_value,
+ bool* value_changed) const override {
  db_test->env_->addon_time_.fetch_add(1000);
  return true;
  }
@@ -105,7 +105,7 @@ class DelayFilterFactory : public CompactionFilterFactory {
  public:
  explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
- const CompactionFilter::Context& /*context*/) override {
+ const CompactionFilter::Context& context) override {
  return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
  }
@@ -522,7 +522,7 @@ TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionTrivialMove) {
  int32_t non_trivial_move = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::BackgroundCompaction:TrivialMove",
- [&](void* /*arg*/) { trivial_move++; });
+ [&](void* arg) { trivial_move++; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
  non_trivial_move++;
@@ -593,23 +593,23 @@ TEST_P(DBTestUniversalCompactionParallel, UniversalCompactionParallel) {
  // Delay every compaction so multiple compactions will happen.
  std::atomic<int> num_compactions_running(0);
  std::atomic<bool> has_parallel(false);
- rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "CompactionJob::Run():Start", [&](void* /*arg*/) {
+ rocksdb::SyncPoint::GetInstance()->SetCallBack("CompactionJob::Run():Start",
+ [&](void* arg) {
  if (num_compactions_running.fetch_add(1) > 0) {
  has_parallel.store(true);
  return;
  }
  for (int nwait = 0; nwait < 20000; nwait++) {
  if (has_parallel.load() || num_compactions_running.load() > 1) {
  has_parallel.store(true);
  break;
  }
  env_->SleepForMicroseconds(1000);
  }
  });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "CompactionJob::Run():End",
- [&](void* /*arg*/) { num_compactions_running.fetch_add(-1); });
+ [&](void* arg) { num_compactions_running.fetch_add(-1); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  options = CurrentOptions(options);
@@ -984,7 +984,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest1) {
  int32_t non_trivial_move = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::BackgroundCompaction:TrivialMove",
- [&](void* /*arg*/) { trivial_move++; });
+ [&](void* arg) { trivial_move++; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
  non_trivial_move++;
@@ -1030,7 +1030,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest2) {
  int32_t trivial_move = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::BackgroundCompaction:TrivialMove",
- [&](void* /*arg*/) { trivial_move++; });
+ [&](void* arg) { trivial_move++; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
  ASSERT_TRUE(arg != nullptr);

@@ -159,7 +159,7 @@ class DeleteFileTest : public testing::Test {
  }
  // An empty job to guard all jobs are processed
- static void GuardFinish(void* /*arg*/) {
+ static void GuardFinish(void* arg) {
  TEST_SYNC_POINT("DeleteFileTest::GuardFinish");
  }
  };

@@ -395,9 +395,8 @@ class SstFileWriterCollector : public TablePropertiesCollector {
  return Status::OK();
  }
- Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
- EntryType /*type*/, SequenceNumber /*seq*/,
- uint64_t /*file_size*/) override {
+ Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
+ SequenceNumber seq, uint64_t file_size) override {
  ++count_;
  return Status::OK();
  }
@@ -417,7 +416,7 @@ class SstFileWriterCollectorFactory : public TablePropertiesCollectorFactory {
  explicit SstFileWriterCollectorFactory(std::string prefix)
  : prefix_(prefix), num_created_(0) {}
  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
- TablePropertiesCollectorFactory::Context /*context*/) override {
+ TablePropertiesCollectorFactory::Context context) override {
  num_created_++;
  return new SstFileWriterCollector(prefix_);
  }
@@ -688,7 +687,7 @@ TEST_F(ExternalSSTFileTest, PurgeObsoleteFilesBug) {
  DestroyAndReopen(options);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "DBImpl::AddFile:FileCopied", [&](void* /*arg*/) {
+ "DBImpl::AddFile:FileCopied", [&](void* arg) {
  ASSERT_OK(Put("aaa", "bbb"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("aaa", "xxx"));
@@ -1127,7 +1126,7 @@ TEST_F(ExternalSSTFileTest, PickedLevelBug) {
  std::atomic<bool> bg_compact_started(false);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::BackgroundCompaction:Start",
- [&](void* /*arg*/) { bg_compact_started.store(true); });
+ [&](void* arg) { bg_compact_started.store(true); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
@@ -1408,7 +1407,7 @@ TEST_F(ExternalSSTFileTest, AddFileTrivialMoveBug) {
  ASSERT_OK(GenerateAndAddExternalFile(options, {22, 23}, 6)); // L2
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "CompactionJob::Run():Start", [&](void* /*arg*/) {
+ "CompactionJob::Run():Start", [&](void* arg) {
  // fit in L3 but will overlap with compaction so will be added
  // to L2 but a compaction will trivially move it to L3
  // and break LSM consistency
@@ -1798,7 +1797,7 @@ TEST_F(ExternalSSTFileTest, FileWithCFInfo) {
  class TestIngestExternalFileListener : public EventListener {
  public:
- void OnExternalFileIngested(DB* /*db*/,
+ void OnExternalFileIngested(DB* db,
  const ExternalFileIngestionInfo& info) override {
  ingested_files.push_back(info);
  }

@@ -463,10 +463,10 @@ TEST_P(FaultInjectionTest, UninstalledCompaction) {
  std::atomic<bool> opened(false);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
- "DBImpl::Open:Opened", [&](void* /*arg*/) { opened.store(true); });
+ "DBImpl::Open:Opened", [&](void* arg) { opened.store(true); });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
  "DBImpl::BGWorkCompaction",
- [&](void* /*arg*/) { ASSERT_TRUE(opened.load()); });
+ [&](void* arg) { ASSERT_TRUE(opened.load()); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  ASSERT_OK(OpenDB());
  ASSERT_OK(Verify(0, kNumKeys, FaultInjectionTest::kValExpectFound));

@@ -36,10 +36,10 @@ class IntComparator : public Comparator {
  const char* Name() const override { return "IntComparator"; }
- void FindShortestSeparator(std::string* /*start*/,
- const Slice& /*limit*/) const override {}
- void FindShortSuccessor(std::string* /*key*/) const override {}
+ void FindShortestSeparator(std::string* start,
+ const Slice& limit) const override {}
+ void FindShortSuccessor(std::string* key) const override {}
  };
  class FileIndexerTest : public testing::Test {

@@ -104,7 +104,7 @@ class LevelIterator : public InternalIterator {
  file_iter_->Seek(internal_key);
  valid_ = file_iter_->Valid();
  }
- void SeekForPrev(const Slice& /*internal_key*/) override {
+ void SeekForPrev(const Slice& internal_key) override {
  status_ = Status::NotSupported("LevelIterator::SeekForPrev()");
  valid_ = false;
  }

@@ -55,7 +55,7 @@ class ForwardIterator : public InternalIterator {
  ColumnFamilyData* cfd, SuperVersion* current_sv = nullptr);
  virtual ~ForwardIterator();
- void SeekForPrev(const Slice& /*target*/) override {
+ void SeekForPrev(const Slice& target) override {
  status_ = Status::NotSupported("ForwardIterator::SeekForPrev()");
  valid_ = false;
  }

@ -435,7 +435,7 @@ bool InternalStats::GetStringProperty(const DBPropertyInfo& property_info,
} }
bool InternalStats::GetMapProperty(const DBPropertyInfo& property_info, bool InternalStats::GetMapProperty(const DBPropertyInfo& property_info,
const Slice& /*property*/, const Slice& property,
std::map<std::string, double>* value) { std::map<std::string, double>* value) {
assert(value != nullptr); assert(value != nullptr);
assert(property_info.handle_map != nullptr); assert(property_info.handle_map != nullptr);
@ -487,7 +487,7 @@ bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string* value,
return true; return true;
} }
bool InternalStats::HandleLevelStats(std::string* value, Slice /*suffix*/) { bool InternalStats::HandleLevelStats(std::string* value, Slice suffix) {
char buf[1000]; char buf[1000];
const auto* vstorage = cfd_->current()->storage_info(); const auto* vstorage = cfd_->current()->storage_info();
snprintf(buf, sizeof(buf), snprintf(buf, sizeof(buf),
@@ -519,36 +519,35 @@ bool InternalStats::HandleCFMapStats(std::map<std::string, double>* cf_stats) {
   return true;
 }
-bool InternalStats::HandleCFStats(std::string* value, Slice /*suffix*/) {
+bool InternalStats::HandleCFStats(std::string* value, Slice suffix) {
   DumpCFStats(value);
   return true;
 }
 bool InternalStats::HandleCFStatsNoFileHistogram(std::string* value,
-                                                 Slice /*suffix*/) {
+                                                 Slice suffix) {
   DumpCFStatsNoFileHistogram(value);
   return true;
 }
-bool InternalStats::HandleCFFileHistogram(std::string* value,
-                                          Slice /*suffix*/) {
+bool InternalStats::HandleCFFileHistogram(std::string* value, Slice suffix) {
   DumpCFFileHistogram(value);
   return true;
 }
-bool InternalStats::HandleDBStats(std::string* value, Slice /*suffix*/) {
+bool InternalStats::HandleDBStats(std::string* value, Slice suffix) {
   DumpDBStats(value);
   return true;
 }
-bool InternalStats::HandleSsTables(std::string* value, Slice /*suffix*/) {
+bool InternalStats::HandleSsTables(std::string* value, Slice suffix) {
   auto* current = cfd_->current();
   *value = current->DebugString(true, true);
   return true;
 }
 bool InternalStats::HandleAggregatedTableProperties(std::string* value,
-                                                    Slice /*suffix*/) {
+                                                    Slice suffix) {
   std::shared_ptr<const TableProperties> tp;
   auto s = cfd_->current()->GetAggregatedTableProperties(&tp);
   if (!s.ok()) {
@@ -575,34 +574,34 @@ bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* value,
   return true;
 }
-bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* /*db*/,
-                                               Version* /*version*/) {
+bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* db,
+                                               Version* version) {
   *value = cfd_->imm()->NumNotFlushed();
   return true;
 }
 bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value,
-                                                      DBImpl* /*db*/,
-                                                      Version* /*version*/) {
+                                                      DBImpl* db,
+                                                      Version* version) {
   *value = cfd_->imm()->NumFlushed();
   return true;
 }
-bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* /*db*/,
-                                               Version* /*version*/) {
+bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* db,
+                                               Version* version) {
   // Return number of mem tables that are ready to flush (made immutable)
   *value = (cfd_->imm()->IsFlushPending() ? 1 : 0);
   return true;
 }
 bool InternalStats::HandleNumRunningFlushes(uint64_t* value, DBImpl* db,
-                                            Version* /*version*/) {
+                                            Version* version) {
   *value = db->num_running_flushes();
   return true;
 }
-bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* /*db*/,
-                                            Version* /*version*/) {
+bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* db,
+                                            Version* version) {
   // 1 if the system already determines at least one compaction is needed.
   // 0 otherwise,
   const auto* vstorage = cfd_->current()->storage_info();
@@ -611,74 +610,70 @@ bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* /*db*/,
 }
 bool InternalStats::HandleNumRunningCompactions(uint64_t* value, DBImpl* db,
-                                                Version* /*version*/) {
+                                                Version* version) {
   *value = db->num_running_compactions_;
   return true;
 }
-bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* /*db*/,
-                                           Version* /*version*/) {
+bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* db,
+                                           Version* version) {
   // Accumulated number of errors in background flushes or compactions.
   *value = GetBackgroundErrorCount();
   return true;
 }
-bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* /*db*/,
-                                                Version* /*version*/) {
+bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* db,
+                                                Version* version) {
   // Current size of the active memtable
   *value = cfd_->mem()->ApproximateMemoryUsage();
   return true;
 }
-bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* /*db*/,
-                                              Version* /*version*/) {
+bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* db,
+                                              Version* version) {
   // Current size of the active memtable + immutable memtables
   *value = cfd_->mem()->ApproximateMemoryUsage() +
            cfd_->imm()->ApproximateUnflushedMemTablesMemoryUsage();
   return true;
 }
-bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* /*db*/,
-                                           Version* /*version*/) {
+bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* db,
+                                           Version* version) {
   *value = cfd_->mem()->ApproximateMemoryUsage() +
            cfd_->imm()->ApproximateMemoryUsage();
   return true;
 }
-bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value,
-                                                   DBImpl* /*db*/,
-                                                   Version* /*version*/) {
+bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value, DBImpl* db,
+                                                   Version* version) {
   // Current number of entires in the active memtable
   *value = cfd_->mem()->num_entries();
   return true;
 }
-bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value,
-                                                 DBImpl* /*db*/,
-                                                 Version* /*version*/) {
+bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value, DBImpl* db,
+                                                 Version* version) {
   // Current number of entries in the immutable memtables
   *value = cfd_->imm()->current()->GetTotalNumEntries();
   return true;
 }
-bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value,
-                                                   DBImpl* /*db*/,
-                                                   Version* /*version*/) {
+bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value, DBImpl* db,
+                                                   Version* version) {
   // Current number of entires in the active memtable
   *value = cfd_->mem()->num_deletes();
   return true;
 }
-bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value,
-                                                 DBImpl* /*db*/,
-                                                 Version* /*version*/) {
+bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value, DBImpl* db,
+                                                 Version* version) {
   // Current number of entries in the immutable memtables
   *value = cfd_->imm()->current()->GetTotalNumDeletes();
   return true;
 }
-bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* /*db*/,
-                                          Version* /*version*/) {
+bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* db,
+                                          Version* version) {
   // Estimate number of entries in the column family:
   // Use estimated entries in tables + total entries in memtables.
   const auto* vstorage = cfd_->current()->storage_info();
@@ -694,79 +689,77 @@ bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* /*db*/,
 }
 bool InternalStats::HandleNumSnapshots(uint64_t* value, DBImpl* db,
-                                       Version* /*version*/) {
+                                       Version* version) {
   *value = db->snapshots().count();
   return true;
 }
 bool InternalStats::HandleOldestSnapshotTime(uint64_t* value, DBImpl* db,
-                                             Version* /*version*/) {
+                                             Version* version) {
   *value = static_cast<uint64_t>(db->snapshots().GetOldestSnapshotTime());
   return true;
 }
-bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* /*db*/,
-                                          Version* /*version*/) {
+bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* db,
+                                          Version* version) {
   *value = cfd_->GetNumLiveVersions();
   return true;
 }
-bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value,
-                                                    DBImpl* /*db*/,
-                                                    Version* /*version*/) {
+bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value, DBImpl* db,
+                                                    Version* version) {
   *value = cfd_->GetSuperVersionNumber();
   return true;
 }
 bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db,
-                                                 Version* /*version*/) {
+                                                 Version* version) {
   *value = db->IsFileDeletionsEnabled();
   return true;
 }
-bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* /*db*/,
-                                    Version* /*version*/) {
+bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* db,
+                                    Version* version) {
   const auto* vstorage = cfd_->current()->storage_info();
   *value = vstorage->base_level();
   return true;
 }
-bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* /*db*/,
-                                            Version* /*version*/) {
+bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* db,
+                                            Version* version) {
   *value = cfd_->GetTotalSstFilesSize();
   return true;
 }
 bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value,
-                                                         DBImpl* /*db*/,
-                                                         Version* /*version*/) {
+                                                         DBImpl* db,
+                                                         Version* version) {
   const auto* vstorage = cfd_->current()->storage_info();
   *value = vstorage->estimated_compaction_needed_bytes();
   return true;
 }
-bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value,
-                                                  DBImpl* /*db*/,
+bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value, DBImpl* db,
                                                   Version* version) {
   *value = (version == nullptr) ? 0 : version->GetMemoryUsageByTableReaders();
   return true;
 }
-bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* /*db*/,
-                                               Version* /*version*/) {
+bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* db,
+                                               Version* version) {
   const auto* vstorage = cfd_->current()->storage_info();
   *value = vstorage->EstimateLiveDataSize();
   return true;
 }
 bool InternalStats::HandleMinLogNumberToKeep(uint64_t* value, DBImpl* db,
-                                             Version* /*version*/) {
+                                             Version* version) {
   *value = db->MinLogNumberToKeep();
   return true;
 }
 bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db,
-                                                 Version* /*version*/) {
+                                                 Version* version) {
   const WriteController& wc = db->write_controller();
   if (!wc.NeedsDelay()) {
     *value = 0;
@@ -777,7 +770,7 @@ bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db,
 }
 bool InternalStats::HandleIsWriteStopped(uint64_t* value, DBImpl* db,
-                                         Version* /*version*/) {
+                                         Version* version) {
   *value = db->write_controller().IsStopped() ? 1 : 0;
   return true;
 }

@@ -46,11 +46,11 @@ class EventListenerTest : public DBTestBase {
 };
 struct TestPropertiesCollector : public rocksdb::TablePropertiesCollector {
-  virtual rocksdb::Status AddUserKey(const rocksdb::Slice& /*key*/,
-                                     const rocksdb::Slice& /*value*/,
-                                     rocksdb::EntryType /*type*/,
-                                     rocksdb::SequenceNumber /*seq*/,
-                                     uint64_t /*file_size*/) override {
+  virtual rocksdb::Status AddUserKey(const rocksdb::Slice& key,
+                                     const rocksdb::Slice& value,
+                                     rocksdb::EntryType type,
+                                     rocksdb::SequenceNumber seq,
+                                     uint64_t file_size) override {
     return Status::OK();
   }
   virtual rocksdb::Status Finish(
@@ -73,7 +73,7 @@ struct TestPropertiesCollector : public rocksdb::TablePropertiesCollector {
 class TestPropertiesCollectorFactory : public TablePropertiesCollectorFactory {
  public:
   virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context /*context*/) override {
+      TablePropertiesCollectorFactory::Context context) override {
     return new TestPropertiesCollector;
   }
   const char* Name() const override { return "TestTablePropertiesCollector"; }
@@ -425,7 +425,7 @@ TEST_F(EventListenerTest, DisableBGCompaction) {
 class TestCompactionReasonListener : public EventListener {
  public:
-  void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) override {
+  void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override {
    std::lock_guard<std::mutex> lock(mutex_);
    compaction_reasons_.push_back(ci.compaction_reason);
  }
@@ -807,8 +807,7 @@ class BackgroundErrorListener : public EventListener {
  public:
   BackgroundErrorListener(SpecialEnv* env) : env_(env), counter_(0) {}
-  void OnBackgroundError(BackgroundErrorReason /*reason*/,
-                         Status* bg_error) override {
+  void OnBackgroundError(BackgroundErrorReason reason, Status* bg_error) override {
    if (counter_ == 0) {
      // suppress the first error and disable write-dropping such that a retry
      // can succeed.

@@ -36,7 +36,7 @@ static void GetJemallocStatus(void* mstat_arg, const char* status) {
 }
 #endif  // ROCKSDB_JEMALLOC
-void DumpMallocStats(std::string* /*stats*/) {
+void DumpMallocStats(std::string* stats) {
 #ifdef ROCKSDB_JEMALLOC
   MallocStatus mstat;
   const unsigned int kMallocStatusLen = 1000000;

@@ -46,9 +46,9 @@ class DestroyAllCompactionFilter : public CompactionFilter {
  public:
   DestroyAllCompactionFilter() {}
-  virtual bool Filter(int /*level*/, const Slice& /*key*/,
-                      const Slice& existing_value, std::string* /*new_value*/,
-                      bool* /*value_changed*/) const override {
+  virtual bool Filter(int level, const Slice& key, const Slice& existing_value,
+                      std::string* new_value,
+                      bool* value_changed) const override {
    return existing_value.ToString() == "destroy";
  }

@@ -152,7 +152,7 @@ bool MemTableListVersion::GetFromList(std::list<MemTable*>* list,
 }
 Status MemTableListVersion::AddRangeTombstoneIterators(
-    const ReadOptions& read_opts, Arena* /*arena*/,
+    const ReadOptions& read_opts, Arena* arena,
     RangeDelAggregator* range_del_agg) {
   assert(range_del_agg != nullptr);
   for (auto& m : memlist_) {
@@ -298,7 +298,7 @@ void MemTableList::PickMemtablesToFlush(autovector<MemTable*>* ret) {
 }
 void MemTableList::RollbackMemtableFlush(const autovector<MemTable*>& mems,
-                                         uint64_t /*file_number*/) {
+                                         uint64_t file_number) {
   AutoThreadOperationStageUpdater stage_updater(
       ThreadStatus::STAGE_MEMTABLE_ROLLBACK);
   assert(!mems.empty());

@@ -504,7 +504,7 @@ void runTest(int argc, const std::string& dbname, const bool use_ttl = false) {
 }
 }  // namespace
-int main(int argc, char* /*argv*/ []) {
+int main(int argc, char *argv[]) {
   //TODO: Make this test like a general rocksdb unit-test
   rocksdb::port::InstallStackTraceHandler();
   runTest(argc, test::TmpDir() + "/merge_testdb");

@@ -327,7 +327,7 @@ class TestPlainTableFactory : public PlainTableFactory {
       const TableReaderOptions& table_reader_options,
       unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
       unique_ptr<TableReader>* table,
-      bool /*prefetch_index_and_filter_in_cache*/) const override {
+      bool prefetch_index_and_filter_in_cache) const override {
    TableProperties* props = nullptr;
    auto s =
        ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber,

@@ -126,10 +126,10 @@ class TestKeyComparator : public Comparator {
     return "TestKeyComparator";
   }
-  virtual void FindShortestSeparator(std::string* /*start*/,
-                                     const Slice& /*limit*/) const override {}
-  virtual void FindShortSuccessor(std::string* /*key*/) const override {}
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const override {}
+  virtual void FindShortSuccessor(std::string* key) const override {}
 };
 namespace {

@@ -30,7 +30,7 @@ namespace rocksdb {
 namespace {
 template <class T>
-static void DeleteEntry(const Slice& /*key*/, void* value) {
+static void DeleteEntry(const Slice& key, void* value) {
   T* typed_value = reinterpret_cast<T*>(value);
   delete typed_value;
 }
@@ -41,7 +41,7 @@ static void UnrefEntry(void* arg1, void* arg2) {
   cache->Release(h);
 }
-static void DeleteTableReader(void* arg1, void* /*arg2*/) {
+static void DeleteTableReader(void* arg1, void* arg2) {
   TableReader* table_reader = reinterpret_cast<TableReader*>(arg1);
   delete table_reader;
 }

@@ -12,8 +12,8 @@
 namespace rocksdb {
 Status InternalKeyPropertiesCollector::InternalAdd(const Slice& key,
-                                                   const Slice& /*value*/,
-                                                   uint64_t /*file_size*/) {
+                                                   const Slice& value,
+                                                   uint64_t file_size) {
   ParsedInternalKey ikey;
   if (!ParseInternalKey(key, &ikey)) {
     return Status::InvalidArgument("Invalid internal key");

@@ -73,7 +73,7 @@ class InternalKeyPropertiesCollectorFactory
     : public IntTblPropCollectorFactory {
  public:
   virtual IntTblPropCollector* CreateIntTblPropCollector(
-      uint32_t /*column_family_id*/) override {
+      uint32_t column_family_id) override {
    return new InternalKeyPropertiesCollector();
  }

@@ -82,9 +82,8 @@ class RegularKeysStartWithA: public TablePropertiesCollector {
     return Status::OK();
   }
-  Status AddUserKey(const Slice& user_key, const Slice& /*value*/,
-                    EntryType type, SequenceNumber /*seq*/,
-                    uint64_t file_size) override {
+  Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
+                    SequenceNumber seq, uint64_t file_size) override {
    // simply asssume all user keys are not empty.
    if (user_key.data()[0] == 'A') {
      ++count_;
@@ -134,7 +133,7 @@ class RegularKeysStartWithABackwardCompatible
     return Status::OK();
   }
-  Status Add(const Slice& user_key, const Slice& /*value*/) override {
+  Status Add(const Slice& user_key, const Slice& value) override {
    // simply asssume all user keys are not empty.
    if (user_key.data()[0] == 'A') {
      ++count_;
@@ -162,8 +161,8 @@ class RegularKeysStartWithAInternal : public IntTblPropCollector {
     return Status::OK();
   }
-  Status InternalAdd(const Slice& user_key, const Slice& /*value*/,
-                     uint64_t /*file_size*/) override {
+  Status InternalAdd(const Slice& user_key, const Slice& value,
+                     uint64_t file_size) override {
    // simply asssume all user keys are not empty.
    if (user_key.data()[0] == 'A') {
      ++count_;
@@ -194,7 +193,7 @@ class RegularKeysStartWithAFactory : public IntTblPropCollectorFactory,
     }
   }
   virtual IntTblPropCollector* CreateIntTblPropCollector(
-      uint32_t /*column_family_id*/) override {
+      uint32_t column_family_id) override {
    return new RegularKeysStartWithAInternal();
  }
  const char* Name() const override { return "RegularKeysStartWithA"; }
@@ -204,7 +203,7 @@ class RegularKeysStartWithAFactory : public IntTblPropCollectorFactory,
 class FlushBlockEveryThreePolicy : public FlushBlockPolicy {
  public:
-  virtual bool Update(const Slice& /*key*/, const Slice& /*value*/) override {
+  virtual bool Update(const Slice& key, const Slice& value) override {
    return (++count_ % 3U == 0);
  }
@@ -221,8 +220,8 @@ class FlushBlockEveryThreePolicyFactory : public FlushBlockPolicyFactory {
   }
   FlushBlockPolicy* NewFlushBlockPolicy(
-      const BlockBasedTableOptions& /*table_options*/,
-      const BlockBuilder& /*data_block_builder*/) const override {
+      const BlockBasedTableOptions& table_options,
+      const BlockBuilder& data_block_builder) const override {
    return new FlushBlockEveryThreePolicy;
  }
 };

@@ -185,7 +185,7 @@ class VersionBuilder::Rep {
     }
   }
-  void CheckConsistencyForDeletes(VersionEdit* /*edit*/, uint64_t number,
+  void CheckConsistencyForDeletes(VersionEdit* edit, uint64_t number,
                                   int level) {
 #ifdef NDEBUG
    if (!base_vstorage_->force_consistency_checks()) {

@@ -198,7 +198,7 @@ static bool GetInternalKey(Slice* input, InternalKey* dst) {
   }
 }
-bool VersionEdit::GetLevel(Slice* input, int* level, const char** /*msg*/) {
+bool VersionEdit::GetLevel(Slice* input, int* level, const char** msg) {
   uint32_t v;
   if (GetVarint32(input, &v)) {
     *level = v;

@@ -528,7 +528,9 @@ class LevelFileIteratorState : public TwoLevelIteratorState {
         for_compaction_, nullptr /* arena */, skip_filters_, level_);
   }
-  bool PrefixMayMatch(const Slice& /*internal_key*/) override { return true; }
+  bool PrefixMayMatch(const Slice& internal_key) override {
+    return true;
+  }
   bool KeyReachedUpperBound(const Slice& internal_key) override {
     return read_options_.iterate_upper_bound != nullptr &&
@@ -2654,7 +2656,7 @@ void VersionSet::LogAndApplyCFHelper(VersionEdit* edit) {
 }
 void VersionSet::LogAndApplyHelper(ColumnFamilyData* cfd,
-                                   VersionBuilder* builder, Version* /*v*/,
+                                   VersionBuilder* builder, Version* v,
                                    VersionEdit* edit, InstrumentedMutex* mu) {
   mu->AssertHeld();
   assert(!edit->IsColumnFamilyManipulation());

@@ -792,7 +792,7 @@ class VersionSet {
   struct LogReporter : public log::Reader::Reporter {
     Status* status;
-    virtual void Corruption(size_t /*bytes*/, const Status& s) override {
+    virtual void Corruption(size_t bytes, const Status& s) override {
      if (this->status->ok()) *this->status = s;
    }
  };

@@ -76,9 +76,7 @@ class CountingLogger : public Logger {
  public:
   CountingLogger() : log_count(0) {}
   using Logger::Logv;
-  virtual void Logv(const char* /*format*/, va_list /*ap*/) override {
-    log_count++;
-  }
+  virtual void Logv(const char* format, va_list ap) override { log_count++; }
   int log_count;
 };

@@ -72,7 +72,7 @@ class WalManagerTest : public testing::Test {
   }
   // NOT thread safe
-  void RollTheLog(bool /*archived*/) {
+  void RollTheLog(bool archived) {
    current_log_number_++;
    std::string fname = ArchivedLogFileName(dbname_, current_log_number_);
    unique_ptr<WritableFile> file;

@@ -171,7 +171,7 @@ WriteBatch::~WriteBatch() { delete save_points_; }
 WriteBatch::Handler::~Handler() { }
-void WriteBatch::Handler::LogData(const Slice& /*blob*/) {
+void WriteBatch::Handler::LogData(const Slice& blob) {
   // If the user has not specified something to do with blobs, then we ignore
   // them.
 }
@@ -469,7 +469,7 @@ void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) {
   EncodeFixed64(&b->rep_[0], seq);
 }
-size_t WriteBatchInternal::GetFirstOffset(WriteBatch* /*b*/) {
+size_t WriteBatchInternal::GetFirstOffset(WriteBatch* b) {
   return WriteBatchInternal::kHeader;
 }
@@ -1003,7 +1003,7 @@ public:
     return Status::OK();
   }
-  Status DeleteImpl(uint32_t /*column_family_id*/, const Slice& key,
+  Status DeleteImpl(uint32_t column_family_id, const Slice& key,
                     const Slice& value, ValueType delete_type) {
    MemTable* mem = cf_mems_->GetMemTable();
    mem->Add(sequence_, delete_type, key, value, concurrent_memtable_writes_,

@@ -434,7 +434,7 @@ TEST_F(WriteBatchTest, DISABLED_ManyUpdates) {
   struct NoopHandler : public WriteBatch::Handler {
     uint32_t num_seen = 0;
     char expected_char = 'A';
-    virtual Status PutCF(uint32_t /*column_family_id*/, const Slice& key,
+    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
                          const Slice& value) override {
      EXPECT_EQ(kKeyValueSize, key.size());
      EXPECT_EQ(kKeyValueSize, value.size());
@@ -449,22 +449,22 @@ TEST_F(WriteBatchTest, DISABLED_ManyUpdates) {
       ++num_seen;
       return Status::OK();
     }
-    virtual Status DeleteCF(uint32_t /*column_family_id*/,
-                            const Slice& /*key*/) override {
+    virtual Status DeleteCF(uint32_t column_family_id,
+                            const Slice& key) override {
      ADD_FAILURE();
      return Status::OK();
    }
-    virtual Status SingleDeleteCF(uint32_t /*column_family_id*/,
-                                  const Slice& /*key*/) override {
+    virtual Status SingleDeleteCF(uint32_t column_family_id,
+                                  const Slice& key) override {
      ADD_FAILURE();
      return Status::OK();
    }
-    virtual Status MergeCF(uint32_t /*column_family_id*/, const Slice& /*key*/,
-                           const Slice& /*value*/) override {
+    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
+                           const Slice& value) override {
      ADD_FAILURE();
      return Status::OK();
    }
-    virtual void LogData(const Slice& /*blob*/) override { ADD_FAILURE(); }
+    virtual void LogData(const Slice& blob) override { ADD_FAILURE(); }
    virtual bool Continue() override { return num_seen < kNumUpdates; }
  } handler;
@@ -489,7 +489,7 @@ TEST_F(WriteBatchTest, DISABLED_LargeKeyValue) {
   struct NoopHandler : public WriteBatch::Handler {
     int num_seen = 0;
-    virtual Status PutCF(uint32_t /*column_family_id*/, const Slice& key,
+    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
                          const Slice& value) override {
      EXPECT_EQ(kKeyValueSize, key.size());
      EXPECT_EQ(kKeyValueSize, value.size());
@@ -500,22 +500,22 @@ TEST_F(WriteBatchTest, DISABLED_LargeKeyValue) {
       ++num_seen;
       return Status::OK();
     }
-    virtual Status DeleteCF(uint32_t /*column_family_id*/,
-                            const Slice& /*key*/) override {
+    virtual Status DeleteCF(uint32_t column_family_id,
+                            const Slice& key) override {
      ADD_FAILURE();
      return Status::OK();
    }
-    virtual Status SingleDeleteCF(uint32_t /*column_family_id*/,
-                                  const Slice& /*key*/) override {
+    virtual Status SingleDeleteCF(uint32_t column_family_id,
+                                  const Slice& key) override {
      ADD_FAILURE();
      return Status::OK();
    }
-    virtual Status MergeCF(uint32_t /*column_family_id*/, const Slice& /*key*/,
-                           const Slice& /*value*/) override {
+    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
+                           const Slice& value) override {
      ADD_FAILURE();
      return Status::OK();
    }
-    virtual void LogData(const Slice& /*blob*/) override { ADD_FAILURE(); }
+    virtual void LogData(const Slice& blob) override { ADD_FAILURE(); }
    virtual bool Continue() override { return num_seen < 2; }
  } handler;

@@ -55,7 +55,9 @@ class WriteCallbackTestWriteCallback1 : public WriteCallback {
 class WriteCallbackTestWriteCallback2 : public WriteCallback {
  public:
-  Status Callback(DB* /*db*/) override { return Status::Busy(); }
+  Status Callback(DB *db) override {
+    return Status::Busy();
+  }
   bool AllowWriteBatching() override { return true; }
 };
@@ -73,7 +75,7 @@ class MockWriteCallback : public WriteCallback {
     was_called_.store(other.was_called_.load());
   }
-  Status Callback(DB* /*db*/) override {
+  Status Callback(DB* db) override {
    was_called_.store(true);
    if (should_fail_) {
      return Status::Busy();

@@ -434,8 +434,7 @@ void WriteThread::EnterAsMemTableWriter(Writer* leader,
       last_writer->sequence + WriteBatchInternal::Count(last_writer->batch) - 1;
 }
-void WriteThread::ExitAsMemTableWriter(Writer* /*self*/,
-                                       WriteGroup& write_group) {
+void WriteThread::ExitAsMemTableWriter(Writer* self, WriteGroup& write_group) {
   Writer* leader = write_group.leader;
   Writer* last_writer = write_group.last_writer;

@@ -844,9 +844,7 @@ static void decodeCTRParameters(const char *prefix, size_t blockSize, uint64_t &
 // CreateNewPrefix initialized an allocated block of prefix memory
 // for a new file.
-Status CTREncryptionProvider::CreateNewPrefix(const std::string& /*fname*/,
-                                              char* prefix,
-                                              size_t prefixLength) {
+Status CTREncryptionProvider::CreateNewPrefix(const std::string& fname, char *prefix, size_t prefixLength) {
   // Create & seed rnd.
   Random rnd((uint32_t)Env::Default()->NowMicros());
   // Fill entire prefix block with random values.
@@ -875,9 +873,7 @@ Status CTREncryptionProvider::CreateNewPrefix(const std::string& /*fname*/,
 // in plain text.
 // Returns the amount of space (starting from the start of the prefix)
 // that has been initialized.
-size_t CTREncryptionProvider::PopulateSecretPrefixPart(char* /*prefix*/,
-                                                       size_t /*prefixLength*/,
-                                                       size_t /*blockSize*/) {
+size_t CTREncryptionProvider::PopulateSecretPrefixPart(char *prefix, size_t prefixLength, size_t blockSize) {
   // Nothing to do here, put in custom data in override when needed.
   return 0;
 }
@@ -902,10 +898,8 @@ Status CTREncryptionProvider::CreateCipherStream(const std::string& fname, const
 // CreateCipherStreamFromPrefix creates a block access cipher stream for a file given
 // given name and options. The given prefix is already decrypted.
-Status CTREncryptionProvider::CreateCipherStreamFromPrefix(
-    const std::string& /*fname*/, const EnvOptions& /*options*/,
-    uint64_t initialCounter, const Slice& iv, const Slice& /*prefix*/,
-    unique_ptr<BlockAccessCipherStream>* result) {
+Status CTREncryptionProvider::CreateCipherStreamFromPrefix(const std::string& fname, const EnvOptions& options,
+    uint64_t initialCounter, const Slice& iv, const Slice& prefix, unique_ptr<BlockAccessCipherStream>* result) {
   (*result) = unique_ptr<BlockAccessCipherStream>(new CTRCipherStream(cipher_, iv.data(), initialCounter));
   return Status::OK();
 }

env/env_hdfs.cc

@@ -598,13 +598,13 @@ Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname) {
 // dummy placeholders used when HDFS is not available
 namespace rocksdb {
-Status HdfsEnv::NewSequentialFile(const std::string& /*fname*/,
-                                  unique_ptr<SequentialFile>* /*result*/,
-                                  const EnvOptions& /*options*/) {
+Status HdfsEnv::NewSequentialFile(const std::string& fname,
+                                  unique_ptr<SequentialFile>* result,
+                                  const EnvOptions& options) {
   return Status::NotSupported("Not compiled with hdfs support");
 }
-Status NewHdfsEnv(Env** /*hdfs_env*/, const std::string& /*fsname*/) {
+Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname) {
   return Status::NotSupported("Not compiled with hdfs support");
 }
 }

env/env_test.cc

@@ -1248,36 +1248,33 @@ TEST_P(EnvPosixTestWithParam, WritableFileWrapper) {
       inc(0);
     }
-    Status Append(const Slice& /*data*/) override {
-      inc(1);
-      return Status::OK();
-    }
-    Status Truncate(uint64_t /*size*/) override { return Status::OK(); }
+    Status Append(const Slice& data) override { inc(1); return Status::OK(); }
+    Status Truncate(uint64_t size) override { return Status::OK(); }
     Status Close() override { inc(2); return Status::OK(); }
     Status Flush() override { inc(3); return Status::OK(); }
     Status Sync() override { inc(4); return Status::OK(); }
     Status Fsync() override { inc(5); return Status::OK(); }
-    void SetIOPriority(Env::IOPriority /*pri*/) override { inc(6); }
+    void SetIOPriority(Env::IOPriority pri) override { inc(6); }
     uint64_t GetFileSize() override { inc(7); return 0; }
-    void GetPreallocationStatus(size_t* /*block_size*/,
-                                size_t* /*last_allocated_block*/) override {
+    void GetPreallocationStatus(size_t* block_size,
+                                size_t* last_allocated_block) override {
      inc(8);
    }
-    size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const override {
+    size_t GetUniqueId(char* id, size_t max_size) const override {
      inc(9);
      return 0;
    }
-    Status InvalidateCache(size_t /*offset*/, size_t /*length*/) override {
+    Status InvalidateCache(size_t offset, size_t length) override {
      inc(10);
      return Status::OK();
    }
   protected:
-    Status Allocate(uint64_t /*offset*/, uint64_t /*len*/) override {
+    Status Allocate(uint64_t offset, uint64_t len) override {
      inc(11);
      return Status::OK();
    }
-    Status RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/) override {
+    Status RangeSync(uint64_t offset, uint64_t nbytes) override {
      inc(12);
      return Status::OK();
    }

env/io_posix.cc

@@ -443,7 +443,7 @@ PosixMmapReadableFile::~PosixMmapReadableFile() {
 }
 Status PosixMmapReadableFile::Read(uint64_t offset, size_t n, Slice* result,
-                                   char* /*scratch*/) const {
+                                   char* scratch) const {
   Status s;
   if (offset > length_) {
     *result = Slice();
@@ -922,7 +922,7 @@ size_t PosixWritableFile::GetUniqueId(char* id, size_t max_size) const {
 */
 PosixRandomRWFile::PosixRandomRWFile(const std::string& fname, int fd,
-                                     const EnvOptions& /*options*/)
+                                     const EnvOptions& options)
     : filename_(fname), fd_(fd) {}
 PosixRandomRWFile::~PosixRandomRWFile() {

env/io_posix.h

@@ -201,7 +201,7 @@ class PosixMmapFile : public WritableFile {
   // Means Close() will properly take care of truncate
   // and it does not need any additional information
-  virtual Status Truncate(uint64_t /*size*/) override { return Status::OK(); }
+  virtual Status Truncate(uint64_t size) override { return Status::OK(); }
   virtual Status Close() override;
   virtual Status Append(const Slice& data) override;
   virtual Status Flush() override;

env/mock_env.cc

@@ -445,8 +445,8 @@ MockEnv::~MockEnv() {
 // Partial implementation of the Env interface.
 Status MockEnv::NewSequentialFile(const std::string& fname,
                                   unique_ptr<SequentialFile>* result,
-                                  const EnvOptions& /*soptions*/) {
+                                  const EnvOptions& soptions) {
   auto fn = NormalizePath(fname);
   MutexLock lock(&mutex_);
   if (file_map_.find(fn) == file_map_.end()) {
@@ -462,8 +462,8 @@ Status MockEnv::NewSequentialFile(const std::string& fname,
 }
 Status MockEnv::NewRandomAccessFile(const std::string& fname,
                                     unique_ptr<RandomAccessFile>* result,
-                                    const EnvOptions& /*soptions*/) {
+                                    const EnvOptions& soptions) {
   auto fn = NormalizePath(fname);
   MutexLock lock(&mutex_);
   if (file_map_.find(fn) == file_map_.end()) {
@@ -480,7 +480,7 @@ Status MockEnv::NewRandomAccessFile(const std::string& fname,
 Status MockEnv::NewRandomRWFile(const std::string& fname,
                                 unique_ptr<RandomRWFile>* result,
-                                const EnvOptions& /*soptions*/) {
+                                const EnvOptions& soptions) {
   auto fn = NormalizePath(fname);
   MutexLock lock(&mutex_);
   if (file_map_.find(fn) == file_map_.end()) {
@@ -523,8 +523,8 @@ Status MockEnv::NewWritableFile(const std::string& fname,
   return Status::OK();
 }
-Status MockEnv::NewDirectory(const std::string& /*name*/,
+Status MockEnv::NewDirectory(const std::string& name,
                              unique_ptr<Directory>* result) {
   result->reset(new MockEnvDirectory());
   return Status::OK();
 }

@@ -245,7 +245,7 @@ static const Status notsup;
 class HdfsEnv : public Env {
  public:
-  explicit HdfsEnv(const std::string& /*fsname*/) {
+  explicit HdfsEnv(const std::string& fsname) {
    fprintf(stderr, "You have not build rocksdb with HDFS support\n");
    fprintf(stderr, "Please see hdfs/README for details\n");
    abort();
@@ -258,125 +258,112 @@ class HdfsEnv : public Env {
                                    unique_ptr<SequentialFile>* result,
                                    const EnvOptions& options) override;
-  virtual Status NewRandomAccessFile(const std::string& /*fname*/,
-                                     unique_ptr<RandomAccessFile>* /*result*/,
-                                     const EnvOptions& /*options*/) override {
+  virtual Status NewRandomAccessFile(const std::string& fname,
+                                     unique_ptr<RandomAccessFile>* result,
+                                     const EnvOptions& options) override {
    return notsup;
  }
-  virtual Status NewWritableFile(const std::string& /*fname*/,
-                                 unique_ptr<WritableFile>* /*result*/,
-                                 const EnvOptions& /*options*/) override {
+  virtual Status NewWritableFile(const std::string& fname,
+                                 unique_ptr<WritableFile>* result,
+                                 const EnvOptions& options) override {
    return notsup;
  }
-  virtual Status NewDirectory(const std::string& /*name*/,
-                              unique_ptr<Directory>* /*result*/) override {
+  virtual Status NewDirectory(const std::string& name,
+                              unique_ptr<Directory>* result) override {
    return notsup;
  }
-  virtual Status FileExists(const std::string& /*fname*/) override {
+  virtual Status FileExists(const std::string& fname) override {
    return notsup;
  }
-  virtual Status GetChildren(const std::string& /*path*/,
-                             std::vector<std::string>* /*result*/) override {
+  virtual Status GetChildren(const std::string& path,
+                             std::vector<std::string>* result) override {
    return notsup;
  }
-  virtual Status DeleteFile(const std::string& /*fname*/) override {
+  virtual Status DeleteFile(const std::string& fname) override {
    return notsup;
  }
-  virtual Status CreateDir(const std::string& /*name*/) override {
-    return notsup;
-  }
+  virtual Status CreateDir(const std::string& name) override { return notsup; }
-  virtual Status CreateDirIfMissing(const std::string& /*name*/) override {
+  virtual Status CreateDirIfMissing(const std::string& name) override {
    return notsup;
  }
-  virtual Status DeleteDir(const std::string& /*name*/) override {
-    return notsup;
-  }
+  virtual Status DeleteDir(const std::string& name) override { return notsup; }
-  virtual Status GetFileSize(const std::string& /*fname*/,
-                             uint64_t* /*size*/) override {
+  virtual Status GetFileSize(const std::string& fname,
+                             uint64_t* size) override {
    return notsup;
  }
-  virtual Status GetFileModificationTime(const std::string& /*fname*/,
-                                         uint64_t* /*time*/) override {
+  virtual Status GetFileModificationTime(const std::string& fname,
+                                         uint64_t* time) override {
    return notsup;
  }
-  virtual Status RenameFile(const std::string& /*src*/,
-                            const std::string& /*target*/) override {
+  virtual Status RenameFile(const std::string& src,
+                            const std::string& target) override {
    return notsup;
  }
-  virtual Status LinkFile(const std::string& /*src*/,
-                          const std::string& /*target*/) override {
+  virtual Status LinkFile(const std::string& src,
+                          const std::string& target) override {
    return notsup;
  }
-  virtual Status LockFile(const std::string& /*fname*/,
-                          FileLock** /*lock*/) override {
+  virtual Status LockFile(const std::string& fname, FileLock** lock) override {
    return notsup;
  }
-  virtual Status UnlockFile(FileLock* /*lock*/) override { return notsup; }
+  virtual Status UnlockFile(FileLock* lock) override { return notsup; }
-  virtual Status NewLogger(const std::string& /*fname*/,
-                           shared_ptr<Logger>* /*result*/) override {
+  virtual Status NewLogger(const std::string& fname,
+                           shared_ptr<Logger>* result) override {
    return notsup;
  }
-  virtual void Schedule(void (*/*function*/)(void* arg), void* /*arg*/,
-                        Priority /*pri*/ = LOW, void* /*tag*/ = nullptr,
-                        void (*/*unschedFunction*/)(void* arg) = 0) override {}
+  virtual void Schedule(void (*function)(void* arg), void* arg,
+                        Priority pri = LOW, void* tag = nullptr,
+                        void (*unschedFunction)(void* arg) = 0) override {}
-  virtual int UnSchedule(void* /*tag*/, Priority /*pri*/) override { return 0; }
+  virtual int UnSchedule(void* tag, Priority pri) override { return 0; }
-  virtual void StartThread(void (*/*function*/)(void* arg),
-                           void* /*arg*/) override {}
+  virtual void StartThread(void (*function)(void* arg), void* arg) override {}
   virtual void WaitForJoin() override {}
   virtual unsigned int GetThreadPoolQueueLen(
-      Priority /*pri*/ = LOW) const override {
+      Priority pri = LOW) const override {
    return 0;
  }
-  virtual Status GetTestDirectory(std::string* /*path*/) override {
-    return notsup;
-  }
+  virtual Status GetTestDirectory(std::string* path) override { return notsup; }
   virtual uint64_t NowMicros() override { return 0; }
-  virtual void SleepForMicroseconds(int /*micros*/) override {}
+  virtual void SleepForMicroseconds(int micros) override {}
-  virtual Status GetHostName(char* /*name*/, uint64_t /*len*/) override {
+  virtual Status GetHostName(char* name, uint64_t len) override {
    return notsup;
  }
-  virtual Status GetCurrentTime(int64_t* /*unix_time*/) override {
-    return notsup;
-  }
+  virtual Status GetCurrentTime(int64_t* unix_time) override { return notsup; }
-  virtual Status GetAbsolutePath(const std::string& /*db_path*/,
-                                 std::string* /*outputpath*/) override {
+  virtual Status GetAbsolutePath(const std::string& db_path,
+                                 std::string* outputpath) override {
    return notsup;
  }
-  virtual void SetBackgroundThreads(int /*number*/,
-                                    Priority /*pri*/ = LOW) override {}
-  virtual int GetBackgroundThreads(Priority /*pri*/ = LOW) override {
-    return 0;
-  }
-  virtual void IncBackgroundThreadsIfNeeded(int /*number*/,
-                                            Priority /*pri*/) override {}
-  virtual std::string TimeToString(uint64_t /*number*/) override { return ""; }
+  virtual void SetBackgroundThreads(int number, Priority pri = LOW) override {}
+  virtual int GetBackgroundThreads(Priority pri = LOW) override { return 0; }
+  virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) override {
+  }
+  virtual std::string TimeToString(uint64_t number) override { return ""; }
   virtual uint64_t GetThreadID() const override {
     return 0;

@@ -189,8 +189,7 @@ class Cache {
   // Mark the last inserted object as being a raw data block. This will be used
   // in tests. The default implementation does nothing.
-  virtual void TEST_mark_as_data_block(const Slice& /*key*/,
-                                       size_t /*charge*/) {}
+  virtual void TEST_mark_as_data_block(const Slice& key, size_t charge) {}
  private:
   // No copying allowed

@@ -97,10 +97,8 @@ class CompactionFilter {
   // The last paragraph is not true if you set max_subcompactions to more than
   // 1. In that case, subcompaction from multiple threads may call a single
   // CompactionFilter concurrently.
-  virtual bool Filter(int /*level*/, const Slice& /*key*/,
-                      const Slice& /*existing_value*/,
-                      std::string* /*new_value*/,
-                      bool* /*value_changed*/) const {
+  virtual bool Filter(int level, const Slice& key, const Slice& existing_value,
+                      std::string* new_value, bool* value_changed) const {
    return false;
  }
@@ -113,8 +111,8 @@ class CompactionFilter {
   // may not realize there is a write conflict and may allow a Transaction to
   // Commit that should have failed. Instead, it is better to implement any
   // Merge filtering inside the MergeOperator.
-  virtual bool FilterMergeOperand(int /*level*/, const Slice& /*key*/,
-                                  const Slice& /*operand*/) const {
+  virtual bool FilterMergeOperand(int level, const Slice& key,
+                                  const Slice& operand) const {
    return false;
  }
@@ -159,7 +157,7 @@ class CompactionFilter {
   // MergeOperator.
   virtual Decision FilterV2(int level, const Slice& key, ValueType value_type,
                             const Slice& existing_value, std::string* new_value,
-                            std::string* /*skip_until*/) const {
+                            std::string* skip_until) const {
    switch (value_type) {
      case ValueType::kValue: {
        bool value_changed = false;

@@ -855,7 +855,7 @@ class DB {
   // Flush the WAL memory buffer to the file. If sync is true, it calls SyncWAL
   // afterwards.
-  virtual Status FlushWAL(bool /*sync*/) {
+  virtual Status FlushWAL(bool sync) {
    return Status::NotSupported("FlushWAL not implemented");
  }
  // Sync the wal. Note that Write() followed by SyncWAL() is not exactly the

@@ -170,9 +170,9 @@ class Env {
   // returns non-OK.
   //
   // The returned file will only be accessed by one thread at a time.
-  virtual Status ReopenWritableFile(const std::string& /*fname*/,
-                                    unique_ptr<WritableFile>* /*result*/,
-                                    const EnvOptions& /*options*/) {
+  virtual Status ReopenWritableFile(const std::string& fname,
+                                    unique_ptr<WritableFile>* result,
+                                    const EnvOptions& options) {
    return Status::NotSupported();
  }
@@ -187,9 +187,9 @@ class Env {
   // *result and returns OK. On failure returns non-OK.
   //
   // The returned file will only be accessed by one thread at a time.
-  virtual Status NewRandomRWFile(const std::string& /*fname*/,
-                                 unique_ptr<RandomRWFile>* /*result*/,
-                                 const EnvOptions& /*options*/) {
+  virtual Status NewRandomRWFile(const std::string& fname,
+                                 unique_ptr<RandomRWFile>* result,
+                                 const EnvOptions& options) {
    return Status::NotSupported("RandomRWFile is not implemented in this Env");
  }
@@ -257,8 +257,7 @@ class Env {
                             const std::string& target) = 0;
   // Hard Link file src to target.
-  virtual Status LinkFile(const std::string& /*src*/,
-                          const std::string& /*target*/) {
+  virtual Status LinkFile(const std::string& src, const std::string& target) {
    return Status::NotSupported("LinkFile is not supported for this Env");
  }
@@ -309,7 +308,7 @@ class Env {
   // Arrange to remove jobs for given arg from the queue_ if they are not
   // already scheduled. Caller is expected to have exclusive lock on arg.
-  virtual int UnSchedule(void* /*arg*/, Priority /*pri*/) { return 0; }
+  virtual int UnSchedule(void* arg, Priority pri) { return 0; }
   // Start a new thread, invoking "function(arg)" within the new thread.
   // When "function(arg)" returns, the thread will be destroyed.
@@ -319,7 +318,7 @@ class Env {
   virtual void WaitForJoin() {}
   // Get thread pool queue length for specific thread pool.
-  virtual unsigned int GetThreadPoolQueueLen(Priority /*pri*/ = LOW) const {
+  virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const {
    return 0;
  }
@@ -373,7 +372,7 @@ class Env {
   virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) = 0;
   // Lower IO priority for threads from the specified pool.
-  virtual void LowerThreadPoolIOPriority(Priority /*pool*/ = LOW) {}
+  virtual void LowerThreadPoolIOPriority(Priority pool = LOW) {}
   // Converts seconds-since-Jan-01-1970 to a printable string
   virtual std::string TimeToString(uint64_t time) = 0;
@@ -417,7 +416,7 @@ class Env {
                              const ImmutableDBOptions& db_options) const;
   // Returns the status of all threads that belong to the current Env.
-  virtual Status GetThreadList(std::vector<ThreadStatus>* /*thread_list*/) {
+  virtual Status GetThreadList(std::vector<ThreadStatus>* thread_list) {
    return Status::NotSupported("Not supported.");
  }
@@ -483,14 +482,14 @@ class SequentialFile {
   // Remove any kind of caching of data from the offset to offset+length
   // of this file. If the length is 0, then it refers to the end of file.
   // If the system is not caching the file contents, then this is a noop.
-  virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) {
+  virtual Status InvalidateCache(size_t offset, size_t length) {
    return Status::NotSupported("InvalidateCache not supported.");
  }
  // Positioned Read for direct I/O
  // If Direct I/O enabled, offset, n, and scratch should be properly aligned
-  virtual Status PositionedRead(uint64_t /*offset*/, size_t /*n*/,
-                                Slice* /*result*/, char* /*scratch*/) {
+  virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result,
+                                char* scratch) {
    return Status::NotSupported();
  }
 };
@@ -516,7 +515,7 @@ class RandomAccessFile {
                       char* scratch) const = 0;
   // Readahead the file starting from offset by n bytes for caching.
-  virtual Status Prefetch(uint64_t /*offset*/, size_t /*n*/) {
+  virtual Status Prefetch(uint64_t offset, size_t n) {
    return Status::OK();
  }
@@ -535,14 +534,14 @@ class RandomAccessFile {
   // a single varint.
   //
   // Note: these IDs are only valid for the duration of the process.
-  virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const {
+  virtual size_t GetUniqueId(char* id, size_t max_size) const {
    return 0;  // Default implementation to prevent issues with backwards
               // compatibility.
  };
  enum AccessPattern { NORMAL, RANDOM, SEQUENTIAL, WILLNEED, DONTNEED };
-  virtual void Hint(AccessPattern /*pattern*/) {}
+  virtual void Hint(AccessPattern pattern) {}
   // Indicates the upper layers if the current RandomAccessFile implementation
   // uses direct IO.
@@ -555,7 +554,7 @@ class RandomAccessFile {
   // Remove any kind of caching of data from the offset to offset+length
   // of this file. If the length is 0, then it refers to the end of file.
   // If the system is not caching the file contents, then this is a noop.
-  virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) {
+  virtual Status InvalidateCache(size_t offset, size_t length) {
    return Status::NotSupported("InvalidateCache not supported.");
  }
 };
@ -605,7 +604,9 @@ class WritableFile {
// before closing. It is not always possible to keep track of the file // before closing. It is not always possible to keep track of the file
// size due to whole pages writes. The behavior is undefined if called // size due to whole pages writes. The behavior is undefined if called
// with other writes to follow. // with other writes to follow.
virtual Status Truncate(uint64_t /*size*/) { return Status::OK(); } virtual Status Truncate(uint64_t size) {
return Status::OK();
}
virtual Status Close() = 0; virtual Status Close() = 0;
virtual Status Flush() = 0; virtual Status Flush() = 0;
virtual Status Sync() = 0; // sync data virtual Status Sync() = 0; // sync data
@ -667,7 +668,7 @@ class WritableFile {
} }
// For documentation, refer to RandomAccessFile::GetUniqueId() // For documentation, refer to RandomAccessFile::GetUniqueId()
virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { virtual size_t GetUniqueId(char* id, size_t max_size) const {
return 0; // Default implementation to prevent issues with backwards return 0; // Default implementation to prevent issues with backwards
} }
@ -675,7 +676,7 @@ class WritableFile {
// of this file. If the length is 0, then it refers to the end of file. // of this file. If the length is 0, then it refers to the end of file.
// If the system is not caching the file contents, then this is a noop. // If the system is not caching the file contents, then this is a noop.
// This call has no effect on dirty pages in the cache. // This call has no effect on dirty pages in the cache.
virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) { virtual Status InvalidateCache(size_t offset, size_t length) {
return Status::NotSupported("InvalidateCache not supported."); return Status::NotSupported("InvalidateCache not supported.");
} }
@ -685,9 +686,7 @@ class WritableFile {
// This asks the OS to initiate flushing the cached data to disk, // This asks the OS to initiate flushing the cached data to disk,
// without waiting for completion. // without waiting for completion.
// Default implementation does nothing. // Default implementation does nothing.
virtual Status RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/) { virtual Status RangeSync(uint64_t offset, uint64_t nbytes) { return Status::OK(); }
return Status::OK();
}
// PrepareWrite performs any necessary preparation for a write // PrepareWrite performs any necessary preparation for a write
// before the write actually occurs. This allows for pre-allocation // before the write actually occurs. This allows for pre-allocation
@ -714,7 +713,7 @@ class WritableFile {
} }
// Pre-allocates space for a file. // Pre-allocates space for a file.
virtual Status Allocate(uint64_t /*offset*/, uint64_t /*len*/) { virtual Status Allocate(uint64_t offset, uint64_t len) {
return Status::OK(); return Status::OK();
} }
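
For context on the defaults being restored above, here is a minimal sketch of a WritableFile wrapper that overrides one of these no-op hooks. The class name CountingFile, the wrapped target_ member, and the byte counter are illustrative only and are not part of this change.

#include <cstdint>
#include <memory>
#include <utility>

#include "rocksdb/env.h"

// Illustrative wrapper (not part of this diff): forwards to a wrapped
// WritableFile and tallies how many bytes the upper layers asked to
// range-sync instead of silently returning OK.
class CountingFile : public rocksdb::WritableFile {
 public:
  explicit CountingFile(std::unique_ptr<rocksdb::WritableFile> target)
      : target_(std::move(target)) {}

  rocksdb::Status Append(const rocksdb::Slice& data) override {
    return target_->Append(data);
  }
  rocksdb::Status Close() override { return target_->Close(); }
  rocksdb::Status Flush() override { return target_->Flush(); }
  rocksdb::Status Sync() override { return target_->Sync(); }

  // Same signature as the RangeSync() default restored above.
  rocksdb::Status RangeSync(uint64_t offset, uint64_t nbytes) override {
    range_synced_bytes_ += nbytes;
    return target_->RangeSync(offset, nbytes);
  }

  uint64_t range_synced_bytes() const { return range_synced_bytes_; }

 private:
  std::unique_ptr<rocksdb::WritableFile> target_;
  uint64_t range_synced_bytes_ = 0;
};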

@@ -45,7 +45,7 @@ class FilterBitsBuilder {
   virtual Slice Finish(std::unique_ptr<const char[]>* buf) = 0;
   // Calculate num of entries fit into a space.
-  virtual int CalculateNumEntry(const uint32_t /*space*/) {
+  virtual int CalculateNumEntry(const uint32_t space) {
 #ifndef ROCKSDB_LITE
     throw std::runtime_error("CalculateNumEntry not Implemented");
 #else
@@ -114,8 +114,7 @@ class FilterPolicy {
   // Get the FilterBitsReader, which is ONLY used for full filter block
   // It contains interface to tell if key can be in filter
   // The input slice should NOT be deleted by FilterPolicy
-  virtual FilterBitsReader* GetFilterBitsReader(
-      const Slice& /*contents*/) const {
+  virtual FilterBitsReader* GetFilterBitsReader(const Slice& contents) const {
     return nullptr;
   }
 };
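
The GetFilterBitsReader() default above returns nullptr, which leaves full-filter blocks unreadable for a custom policy. A hedged sketch of a policy that keeps full filters working by delegating to the built-in bloom policy; the class name DelegatingFilterPolicy and the 10-bits-per-key choice are illustrative, not part of this change.

#include <memory>
#include <string>

#include "rocksdb/filter_policy.h"

// Illustrative policy (not part of this diff): delegates everything to the
// built-in bloom filter so GetFilterBitsBuilder()/GetFilterBitsReader()
// return real objects instead of the nullptr default above.
class DelegatingFilterPolicy : public rocksdb::FilterPolicy {
 public:
  DelegatingFilterPolicy()
      : inner_(rocksdb::NewBloomFilterPolicy(10 /* bits per key */, false)) {}

  const char* Name() const override { return "DelegatingFilterPolicy"; }

  void CreateFilter(const rocksdb::Slice* keys, int n,
                    std::string* dst) const override {
    inner_->CreateFilter(keys, n, dst);
  }
  bool KeyMayMatch(const rocksdb::Slice& key,
                   const rocksdb::Slice& filter) const override {
    return inner_->KeyMayMatch(key, filter);
  }

  rocksdb::FilterBitsBuilder* GetFilterBitsBuilder() const override {
    return inner_->GetFilterBitsBuilder();
  }
  rocksdb::FilterBitsReader* GetFilterBitsReader(
      const rocksdb::Slice& contents) const override {
    return inner_->GetFilterBitsReader(contents);
  }

 private:
  std::unique_ptr<const rocksdb::FilterPolicy> inner_;
};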

@@ -51,7 +51,7 @@ class Iterator : public Cleanable {
   // Position at the last key in the source that at or before target
   // The iterator is Valid() after this call iff the source contains
   // an entry that comes at or before target.
-  virtual void SeekForPrev(const Slice& /*target*/) {}
+  virtual void SeekForPrev(const Slice& target) {}
   // Moves to the next entry in the source. After this call, Valid() is
   // true iff the iterator was not positioned at the last entry in the source.
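
A usage sketch of the SeekForPrev() contract described above: position at the last key that is at or before the target. It assumes an already-open rocksdb::DB* named db; the helper name LookupFloor is made up for this sketch.

#include <cassert>
#include <memory>

#include "rocksdb/db.h"

// Usage sketch (not part of this diff): find the greatest key <= target.
void LookupFloor(rocksdb::DB* db, const rocksdb::Slice& target) {
  std::unique_ptr<rocksdb::Iterator> it(
      db->NewIterator(rocksdb::ReadOptions()));
  it->SeekForPrev(target);
  if (it->Valid()) {
    // it->key() is now the last key that is at or before `target`.
    assert(it->key().compare(target) <= 0);
  }
}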

@@ -345,8 +345,8 @@ class EventListener {
   // returns. Otherwise, RocksDB may be blocked.
   // @param handle is a pointer to the column family handle to be deleted
   // which will become a dangling pointer after the deletion.
-  virtual void OnColumnFamilyHandleDeletionStarted(
-      ColumnFamilyHandle* /*handle*/) {}
+  virtual void OnColumnFamilyHandleDeletionStarted(ColumnFamilyHandle* handle) {
+  }
   // A call-back function for RocksDB which will be called after an external
   // file is ingested using IngestExternalFile.
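
A sketch of overriding the OnColumnFamilyHandleDeletionStarted() callback above; the counter-based listener and its name are illustrative, not part of this change. Such a listener would typically be registered through DBOptions::listeners.

#include <atomic>
#include <cstdint>

#include "rocksdb/listener.h"

// Illustrative listener (not part of this diff): counts how many column
// family handles started deletion.
class HandleDeletionCounter : public rocksdb::EventListener {
 public:
  void OnColumnFamilyHandleDeletionStarted(
      rocksdb::ColumnFamilyHandle* handle) override {
    // `handle` becomes dangling after this callback, so only inspect it here.
    deletions_.fetch_add(1, std::memory_order_relaxed);
  }

  uint64_t deletions() const {
    return deletions_.load(std::memory_order_relaxed);
  }

 private:
  std::atomic<uint64_t> deletions_{0};
};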

@@ -89,14 +89,14 @@ class MemTableRep {
   //
   // Currently only skip-list based memtable implement the interface. Other
   // implementations will fallback to Insert() by default.
-  virtual void InsertWithHint(KeyHandle handle, void** /*hint*/) {
+  virtual void InsertWithHint(KeyHandle handle, void** hint) {
     // Ignore the hint by default.
     Insert(handle);
   }
   // Like Insert(handle), but may be called concurrent with other calls
   // to InsertConcurrently for other handles
-  virtual void InsertConcurrently(KeyHandle /*handle*/) {
+  virtual void InsertConcurrently(KeyHandle handle) {
 #ifndef ROCKSDB_LITE
     throw std::runtime_error("concurrent insert not supported");
 #else
@@ -128,8 +128,8 @@ class MemTableRep {
   virtual void Get(const LookupKey& k, void* callback_args,
                    bool (*callback_func)(void* arg, const char* entry));
-  virtual uint64_t ApproximateNumEntries(const Slice& /*start_ikey*/,
-                                         const Slice& /*end_key*/) {
+  virtual uint64_t ApproximateNumEntries(const Slice& start_ikey,
+                                         const Slice& end_key) {
     return 0;
   }
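
InsertWithHint() above only pays off when RocksDB can reuse a hint across consecutive inserts, which applications enable through a prefix-extractor option rather than by calling the method directly. A configuration sketch, assuming the memtable_insert_with_hint_prefix_extractor option of this era of RocksDB; the 8-byte prefix length is an arbitrary value for this sketch.

#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"

// Configuration sketch (not part of this diff): group consecutive inserts by
// an 8-byte key prefix so the memtable can reuse its insert hint.
rocksdb::Options MakeHintedInsertOptions() {
  rocksdb::Options options;
  options.memtable_insert_with_hint_prefix_extractor.reset(
      rocksdb::NewFixedPrefixTransform(8));
  return options;
}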

@@ -66,9 +66,11 @@ class MergeOperator {
   // internal corruption. This will be treated as an error by the library.
   //
   // Also make use of the *logger for error messages.
-  virtual bool FullMerge(const Slice& /*key*/, const Slice* /*existing_value*/,
-                         const std::deque<std::string>& /*operand_list*/,
-                         std::string* /*new_value*/, Logger* /*logger*/) const {
+  virtual bool FullMerge(const Slice& key,
+                         const Slice* existing_value,
+                         const std::deque<std::string>& operand_list,
+                         std::string* new_value,
+                         Logger* logger) const {
     // deprecated, please use FullMergeV2()
     assert(false);
     return false;
@@ -143,10 +145,9 @@ class MergeOperator {
   // If there is corruption in the data, handle it in the FullMergeV2() function
   // and return false there. The default implementation of PartialMerge will
   // always return false.
-  virtual bool PartialMerge(const Slice& /*key*/, const Slice& /*left_operand*/,
-                            const Slice& /*right_operand*/,
-                            std::string* /*new_value*/,
-                            Logger* /*logger*/) const {
+  virtual bool PartialMerge(const Slice& key, const Slice& left_operand,
+                            const Slice& right_operand, std::string* new_value,
+                            Logger* logger) const {
     return false;
   }
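
The FullMerge() default above is deprecated in favor of FullMergeV2(). A sketch of a merge operator that implements FullMergeV2() so the deprecated path is never reached; the concatenation semantics and the ConcatOperator name are illustrative, not part of this change.

#include <string>

#include "rocksdb/merge_operator.h"
#include "rocksdb/slice.h"

// Illustrative operator (not part of this diff): appends operands onto the
// existing value.
class ConcatOperator : public rocksdb::MergeOperator {
 public:
  bool FullMergeV2(const MergeOperationInput& merge_in,
                   MergeOperationOutput* merge_out) const override {
    merge_out->new_value.clear();
    if (merge_in.existing_value != nullptr) {
      merge_out->new_value.assign(merge_in.existing_value->data(),
                                  merge_in.existing_value->size());
    }
    for (const rocksdb::Slice& operand : merge_in.operand_list) {
      merge_out->new_value.append(operand.data(), operand.size());
    }
    return true;
  }

  const char* Name() const override { return "ConcatOperator"; }
};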

@@ -45,7 +45,7 @@ class RateLimiter {
   // Request for token for bytes. If this request can not be satisfied, the call
   // is blocked. Caller is responsible to make sure
   // bytes <= GetSingleBurstBytes()
-  virtual void Request(const int64_t /*bytes*/, const Env::IOPriority /*pri*/) {
+  virtual void Request(const int64_t bytes, const Env::IOPriority pri) {
     assert(false);
   }
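
Request() above is what a custom limiter would override, but most callers install the built-in limiter instead. A configuration sketch assuming NewGenericRateLimiter(); the 10 MB/s budget is an arbitrary value for this sketch.

#include "rocksdb/options.h"
#include "rocksdb/rate_limiter.h"

// Configuration sketch (not part of this diff): the built-in limiter blocks
// callers inside Request() once the configured budget is exhausted.
rocksdb::Options MakeThrottledOptions() {
  rocksdb::Options options;
  options.rate_limiter.reset(
      rocksdb::NewGenericRateLimiter(10 * 1024 * 1024 /* bytes per second */));
  return options;
}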

@@ -173,7 +173,7 @@ class PinnableSlice : public Slice, public Cleanable {
     }
   }
-  void remove_prefix(size_t /*n*/) {
+  void remove_prefix(size_t n) {
     assert(0); // Not implemented
   }

@@ -58,7 +58,7 @@ class SliceTransform {
   virtual bool InDomain(const Slice& key) const = 0;
   // This is currently not used and remains here for backward compatibility.
-  virtual bool InRange(const Slice& /*dst*/) const { return false; }
+  virtual bool InRange(const Slice& dst) const { return false; }
   // Transform(s)=Transform(`prefix`) for any s with `prefix` as a prefix.
   //
@@ -83,7 +83,7 @@ class SliceTransform {
   // "abcd,e", the file can be filtered out and the key will be invisible.
   //
   // i.e., an implementation always returning false is safe.
-  virtual bool SameResultWhenAppended(const Slice& /*prefix*/) const {
+  virtual bool SameResultWhenAppended(const Slice& prefix) const {
     return false;
   }
 };
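
A sketch of a SliceTransform that overrides the SameResultWhenAppended() default shown above; it mirrors what NewFixedPrefixTransform(4) already provides and exists only to show which members carry defaults. The class name is made up and is not part of this change.

#include "rocksdb/slice.h"
#include "rocksdb/slice_transform.h"

// Illustrative transform (not part of this diff): uses the first four bytes
// of a key as its prefix.
class FourBytePrefixTransform : public rocksdb::SliceTransform {
 public:
  const char* Name() const override { return "FourBytePrefixTransform"; }

  rocksdb::Slice Transform(const rocksdb::Slice& src) const override {
    // Only called for keys where InDomain() returned true.
    return rocksdb::Slice(src.data(), 4);
  }

  bool InDomain(const rocksdb::Slice& src) const override {
    return src.size() >= 4;
  }

  // Appending bytes to a key never changes its first four bytes, so any
  // string with `prefix` as a prefix transforms to the same value.
  bool SameResultWhenAppended(const rocksdb::Slice& prefix) const override {
    return prefix.size() >= 4;
  }
};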

@@ -445,7 +445,7 @@ class Statistics {
   virtual uint64_t getTickerCount(uint32_t tickerType) const = 0;
   virtual void histogramData(uint32_t type,
                              HistogramData* const data) const = 0;
-  virtual std::string getHistogramString(uint32_t /*type*/) const { return ""; }
+  virtual std::string getHistogramString(uint32_t type) const { return ""; }
   virtual void recordTick(uint32_t tickerType, uint64_t count = 0) = 0;
   virtual void setTickerCount(uint32_t tickerType, uint64_t count) = 0;
   virtual uint64_t getAndResetTickerCount(uint32_t tickerType) = 0;

@@ -80,7 +80,7 @@ class GeoDB : public StackableDB {
   // GeoDB owns the pointer `DB* db` now. You should not delete it or
   // use it after the invocation of GeoDB
   // GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {}
-  GeoDB(DB* db, const GeoDBOptions& /*options*/) : StackableDB(db) {}
+  GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {}
   virtual ~GeoDB() {}
   // Insert a new object into the location database. The object is

@@ -62,7 +62,7 @@ class OptimisticTransactionDB {
  protected:
   // To Create an OptimisticTransactionDB, call Open()
-  explicit OptimisticTransactionDB(DB* /*db*/) {}
+  explicit OptimisticTransactionDB(DB* db) {}
   OptimisticTransactionDB() {}
  private:

@@ -402,8 +402,8 @@ class Transaction {
   virtual bool IsDeadlockDetect() const { return false; }
-  virtual std::vector<TransactionID> GetWaitingTxns(
-      uint32_t* /*column_family_id*/, std::string* /*key*/) const {
+  virtual std::vector<TransactionID> GetWaitingTxns(uint32_t* column_family_id,
+                                                    std::string* key) const {
     assert(false);
     return std::vector<TransactionID>();
   }
@@ -423,7 +423,7 @@ class Transaction {
   void SetState(TransactionState state) { txn_state_ = state; }
  protected:
-  explicit Transaction(const TransactionDB* /*db*/) {}
+  explicit Transaction(const TransactionDB* db) {}
   Transaction() {}
   // the log in which the prepared section for this txn resides

@@ -44,8 +44,8 @@ class WalFilter {
   // @params cf_name_id_map column_family_name to column_family_id map
   virtual void ColumnFamilyLogNumberMap(
-      const std::map<uint32_t, uint64_t>& /*cf_lognumber_map*/,
-      const std::map<std::string, uint32_t>& /*cf_name_id_map*/) {}
+      const std::map<uint32_t, uint64_t>& cf_lognumber_map,
+      const std::map<std::string, uint32_t>& cf_name_id_map) {}
   // LogRecord is invoked for each log record encountered for all the logs
   // during replay on logs on recovery. This method can be used to:
@@ -75,9 +75,11 @@ class WalFilter {
   // @returns Processing option for the current record.
   // Please see WalProcessingOption enum above for
   // details.
-  virtual WalProcessingOption LogRecordFound(
-      unsigned long long /*log_number*/, const std::string& /*log_file_name*/,
-      const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) {
+  virtual WalProcessingOption LogRecordFound(unsigned long long log_number,
+                                             const std::string& log_file_name,
+                                             const WriteBatch& batch,
+                                             WriteBatch* new_batch,
+                                             bool* batch_changed) {
     // Default implementation falls back to older function for compatibility
     return LogRecord(batch, new_batch, batch_changed);
   }
@@ -85,9 +87,9 @@ class WalFilter {
   // Please see the comments for LogRecord above. This function is for
   // compatibility only and contains a subset of parameters.
   // New code should use the function above.
-  virtual WalProcessingOption LogRecord(const WriteBatch& /*batch*/,
-                                        WriteBatch* /*new_batch*/,
-                                        bool* /*batch_changed*/) const {
+  virtual WalProcessingOption LogRecord(const WriteBatch& batch,
+                                        WriteBatch* new_batch,
+                                        bool* batch_changed) const {
     return WalProcessingOption::kContinueProcessing;
   }
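
A sketch of a WalFilter built on the callbacks above: it records the column-family map and lets every record through untouched. The class name is made up and is not part of this change; such a filter would typically be installed via DBOptions::wal_filter.

#include <cstdint>
#include <map>
#include <string>

#include "rocksdb/wal_filter.h"
#include "rocksdb/write_batch.h"

// Illustrative filter (not part of this diff): pass-through WAL replay.
class PassThroughWalFilter : public rocksdb::WalFilter {
 public:
  void ColumnFamilyLogNumberMap(
      const std::map<uint32_t, uint64_t>& cf_lognumber_map,
      const std::map<std::string, uint32_t>& cf_name_id_map) override {
    cf_name_id_map_ = cf_name_id_map;
  }

  WalProcessingOption LogRecordFound(unsigned long long log_number,
                                     const std::string& log_file_name,
                                     const rocksdb::WriteBatch& batch,
                                     rocksdb::WriteBatch* new_batch,
                                     bool* batch_changed) override {
    *batch_changed = false;  // keep the original batch
    return WalProcessingOption::kContinueProcessing;
  }

  const char* Name() const override { return "PassThroughWalFilter"; }

 private:
  std::map<std::string, uint32_t> cf_name_id_map_;
};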

@@ -217,9 +217,8 @@ class WriteBatch : public WriteBatchBase {
     }
     virtual void SingleDelete(const Slice& /*key*/) {}
-    virtual Status DeleteRangeCF(uint32_t /*column_family_id*/,
-                                 const Slice& /*begin_key*/,
-                                 const Slice& /*end_key*/) {
+    virtual Status DeleteRangeCF(uint32_t column_family_id,
+                                 const Slice& begin_key, const Slice& end_key) {
       return Status::InvalidArgument("DeleteRangeCF not implemented");
     }
@@ -241,16 +240,16 @@ class WriteBatch : public WriteBatchBase {
       return Status::InvalidArgument("MarkBeginPrepare() handler not defined.");
     }
-    virtual Status MarkEndPrepare(const Slice& /*xid*/) {
+    virtual Status MarkEndPrepare(const Slice& xid) {
       return Status::InvalidArgument("MarkEndPrepare() handler not defined.");
     }
-    virtual Status MarkRollback(const Slice& /*xid*/) {
+    virtual Status MarkRollback(const Slice& xid) {
       return Status::InvalidArgument(
           "MarkRollbackPrepare() handler not defined.");
     }
-    virtual Status MarkCommit(const Slice& /*xid*/) {
+    virtual Status MarkCommit(const Slice& xid) {
       return Status::InvalidArgument("MarkCommit() handler not defined.");
     }
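
The methods above belong to WriteBatch::Handler, whose defaults let an implementation override only what it needs. A sketch of a handler that counts puts and deletes in the default column family while WriteBatch::Iterate() walks a batch; the class name and counters are illustrative, not part of this change.

#include <cstdint>

#include "rocksdb/slice.h"
#include "rocksdb/write_batch.h"

// Illustrative handler (not part of this diff): tallies operations.
class CountingHandler : public rocksdb::WriteBatch::Handler {
 public:
  void Put(const rocksdb::Slice& key, const rocksdb::Slice& value) override {
    ++puts_;
  }
  void Delete(const rocksdb::Slice& key) override { ++deletes_; }

  uint64_t puts() const { return puts_; }
  uint64_t deletes() const { return deletes_; }

 private:
  uint64_t puts_ = 0;
  uint64_t deletes_ = 0;
};

// Possible usage:
//   rocksdb::WriteBatch batch;
//   batch.Put("k", "v");
//   CountingHandler handler;
//   rocksdb::Status s = batch.Iterate(&handler);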

@@ -597,8 +597,8 @@ void HashCuckooRep::Iterator::Seek(const Slice& user_key,
 }
 // Retreat to the last entry with a key <= target
-void HashCuckooRep::Iterator::SeekForPrev(const Slice& /*user_key*/,
-                                          const char* /*memtable_key*/) {
+void HashCuckooRep::Iterator::SeekForPrev(const Slice& user_key,
+                                          const char* memtable_key) {
   assert(false);
 }
@@ -623,7 +623,7 @@ void HashCuckooRep::Iterator::SeekToLast() {
 MemTableRep* HashCuckooRepFactory::CreateMemTableRep(
     const MemTableRep::KeyComparator& compare, Allocator* allocator,
-    const SliceTransform* /*transform*/, Logger* /*logger*/) {
+    const SliceTransform* transform, Logger* logger) {
   // The estimated average fullness. The write performance of any close hash
   // degrades as the fullness of the mem-table increases. Setting kFullness
   // to a value around 0.7 can better avoid write performance degradation while

@@ -362,14 +362,14 @@ class HashLinkListRep : public MemTableRep {
     // Advance to the first entry with a key >= target
     virtual void Seek(const Slice& internal_key,
-                      const char* /*memtable_key*/) override {
+                      const char* memtable_key) override {
       node_ = hash_link_list_rep_->FindGreaterOrEqualInBucket(head_,
                                                               internal_key);
     }
     // Retreat to the last entry with a key <= target
-    virtual void SeekForPrev(const Slice& /*internal_key*/,
-                             const char* /*memtable_key*/) override {
+    virtual void SeekForPrev(const Slice& internal_key,
+                             const char* memtable_key) override {
       // Since we do not support Prev()
       // We simply do not support SeekForPrev
       Reset(nullptr);
@@ -483,10 +483,10 @@ class HashLinkListRep : public MemTableRep {
     }
     virtual void Next() override {}
     virtual void Prev() override {}
-    virtual void Seek(const Slice& /*user_key*/,
-                      const char* /*memtable_key*/) override {}
-    virtual void SeekForPrev(const Slice& /*user_key*/,
-                             const char* /*memtable_key*/) override {}
+    virtual void Seek(const Slice& user_key,
+                      const char* memtable_key) override {}
+    virtual void SeekForPrev(const Slice& user_key,
+                             const char* memtable_key) override {}
     virtual void SeekToFirst() override {}
     virtual void SeekToLast() override {}

@@ -131,8 +131,8 @@ class HashSkipListRep : public MemTableRep {
     }
     // Retreat to the last entry with a key <= target
-    virtual void SeekForPrev(const Slice& /*internal_key*/,
-                             const char* /*memtable_key*/) override {
+    virtual void SeekForPrev(const Slice& internal_key,
+                             const char* memtable_key) override {
       // not supported
       assert(false);
     }
@@ -219,10 +219,10 @@ class HashSkipListRep : public MemTableRep {
     }
     virtual void Next() override {}
     virtual void Prev() override {}
-    virtual void Seek(const Slice& /*internal_key*/,
-                      const char* /*memtable_key*/) override {}
-    virtual void SeekForPrev(const Slice& /*internal_key*/,
-                             const char* /*memtable_key*/) override {}
+    virtual void Seek(const Slice& internal_key,
+                      const char* memtable_key) override {}
+    virtual void SeekForPrev(const Slice& internal_key,
+                             const char* memtable_key) override {}
     virtual void SeekToFirst() override {}
     virtual void SeekToLast() override {}
@@ -335,7 +335,7 @@ MemTableRep::Iterator* HashSkipListRep::GetDynamicPrefixIterator(Arena* arena) {
 MemTableRep* HashSkipListRepFactory::CreateMemTableRep(
     const MemTableRep::KeyComparator& compare, Allocator* allocator,
-    const SliceTransform* transform, Logger* /*logger*/) {
+    const SliceTransform* transform, Logger* logger) {
   return new HashSkipListRep(compare, allocator, transform, bucket_count_,
                              skiplist_height_, skiplist_branching_factor_);
 }

@@ -270,7 +270,7 @@ public:
 MemTableRep* SkipListFactory::CreateMemTableRep(
     const MemTableRep::KeyComparator& compare, Allocator* allocator,
-    const SliceTransform* transform, Logger* /*logger*/) {
+    const SliceTransform* transform, Logger* logger) {
   return new SkipListRep(compare, allocator, transform, lookahead_);
 }

@@ -227,8 +227,8 @@ void VectorRep::Iterator::Seek(const Slice& user_key,
 }
 // Advance to the first entry with a key <= target
-void VectorRep::Iterator::SeekForPrev(const Slice& /*user_key*/,
-                                      const char* /*memtable_key*/) {
+void VectorRep::Iterator::SeekForPrev(const Slice& user_key,
+                                      const char* memtable_key) {
   assert(false);
 }
@@ -296,7 +296,7 @@ MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) {
 MemTableRep* VectorRepFactory::CreateMemTableRep(
     const MemTableRep::KeyComparator& compare, Allocator* allocator,
-    const SliceTransform*, Logger* /*logger*/) {
+    const SliceTransform*, Logger* logger) {
   return new VectorRep(compare, allocator, count_);
 }
 } // namespace rocksdb

@@ -1129,7 +1129,7 @@ Status GetPlainTableOptionsFromMap(
     const PlainTableOptions& table_options,
     const std::unordered_map<std::string, std::string>& opts_map,
     PlainTableOptions* new_table_options, bool input_strings_escaped,
-    bool /*ignore_unknown_options*/) {
+    bool ignore_unknown_options) {
   assert(new_table_options);
   *new_table_options = table_options;
   for (const auto& o : opts_map) {

@@ -689,7 +689,7 @@ Status RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
 Status RocksDBOptionsParser::VerifyDBOptions(
     const DBOptions& base_opt, const DBOptions& persisted_opt,
-    const std::unordered_map<std::string, std::string>* /*opt_map*/,
+    const std::unordered_map<std::string, std::string>* opt_map,
     OptionsSanityCheckLevel sanity_check_level) {
   for (auto pair : db_options_type_info) {
     if (pair.second.verification == OptionVerificationType::kDeprecated) {

@@ -35,7 +35,7 @@ static int PthreadCall(const char* label, int result) {
   return result;
 }
-Mutex::Mutex(bool /*adaptive*/) {
+Mutex::Mutex(bool adaptive) {
 #ifdef ROCKSDB_PTHREAD_ADAPTIVE_MUTEX
   if (!adaptive) {
     PthreadCall("init mutex", pthread_mutex_init(&mu_, nullptr));

@@ -13,7 +13,7 @@
 namespace rocksdb {
 namespace port {
 void InstallStackTraceHandler() {}
-void PrintStack(int /*first_frames_to_skip*/) {}
+void PrintStack(int first_frames_to_skip) {}
 } // namespace port
 } // namespace rocksdb
