Add auto prefetching parameters to db_bench and db_stress (#10632)

Summary:
Add auto prefetching options (`initial_auto_readahead_size`, `max_auto_readahead_size`, and `num_file_reads_for_auto_readahead`) to db_bench and db_stress, wire them into `BlockBasedTableOptions`, and randomize them in db_crashtest.py.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10632

Test Plan: make crash_test -j32

Reviewed By: anand1976

Differential Revision: D39241479

Pulled By: akankshamahajan15

fbshipit-source-id: 5db5b0c007da786bacc1b30d8926d36d6d029b87
main
Authored by Akanksha Mahajan 2 years ago; committed by Facebook GitHub Bot
parent dc7d155438
commit 7a9ecdac3c
  1. 5
      db_stress_tool/db_stress_common.h
  2. 9
      db_stress_tool/db_stress_gflags.cc
  3. 6
      db_stress_tool/db_stress_test_base.cc
  4. 10
      tools/db_bench_tool.cc
  5. 4
      tools/db_crashtest.py

@ -314,6 +314,11 @@ DECLARE_int64(preclude_last_level_data_seconds);
DECLARE_int32(verify_iterator_with_expected_state_one_in);
DECLARE_uint64(readahead_size);
DECLARE_uint64(initial_auto_readahead_size);
DECLARE_uint64(max_auto_readahead_size);
DECLARE_uint64(num_file_reads_for_auto_readahead);
constexpr long KB = 1024;
constexpr int kRandomValueMaxFactor = 3;
constexpr int kValueMaxLen = 100;

@ -1035,4 +1035,13 @@ DEFINE_int32(verify_iterator_with_expected_state_one_in, 0,
"chance that the iterator is verified against the expected state "
"file, instead of comparing keys between two iterators.");
DEFINE_uint64(readahead_size, 0, "Iterator readahead size");
DEFINE_uint64(initial_auto_readahead_size, 0,
"Initial auto readahead size for prefetching during Iteration");
DEFINE_uint64(max_auto_readahead_size, 0,
"Max auto readahead size for prefetching during Iteration");
DEFINE_uint64(
num_file_reads_for_auto_readahead, 0,
"Num of sequential reads to enable auto prefetching during Iteration");
#endif // GFLAGS

@ -635,6 +635,7 @@ void StressTest::OperateDb(ThreadState* thread) {
FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
read_opts.async_io = FLAGS_async_io;
read_opts.adaptive_readahead = FLAGS_adaptive_readahead;
read_opts.readahead_size = FLAGS_readahead_size;
WriteOptions write_opts;
if (FLAGS_rate_limit_auto_wal_flush) {
write_opts.rate_limiter_priority = Env::IO_USER;
@ -2950,6 +2951,11 @@ void InitializeOptionsFromFlags(
block_based_options.prepopulate_block_cache =
static_cast<BlockBasedTableOptions::PrepopulateBlockCache>(
FLAGS_prepopulate_block_cache);
block_based_options.initial_auto_readahead_size =
FLAGS_initial_auto_readahead_size;
block_based_options.max_auto_readahead_size = FLAGS_max_auto_readahead_size;
block_based_options.num_file_reads_for_auto_readahead =
FLAGS_num_file_reads_for_auto_readahead;
options.table_factory.reset(NewBlockBasedTableFactory(block_based_options));
options.db_write_buffer_size = FLAGS_db_write_buffer_size;
options.write_buffer_size = FLAGS_write_buffer_size;

@ -1236,6 +1236,14 @@ DEFINE_uint64(
"BlockBasedTableOptions.initial_auto_readahead_size and doubles on every "
"additional read upto max_auto_readahead_size");
DEFINE_uint64(
num_file_reads_for_auto_readahead,
ROCKSDB_NAMESPACE::BlockBasedTableOptions()
.num_file_reads_for_auto_readahead,
"Rocksdb implicit readahead is enabled if reads are sequential and "
"num_file_reads_for_auto_readahead indicates after how many sequential "
"reads into that file internal auto prefetching should be start.");
static enum ROCKSDB_NAMESPACE::CompressionType StringToCompressionType(
const char* ctype) {
assert(ctype);
@ -4372,6 +4380,8 @@ class Benchmark {
FLAGS_max_auto_readahead_size;
block_based_options.initial_auto_readahead_size =
FLAGS_initial_auto_readahead_size;
block_based_options.num_file_reads_for_auto_readahead =
FLAGS_num_file_reads_for_auto_readahead;
BlockBasedTableOptions::PrepopulateBlockCache prepopulate_block_cache =
block_based_options.prepopulate_block_cache;
switch (FLAGS_prepopulate_block_cache) {

@ -180,6 +180,10 @@ default_params = {
"secondary_cache_uri": lambda: random.choice(
["", "compressed_secondary_cache://capacity=8388608"]),
"allow_data_in_errors": True,
"readahead_size": lambda: random.choice([0, 16384, 524288]),
"initial_auto_readahead_size": lambda: random.choice([0, 16384, 524288]),
"max_auto_readahead_size": lambda: random.choice([0, 16384, 524288]),
"num_file_reads_for_auto_readahead": lambda: random.choice([0, 1, 2]),
}
_TEST_DIR_ENV_VAR = 'TEST_TMPDIR'

Loading…
Cancel
Save