Extend stress test to cover periodic compaction and compaction TTL (#5741)

Summary:
Covering periodic compaction and compaction TTL can help us expose potential issues, so add them to the stress test. Values for these two options are selected at random.
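For illustration, a simplified sketch (not the actual db_crashtest.py code) of how lambda-valued entries in default_params are assumed to be re-evaluated and turned into db_stress flags on each run; expand_params and to_flags below are hypothetical helper names:

    import random

    default_params = {
        "periodic_compaction_seconds":
            lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
        "compaction_ttl": lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
    }

    def expand_params(params):
        # Callables are evaluated here, so every run draws fresh random values.
        return {k: (v() if callable(v) else v) for k, v in params.items()}

    def to_flags(params):
        return ["--{}={}".format(k, v) for k, v in sorted(params.items())]

    print(to_flags(expand_params(default_params)))
    # e.g. ['--compaction_ttl=100', '--periodic_compaction_seconds=0']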
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5741

Test Plan: Run crash_test and check the parameters generated.

Differential Revision: D17059515

fbshipit-source-id: 8213974846a0b6a22fc13be705825c9054d1d097
sdong (committed by Facebook Github Bot)
parent ba0967b567
commit 1d6a10f52d
Changed files:
  tools/db_crashtest.py  (10 changed lines)
  tools/db_stress.cc     (12 changed lines)

tools/db_crashtest.py

@@ -47,7 +47,7 @@ default_params = {
     "max_write_buffer_number": 3,
     "mmap_read": lambda: random.randint(0, 1),
     "nooverwritepercent": 1,
-    "open_files": 500000,
+    "open_files": lambda : random.choice([-1, 500000]),
     "prefixpercent": 5,
     "progress_reports": 0,
     "readpercent": 45,
@@ -67,6 +67,9 @@ default_params = {
     "format_version": lambda: random.randint(2, 4),
     "index_block_restart_interval": lambda: random.choice(range(1, 16)),
     "use_multiget" : lambda: random.randint(0, 1),
+    "periodic_compaction_seconds" :
+        lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
+    "compaction_ttl" : lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
 }

 _TEST_DIR_ENV_VAR = 'TEST_TMPDIR'
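Note that 0 appears twice in each choice list, so (assuming random.choice samples the list uniformly) each feature stays disabled in roughly 2 of every 7 runs; a quick illustration:

    import random
    from collections import Counter

    choices = [0, 0, 1, 2, 10, 100, 1000]
    counts = Counter(random.choice(choices) for _ in range(70000))
    print(counts[0] / 70000.0)  # roughly 0.29, i.e. about 2/7 of the draws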
@@ -162,6 +165,11 @@ def finalize_and_sanitize(src_params):
         dest_params["delrangepercent"] = 0
     if dest_params.get("disable_wal", 0) == 1:
         dest_params["atomic_flush"] = 1
+    if dest_params.get("open_files", 1) != -1:
+        # Compaction TTL and periodic compactions are only compatible
+        # with open_files = -1
+        dest_params["compaction_ttl"] = 0
+        dest_params["periodic_compaction_seconds"] = 0
     return dest_params
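For clarity, a standalone sketch of the sanitization rule above (mirroring, not reproducing, the real finalize_and_sanitize): whenever open_files is anything other than -1, both new options are forced back to 0.

    def sanitize_sketch(src_params):
        dest_params = dict(src_params)
        if dest_params.get("open_files", 1) != -1:
            # Compaction TTL and periodic compactions require open_files = -1.
            dest_params["compaction_ttl"] = 0
            dest_params["periodic_compaction_seconds"] = 0
        return dest_params

    print(sanitize_sketch({"open_files": 500000,
                           "compaction_ttl": 100,
                           "periodic_compaction_seconds": 10}))
    # open_files stays 500000; both compaction options are reset to 0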

tools/db_stress.cc

@@ -320,6 +320,12 @@ DEFINE_uint64(subcompactions, 1,
               "Maximum number of subcompactions to divide L0-L1 compactions "
               "into.");

+DEFINE_uint64(periodic_compaction_seconds, 1000,
+              "Files older than this value will be picked up for compaction.");
+
+DEFINE_uint64(compaction_ttl, 1000,
+              "Files older than TTL will be compacted to the next level.");
+
 DEFINE_bool(allow_concurrent_memtable_write, false,
             "Allow multi-writers to update mem tables in parallel.");
@@ -2745,6 +2751,10 @@ class StressTest {
     }
     fprintf(stdout, "Snapshot refresh nanos : %" PRIu64 "\n",
             FLAGS_snap_refresh_nanos);
+    fprintf(stdout, "Periodic Compaction Secs : %" PRIu64 "\n",
+            FLAGS_periodic_compaction_seconds);
+    fprintf(stdout, "Compaction TTL : %" PRIu64 "\n",
+            FLAGS_compaction_ttl);
     fprintf(stdout, "------------------------------------------------\n");
   }
@@ -2821,6 +2831,8 @@ class StressTest {
     options_.max_subcompactions = static_cast<uint32_t>(FLAGS_subcompactions);
     options_.allow_concurrent_memtable_write =
         FLAGS_allow_concurrent_memtable_write;
+    options_.periodic_compaction_seconds = FLAGS_periodic_compaction_seconds;
+    options_.ttl = FLAGS_compaction_ttl;
     options_.enable_pipelined_write = FLAGS_enable_pipelined_write;
     options_.enable_write_thread_adaptive_yield =
         FLAGS_enable_write_thread_adaptive_yield;
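For a sense of the end-to-end wiring, a hedged sketch of how a driver script might forward the randomized values to the db_stress binary; db_crashtest.py builds a similar command line, but the run_db_stress_once helper below is illustrative only:

    import subprocess

    def run_db_stress_once(params, db_stress_bin="./db_stress"):
        cmd = [db_stress_bin] + ["--{}={}".format(k, v)
                                 for k, v in sorted(params.items())]
        return subprocess.call(cmd)

    # Example: exercise both features in one run (requires open_files = -1).
    # run_db_stress_once({"open_files": -1,
    #                     "periodic_compaction_seconds": 10,
    #                     "compaction_ttl": 100})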
