From 8dbd0bd11fe498afc991db8acd197999d87a0a3f Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Mon, 31 Jan 2022 13:19:55 -0800
Subject: [PATCH] db_crashtest.py use cheaper settings (#9476)

Summary:
Despite attempts to optimize `db_stress` setup phase (i.e., pre-`OperateDb()`) latency in https://github.com/facebook/rocksdb/issues/9470 and https://github.com/facebook/rocksdb/issues/9475, it still always took tens of seconds. Since we still aren't able to set up a 100M key `db_stress` quickly, we should reduce the number of keys. This PR reduces it 4x while increasing `value_size_mult` 4x (from its default value of 8) so that memtables and SST files fill at a similar rate compared to before this PR.

Also disabled bzip2 compression since we'll probably never use it, and I noticed many CI runs spending the majority of their CPU on bzip2 decompression.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9476

Reviewed By: siying

Differential Revision: D33898520

Pulled By: ajkr

fbshipit-source-id: 855021784ad9664f2be5bce21f0339a1cf93230d
---
 tools/db_crashtest.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tools/db_crashtest.py b/tools/db_crashtest.py
index bd9561087..9d1f7c56a 100644
--- a/tools/db_crashtest.py
+++ b/tools/db_crashtest.py
@@ -39,12 +39,11 @@ default_params = {
     "cache_size": 1048576,
     "checkpoint_one_in": 1000000,
     "compression_type": lambda: random.choice(
-        ["none", "snappy", "zlib", "bzip2", "lz4", "lz4hc", "xpress", "zstd"]),
+        ["none", "snappy", "zlib", "lz4", "lz4hc", "xpress", "zstd"]),
     "bottommost_compression_type": lambda:
         "disable" if random.randint(0, 1) == 0 else
         random.choice(
-            ["none", "snappy", "zlib", "bzip2", "lz4", "lz4hc", "xpress",
-             "zstd"]),
+            ["none", "snappy", "zlib", "lz4", "lz4hc", "xpress", "zstd"]),
     "checksum_type" : lambda: random.choice(["kCRC32c", "kxxHash", "kxxHash64", "kXXH3"]),
     "compression_max_dict_bytes": lambda: 16384 * random.randint(0, 1),
     "compression_zstd_max_train_bytes": lambda: 65536 * random.randint(0, 1),
@@ -75,7 +74,7 @@ default_params = {
     "mark_for_compaction_one_file_in": lambda: 10 * random.randint(0, 1),
     "max_background_compactions": 20,
     "max_bytes_for_level_base": 10485760,
-    "max_key": 100000000,
+    "max_key": 25000000,
     "max_write_buffer_number": 3,
     "mmap_read": lambda: random.randint(0, 1),
     # Setting `nooverwritepercent > 0` is only possible because we do not vary
@@ -111,6 +110,7 @@ default_params = {
     # 999 -> use Bloom API
     "ribbon_starting_level": lambda: random.choice([random.randint(-1, 10), 999]),
     "use_block_based_filter": lambda: random.randint(0, 1),
+    "value_size_mult": 32,
     "verify_checksum": 1,
     "write_buffer_size": 4 * 1024 * 1024,
     "writepercent": 35,
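
As a quick sanity check on the 4x/4x tradeoff described in the summary, here is a rough back-of-envelope sketch, separate from the patch itself. It assumes the average value size scales roughly linearly with `value_size_mult`, which is a simplification of `db_stress`'s actual value-sizing logic; the `BYTES_PER_MULT` constant and the helper function are purely illustrative and not part of the tool.

```python
# Rough sketch: total value bytes across the key space, assuming average
# value size grows linearly with value_size_mult (a simplification of
# db_stress's real sizing behavior).

BYTES_PER_MULT = 8  # illustrative scale factor only, not a db_stress constant


def approx_data_volume(max_key, value_size_mult):
    """Approximate total value bytes written across the whole key space."""
    return max_key * value_size_mult * BYTES_PER_MULT


before = approx_data_volume(100_000_000, 8)   # old max_key, old default value_size_mult
after = approx_data_volume(25_000_000, 32)    # values set by this patch

# 4x fewer keys with 4x larger values keeps the estimated volume unchanged.
assert before == after
print(f"before={before:,} bytes, after={after:,} bytes")
```

Under that assumption, 25M keys at `value_size_mult=32` yield about the same total data volume as 100M keys at the old default of 8, which is the stated reason memtables and SST files should fill at a similar rate as before.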