Improve defaults for benchmarks

Summary:
Changes include:
* don't sync-on-commit for the single writer thread in the readwhile* and range-scan-while-writing/merging tests (DB_BENCH_NO_SYNC=1)
* make the default block size 8KB rather than 4KB so blocks are not too small after compression
* use snappy instead of zlib to avoid stalls from compression latency
* disable statistics
* use bytes_per_sync=8M (up from 2M) to reduce throughput loss on disk
* use open_files=-1 to reduce mutex contention
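
For reference, a minimal sketch of a db_bench invocation reflecting the new defaults; the authoritative flag list is built in tools/benchmark.sh, and the DB path below is a placeholder:

  # Sketch only. Old -> new values from this change:
  #   block_size 4096 -> 8192, compression_type zlib -> snappy,
  #   bytes_per_sync 2M -> 8M, statistics 1 -> 0, open_files $((20 * K)) -> -1
  DB_DIR=/path/to/db   # placeholder; benchmark.sh uses its own DB_DIR
  ./db_bench \
    --benchmarks=readwhilewriting \
    --db=$DB_DIR \
    --block_size=8192 \
    --compression_type=snappy \
    --min_level_to_compress=3 \
    --bytes_per_sync=$((8 * 1024 * 1024)) \
    --statistics=0 \
    --open_files=-1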

Task ID: #

Blame Rev:

Test Plan:
run benchmark
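
A sketch of how the benchmarks can be driven, using environment variable names that appear in this diff; the values and the DB_DIR path are illustrative assumptions:

  # Full flash benchmark sweep with the new 8KB block default.
  NSECONDS=$((60 * 60)) BLOCK_LENGTH=8192 ./tools/run_flash_bench.sh

  # One read-while-writing test with sync-on-commit disabled for the writer.
  DB_DIR=/data/rocksdb DURATION=300 NUM_THREADS=24 WRITES_PER_SECOND=10000 \
    DB_BENCH_NO_SYNC=1 ./tools/benchmark.sh readwhilewriting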

Revert Plan:

Database Impact:

Memcache Impact:

Other Notes:

EImportant:

- begin *PUBLIC* platform impact section -
Bugzilla: #
- end platform impact -

Reviewers: igor

Reviewed By: igor

Subscribers: dhruba

Differential Revision: https://reviews.facebook.net/D44961
Branch: main
Author: Mark Callaghan
Commit: 41a0e2811d (parent a203b913c1)

Changed files:
  tools/benchmark.sh        (11 changed lines)
  tools/run_flash_bench.sh  (14 changed lines)

--- a/tools/benchmark.sh
+++ b/tools/benchmark.sh
@@ -47,7 +47,7 @@ duration=${DURATION:-0}
num_keys=${NUM_KEYS:-$((1 * G))}
key_size=20
value_size=${VALUE_SIZE:-400}
-block_size=${BLOCK_SIZE:-4096}
+block_size=${BLOCK_SIZE:-8192}
const_params="
--db=$DB_DIR \
@@ -61,11 +61,12 @@ const_params="
--block_size=$block_size \
--cache_size=$cache_size \
--cache_numshardbits=6 \
---compression_type=zlib \
+--compression_type=snappy \
--min_level_to_compress=3 \
--compression_ratio=0.5 \
--level_compaction_dynamic_level_bytes=true \
---bytes_per_sync=$((2 * M)) \
+--bytes_per_sync=$((8 * M)) \
+--cache_index_and_filter_blocks=0 \
\
--hard_rate_limit=3 \
--rate_limit_delay_max_milliseconds=1000000 \
@@ -79,14 +80,14 @@ const_params="
--max_grandparent_overlap_factor=8 \
--max_bytes_for_level_multiplier=8 \
\
---statistics=1 \
+--statistics=0 \
--stats_per_interval=1 \
--stats_interval_seconds=60 \
--histogram=1 \
\
--memtablerep=skip_list \
--bloom_bits=10 \
---open_files=$((20 * K))"
+--open_files=-1"
l0_config="
--level0_file_num_compaction_trigger=4 \

--- a/tools/run_flash_bench.sh
+++ b/tools/run_flash_bench.sh
@@ -62,7 +62,7 @@ duration=${NSECONDS:-$((60 * 60))}
nps=${RANGE_LIMIT:-10}
vs=${VAL_SIZE:-400}
cs=${CACHE_BYTES:-$(( 1 * G ))}
-bs=${BLOCK_LENGTH:-4096}
+bs=${BLOCK_LENGTH:-8192}
# If no command line arguments then run for 24 threads.
if [[ $# -eq 0 ]]; then
@@ -193,15 +193,15 @@ for num_thr in "${nthreads[@]}" ; do
# Test 11: random read while writing
env $ARGS DURATION=$duration NUM_THREADS=$num_thr WRITES_PER_SECOND=$wps \
-./tools/benchmark.sh readwhilewriting
+DB_BENCH_NO_SYNC=1 ./tools/benchmark.sh readwhilewriting
# Test 12: range scan while writing
env $ARGS DURATION=$duration NUM_THREADS=$num_thr WRITES_PER_SECOND=$wps \
-NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh fwdrangewhilewriting
+DB_BENCH_NO_SYNC=1 NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh fwdrangewhilewriting
# Test 13: reverse range scan while writing
env $ARGS DURATION=$duration NUM_THREADS=$num_thr WRITES_PER_SECOND=$wps \
-NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh revrangewhilewriting
+DB_BENCH_NO_SYNC=1 NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh revrangewhilewriting
done
###### Merge tests
@@ -216,15 +216,15 @@ for num_thr in "${nthreads[@]}" ; do
# Test 16: random read while merging
env $ARGS DURATION=$duration NUM_THREADS=$num_thr WRITES_PER_SECOND=$wps \
-./tools/benchmark.sh readwhilemerging
+DB_BENCH_NO_SYNC=1 ./tools/benchmark.sh readwhilemerging
# Test 17: range scan while merging
env $ARGS DURATION=$duration NUM_THREADS=$num_thr WRITES_PER_SECOND=$wps \
-NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh fwdrangewhilemerging
+DB_BENCH_NO_SYNC=1 NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh fwdrangewhilemerging
# Test 18: reverse range scan while merging
env $ARGS DURATION=$duration NUM_THREADS=$num_thr WRITES_PER_SECOND=$wps \
-NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh revrangewhilemerging
+DB_BENCH_NO_SYNC=1 NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh revrangewhilemerging
done
echo bulkload > $output_dir/report2.txt
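
For context, a minimal sketch of how benchmark.sh could honor the DB_BENCH_NO_SYNC override used above. Only the DB_BENCH_NO_SYNC name comes from this diff; the syncval variable and the exact wiring are assumptions:

  # Hypothetical handling inside tools/benchmark.sh (names are illustrative).
  syncval="1"
  if [ ! -z "$DB_BENCH_NO_SYNC" ]; then
    echo "Disabling sync-on-commit for the single writer thread"
    syncval="0"
  fi
  # The *whilewriting / *whilemerging tests would then pass --sync=$syncval
  # to db_bench for the writer.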
