Summary: This runs a benchmark for LevelDB similar to what we have in tools/run_flash_bench.sh. It requires changes to db_bench that I published in a LevelDB fork on github. Some results are at: http://smalldatum.blogspot.com/2015/04/comparing-leveldb-and-rocksdb-take-2.html Sample output: ops/sec mb/sec usec/op avg p50 Test 525 16.4 1904.5 1904.5 111.0 fillseq.v32768 75187 15.5 13.3 13.3 4.4 fillseq.v200 28328 5.8 35.3 35.3 4.7 overwrite.t1.s0 175438 0.0 5.7 5.7 4.4 readrandom.t1 28490 5.9 35.1 35.1 4.7 overwrite.t1.s0 121951 0.0 8.2 8.2 5.7 readwhilewriting.t1 Task ID: # Blame Rev: Test Plan: Revert Plan: Database Impact: Memcache Impact: Other Notes: EImportant: - begin *PUBLIC* platform impact section - Bugzilla: # - end platform impact - Reviewers: igor Reviewed By: igor Subscribers: dhruba Differential Revision: https://reviews.facebook.net/D37749main
parent
1bb4928da9
commit
a087f80e9d
@ -0,0 +1,185 @@ |
||||
#!/bin/bash
# REQUIRE: db_bench binary exists in the current directory
#
# This should be used with the LevelDB fork listed here to use additional test options.
# For more details on the changes see the blog post listed below.
# https://github.com/mdcallag/leveldb-1
# http://smalldatum.blogspot.com/2015/04/comparing-leveldb-and-rocksdb-take-2.html

# Exactly one argument is required: a comma-separated list of test names
# (split on ',' by the driver loop at the bottom of this script).
if [ $# -ne 1 ]; then
  # Print usage to stderr and exit non-zero so callers can detect the error.
  # The original used "echo -n" (no trailing newline) and "exit 0", which
  # both garbled the message and reported success on a usage mistake.
  echo "./benchmark.sh [fillseq/overwrite/readrandom/readwhilewriting]" >&2
  exit 1
fi
||||
# size constants
K=1024
M=$((1024 * K))
G=$((1024 * M))

# DB_DIR must name the directory in which db_bench creates database files.
# Quoted so a value with spaces (or an unset value) cannot break the test,
# and exit non-zero: a missing DB_DIR is a configuration error, not success.
if [ -z "$DB_DIR" ]; then
  echo "DB_DIR is not defined" >&2
  exit 1
fi

# Logs and report.txt go to $OUTPUT_DIR (default /tmp/).
output_dir=${OUTPUT_DIR:-/tmp/}
if [ ! -d "$output_dir" ]; then
  mkdir -p "$output_dir"
fi
||||
|
||||
# all multithreaded tests run with sync=1 unless
# $DB_BENCH_NO_SYNC is defined
syncval="1"
# Quoted with a :- default so a value containing spaces (or running under
# "set -u") cannot break the test; the original "[ ! -z $DB_BENCH_NO_SYNC ]"
# word-split the unquoted expansion.
if [ -n "${DB_BENCH_NO_SYNC:-}" ]; then
  echo "Turning sync off for all multithreaded tests"
  syncval="0"
fi

num_threads=${NUM_THREADS:-16}
# Only for *whilewriting, *whilemerging
writes_per_second=${WRITES_PER_SECOND:-$((10 * K))}
cache_size=${CACHE_SIZE:-$((1 * G))}

num_keys=${NUM_KEYS:-$((1 * G))}
key_size=20
value_size=${VALUE_SIZE:-400}
block_size=${BLOCK_SIZE:-4096}

# Flags shared by every db_bench invocation below. Kept as a single string
# (not an array) because the run_* functions splice it into an eval'd
# command string.
const_params="
  --db=$DB_DIR \
  \
  --num=$num_keys \
  --value_size=$value_size \
  --cache_size=$cache_size \
  --compression_ratio=0.5 \
  \
  --write_buffer_size=$((2 * M)) \
  \
  --histogram=1 \
  \
  --bloom_bits=10 \
  --open_files=$((20 * K))"

params_w="$const_params "
||||
|
||||
#######################################
# Parse one db_bench log and append a tab-separated summary line to
# $output_dir/report.txt.
# Globals:   output_dir (read)
# Arguments: $1 - log file produced by db_bench
#            $2 - name under which to report this run
#            $3 - db_bench benchmark name to grep for in the log
#            $4 - number of client threads used (scales ops/sec)
#######################################
function summarize_result {
  local test_out=$1
  local test_name=$2
  local bench_name=$3
  local nthr=$4
  local usecs_op mb_sec ops ops_sec avg p50

  # db_bench result line looks like:
  #   "<bench>  :  <usec/op> micros/op;  <MB/s> MB/s ..."
  usecs_op=$( grep "^${bench_name}" "$test_out" | awk '{ printf "%.1f", $3 }' )
  mb_sec=$( grep "^${bench_name}" "$test_out" | awk '{ printf "%.1f", $5 }' )
  # Histogram lines: "Count: N  Average: A ..." and "Min: m  Median: p  Max: M".
  # NOTE: ops is parsed but not reported; kept for parity with the original.
  ops=$( grep "^Count:" "$test_out" | awk '{ print $2 }' )
  # Truncate toward zero like the old "scale=0" bc expression did, but via
  # awk so the script no longer requires bc to be installed.
  ops_sec=$( awk -v t="$nthr" -v u="$usecs_op" \
               'BEGIN { printf "%d", int((1000000.0 * t) / u) }' )
  avg=$( grep "^Count:" "$test_out" | awk '{ printf "%.1f", $4 }' )
  # Field 4 of the "Min:" line is the Median value, i.e. the p50 latency.
  p50=$( grep "^Min:" "$test_out" | awk '{ printf "%.1f", $4 }' )
  echo -e "$ops_sec\t$mb_sec\t$usecs_op\t$avg\t$p50\t$test_name" \
    >> "$output_dir/report.txt"
}
||||
|
||||
#######################################
# Load the database with $num_keys keys in sequential key order, one thread,
# --sync=0, starting from an empty database (--use_existing_db=0).
# Globals:   num_keys, params_w, value_size, output_dir (read)
# Outputs:   logs to $output_dir/benchmark_fillseq.v<value_size>.log and
#            appends a summary line to report.txt via summarize_result.
#######################################
function run_fillseq {
  # This runs with a vector memtable and the WAL disabled to load faster. It is still crash safe and the
  # client can discover where to restart a load after a crash. I think this is a good way to load.
  # NOTE(review): the command below does not pass any memtable/WAL flags, so
  # the comment above presumably describes defaults of the forked db_bench
  # - confirm against https://github.com/mdcallag/leveldb-1.
  echo "Loading $num_keys keys sequentially"
  # The 2>&1 | tee redirection is part of the string so that eval runs the
  # whole pipeline; the log file gets both the command line and its output.
  cmd="./db_bench --benchmarks=fillseq \
       --use_existing_db=0 \
       --sync=0 \
       $params_w \
       --threads=1 \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/benchmark_fillseq.v${value_size}.log"
  echo $cmd | tee $output_dir/benchmark_fillseq.v${value_size}.log
  eval $cmd
  summarize_result $output_dir/benchmark_fillseq.v${value_size}.log fillseq.v${value_size} fillseq 1
}
||||
|
||||
#######################################
# Run a write benchmark against the existing database with $num_threads
# threads and --sync=$syncval.
# Globals:   num_keys, num_threads, syncval, params_w, output_dir (read)
# Arguments: $1 - db_bench benchmark name (e.g. "overwrite")
# Outputs:   logs to benchmark_<op>.t<threads>.s<syncval>.log; appends a
#            summary line named <op>.t<threads>.s<syncval> to report.txt.
#######################################
function run_change {
  operation=$1
  echo "Do $num_keys random $operation"
  out_name="benchmark_${operation}.t${num_threads}.s${syncval}.log"
  # Build the full pipeline as a string and eval it so stdout+stderr are
  # teed into the per-test log file.
  cmd="./db_bench --benchmarks=$operation \
       --use_existing_db=1 \
       --sync=$syncval \
       $params_w \
       --threads=$num_threads \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} ${operation}.t${num_threads}.s${syncval} $operation $num_threads
}
||||
|
||||
#######################################
# Point-lookup benchmark: $num_threads threads read random keys from the
# existing database. No --sync flag because this test does no writes.
# Globals:   num_keys, num_threads, params_w, output_dir (read)
# Outputs:   logs to benchmark_readrandom.t<threads>.log; appends a summary
#            line named readrandom.t<threads> to report.txt.
#######################################
function run_readrandom {
  echo "Reading $num_keys random keys"
  out_name="benchmark_readrandom.t${num_threads}.log"
  # Build the full pipeline as a string and eval it so stdout+stderr are
  # teed into the per-test log file.
  cmd="./db_bench --benchmarks=readrandom \
       --use_existing_db=1 \
       $params_w \
       --threads=$num_threads \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} readrandom.t${num_threads} readrandom $num_threads
}
||||
|
||||
#######################################
# Mixed benchmark: $num_threads reader threads run readwhile$1 while the
# writer is rate-limited to $writes_per_second via --writes_per_second.
# Globals:   num_keys, num_threads, syncval, writes_per_second, params_w,
#            output_dir (read)
# Arguments: $1 - suffix of the db_bench benchmark name (e.g. "writing"
#            selects the readwhilewriting benchmark)
# Outputs:   logs to benchmark_readwhile<op>.t<threads>.log; appends a
#            summary line named readwhile<op>.t<threads> to report.txt.
#######################################
function run_readwhile {
  operation=$1
  echo "Reading $num_keys random keys while $operation"
  out_name="benchmark_readwhile${operation}.t${num_threads}.log"
  # Build the full pipeline as a string and eval it so stdout+stderr are
  # teed into the per-test log file.
  cmd="./db_bench --benchmarks=readwhile${operation} \
       --use_existing_db=1 \
       --sync=$syncval \
       $params_w \
       --threads=$num_threads \
       --writes_per_second=$writes_per_second \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} readwhile${operation}.t${num_threads} readwhile${operation} $num_threads
}
||||
|
||||
# Current wall-clock time as seconds since the Unix epoch.
function now() {
  date +%s
}
||||
|
||||
report="$output_dir/report.txt"
schedule="$output_dir/schedule.txt"

echo "===== Benchmark ====="

# Run!!!
# $1 is a comma-separated list of job names ("fillseq,overwrite,..."); split
# it into an array. Quoted so an empty/absent argument yields an empty list.
IFS=',' read -a jobs <<< "$1"
for job in "${jobs[@]}"; do

  if [ "$job" != debug ]; then
    echo "Start $job at `date`" | tee -a "$schedule"
  fi

  start=$(now)
  if [ "$job" = fillseq ]; then
    run_fillseq
  elif [ "$job" = overwrite ]; then
    run_change overwrite
  elif [ "$job" = readrandom ]; then
    run_readrandom
  elif [ "$job" = readwhilewriting ]; then
    run_readwhile writing
  elif [ "$job" = debug ]; then
    # Shrink the key count so the remaining jobs in the list run quickly.
    num_keys=1000; # debug
    echo "Setting num_keys to $num_keys"
  else
    echo "unknown job $job" >&2
    # Exit non-zero: the original bare "exit" propagated the status of the
    # preceding echo (0), reporting success for an unknown job name.
    exit 1
  fi
  end=$(now)

  if [ "$job" != debug ]; then
    echo "Complete $job in $((end-start)) seconds" | tee -a "$schedule"
  fi

  # Show the just-appended summary line after each job.
  echo -e "ops/sec\tmb/sec\tusec/op\tavg\tp50\tTest"
  tail -1 "$output_dir/report.txt"

done
@ -0,0 +1,174 @@ |
||||
#!/bin/bash |
||||
# REQUIRE: benchmark_leveldb.sh exists in the tools/ directory (this script invokes ./tools/benchmark_leveldb.sh)
||||
# After execution of this script, log files are generated in $output_dir. |
||||
# report.txt provides a high level statistics |
||||
# |
||||
# This should be used with the LevelDB fork listed here to use additional test options. |
||||
# For more details on the changes see the blog post listed below. |
||||
# https://github.com/mdcallag/leveldb-1 |
||||
# http://smalldatum.blogspot.com/2015/04/comparing-leveldb-and-rocksdb-take-2.html |
||||
# |
||||
# This should be run from the parent of the tools directory. The command line is: |
||||
# [$env_vars] tools/run_flash_bench.sh [list-of-threads] |
||||
# |
||||
# This runs a sequence of tests in the following sequence: |
||||
# step 1) load - bulkload, compact, fillseq, overwrite |
||||
# step 2) read-only for each number of threads |
||||
# step 3) read-write for each number of threads |
||||
# |
||||
# The list of threads is optional and when not set is equivalent to "24". |
||||
# Were list-of-threads specified as "1 2 4" then the tests in steps 2 and 3
# above would be repeated for 1, 2 and 4 threads. The tests in step 1 are
# only run for 1 thread.
||||
|
||||
# Test output is written to $OUTPUT_DIR, currently /tmp/output. The performance |
||||
# summary is in $OUTPUT_DIR/report.txt. There is one file in $OUTPUT_DIR per |
||||
# test and the tests are listed below. |
||||
# |
||||
# The environment variables are also optional. The variables are: |
||||
# NKEYS - number of key/value pairs to load |
||||
# NWRITESPERSEC - the writes/second rate limit for the *whilewriting* tests. |
||||
# If this is too large then the non-writer threads can get |
||||
# starved. |
||||
# VAL_SIZE - the length of the value in the key/value pairs loaded. |
||||
# You can estimate the size of the test database from this, |
||||
# NKEYS and the compression rate (--compression_ratio) set |
||||
# in tools/benchmark_leveldb.sh |
||||
# BLOCK_LENGTH - value for db_bench --block_size |
||||
# CACHE_BYTES - the size of the RocksDB block cache in bytes |
||||
# DATA_DIR - directory in which to create database files |
||||
# DO_SETUP - when set to 0 then a backup of the database is copied from
#            $DATA_DIR.bak to $DATA_DIR and the load tests from step 1
#            are skipped. This allows tests from steps 2, 3 to be repeated faster.
||||
# SAVE_SETUP - saves a copy of the database at the end of step 1 to |
||||
# $DATA_DIR.bak. |
||||
|
||||
# Size constants
K=1024
M=$((1024 * K))
G=$((1024 * M))

# Workload knobs, each overridable from the environment.
num_keys=${NKEYS:-$((1 * G))}      # key/value pairs to load
wps=${NWRITESPERSEC:-$((10 * K))}  # write rate for the *whilewriting tests
vs=${VAL_SIZE:-400}                # value length in bytes
cs=${CACHE_BYTES:-$((1 * G))}      # block cache size in bytes
bs=${BLOCK_LENGTH:-4096}           # db_bench --block_size

# Thread counts come from the command line; with no arguments run once
# with 24 threads.
if (( $# == 0 )); then
  nthreads=( 24 )
else
  nthreads=( "$@" )
fi
||||
|
||||
# Announce each requested concurrency level.
for num_thr in "${nthreads[@]}"; do
  echo "Will run for $num_thr threads"
done

# Update these parameters before execution !!!
db_dir=${DATA_DIR:-"/tmp/rocksdb/"}

do_setup=${DO_SETUP:-1}
save_setup=${SAVE_SETUP:-0}

output_dir="/tmp/output"

# Environment assignments prepended (via `env $ARGS ...`) to every
# benchmark_leveldb.sh invocation below.
ARGS="\
OUTPUT_DIR=$output_dir \
NUM_KEYS=$num_keys \
DB_DIR=$db_dir \
VALUE_SIZE=$vs \
BLOCK_SIZE=$bs \
CACHE_SIZE=$cs"

mkdir -p "$output_dir"
# Start report.txt with the column header row.
printf 'ops/sec\tmb/sec\tusec/op\tavg\tp50\tTest\n' > "$output_dir/report.txt"
||||
|
||||
# Notes on test sequence:
# step 1) Setup database via sequential fill followed by overwrite to fragment it.
#    Done without setting DURATION to make sure that overwrite does $num_keys writes
# step 2) read-only tests for all levels of concurrency requested
# step 3) non read-only tests for all levels of concurrency requested

###### Setup the database

if [[ $do_setup != 0 ]]; then
  echo Doing setup

  # Test 2a: sequential fill with large values to get peak ingest
  # adjust NUM_KEYS given the use of larger values
  # ($ARGS is intentionally unquoted: it is a list of VAR=value words.)
  env $ARGS BLOCK_SIZE=$((1 * M)) VALUE_SIZE=$((32 * K)) NUM_KEYS=$(( num_keys / 64 )) \
    ./tools/benchmark_leveldb.sh fillseq

  # Test 2b: sequential fill with the configured value size
  env $ARGS ./tools/benchmark_leveldb.sh fillseq

  # Test 3: single-threaded overwrite
  env $ARGS NUM_THREADS=1 DB_BENCH_NO_SYNC=1 ./tools/benchmark_leveldb.sh overwrite

else
  echo Restoring from backup

  # ":?" aborts if db_dir is somehow empty, so this can never expand to a
  # bare "rm -rf".
  rm -rf "${db_dir:?}"

  if [ ! -d "${db_dir}.bak" ]; then
    echo "Database backup does not exist at ${db_dir}.bak" >&2
    # "exit -1" is not portable; any non-zero status signals failure.
    exit 1
  fi

  echo "Restore database from ${db_dir}.bak"
  cp -p -r "${db_dir}.bak" "$db_dir"
fi

if [[ $save_setup != 0 ]]; then
  echo "Save database to ${db_dir}.bak"
  cp -p -r "$db_dir" "${db_dir}.bak"
fi
||||
|
||||
###### Read-only tests

for thr in "${nthreads[@]}"; do
  # Test 4: random read
  env $ARGS NUM_THREADS=$thr ./tools/benchmark_leveldb.sh readrandom

done

###### Non read-only tests

for thr in "${nthreads[@]}"; do
  # Test 7: overwrite with sync=0
  env $ARGS NUM_THREADS=$thr DB_BENCH_NO_SYNC=1 \
    ./tools/benchmark_leveldb.sh overwrite

  # Test 8: overwrite with sync=1
  # Not run for now because LevelDB db_bench doesn't have an option to limit the
  # test run to X seconds and doing sync-per-commit for --num can take too long.
  # env $ARGS NUM_THREADS=$thr ./tools/benchmark_leveldb.sh overwrite

  # Test 11: random read while writing
  env $ARGS NUM_THREADS=$thr WRITES_PER_SECOND=$wps \
    ./tools/benchmark_leveldb.sh readwhilewriting

done
||||
|
||||
#######################################
# Append one section to report2.txt: a title line, the report.txt column
# header, then the result lines matching a grep pattern.
# Globals:   output_dir (read)
# Arguments: $1 - section title
#            $2 - basic-regex pattern selecting result lines
#######################################
function report_section {
  local title=$1
  local pattern=$2
  local report2="$output_dir/report2.txt"
  echo "$title" >> "$report2"
  head -1 "$output_dir/report.txt" >> "$report2"
  grep "$pattern" "$output_dir/report.txt" >> "$report2"
}

# Rebuild report2.txt from scratch, one section per test type. This replaces
# six near-identical copy-paste stanzas; the original also had a duplicated
# ">> report2.txt" redirection on the readwhile line, and its unquoted
# "grep \.s0" pattern lost the backslash to the shell (matching any
# character before "s0") - both fixed here.
: > "$output_dir/report2.txt"
report_section "bulkload" bulkload
report_section "fillseq" fillseq
report_section "overwrite sync=0" 'overwrite.*\.s0'
report_section "overwrite sync=1" 'overwrite.*\.s1'
report_section "readrandom" readrandom
report_section "readwhile" readwhilewriting

cat "$output_dir/report2.txt"
Loading…
Reference in new issue