From 2b9a360c8be894a6ce217e094a4baabc7af2efd1 Mon Sep 17 00:00:00 2001 From: Vamsi Ponnekanti Date: Wed, 3 Apr 2013 03:40:39 -0700 Subject: [PATCH] [Getting warning while running db_crashtest] Summary: When I run db_crashtest, I am seeing a lot of warnings that say db_stress completed before it was killed. To fix that I made ops per thread a very large value so that it keeps running until it is killed. I also set #reopens to 0. Since we are killing the process anyway, the 'simulated crash' that happens during reopen may not add additional value. I usually see 10-25K ops happening before the kill. So I increased max_key from 100 to 1000 so that we use more distinct keys. Test Plan: Ran a few times. Revert Plan: OK Task ID: # Reviewers: emayanke Reviewed By: emayanke CC: leveldb Differential Revision: https://reviews.facebook.net/D9909 --- tools/db_crashtest.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/db_crashtest.py b/tools/db_crashtest.py index af275d37d..54f970c46 100644 --- a/tools/db_crashtest.py +++ b/tools/db_crashtest.py @@ -25,7 +25,8 @@ def main(argv): interval = 120 # time for one db_stress instance to run duration = 6000 # total time for this script to test db_stress threads = 32 - ops_per_thread = 500000 + # since we will be killing anyway, use large value for ops_per_thread + ops_per_thread = 10000000 write_buf_size = 4 * 1024 * 1024 for opt, arg in opts: @@ -62,10 +63,10 @@ def main(argv): --ops_per_thread=0' + str(ops_per_thread) + ' \ --threads=0' + str(threads) + ' \ --write_buffer_size=' + str(write_buf_size) + '\ - --reopen=10 \ + --reopen=0 \ --readpercent=50 \ --db=/tmp/rocksdb/crashtest \ - --max_key=100'], stderr=subprocess.PIPE, shell=True) + --max_key=1000'], stderr=subprocess.PIPE, shell=True) time.sleep(interval) while True: if time.time() > killtime: