Conversion of db_bench, db_stress and db_repl_stress to use gflags

Summary: Converted db_stress, db_repl_stress and db_bench to use gflags

Test Plan: I tested by printing out all the flags from the old and new versions. Tried the defaults plus various combinations of "interesting" flags. Also tested by running db_crashtest.py and db_crashtest2.py.

Reviewers: emayanke, dhruba, haobo, kailiu, sdong

Reviewed By: emayanke

CC: leveldb, xjin

Differential Revision: https://reviews.facebook.net/D13581
Branch: main
Author: Slobodan Predolac (11 years ago)
Parent: 7e2c1ba173
Commit: e44976b199
Changed files (6):
  1. build_tools/build_detect_platform — 11 lines changed
  2. build_tools/fbcode.clang31.sh — 8 lines changed
  3. build_tools/fbcode.gcc471.sh — 8 lines changed
  4. db/db_bench.cc — 1007 lines changed
  5. tools/db_repl_stress.cc — 27 lines changed
  6. tools/db_stress.cc — 617 lines changed
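The conversion follows the same pattern in all three tools: each hand-rolled sscanf()/strncmp() option becomes a DEFINE_* flag (with the old comment moved into the flag's help text), range checks become validators registered through google::RegisterFlagValidator(), and main() just calls google::SetUsageMessage() and google::ParseCommandLineFlags(). A minimal, self-contained sketch of that pattern follows; the flag names here are illustrative, not ones added by this commit.

#include <cstdio>
#include <string>
#include <gflags/gflags.h>

// Hypothetical flags, shown only to illustrate the DEFINE_*/validator idiom.
DEFINE_int32(threads, 4, "Number of concurrent worker threads");
DEFINE_bool(verbose, false, "Print per-operation output");

// A validator runs whenever the flag is set; returning false rejects the value.
static bool ValidateThreads(const char* flagname, int32_t value) {
  if (value <= 0) {
    fprintf(stderr, "Invalid value for --%s: %d\n", flagname, value);
    return false;
  }
  return true;
}
// The dummy bool forces the registration to happen at static-initialization time,
// the same trick the FLAGS_*_dummy variables in db_stress.cc use.
static const bool threads_dummy =
    google::RegisterFlagValidator(&FLAGS_threads, &ValidateThreads);

int main(int argc, char** argv) {
  google::SetUsageMessage(std::string("\nUSAGE:\n") + argv[0] + " [OPTIONS]...");
  // The 'true' argument removes recognized flags from argc/argv.
  google::ParseCommandLineFlags(&argc, &argv, true);
  fprintf(stdout, "threads=%d verbose=%d\n", FLAGS_threads, FLAGS_verbose);
  return 0;
}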

build_tools/build_detect_platform
@@ -175,6 +175,17 @@ EOF
     PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS ${SNAPPY_LDFLAGS:-./snappy/libs/libsnappy.a}"
 fi
 
+# Test whether gflags library is installed
+# http://code.google.com/p/gflags/
+$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+  #include <gflags/gflags.h>
+  int main() {}
+EOF
+if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS"
+fi
+
 # Test whether zlib library is installed
 $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
   #include <zlib.h>
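The -DGFLAGS define set here is only a signal that the library was found on the build machine. A hypothetical guard (not part of this commit) showing how a source file could key off it:

// Hypothetical illustration: only use gflags when the platform script found it.
#ifdef GFLAGS
#include <gflags/gflags.h>
DEFINE_bool(example_flag, false, "available only in gflags-enabled builds");
#endif  // GFLAGS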

build_tools/fbcode.clang31.sh
@@ -18,11 +18,15 @@ SNAPPY_LIBS=" $TOOLCHAIN_LIB_BASE/snappy/snappy-1.0.3/7518bbe/lib/libsnappy.a"
 LIBEVENT_INCLUDE=" -I $TOOLCHAIN_LIB_BASE/libevent/libevent-1.4.14b/91ddd43/include"
 LIBEVENT_LIBS=" -L $TOOLCHAIN_LIB_BASE/libevent/libevent-1.4.14b/91ddd43/lib"
 
+# location of gflags headers and libraries
+GFLAGS_INCLUDE=" -I $TOOLCHAIN_LIB_BASE/gflags/gflags-1.6/91ddd43/include"
+GFLAGS_LIBS=" $TOOLCHAIN_LIB_BASE/gflags/gflags-1.6/91ddd43/lib/libgflags.a"
+
 # use Intel SSE support for checksum calculations
 export USE_SSE=" -msse -msse4.2 "
 CC="$TOOLCHAIN_EXECUTABLES/clang/clang-3.1/6ca8a59/bin/clang $CLANG_INCLUDES"
-CXX="$TOOLCHAIN_EXECUTABLES/clang/clang-3.1/6ca8a59/bin/clang++ $CLANG_INCLUDES $JINCLUDE $SNAPPY_INCLUDE $LIBEVENT_INCLUDE"
+CXX="$TOOLCHAIN_EXECUTABLES/clang/clang-3.1/6ca8a59/bin/clang++ $CLANG_INCLUDES $JINCLUDE $SNAPPY_INCLUDE $LIBEVENT_INCLUDE $GFLAGS_INCLUDE"
 AR=$TOOLCHAIN_EXECUTABLES/binutils/binutils-2.21.1/da39a3e/bin/ar
 RANLIB=$TOOLCHAIN_EXECUTABLES/binutils/binutils-2.21.1/da39a3e/bin/ranlib
@@ -33,7 +37,7 @@ CFLAGS+=" -I $TOOLCHAIN_LIB_BASE/jemalloc/$TOOL_JEMALLOC/include -DHAVE_JEMALLOC
 EXEC_LDFLAGS=" -Wl,--whole-archive $TOOLCHAIN_LIB_BASE/jemalloc/$TOOL_JEMALLOC/lib/libjemalloc.a"
 EXEC_LDFLAGS+=" -Wl,--no-whole-archive $TOOLCHAIN_LIB_BASE/libunwind/libunwind-1.0.1/350336c/lib/libunwind.a"
-EXEC_LDFLAGS+=" $HDFSLIB $SNAPPY_LIBS $LIBEVENT_LIBS"
+EXEC_LDFLAGS+=" $HDFSLIB $SNAPPY_LIBS $LIBEVENT_LIBS $GFLAGS_LIBS"
 EXEC_LDFLAGS+=" -Wl,--dynamic-linker,$GLIBC_RUNTIME_PATH/lib/ld-linux-x86-64.so.2"
 EXEC_LDFLAGS+=" -B$TOOLCHAIN_EXECUTABLES/binutils/binutils-2.21.1/da39a3e/bin"

build_tools/fbcode.gcc471.sh
@@ -31,11 +31,15 @@ ZLIB_LIBS=" $TOOLCHAIN_LIB_BASE/zlib/zlib-1.2.5/91ddd43/lib/libz.a"
 LIBEVENT_INCLUDE=" -I $TOOLCHAIN_LIB_BASE/libevent/libevent-1.4.14b/91ddd43/include"
 LIBEVENT_LIBS=" -L $TOOLCHAIN_LIB_BASE/libevent/libevent-1.4.14b/91ddd43/lib"
 
+# location of gflags headers and libraries
+GFLAGS_INCLUDE=" -I $TOOLCHAIN_LIB_BASE/gflags/gflags-1.6/91ddd43/include"
+GFLAGS_LIBS=" $TOOLCHAIN_LIB_BASE/gflags/gflags-1.6/91ddd43/lib/libgflags.a"
+
 # use Intel SSE support for checksum calculations
 export USE_SSE=" -msse -msse4.2 "
 CC="$TOOLCHAIN_EXECUTABLES/gcc/gcc-4.7.1-glibc-2.14.1/bin/gcc"
-CXX="$TOOLCHAIN_EXECUTABLES/gcc/gcc-4.7.1-glibc-2.14.1/bin/g++ $JINCLUDE $SNAPPY_INCLUDE $ZLIB_INCLUDE $LIBEVENT_INCLUDE"
+CXX="$TOOLCHAIN_EXECUTABLES/gcc/gcc-4.7.1-glibc-2.14.1/bin/g++ $JINCLUDE $SNAPPY_INCLUDE $ZLIB_INCLUDE $LIBEVENT_INCLUDE $GFLAGS_INCLUDE"
 AR=$TOOLCHAIN_EXECUTABLES/binutils/binutils-2.21.1/da39a3e/bin/ar
 RANLIB=$TOOLCHAIN_EXECUTABLES/binutils/binutils-2.21.1/da39a3e/bin/ranlib
@@ -44,7 +48,7 @@ CFLAGS+=" -I $TOOLCHAIN_LIB_BASE/jemalloc/$TOOL_JEMALLOC/include -DHAVE_JEMALLOC
 EXEC_LDFLAGS=" -Wl,--whole-archive $TOOLCHAIN_LIB_BASE/jemalloc/$TOOL_JEMALLOC/lib/libjemalloc.a"
 EXEC_LDFLAGS+=" -Wl,--no-whole-archive $TOOLCHAIN_LIB_BASE/libunwind/libunwind-1.0.1/350336c/lib/libunwind.a"
-EXEC_LDFLAGS+=" $HDFSLIB $SNAPPY_LIBS $ZLIB_LIBS $LIBEVENT_LIBS"
+EXEC_LDFLAGS+=" $HDFSLIB $SNAPPY_LIBS $ZLIB_LIBS $LIBEVENT_LIBS $GFLAGS_LIBS"
 PLATFORM_LDFLAGS="-L$TOOLCHAIN_LIB_BASE/libgcc/libgcc-4.7.1/afc21dc/lib -L$TOOLCHAIN_LIB_BASE/glibc/glibc-2.14.1/99df8fc/lib"

db/db_bench.cc: file diff suppressed because it is too large (1007 lines changed).

tools/db_repl_stress.cc
@@ -5,6 +5,8 @@
 //
 #include <cstdio>
+#include <gflags/gflags.h>
+
 #include "db/write_batch_internal.h"
 #include "rocksdb/db.h"
 #include "rocksdb/types.h"
@@ -76,23 +78,14 @@ static void ReplicationThreadBody(void* arg) {
   }
 }
 
+DEFINE_uint64(num_inserts, 1000, "the num of inserts the first thread should"
+              " perform.");
+DEFINE_uint64(wal_ttl, 1000, "the wal ttl for the run(in seconds)");
+
 int main(int argc, const char** argv) {
-  uint64_t FLAGS_num_inserts = 1000;
-  uint64_t FLAGS_WAL_ttl_seconds = 1000;
-  char junk;
-  long l;
-  for (int i = 1; i < argc; ++i) {
-    if (sscanf(argv[i], "--num_inserts=%ld%c", &l, &junk) == 1) {
-      FLAGS_num_inserts = l;
-    } else if (sscanf(argv[i], "--wal_ttl=%ld%c", &l, &junk) == 1) {
-      FLAGS_WAL_ttl_seconds = l;
-    } else {
-      fprintf(stderr, "Invalid Flag '%s'\n", argv[i]);
-      exit(1);
-    }
-  }
+  google::SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
+                          " --num_inserts=<num_inserts> --wal_ttl=<WAL_ttl_seconds>");
+  google::ParseCommandLineFlags(&argc, const_cast<char***>(&argv), true);
 
   Env* env = Env::Default();
   std::string default_db_path;
@@ -100,7 +93,7 @@ int main(int argc, const char** argv) {
   default_db_path += "db_repl_stress";
   Options options;
   options.create_if_missing = true;
-  options.WAL_ttl_seconds = FLAGS_WAL_ttl_seconds;
+  options.WAL_ttl_seconds = FLAGS_wal_ttl;
   DB* db;
   DestroyDB(default_db_path, options);
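With this change the replication stress tool takes its parameters through gflags, e.g. ./db_repl_stress --num_inserts=100000 --wal_ttl=300 (values here are illustrative); unrecognized flags are now reported by the gflags parser instead of the hand-written sscanf loop.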

tools/db_stress.cc
@@ -23,6 +23,7 @@
 #include <sys/types.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <gflags/gflags.h>
 #include "db/db_impl.h"
 #include "db/version_set.h"
 #include "db/db_statistics.h"
@@ -45,192 +46,255 @@
 static const long KB = 1024;
 
-// Seed for PRNG
-static uint32_t FLAGS_seed = 2341234;
-
-// Max number of key/values to place in database
-static long FLAGS_max_key = 1 * KB * KB * KB;
-
-// If set, the test uses MultiGet(), MultiPut() and MultiDelete() which
-// read/write/delete multiple keys in a batch. In this mode, we do not verify
-// db content by comparing the content with the pre-allocated array. Instead,
-// we do partial verification inside MultiGet() by checking various values in
-// a batch. Benefit of this mode:
-// (a) No need to acquire mutexes during writes (less cache flushes
-//     in multi-core leading to speed up)
-// (b) No long validation at the end (more speed up)
-// (c) Test snapshot and atomicity of batch writes
-static bool FLAGS_test_batches_snapshots = false;
-
-// Number of concurrent threads to run.
-static int FLAGS_threads = 32;
-
-// Opens the db with this ttl value if this is not -1
-// Carefully specify a large value such that verifications on deleted
-// values don't fail
-static int FLAGS_ttl = -1;
-
-// Size of each value will be this number times rand_int(1,3) bytes
-static int FLAGS_value_size_mult = 8;
-
-static bool FLAGS_verify_before_write = false;
-
-// Print histogram of operation timings
-static bool FLAGS_histogram = false;
-
-// Destroys the database dir before start if this is true
-static bool FLAGS_destroy_db_initially = true;
-
-static bool FLAGS_verbose = false;
-
-// Number of bytes to buffer in memtable before compacting
-// (initialized to default value by "main")
-static int FLAGS_write_buffer_size = 0;
-
-// The number of in-memory memtables.
-// Each memtable is of size FLAGS_write_buffer_size.
-// This is initialized to default value of 2 in "main" function.
-static int FLAGS_max_write_buffer_number = 0;
-
-// The minimum number of write buffers that will be merged together
-// before writing to storage. This is cheap because it is an
-// in-memory merge. If this feature is not enabled, then all these
-// write buffers are flushed to L0 as separate files and this increases
-// read amplification because a get request has to check in all of these
-// files. Also, an in-memory merge may result in writing less
-// data to storage if there are duplicate records in each of these
-// individual write buffers.
-static int FLAGS_min_write_buffer_number_to_merge = 0;
-
-// The maximum number of concurrent background compactions
-// that can occur in parallel.
-// This is initialized to default value of 1 in "main" function.
-static int FLAGS_max_background_compactions = 0;
-
-// This is initialized to default value of false
-static rocksdb::CompactionStyle FLAGS_compaction_style = rocksdb::kCompactionStyleLevel;
-
-// The ratio of file sizes that trigger compaction in universal style
-static unsigned int FLAGS_universal_size_ratio = 0;
-
-// The minimum number of files to compact in universal style compaction
-static unsigned int FLAGS_universal_min_merge_width = 0;
-
-// The max number of files to compact in universal style compaction
-static unsigned int FLAGS_universal_max_merge_width = 0;
-
-// The max size amplification for universal style compaction
-static unsigned int FLAGS_universal_max_size_amplification_percent = 0;
-
-// Number of bytes to use as a cache of uncompressed data.
-static long FLAGS_cache_size = 2 * KB * KB * KB;
-
-// Number of bytes in a block.
-static int FLAGS_block_size = 4 * KB;
-
-// Number of times database reopens
-static int FLAGS_reopen = 10;
-
-// Maximum number of files to keep open at the same time (use default if == 0)
-static int FLAGS_open_files = 0;
-
-// Bloom filter bits per key.
-// Negative means use default settings.
-static int FLAGS_bloom_bits = 10;
-
-// Use the db with the following name.
-static const char* FLAGS_db = nullptr;
-
-// Verify checksum for every block read from storage
-static bool FLAGS_verify_checksum = false;
-
-// Allow reads to occur via mmap-ing files
-static bool FLAGS_use_mmap_reads = rocksdb::EnvOptions().use_mmap_reads;
-
-// Database statistics
-static std::shared_ptr<rocksdb::Statistics> dbstats;
-
-// Sync all writes to disk
-static bool FLAGS_sync = false;
-
-// If true, do not wait until data is synced to disk.
-static bool FLAGS_disable_data_sync = false;
-
-// If true, issue fsync instead of fdatasync
-static bool FLAGS_use_fsync = false;
-
-// If non-zero, kill at various points in source code with probability 1/this
-static int FLAGS_kill_random_test = 0;
-extern int rocksdb_kill_odds;
-
-// If true, do not write WAL for write.
-static bool FLAGS_disable_wal = false;
-
-// Target level-1 file size for compaction
-static int FLAGS_target_file_size_base = 64 * KB;
-
-// A multiplier to compute targe level-N file size (N >= 2)
-static int FLAGS_target_file_size_multiplier = 1;
-
-// Max bytes for level-1
-static uint64_t FLAGS_max_bytes_for_level_base = 256 * KB;
-
-// A multiplier to compute max bytes for level-N (N >= 2)
-static int FLAGS_max_bytes_for_level_multiplier = 2;
-
-// Number of files in level-0 that will trigger put stop.
-static int FLAGS_level0_stop_writes_trigger = 12;
-
-// Number of files in level-0 that will slow down writes.
-static int FLAGS_level0_slowdown_writes_trigger = 8;
-
-// Ratio of reads to total workload (expressed as a percentage)
-static unsigned int FLAGS_readpercent = 10;
-
-// Ratio of prefix iterators to total workload (expressed as a percentage)
-static unsigned int FLAGS_prefixpercent = 20;
-
-// Ratio of deletes to total workload (expressed as a percentage)
-static unsigned int FLAGS_writepercent = 45;
-
-// Ratio of deletes to total workload (expressed as a percentage)
-static unsigned int FLAGS_delpercent = 15;
-
-// Ratio of iterations to total workload (expressed as a percentage)
-static unsigned int FLAGS_iterpercent = 10;
-
-// Number of iterations per MultiIterate run
-static uint32_t FLAGS_num_iterations = 10;
-
-// Option to disable compation triggered by read.
-static int FLAGS_disable_seek_compaction = false;
-
-// Option to delete obsolete files periodically
-// Default: 0 which means that obsolete files are
-// deleted after every compaction run.
-static uint64_t FLAGS_delete_obsolete_files_period_micros = 0;
-
-// Algorithm to use to compress the database
-static enum rocksdb::CompressionType FLAGS_compression_type =
+static bool ValidateUint32Range(const char* flagname, uint64_t value) {
+  if (value > std::numeric_limits<uint32_t>::max()) {
+    fprintf(stderr, "Invalid value for --%s: %lu, overflow\n", flagname, value);
+    return false;
+  }
+  return true;
+}
+DEFINE_uint64(seed, 2341234, "Seed for PRNG");
+static const bool FLAGS_seed_dummy =
+  google::RegisterFlagValidator(&FLAGS_seed, &ValidateUint32Range);
+
+DEFINE_int64(max_key, 1 * KB * KB * KB,
+             "Max number of key/values to place in database");
+
+DEFINE_bool(test_batches_snapshots, false,
+            "If set, the test uses MultiGet(), MultiPut() and MultiDelete()"
+            " which read/write/delete multiple keys in a batch. In this mode,"
+            " we do not verify db content by comparing the content with the "
+            "pre-allocated array. Instead, we do partial verification inside"
+            " MultiGet() by checking various values in a batch. Benefit of"
+            " this mode:\n"
+            "\t(a) No need to acquire mutexes during writes (less cache "
+            "flushes in multi-core leading to speed up)\n"
+            "\t(b) No long validation at the end (more speed up)\n"
+            "\t(c) Test snapshot and atomicity of batch writes");
+
+DEFINE_int32(threads, 32, "Number of concurrent threads to run.");
+
+DEFINE_int32(ttl, -1,
+             "Opens the db with this ttl value if this is not -1. "
+             "Carefully specify a large value such that verifications on "
+             "deleted values don't fail");
+
+DEFINE_int32(value_size_mult, 8,
+             "Size of value will be this number times rand_int(1,3) bytes");
+
+DEFINE_bool(verify_before_write, false, "Verify before write");
+
+DEFINE_bool(histogram, false, "Print histogram of operation timings");
+
+DEFINE_bool(destroy_db_initially, true,
+            "Destroys the database dir before start if this is true");
+
+DEFINE_bool (verbose, false, "Verbose");
+
+DEFINE_int32(write_buffer_size, rocksdb::Options().write_buffer_size,
+             "Number of bytes to buffer in memtable before compacting");
+
+DEFINE_int32(max_write_buffer_number,
+             rocksdb::Options().max_write_buffer_number,
+             "The number of in-memory memtables. "
+             "Each memtable is of size FLAGS_write_buffer_size.");
+
+DEFINE_int32(min_write_buffer_number_to_merge,
+             rocksdb::Options().min_write_buffer_number_to_merge,
+             "The minimum number of write buffers that will be merged together "
+             "before writing to storage. This is cheap because it is an "
+             "in-memory merge. If this feature is not enabled, then all these "
+             "write buffers are flushed to L0 as separate files and this "
+             "increases read amplification because a get request has to check "
+             "in all of these files. Also, an in-memory merge may result in "
+             "writing less data to storage if there are duplicate records in"
+             " each of these individual write buffers.");
+
+DEFINE_int32(open_files, rocksdb::Options().max_open_files,
+             "Maximum number of files to keep open at the same time "
+             "(use default if == 0)");
+
+DEFINE_int32(compaction_style, rocksdb::Options().compaction_style, "");
+
+DEFINE_int32(level0_file_num_compaction_trigger,
+             rocksdb::Options().level0_file_num_compaction_trigger,
+             "Level0 compaction start trigger");
+
+DEFINE_int32(level0_slowdown_writes_trigger,
+             rocksdb::Options().level0_slowdown_writes_trigger,
+             "Number of files in level-0 that will slow down writes");
+
+DEFINE_int32(level0_stop_writes_trigger,
+             rocksdb::Options().level0_stop_writes_trigger,
+             "Number of files in level-0 that will trigger put stop.");
+
+DEFINE_int32(block_size, rocksdb::Options().block_size,
+             "Number of bytes in a block.");
+
+DEFINE_int32(max_background_compactions,
+             rocksdb::Options().max_background_compactions,
+             "The maximum number of concurrent background compactions "
+             "that can occur in parallel.");
+
+DEFINE_int32(universal_size_ratio, 0, "The ratio of file sizes that trigger"
+             " compaction in universal style");
+
+DEFINE_int32(universal_min_merge_width, 0, "The minimum number of files to "
+             "compact in universal style compaction");
+
+DEFINE_int32(universal_max_merge_width, 0, "The max number of files to compact"
+             " in universal style compaction");
+
+DEFINE_int32(universal_max_size_amplification_percent, 0,
+             "The max size amplification for universal style compaction");
+
+DEFINE_int64(cache_size, 2 * KB * KB * KB,
+             "Number of bytes to use as a cache of uncompressed data.");
+
+static bool ValidateInt32Positive(const char* flagname, int32_t value) {
+  if (value < 0) {
+    fprintf(stderr, "Invalid value for --%s: %d, must be >=0\n",
+            flagname, value);
+    return false;
+  }
+  return true;
+}
+DEFINE_int32(reopen, 10, "Number of times database reopens");
+static const bool FLAGS_reopen_dummy =
+  google::RegisterFlagValidator(&FLAGS_reopen, &ValidateInt32Positive);
+
+DEFINE_int32(bloom_bits, 10, "Bloom filter bits per key. "
+             "Negative means use default settings.");
+
+DEFINE_string(db, "", "Use the db with the following name.");
+
+DEFINE_bool(verify_checksum, false,
+            "Verify checksum for every block read from storage");
+
+DEFINE_bool(mmap_read, rocksdb::EnvOptions().use_mmap_reads,
+            "Allow reads to occur via mmap-ing files");
+
+// Database statistics
+static std::shared_ptr<rocksdb::Statistics> dbstats;
+DEFINE_bool(statistics, false, "Create database statistics");
+
+DEFINE_bool(sync, false, "Sync all writes to disk");
+
+DEFINE_bool(disable_data_sync, false,
+            "If true, do not wait until data is synced to disk.");
+
+DEFINE_bool(use_fsync, false, "If true, issue fsync instead of fdatasync");
+
+DEFINE_int32(kill_random_test, 0,
+             "If non-zero, kill at various points in source code with "
+             "probability 1/this");
+static const bool FLAGS_kill_random_test_dummy =
+  google::RegisterFlagValidator(&FLAGS_kill_random_test,
+                                &ValidateInt32Positive);
+extern int rocksdb_kill_odds;
+
+DEFINE_bool(disable_wal, false, "If true, do not write WAL for write.");
+
+DEFINE_int32(target_file_size_base, 64 * KB,
+             "Target level-1 file size for compaction");
+
+DEFINE_int32(target_file_size_multiplier, 1,
+             "A multiplier to compute targe level-N file size (N >= 2)");
+
+DEFINE_uint64(max_bytes_for_level_base, 256 * KB, "Max bytes for level-1");
+
+DEFINE_int32(max_bytes_for_level_multiplier, 2,
+             "A multiplier to compute max bytes for level-N (N >= 2)");
+
+static bool ValidateInt32Percent(const char* flagname, int32_t value) {
+  if (value < 0 || value>100) {
+    fprintf(stderr, "Invalid value for --%s: %d, 0<= pct <=100 \n",
+            flagname, value);
+    return false;
+  }
+  return true;
+}
+DEFINE_int32(readpercent, 10,
+             "Ratio of reads to total workload (expressed as a percentage)");
+static const bool FLAGS_readpercent_dummy =
+  google::RegisterFlagValidator(&FLAGS_readpercent, &ValidateInt32Percent);
+
+DEFINE_int32(prefixpercent, 20,
+             "Ratio of prefix iterators to total workload (expressed as a"
+             " percentage)");
+static const bool FLAGS_prefixpercent_dummy =
+  google::RegisterFlagValidator(&FLAGS_prefixpercent, &ValidateInt32Percent);
+
+DEFINE_int32(writepercent, 45,
+             " Ratio of deletes to total workload (expressed as a percentage)");
+static const bool FLAGS_writepercent_dummy =
+  google::RegisterFlagValidator(&FLAGS_writepercent, &ValidateInt32Percent);
+
+DEFINE_int32(delpercent, 15,
+             "Ratio of deletes to total workload (expressed as a percentage)");
+static const bool FLAGS_delpercent_dummy =
+  google::RegisterFlagValidator(&FLAGS_delpercent, &ValidateInt32Percent);
+
+DEFINE_int32(iterpercent, 10, "Ratio of iterations to total workload"
+             " (expressed as a percentage)");
+static const bool FLAGS_iterpercent_dummy =
+  google::RegisterFlagValidator(&FLAGS_iterpercent, &ValidateInt32Percent);
+
+DEFINE_uint64(num_iterations, 10, "Number of iterations per MultiIterate run");
+static const bool FLAGS_num_iterations_dummy =
+  google::RegisterFlagValidator(&FLAGS_num_iterations, &ValidateUint32Range);
+
+DEFINE_bool(disable_seek_compaction, false,
+            "Option to disable compation triggered by read.");
+
+DEFINE_uint64(delete_obsolete_files_period_micros, 0,
+              "Option to delete obsolete files periodically"
+              "0 means that obsolete files are "
+              " deleted after every compaction run.");
+
+enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
+  assert(ctype);
+
+  if (!strcasecmp(ctype, "none"))
+    return rocksdb::kNoCompression;
+  else if (!strcasecmp(ctype, "snappy"))
+    return rocksdb::kSnappyCompression;
+  else if (!strcasecmp(ctype, "zlib"))
+    return rocksdb::kZlibCompression;
+  else if (!strcasecmp(ctype, "bzip2"))
+    return rocksdb::kBZip2Compression;
+
+  fprintf(stdout, "Cannot parse compression type '%s'\n", ctype);
+  return rocksdb::kSnappyCompression; //default value
+}
+DEFINE_string(compression_type, "snappy",
+              "Algorithm to use to compress the database");
+static enum rocksdb::CompressionType FLAGS_compression_type_e =
     rocksdb::kSnappyCompression;
 
+DEFINE_string(hdfs, "", "Name of hdfs environment");
 // posix or hdfs environment
 static rocksdb::Env* FLAGS_env = rocksdb::Env::Default();
 
-// Number of operations per thread.
-static uint32_t FLAGS_ops_per_thread = 600000;
-
-// Log2 of number of keys per lock
-static uint32_t FLAGS_log2_keys_per_lock = 2; // implies 2^2 keys per lock
-
-// Percentage of times we want to purge redundant keys in memory before flushing
-static uint32_t FLAGS_purge_redundant_percent = 50;
-
-// On true, deletes use KeyMayExist to drop the delete if key not present
-static bool FLAGS_filter_deletes = false;
-
-// Level0 compaction start trigger
-static int FLAGS_level0_file_num_compaction_trigger = 0;
+DEFINE_uint64(ops_per_thread, 600000, "Number of operations per thread.");
+static const bool FLAGS_ops_per_thread_dummy =
+  google::RegisterFlagValidator(&FLAGS_ops_per_thread, &ValidateUint32Range);
+
+DEFINE_uint64(log2_keys_per_lock, 2, "Log2 of number of keys per lock");
+static const bool FLAGS_log2_keys_per_lock_dummy =
+  google::RegisterFlagValidator(&FLAGS_log2_keys_per_lock,
+                                &ValidateUint32Range);
+
+DEFINE_int32(purge_redundant_percent, 50,
+             "Percentage of times we want to purge redundant keys in memory "
+             "before flushing");
+static const bool FLAGS_purge_redundant_percent_dummy =
+  google::RegisterFlagValidator(&FLAGS_purge_redundant_percent,
+                                &ValidateInt32Percent);
+
+DEFINE_bool(filter_deletes, false, "On true, deletes use KeyMayExist to drop"
+            " the delete if key not present");
 
 enum RepFactory {
   kSkipList,
@@ -238,14 +302,39 @@ enum RepFactory {
   kUnsorted,
   kVectorRep
 };
+
+enum RepFactory StringToRepFactory(const char* ctype) {
+  assert(ctype);
+
+  if (!strcasecmp(ctype, "skip_list"))
+    return kSkipList;
+  else if (!strcasecmp(ctype, "prefix_hash"))
+    return kPrefixHash;
+  else if (!strcasecmp(ctype, "unsorted"))
+    return kUnsorted;
+  else if (!strcasecmp(ctype, "vector"))
+    return kVectorRep;
+
+  fprintf(stdout, "Cannot parse memreptable %s\n", ctype);
+  return kSkipList;
+}
+
 static enum RepFactory FLAGS_rep_factory;
-
-// Control the prefix size for PrefixHashRep
-static bool FLAGS_prefix_size = 0;
-
-// On true, replaces all writes with a Merge that behaves like a Put
-static bool FLAGS_use_merge_put = false;
+DEFINE_string(memtablerep, "skip_list", "");
+
+static bool ValidatePrefixSize(const char* flagname, int32_t value) {
+  if (value < 0 || value>=2000000000) {
+    fprintf(stderr, "Invalid value for --%s: %d. 0<= PrefixSize <=2000000000\n",
+            flagname, value);
+    return false;
+  }
+  return true;
+}
+DEFINE_int32(prefix_size, 0, "Control the prefix size for PrefixHashRep");
+static const bool FLAGS_prefix_size_dummy =
+  google::RegisterFlagValidator(&FLAGS_prefix_size, &ValidatePrefixSize);
+
+DEFINE_bool(use_merge, false, "On true, replaces all writes with a Merge "
+            "that behaves like a Put");
 
 namespace rocksdb {
@@ -596,7 +685,7 @@ class StressTest {
     FLAGS_env->GetChildren(FLAGS_db, &files);
     for (unsigned int i = 0; i < files.size(); i++) {
       if (Slice(files[i]).starts_with("heap-")) {
-        FLAGS_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
+        FLAGS_env->DeleteFile(FLAGS_db + "/" + files[i]);
       }
     }
     DestroyDB(FLAGS_db, Options());
@@ -733,7 +822,7 @@ class StressTest {
       keys[i] += key.ToString();
      values[i] += value.ToString();
      value_slices[i] = values[i];
-      if (FLAGS_use_merge_put) {
+      if (FLAGS_use_merge) {
        batch.Merge(keys[i], value_slices[i]);
      } else {
        batch.Put(keys[i], value_slices[i]);
@@ -921,7 +1010,7 @@ class StressTest {
     unique_ptr<Iterator> iter(db_->NewIterator(readoptionscopy));
     iter->Seek(key);
-    for (long i = 0; i < FLAGS_num_iterations && iter->Valid(); i++) {
+    for (uint64_t i = 0; i < FLAGS_num_iterations && iter->Valid(); i++) {
       if (thread->rand.OneIn(2)) {
        iter->Next();
      } else {
@@ -955,7 +1044,7 @@ class StressTest {
     const int delBound = writeBound + (int)FLAGS_delpercent;
     thread->stats.Start();
-    for (long i = 0; i < FLAGS_ops_per_thread; i++) {
+    for (uint64_t i = 0; i < FLAGS_ops_per_thread; i++) {
       if(i != 0 && (i % (FLAGS_ops_per_thread / (FLAGS_reopen + 1))) == 0) {
        {
          thread->stats.FinishedSingleOp();
@@ -1038,7 +1127,7 @@ class StressTest {
                       true);
        }
        thread->shared->Put(rand_key, value_base);
-        if (FLAGS_use_merge_put) {
+        if (FLAGS_use_merge) {
          db_->Merge(write_opts, key, v);
        } else {
          db_->Put(write_opts, key, v);
@@ -1183,7 +1272,7 @@ class StressTest {
     fprintf(stdout, "LevelDB version : %d.%d\n",
             kMajorVersion, kMinorVersion);
     fprintf(stdout, "Number of threads : %d\n", FLAGS_threads);
-    fprintf(stdout, "Ops per thread : %d\n", FLAGS_ops_per_thread);
+    fprintf(stdout, "Ops per thread : %lu\n", FLAGS_ops_per_thread);
     std::string ttl_state("unused");
     if (FLAGS_ttl > 0) {
       ttl_state = NumberToString(FLAGS_ttl);
@@ -1195,7 +1284,7 @@ class StressTest {
     fprintf(stdout, "Delete percentage : %d\n", FLAGS_delpercent);
     fprintf(stdout, "Iterate percentage : %d\n", FLAGS_iterpercent);
     fprintf(stdout, "Write-buffer-size : %d\n", FLAGS_write_buffer_size);
-    fprintf(stdout, "Iterations : %d\n", FLAGS_num_iterations);
+    fprintf(stdout, "Iterations : %lu\n", FLAGS_num_iterations);
     fprintf(stdout, "Max key : %ld\n", FLAGS_max_key);
     fprintf(stdout, "Ratio #ops/#keys : %f\n",
             (1.0 * FLAGS_ops_per_thread * FLAGS_threads)/FLAGS_max_key);
@@ -1210,7 +1299,7 @@ class StressTest {
             1 << FLAGS_log2_keys_per_lock);
     const char* compression = "";
-    switch (FLAGS_compression_type) {
+    switch (FLAGS_compression_type_e) {
       case rocksdb::kNoCompression:
        compression = "none";
        break;
@@ -1257,7 +1346,8 @@ class StressTest {
     options.min_write_buffer_number_to_merge =
       FLAGS_min_write_buffer_number_to_merge;
     options.max_background_compactions = FLAGS_max_background_compactions;
-    options.compaction_style = FLAGS_compaction_style;
+    options.compaction_style =
+      static_cast<rocksdb::CompactionStyle>(FLAGS_compaction_style);
     options.block_size = FLAGS_block_size;
     options.filter_policy = filter_policy_;
     options.prefix_extractor = prefix_extractor_;
@@ -1266,7 +1356,7 @@ class StressTest {
     options.env = FLAGS_env;
     options.disableDataSync = FLAGS_disable_data_sync;
     options.use_fsync = FLAGS_use_fsync;
-    options.allow_mmap_reads = FLAGS_use_mmap_reads;
+    options.allow_mmap_reads = FLAGS_mmap_read;
     rocksdb_kill_odds = FLAGS_kill_random_test;
     options.target_file_size_base = FLAGS_target_file_size_base;
     options.target_file_size_multiplier = FLAGS_target_file_size_multiplier;
@@ -1278,7 +1368,7 @@ class StressTest {
       FLAGS_level0_slowdown_writes_trigger;
     options.level0_file_num_compaction_trigger =
       FLAGS_level0_file_num_compaction_trigger;
-    options.compression = FLAGS_compression_type;
+    options.compression = FLAGS_compression_type_e;
     options.create_if_missing = true;
     options.disable_seek_compaction = FLAGS_disable_seek_compaction;
     options.delete_obsolete_files_period_micros =
@@ -1311,11 +1401,12 @@ class StressTest {
         break;
     }
     static Random purge_percent(1000); // no benefit from non-determinism here
-    if (purge_percent.Uniform(100) < FLAGS_purge_redundant_percent - 1) {
+    if (static_cast<int32_t>(purge_percent.Uniform(100)) <
+        FLAGS_purge_redundant_percent - 1) {
       options.purge_redundant_kvs_while_flush = false;
     }
-    if (FLAGS_use_merge_put) {
+    if (FLAGS_use_merge) {
       options.merge_operator = MergeOperators::CreatePutOperator();
     }
@@ -1337,7 +1428,7 @@ class StressTest {
         FLAGS_universal_max_size_amplification_percent;
     }
-    fprintf(stdout, "DB path: [%s]\n", FLAGS_db);
+    fprintf(stdout, "DB path: [%s]\n", FLAGS_db.c_str());
     Status s;
     if (FLAGS_ttl == -1) {
@@ -1387,213 +1478,22 @@ class StressTest {
 }  // namespace rocksdb
 
 int main(int argc, char** argv) {
-  FLAGS_write_buffer_size = rocksdb::Options().write_buffer_size;
-  FLAGS_max_write_buffer_number = rocksdb::Options().max_write_buffer_number;
-  FLAGS_min_write_buffer_number_to_merge =
-    rocksdb::Options().min_write_buffer_number_to_merge;
-  FLAGS_open_files = rocksdb::Options().max_open_files;
-  FLAGS_max_background_compactions =
-    rocksdb::Options().max_background_compactions;
-  FLAGS_compaction_style =
-    rocksdb::Options().compaction_style;
-  FLAGS_level0_file_num_compaction_trigger =
-    rocksdb::Options().level0_file_num_compaction_trigger;
-  FLAGS_level0_slowdown_writes_trigger =
-    rocksdb::Options().level0_slowdown_writes_trigger;
-  FLAGS_level0_stop_writes_trigger =
-    rocksdb::Options().level0_stop_writes_trigger;
-  // Compression test code above refers to FLAGS_block_size
-  FLAGS_block_size = rocksdb::Options().block_size;
-  std::string default_db_path;
-
-  for (int i = 1; i < argc; i++) {
-    int n;
-    uint32_t u;
-    long l;
-    char junk;
-    char hdfsname[2048];
-    if (sscanf(argv[i], "--seed=%uf%c", &u, &junk) == 1) {
-      FLAGS_seed = u;
-    } else if (sscanf(argv[i], "--max_key=%ld%c", &l, &junk) == 1) {
-      FLAGS_max_key = l;
-    } else if (sscanf(argv[i], "--log2_keys_per_lock=%u%c", &u, &junk) == 1) {
-      FLAGS_log2_keys_per_lock = u;
-    } else if (sscanf(argv[i], "--ops_per_thread=%u%c", &u, &junk) == 1) {
-      FLAGS_ops_per_thread = u;
-    } else if (sscanf(argv[i], "--verbose=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      FLAGS_verbose = n;
-    } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      FLAGS_histogram = n;
-    } else if (sscanf(argv[i], "--destroy_db_initially=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      FLAGS_destroy_db_initially = n;
-    } else if (sscanf(argv[i], "--verify_before_write=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      FLAGS_verify_before_write = n;
-    } else if (sscanf(argv[i], "--test_batches_snapshots=%d%c", &n, &junk) == 1
-               && (n == 0 || n == 1)) {
-      FLAGS_test_batches_snapshots = n;
-    } else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) {
-      FLAGS_threads = n;
-    } else if (sscanf(argv[i], "--ttl=%d%c", &n, &junk) == 1) {
-      FLAGS_ttl = n;
-    } else if (sscanf(argv[i], "--value_size_mult=%d%c", &n, &junk) == 1) {
-      FLAGS_value_size_mult = n;
-    } else if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
-      FLAGS_write_buffer_size = n;
-    } else if (sscanf(argv[i], "--max_write_buffer_number=%d%c", &n, &junk) == 1) {
-      FLAGS_max_write_buffer_number = n;
-    } else if (sscanf(argv[i], "--min_write_buffer_number_to_merge=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_min_write_buffer_number_to_merge = n;
-    } else if (sscanf(argv[i], "--max_background_compactions=%d%c", &n, &junk) == 1) {
-      FLAGS_max_background_compactions = n;
-    } else if (sscanf(argv[i], "--compaction_style=%d%c", &n, &junk) == 1) {
-      FLAGS_compaction_style = (rocksdb::CompactionStyle)n;
-    } else if (sscanf(argv[i], "--cache_size=%ld%c", &l, &junk) == 1) {
-      FLAGS_cache_size = l;
-    } else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) {
-      FLAGS_block_size = n;
-    } else if (sscanf(argv[i], "--reopen=%d%c", &n, &junk) == 1 && n >= 0) {
-      FLAGS_reopen = n;
-    } else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
-      FLAGS_bloom_bits = n;
-    } else if (sscanf(argv[i], "--open_files=%d%c", &n, &junk) == 1) {
-      FLAGS_open_files = n;
-    } else if (strncmp(argv[i], "--db=", 5) == 0) {
-      FLAGS_db = argv[i] + 5;
-    } else if (sscanf(argv[i], "--verify_checksum=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      FLAGS_verify_checksum = n;
-    } else if (sscanf(argv[i], "--mmap_read=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      FLAGS_use_mmap_reads = n;
-    } else if (sscanf(argv[i], "--statistics=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      if (n == 1) {
-        dbstats = rocksdb::CreateDBStatistics();
-      }
-    } else if (sscanf(argv[i], "--sync=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      FLAGS_sync = n;
-    } else if (sscanf(argv[i], "--readpercent=%d%c", &n, &junk) == 1 &&
-               (n >= 0 && n <= 100)) {
-      FLAGS_readpercent = n;
-    } else if (sscanf(argv[i], "--prefixpercent=%d%c", &n, &junk) == 1 &&
-               (n >= 0 && n <= 100)) {
-      FLAGS_prefixpercent = n;
-    } else if (sscanf(argv[i], "--writepercent=%d%c", &n, &junk) == 1 &&
-               (n >= 0 && n <= 100)) {
-      FLAGS_writepercent = n;
-    } else if (sscanf(argv[i], "--iterpercent=%d%c", &n, &junk) == 1 &&
-               (n >= 0 && n <= 100)) {
-      FLAGS_iterpercent = n;
-    } else if (sscanf(argv[i], "--delpercent=%d%c", &n, &junk) == 1 &&
-               (n >= 0 && n <= 100)) {
-      FLAGS_delpercent = n;
-    } else if (sscanf(argv[i], "--numiterations=%u%c", &u, &junk) == 1) {
-      FLAGS_num_iterations = u;
-    } else if (sscanf(argv[i], "--disable_data_sync=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      FLAGS_disable_data_sync = n;
-    } else if (sscanf(argv[i], "--use_fsync=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      FLAGS_use_fsync = n;
-    } else if (sscanf(argv[i], "--kill_random_test=%d%c", &n, &junk) == 1 &&
-               (n >= 0)) {
-      FLAGS_kill_random_test = n;
-    } else if (sscanf(argv[i], "--disable_wal=%d%c", &n, &junk) == 1 &&
-               (n == 0 || n == 1)) {
-      FLAGS_disable_wal = n;
-    } else if (sscanf(argv[i], "--hdfs=%s", hdfsname) == 1) {
-      FLAGS_env = new rocksdb::HdfsEnv(hdfsname);
-    } else if (sscanf(argv[i], "--target_file_size_base=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_target_file_size_base = n;
-    } else if ( sscanf(argv[i], "--target_file_size_multiplier=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_target_file_size_multiplier = n;
-    } else if (
-        sscanf(argv[i], "--max_bytes_for_level_base=%ld%c", &l, &junk) == 1) {
-      FLAGS_max_bytes_for_level_base = l;
-    } else if (sscanf(argv[i], "--max_bytes_for_level_multiplier=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_max_bytes_for_level_multiplier = n;
-    } else if (sscanf(argv[i],"--level0_stop_writes_trigger=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_level0_stop_writes_trigger = n;
-    } else if (sscanf(argv[i],"--level0_slowdown_writes_trigger=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_level0_slowdown_writes_trigger = n;
-    } else if (sscanf(argv[i],"--level0_file_num_compaction_trigger=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_level0_file_num_compaction_trigger = n;
-    } else if (strncmp(argv[i], "--compression_type=", 19) == 0) {
-      const char* ctype = argv[i] + 19;
-      if (!strcasecmp(ctype, "none"))
-        FLAGS_compression_type = rocksdb::kNoCompression;
-      else if (!strcasecmp(ctype, "snappy"))
-        FLAGS_compression_type = rocksdb::kSnappyCompression;
-      else if (!strcasecmp(ctype, "zlib"))
-        FLAGS_compression_type = rocksdb::kZlibCompression;
-      else if (!strcasecmp(ctype, "bzip2"))
-        FLAGS_compression_type = rocksdb::kBZip2Compression;
-      else {
-        fprintf(stdout, "Cannot parse %s\n", argv[i]);
-      }
-    } else if (strncmp(argv[i], "--memtablerep=", 14) == 0) {
-      const char* ctype = argv[i] + 14;
-      if (!strcasecmp(ctype, "skip_list"))
-        FLAGS_rep_factory = kSkipList;
-      else if (!strcasecmp(ctype, "prefix_hash"))
-        FLAGS_rep_factory = kPrefixHash;
-      else if (!strcasecmp(ctype, "unsorted"))
-        FLAGS_rep_factory = kUnsorted;
-      else if (!strcasecmp(ctype, "vector"))
-        FLAGS_rep_factory = kVectorRep;
-      else {
-        fprintf(stdout, "Cannot parse %s\n", argv[i]);
-      }
-    } else if (sscanf(argv[i], "--disable_seek_compaction=%d%c", &n, &junk) == 1
-               && (n == 0 || n == 1)) {
-      FLAGS_disable_seek_compaction = n;
-    } else if (sscanf(argv[i], "--delete_obsolete_files_period_micros=%ld%c",
-               &l, &junk) == 1) {
-      FLAGS_delete_obsolete_files_period_micros = n;
-    } else if (sscanf(argv[i], "--purge_redundant_percent=%d%c", &n, &junk) == 1
-               && (n >= 0 && n <= 100)) {
-      FLAGS_purge_redundant_percent = n;
-    } else if (sscanf(argv[i], "--filter_deletes=%d%c", &n, &junk)
-               == 1 && (n == 0 || n == 1)) {
-      FLAGS_filter_deletes = n;
-    } else if (sscanf(argv[i], "--prefix_size=%d%c", &n, &junk) == 1 &&
-               n >= 0 && n < 2000000000) {
-      FLAGS_prefix_size = n;
-    } else if (sscanf(argv[i], "--use_merge=%d%c", &n, &junk)
-               == 1 && (n == 0 || n == 1)) {
-      FLAGS_use_merge_put = n;
-    } else if (sscanf(argv[i], "--universal_size_ratio=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_universal_size_ratio = n;
-    } else if (sscanf(argv[i], "--universal_min_merge_width=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_universal_min_merge_width = n;
-    } else if (sscanf(argv[i], "--universal_max_merge_width=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_universal_max_merge_width = n;
-    } else if (sscanf(argv[i],
-               "--universal_max_size_amplification_percent=%d%c",
-               &n, &junk) == 1) {
-      FLAGS_universal_max_size_amplification_percent = n;
-    } else {
-      fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
-      exit(1);
-    }
+  google::SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
+                          " [OPTIONS]...");
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  if (FLAGS_statistics) {
+    dbstats = rocksdb::CreateDBStatistics();
+  }
+  FLAGS_compression_type_e =
+    StringToCompressionType(FLAGS_compression_type.c_str());
+  if (!FLAGS_hdfs.empty()) {
+    FLAGS_env = new rocksdb::HdfsEnv(FLAGS_hdfs);
   }
+  FLAGS_rep_factory = StringToRepFactory(FLAGS_memtablerep.c_str());
 
   // The number of background threads should be at least as much the
   // max number of concurrent compactions.
@@ -1611,16 +1511,17 @@ int main(int argc, char** argv) {
   }
 
   if ((unsigned)FLAGS_reopen >= FLAGS_ops_per_thread) {
     fprintf(stderr, "Error: #DB-reopens should be < ops_per_thread\n"
-            "Provided reopens = %d and ops_per_thread = %u\n", FLAGS_reopen,
+            "Provided reopens = %d and ops_per_thread = %lu\n", FLAGS_reopen,
             FLAGS_ops_per_thread);
     exit(1);
   }
 
   // Choose a location for the test database if none given with --db=<path>
-  if (FLAGS_db == nullptr) {
+  if (FLAGS_db.empty()) {
+    std::string default_db_path;
     rocksdb::Env::Default()->GetTestDirectory(&default_db_path);
     default_db_path += "/dbstress";
-    FLAGS_db = default_db_path.c_str();
+    FLAGS_db = default_db_path;
   }
 
   rocksdb::StressTest stress;
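After the conversion, db_stress --help lists every flag with its description and default (several defaults now come straight from rocksdb::Options()), and an invocation such as db_stress --db=/tmp/dbstress --threads=16 --ops_per_thread=100000 (values illustrative) replaces the old sscanf-based parsing, which is why db_crashtest.py and db_crashtest2.py continue to work unchanged per the test plan above.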
