@@ -96,7 +96,12 @@ using GFLAGS_NAMESPACE::ParseCommandLineFlags;
 using GFLAGS_NAMESPACE::RegisterFlagValidator;
 using GFLAGS_NAMESPACE::SetUsageMessage;
 
-#ifndef ROCKSDB_LITE
+#ifdef ROCKSDB_LITE
+#define IF_ROCKSDB_LITE(Then, Else) Then
+#else
+#define IF_ROCKSDB_LITE(Then, Else) Else
+#endif
+
 DEFINE_string(
     benchmarks,
     "fillseq,"
@@ -116,9 +121,11 @@ DEFINE_string(
     "compact,"
     "compactall,"
     "flush,"
+    IF_ROCKSDB_LITE("",
     "compact0,"
     "compact1,"
     "waitforcompaction,"
+    )
     "multireadrandom,"
     "mixgraph,"
     "readseq,"
@@ -208,9 +215,11 @@ DEFINE_string(
     "Meta operations:\n"
     "\tcompact -- Compact the entire DB; If multiple, randomly choose one\n"
     "\tcompactall -- Compact the entire DB\n"
+    IF_ROCKSDB_LITE("",
     "\tcompact0 -- compact L0 into L1\n"
     "\tcompact1 -- compact L1 into L2\n"
     "\twaitforcompaction - pause until compaction is (probably) done\n"
+    )
     "\tflush - flush the memtable\n"
     "\tstats -- Print DB stats\n"
     "\tresetstats -- Reset DB stats\n"
@@ -225,130 +234,6 @@ DEFINE_string(
     "by doing a Get followed by binary searching in the large sorted list vs "
     "doing a GetMergeOperands and binary searching in the operands which are "
     "sorted sub-lists. The MergeOperator used is sortlist.h\n");
-#else
-DEFINE_string(
-    benchmarks,
-    "fillseq,"
-    "fillseqdeterministic,"
-    "fillsync,"
-    "fillrandom,"
-    "filluniquerandomdeterministic,"
-    "overwrite,"
-    "readrandom,"
-    "newiterator,"
-    "newiteratorwhilewriting,"
-    "seekrandom,"
-    "seekrandomwhilewriting,"
-    "seekrandomwhilemerging,"
-    "readseq,"
-    "readreverse,"
-    "compact,"
-    "compactall,"
-    "flush,"
-    "multireadrandom,"
-    "mixgraph,"
-    "readseq,"
-    "readtorowcache,"
-    "readtocache,"
-    "readreverse,"
-    "readwhilewriting,"
-    "readwhilemerging,"
-    "readwhilescanning,"
-    "readrandomwriterandom,"
-    "updaterandom,"
-    "xorupdaterandom,"
-    "approximatesizerandom,"
-    "randomwithverify,"
-    "fill100K,"
-    "crc32c,"
-    "xxhash,"
-    "compress,"
-    "uncompress,"
-    "acquireload,"
-    "fillseekseq,"
-    "randomtransaction,"
-    "randomreplacekeys,"
-    "timeseries,"
-    "getmergeoperands",
-    "Comma-separated list of operations to run in the specified "
-    "order. Available benchmarks:\n"
-    "\tfillseq -- write N values in sequential key "
-    "order in async mode\n"
-    "\tfillseqdeterministic -- write N values in the specified "
-    "key order and keep the shape of the LSM tree\n"
-    "\tfillrandom -- write N values in random key order in async "
-    "mode\n"
-    "\tfilluniquerandomdeterministic -- write N values in a random "
-    "key order and keep the shape of the LSM tree\n"
-    "\toverwrite -- overwrite N values in random key order in "
-    "async mode\n"
-    "\tfillsync -- write N/1000 values in random key order in "
-    "sync mode\n"
-    "\tfill100K -- write N/1000 100K values in random order in "
-    "async mode\n"
-    "\tdeleteseq -- delete N keys in sequential order\n"
-    "\tdeleterandom -- delete N keys in random order\n"
-    "\treadseq -- read N times sequentially\n"
-    "\treadtocache -- 1 thread reading database sequentially\n"
-    "\treadreverse -- read N times in reverse order\n"
-    "\treadrandom -- read N times in random order\n"
-    "\treadmissing -- read N missing keys in random order\n"
-    "\treadwhilewriting -- 1 writer, N threads doing random "
-    "reads\n"
-    "\treadwhilemerging -- 1 merger, N threads doing random "
-    "reads\n"
-    "\treadwhilescanning -- 1 thread doing full table scan, "
-    "N threads doing random reads\n"
-    "\treadrandomwriterandom -- N threads doing random-read, "
-    "random-write\n"
-    "\tupdaterandom -- N threads doing read-modify-write for random "
-    "keys\n"
-    "\txorupdaterandom -- N threads doing read-XOR-write for "
-    "random keys\n"
-    "\tappendrandom -- N threads doing read-modify-write with "
-    "growing values\n"
-    "\tmergerandom -- same as updaterandom/appendrandom using merge "
-    "operator. "
-    "Must be used with merge_operator\n"
-    "\treadrandommergerandom -- perform N random read-or-merge "
-    "operations. Must be used with merge_operator\n"
-    "\tnewiterator -- repeated iterator creation\n"
-    "\tseekrandom -- N random seeks, call Next seek_nexts times "
-    "per seek\n"
-    "\tseekrandomwhilewriting -- seekrandom and 1 thread doing "
-    "overwrite\n"
-    "\tseekrandomwhilemerging -- seekrandom and 1 thread doing "
-    "merge\n"
-    "\tcrc32c -- repeated crc32c of 4K of data\n"
-    "\txxhash -- repeated xxHash of 4K of data\n"
-    "\tacquireload -- load N*1000 times\n"
-    "\tfillseekseq -- write N values in sequential key, then read "
-    "them by seeking to each key\n"
-    "\trandomtransaction -- execute N random transactions and "
-    "verify correctness\n"
-    "\trandomreplacekeys -- randomly replaces N keys by deleting "
-    "the old version and putting the new version\n\n"
-    "\ttimeseries -- 1 writer generates time series data "
-    "and multiple readers doing random reads on id\n\n"
-    "Meta operations:\n"
-    "\tcompact -- Compact the entire DB; If multiple, randomly choose one\n"
-    "\tcompactall -- Compact the entire DB\n"
-    "\tflush - flush the memtable\n"
-    "\tstats -- Print DB stats\n"
-    "\tresetstats -- Reset DB stats\n"
-    "\tlevelstats -- Print the number of files and bytes per level\n"
-    "\tmemstats -- Print memtable stats\n"
-    "\tsstables -- Print sstable info\n"
-    "\theapprofile -- Dump a heap profile (if supported by this port)\n"
-    "\treplay -- replay the trace file specified with trace_file\n"
-    "\tgetmergeoperands -- Insert lots of merge records which are a list of "
-    "sorted ints for a key and then compare performance of lookup for another "
-    "key "
-    "by doing a Get followed by binary searching in the large sorted list vs "
-    "doing a GetMergeOperands and binary searching in the operands which are "
-    "sorted sub-lists. The MergeOperator used is sortlist.h\n");
-#endif
 
 DEFINE_int64(num, 1000000, "Number of key/values to place in database");
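
For reference, a minimal, self-contained sketch of the pattern this diff introduces (not the db_bench source itself; the kBenchmarks name is made up for illustration): IF_ROCKSDB_LITE is a function-like macro that keeps its Then argument when ROCKSDB_LITE is defined and its Else argument otherwise, so a run of adjacent string literals can be spliced into, or dropped from, a single concatenated default value without keeping a second, LITE-only copy of the whole DEFINE_string.

// Minimal sketch, assuming the IF_ROCKSDB_LITE macro shown in the hunk above.
#include <cstdio>

#ifdef ROCKSDB_LITE
#define IF_ROCKSDB_LITE(Then, Else) Then
#else
#define IF_ROCKSDB_LITE(Then, Else) Else
#endif

// Hypothetical constant standing in for the benchmarks flag default value.
static const char kBenchmarks[] =
    "fillseq,"
    "flush,"
    IF_ROCKSDB_LITE("",                      // LITE build: contribute nothing
                    "compact0,"              // non-LITE build: keep these entries
                    "compact1,"
                    "waitforcompaction,")
    "readrandom";

int main() {
  // Without -DROCKSDB_LITE this prints:
  //   fillseq,flush,compact0,compact1,waitforcompaction,readrandom
  // With -DROCKSDB_LITE the three wrapped entries are dropped at preprocessing time.
  std::printf("%s\n", kBenchmarks);
  return 0;
}

Because the selection happens in the preprocessor, both build variants share one flag definition, which is what allows the 124-line LITE-only duplicate below the #else to be deleted.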