@ -14,34 +14,47 @@ int main() {
#include <stdio.h>
#include <sys/types.h>

#include <cinttypes>
#include <limits>

#include "port/port.h"
#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "util/coding.h"
#include "util/gflags_compat.h"
#include "util/hash.h"
#include "util/mutexlock.h"
#include "util/random.h"
using GFLAGS_NAMESPACE : : ParseCommandLineFlags ;
using GFLAGS_NAMESPACE : : ParseCommandLineFlags ;
static const uint32_t KB = 1024 ;
static constexpr uint32_t KiB = uint32_t { 1 } < < 10 ;
static constexpr uint32_t MiB = KiB < < 10 ;
DEFINE_int32 ( threads , 16 , " Number of concurrent threads to run. " ) ;
static constexpr uint64_t GiB = MiB < < 10 ;
DEFINE_int64 ( cache_size , 8 * KB * KB ,
" Number of bytes to use as a cache of uncompressed data. " ) ;
DEFINE_uint32 ( threads , 16 , " Number of concurrent threads to run. " ) ;
DEFINE_int32 ( num_shard_bits , 4 , " shard_bits. " ) ;
DEFINE_uint64 ( cache_size , 1 * GiB ,
" Number of bytes to use as a cache of uncompressed data. " ) ;
DEFINE_int64 ( max_key , 1 * KB * KB * KB , " Max number of key to place in cache " ) ;
DEFINE_uint32 ( num_shard_bits , 6 , " shard_bits. " ) ;
DEFINE_uint64 ( ops_per_thread , 1200000 , " Number of operations per thread. " ) ;
DEFINE_double ( resident_ratio , 0.25 ,
DEFINE_bool ( populate_cache , false , " Populate cache before operations " ) ;
" Ratio of keys fitting in cache to keyspace. " ) ;
DEFINE_int32 ( insert_percent , 40 ,
DEFINE_uint64 ( ops_per_thread , 0 ,
" Ratio of insert to total workload (expressed as a percentage) " ) ;
" Number of operations per thread. (Default: 5 * keyspace size) " ) ;
DEFINE_int32 ( lookup_percent , 50 ,
DEFINE_uint32 ( value_bytes , 8 * KiB , " Size of each value added. " ) ;
" Ratio of lookup to total workload (expressed as a percentage) " ) ;
DEFINE_int32 ( erase_percent , 10 ,
DEFINE_uint32 ( skew , 5 , " Degree of skew in key selection " ) ;
" Ratio of erase to total workload (expressed as a percentage) " ) ;
DEFINE_bool ( populate_cache , true , " Populate cache before operations " ) ;
DEFINE_uint32 ( lookup_insert_percent , 87 ,
" Ratio of lookup (+ insert on not found) to total workload "
" (expressed as a percentage) " ) ;
DEFINE_uint32 ( insert_percent , 2 ,
" Ratio of insert to total workload (expressed as a percentage) " ) ;
DEFINE_uint32 ( lookup_percent , 10 ,
" Ratio of lookup to total workload (expressed as a percentage) " ) ;
DEFINE_uint32 ( erase_percent , 1 ,
" Ratio of erase to total workload (expressed as a percentage) " ) ;
DEFINE_bool ( use_clock_cache , false , " " ) ;
DEFINE_bool ( use_clock_cache , false , " " ) ;
@ -49,21 +62,15 @@ namespace ROCKSDB_NAMESPACE {
class CacheBench ;
class CacheBench ;
namespace {
namespace {
// Frees a value allocated with new[]. Original used scalar `delete` on an
// array allocation (undefined behavior) and `reinterpret_cast` where
// `static_cast` from void* suffices.
void deleter(const Slice& /*key*/, void* value) {
  delete[] static_cast<char*>(value);
}
// State shared by all concurrent executions of the same benchmark.
// State shared by all concurrent executions of the same benchmark.
class SharedState {
class SharedState {
public :
public :
explicit SharedState ( CacheBench * cache_bench )
explicit SharedState ( CacheBench * cache_bench )
: cv_ ( & mu_ ) ,
: cv_ ( & mu_ ) ,
num_threads_ ( FLAGS_threads ) ,
num_initialized_ ( 0 ) ,
num_initialized_ ( 0 ) ,
start_ ( false ) ,
start_ ( false ) ,
num_done_ ( 0 ) ,
num_done_ ( 0 ) ,
cache_bench_ ( cache_bench ) {
cache_bench_ ( cache_bench ) { }
}
~ SharedState ( ) { }
~ SharedState ( ) { }
@ -87,13 +94,9 @@ class SharedState {
num_done_ + + ;
num_done_ + + ;
}
}
bool AllInitialized ( ) const {
bool AllInitialized ( ) const { return num_initialized_ > = FLAGS_threads ; }
return num_initialized_ > = num_threads_ ;
}
bool AllDone ( ) const {
bool AllDone ( ) const { return num_done_ > = FLAGS_threads ; }
return num_done_ > = num_threads_ ;
}
void SetStart ( ) {
void SetStart ( ) {
start_ = true ;
start_ = true ;
@ -107,7 +110,6 @@ class SharedState {
port : : Mutex mu_ ;
port : : Mutex mu_ ;
port : : CondVar cv_ ;
port : : CondVar cv_ ;
const uint64_t num_threads_ ;
uint64_t num_initialized_ ;
uint64_t num_initialized_ ;
bool start_ ;
bool start_ ;
uint64_t num_done_ ;
uint64_t num_done_ ;
@ -118,17 +120,69 @@ class SharedState {
// Per-thread state for concurrent executions of the same benchmark.
// Per-thread state for concurrent executions of the same benchmark.
struct ThreadState {
struct ThreadState {
uint32_t tid ;
uint32_t tid ;
Random rnd ;
Random64 rnd ;
SharedState * shared ;
SharedState * shared ;
ThreadState ( uint32_t index , SharedState * _shared )
ThreadState ( uint32_t index , SharedState * _shared )
: tid ( index ) , rnd ( 1000 + index ) , shared ( _shared ) { }
: tid ( index ) , rnd ( 1000 + index ) , shared ( _shared ) { }
} ;
} ;
// Generates skewed, variable-offset cache keys into a reusable buffer.
// The returned Slice points into key_data, so it is only valid until the
// next GetRand call on the same KeyGen.
struct KeyGen {
  char key_data[27];

  Slice GetRand(Random64& rnd, uint64_t max_key) {
    uint64_t raw = rnd.Next();
    // Skew according to setting: taking the min of several uniform draws
    // biases selection toward smaller key values.
    for (uint32_t i = 0; i < FLAGS_skew; ++i) {
      raw = std::min(raw, rnd.Next());
    }
    // Map the raw 64-bit value into [0, max_key) without modulo bias.
    uint64_t key = fastrange64(raw, max_key);
    // Variable size and alignment: start offset depends on the key, and the
    // key value is encoded redundantly at three positions in the buffer.
    size_t off = key % 8;
    key_data[0] = char{42};
    EncodeFixed64(key_data + 1, key);
    key_data[9] = char{11};
    EncodeFixed64(key_data + 10, key);
    key_data[18] = char{4};
    EncodeFixed64(key_data + 19, key);
    return Slice(&key_data[off], sizeof(key_data) - off);
  }
};
// Allocates a FLAGS_value_bytes-sized value owned by the cache (freed by
// `deleter`). Filled with pseudo-random data, both as filler and to burn a
// little CPU like real value construction would.
char* createValue(Random64& rnd) {
  char* rv = new char[FLAGS_value_bytes];
  for (uint32_t i = 0; i < FLAGS_value_bytes; i += 8) {
    EncodeFixed64(rv + i, rnd.Next());
  }
  return rv;
}
// Cache deleter matching createValue's new[] allocation.
void deleter(const Slice& /*key*/, void* value) {
  delete[] static_cast<char*>(value);
}
} // namespace
} // namespace
class CacheBench {
class CacheBench {
static constexpr uint64_t kHundredthUint64 =
std : : numeric_limits < uint64_t > : : max ( ) / 100U ;
public :
public :
CacheBench ( ) : num_threads_ ( FLAGS_threads ) {
CacheBench ( )
: max_key_ ( static_cast < uint64_t > ( FLAGS_cache_size / FLAGS_resident_ratio /
FLAGS_value_bytes ) ) ,
lookup_insert_threshold_ ( kHundredthUint64 *
FLAGS_lookup_insert_percent ) ,
insert_threshold_ ( lookup_insert_threshold_ +
kHundredthUint64 * FLAGS_insert_percent ) ,
lookup_threshold_ ( insert_threshold_ +
kHundredthUint64 * FLAGS_lookup_percent ) ,
erase_threshold_ ( lookup_threshold_ +
kHundredthUint64 * FLAGS_erase_percent ) {
if ( erase_threshold_ ! = 100U * kHundredthUint64 ) {
fprintf ( stderr , " Percentages must add to 100. \n " ) ;
exit ( 1 ) ;
}
if ( FLAGS_use_clock_cache ) {
if ( FLAGS_use_clock_cache ) {
cache_ = NewClockCache ( FLAGS_cache_size , FLAGS_num_shard_bits ) ;
cache_ = NewClockCache ( FLAGS_cache_size , FLAGS_num_shard_bits ) ;
if ( ! cache_ ) {
if ( ! cache_ ) {
@ -138,18 +192,19 @@ class CacheBench {
} else {
} else {
cache_ = NewLRUCache ( FLAGS_cache_size , FLAGS_num_shard_bits ) ;
cache_ = NewLRUCache ( FLAGS_cache_size , FLAGS_num_shard_bits ) ;
}
}
if ( FLAGS_ops_per_thread = = 0 ) {
FLAGS_ops_per_thread = 5 * max_key_ ;
}
}
}
~ CacheBench ( ) { }
~ CacheBench ( ) { }
void PopulateCache ( ) {
void PopulateCache ( ) {
Random rnd ( 1 ) ;
Random64 rnd ( 1 ) ;
for ( int64_t i = 0 ; i < FLAGS_cache_size ; i + + ) {
KeyGen keygen ;
uint64_t rand_key = rnd . Next ( ) % FLAGS_max_key ;
for ( uint64_t i = 0 ; i < 2 * FLAGS_cache_size ; i + = FLAGS_value_bytes ) {
// Cast uint64* to be char*, data would be copied to cache
cache_ - > Insert ( keygen . GetRand ( rnd , max_key_ ) , createValue ( rnd ) ,
Slice key ( reinterpret_cast < char * > ( & rand_key ) , 8 ) ;
FLAGS_value_bytes , & deleter ) ;
// do insert
cache_ - > Insert ( key , new char [ 10 ] , 1 , & deleter ) ;
}
}
}
}
@ -158,10 +213,10 @@ class CacheBench {
PrintEnv ( ) ;
PrintEnv ( ) ;
SharedState shared ( this ) ;
SharedState shared ( this ) ;
std : : vector < ThreadState * > threads ( num_threads_ ) ;
std : : vector < std : : unique_ptr < ThreadState > > threads ( FLAGS_threads ) ;
for ( uint32_t i = 0 ; i < num_threads_ ; i + + ) {
for ( uint32_t i = 0 ; i < FLAGS_threads ; i + + ) {
threads [ i ] = new ThreadState ( i , & shared ) ;
threads [ i ] . reset ( new ThreadState ( i , & shared ) ) ;
env - > StartThread ( ThreadBody , threads [ i ] ) ;
env - > StartThread ( ThreadBody , threads [ i ] . get ( ) ) ;
}
}
{
{
MutexLock l ( shared . GetMutex ( ) ) ;
MutexLock l ( shared . GetMutex ( ) ) ;
@ -192,10 +247,15 @@ class CacheBench {
private :
private :
std : : shared_ptr < Cache > cache_ ;
std : : shared_ptr < Cache > cache_ ;
uint32_t num_threads_ ;
const uint64_t max_key_ ;
// Cumulative thresholds in the space of a random uint64_t
const uint64_t lookup_insert_threshold_ ;
const uint64_t insert_threshold_ ;
const uint64_t lookup_threshold_ ;
const uint64_t erase_threshold_ ;
static void ThreadBody ( void * v ) {
static void ThreadBody ( void * v ) {
ThreadState * thread = reinterpret_cast < ThreadState * > ( v ) ;
ThreadState * thread = static _cast< ThreadState * > ( v ) ;
SharedState * shared = thread - > shared ;
SharedState * shared = thread - > shared ;
{
{
@ -220,40 +280,78 @@ class CacheBench {
}
}
void OperateCache ( ThreadState * thread ) {
void OperateCache ( ThreadState * thread ) {
// To use looked-up values
uint64_t result = 0 ;
// To hold handles for a non-trivial amount of time
Cache : : Handle * handle = nullptr ;
KeyGen gen ;
for ( uint64_t i = 0 ; i < FLAGS_ops_per_thread ; i + + ) {
for ( uint64_t i = 0 ; i < FLAGS_ops_per_thread ; i + + ) {
uint64_t rand_key = thread - > rnd . Next ( ) % FLAGS_max_key ;
Slice key = gen . GetRand ( thread - > rnd , max_key_ ) ;
// Cast uint64* to be char*, data would be copied to cache
uint64_t random_op = thread - > rnd . Next ( ) ;
Slice key ( reinterpret_cast < char * > ( & rand_key ) , 8 ) ;
if ( random_op < lookup_insert_threshold_ ) {
int32_t prob_op = thread - > rnd . Uniform ( 100 ) ;
if ( handle ) {
if ( prob_op > = 0 & & prob_op < FLAGS_insert_percent ) {
cache_ - > Release ( handle ) ;
// do insert
handle = nullptr ;
cache_ - > Insert ( key , new char [ 10 ] , 1 , & deleter ) ;
}
} else if ( prob_op - = FLAGS_insert_percent & &
prob_op < FLAGS_lookup_percent ) {
// do lookup
// do lookup
auto handle = cache_ - > Lookup ( key ) ;
handle = cache_ - > Lookup ( key ) ;
if ( handle ) {
// do something with the data
result + = NPHash64 ( static_cast < char * > ( cache_ - > Value ( handle ) ) ,
FLAGS_value_bytes ) ;
} else {
// do insert
cache_ - > Insert ( key , createValue ( thread - > rnd ) , FLAGS_value_bytes ,
& deleter , & handle ) ;
}
} else if ( random_op < insert_threshold_ ) {
if ( handle ) {
if ( handle ) {
cache_ - > Release ( handle ) ;
cache_ - > Release ( handle ) ;
handle = nullptr ;
}
}
} else if ( prob_op - = FLAGS_lookup_percent & &
// do insert
prob_op < FLAGS_erase_percent ) {
cache_ - > Insert ( key , createValue ( thread - > rnd ) , FLAGS_value_bytes ,
& deleter , & handle ) ;
} else if ( random_op < lookup_threshold_ ) {
if ( handle ) {
cache_ - > Release ( handle ) ;
handle = nullptr ;
}
// do lookup
handle = cache_ - > Lookup ( key ) ;
if ( handle ) {
// do something with the data
result + = NPHash64 ( static_cast < char * > ( cache_ - > Value ( handle ) ) ,
FLAGS_value_bytes ) ;
}
} else if ( random_op < erase_threshold_ ) {
// do erase
// do erase
cache_ - > Erase ( key ) ;
cache_ - > Erase ( key ) ;
} else {
// Should be extremely unlikely (noop)
assert ( random_op > = kHundredthUint64 * 100U ) ;
}
}
}
}
if ( handle ) {
cache_ - > Release ( handle ) ;
handle = nullptr ;
}
}
}
void PrintEnv ( ) const {
void PrintEnv ( ) const {
printf ( " RocksDB version : %d.%d \n " , kMajorVersion , kMinorVersion ) ;
printf ( " RocksDB version : %d.%d \n " , kMajorVersion , kMinorVersion ) ;
printf ( " Number of threads : %d \n " , FLAGS_threads ) ;
printf ( " Number of threads : %u \n " , FLAGS_threads ) ;
printf ( " Ops per thread : % " PRIu64 " \n " , FLAGS_ops_per_thread ) ;
printf ( " Ops per thread : % " PRIu64 " \n " , FLAGS_ops_per_thread ) ;
printf ( " Cache size : % " PRIu64 " \n " , FLAGS_cache_size ) ;
printf ( " Cache size : % " PRIu64 " \n " , FLAGS_cache_size ) ;
printf ( " Num shard bits : %d \n " , FLAGS_num_shard_bits ) ;
printf ( " Num shard bits : %u \n " , FLAGS_num_shard_bits ) ;
printf ( " Max key : % " PRIu64 " \n " , FLAGS_max_key ) ;
printf ( " Max key : % " PRIu64 " \n " , max_key_ ) ;
printf ( " Populate cache : %d \n " , FLAGS_populate_cache ) ;
printf ( " Resident ratio : %g \n " , FLAGS_resident_ratio ) ;
printf ( " Insert percentage : %d%% \n " , FLAGS_insert_percent ) ;
printf ( " Skew degree : %u \n " , FLAGS_skew ) ;
printf ( " Lookup percentage : %d%% \n " , FLAGS_lookup_percent ) ;
printf ( " Populate cache : %d \n " , int { FLAGS_populate_cache } ) ;
printf ( " Erase percentage : %d%% \n " , FLAGS_erase_percent ) ;
printf ( " Lookup+Insert pct : %u%% \n " , FLAGS_lookup_insert_percent ) ;
printf ( " Insert percentage : %u%% \n " , FLAGS_insert_percent ) ;
printf ( " Lookup percentage : %u%% \n " , FLAGS_lookup_percent ) ;
printf ( " Erase percentage : %u%% \n " , FLAGS_erase_percent ) ;
printf ( " ---------------------------- \n " ) ;
printf ( " ---------------------------- \n " ) ;
}
}
} ;
} ;
@ -270,6 +368,8 @@ int main(int argc, char** argv) {
ROCKSDB_NAMESPACE : : CacheBench bench ;
ROCKSDB_NAMESPACE : : CacheBench bench ;
if ( FLAGS_populate_cache ) {
if ( FLAGS_populate_cache ) {
bench . PopulateCache ( ) ;
bench . PopulateCache ( ) ;
printf ( " Population complete \n " ) ;
printf ( " ---------------------------- \n " ) ;
}
}
if ( bench . Run ( ) ) {
if ( bench . Run ( ) ) {
return 0 ;
return 0 ;