Improve comments for some files (#9793)

Summary:
Update the comments, e.g. fixing typos and formatting.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9793

Reviewed By: jay-zhuang

Differential Revision: D35323989

Pulled By: gitbw95

fbshipit-source-id: 4a72fc02b67abaae8be0d1439b68f9967a68052d
Branch: main
Author: Bo Wang (committed by Facebook GitHub Bot)
Parent: f246e56d0a
Commit: bcabee737f
Files changed:
  cache/lru_cache.cc       (65)
  cache/lru_cache.h        (36)
  db/db_filesnapshot.cc    (13)
  include/rocksdb/cache.h  (28)
  tools/db_bench_tool.cc   (61)

cache/lru_cache.cc

@@ -76,13 +76,12 @@ LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
 void LRUHandleTable::Resize() {
   if (length_bits_ >= max_length_bits_) {
-    // Due to reaching limit of hash information, if we made the table
-    // bigger, we would allocate more addresses but only the same
-    // number would be used.
+    // Due to reaching limit of hash information, if we made the table bigger,
+    // we would allocate more addresses but only the same number would be used.
     return;
   }
   if (length_bits_ >= 31) {
-    // Avoid undefined behavior shifting uint32_t by 32
+    // Avoid undefined behavior shifting uint32_t by 32.
     return;
   }
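
For readers skimming this hunk: the table indexes entries by the upper bits of a 32-bit hash, so growth is bounded by the hash bits available. A minimal illustrative sketch of the idea (made-up names, not the RocksDB implementation):

    #include <cstdint>

    // Buckets are selected from the top `length_bits` bits of the hash. Each
    // doubling of the table consumes one more bit; once every usable bit is
    // spent, extra buckets could never be addressed, hence the early return.
    uint32_t BucketIndexSketch(uint32_t hash, int length_bits) {
      return hash >> (32 - length_bits);  // assumes 0 < length_bits <= 32
    }
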
@@ -125,7 +124,7 @@ LRUCacheShard::LRUCacheShard(
       mutex_(use_adaptive_mutex),
       secondary_cache_(secondary_cache) {
   set_metadata_charge_policy(metadata_charge_policy);
-  // Make empty circular linked list
+  // Make empty circular linked list.
   lru_.next = &lru_;
   lru_.prev = &lru_;
   lru_low_pri_ = &lru_;
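
The self-pointing sentinel above is the standard empty state for an intrusive circular doubly-linked list. A hedged standalone sketch of the technique (illustrative `Node` type, not the real LRUHandle):

    struct Node {
      Node* next;
      Node* prev;
    };

    // An empty list is the sentinel pointing at itself, so insert/remove
    // never need null checks and "list is empty" is simply `s->next == s`.
    void InitEmpty(Node* s) {
      s->next = s;
      s->prev = s;
    }

    void InsertBefore(Node* pos, Node* e) {
      e->next = pos;
      e->prev = pos->prev;
      e->prev->next = e;
      e->next->prev = e;
    }
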
@@ -138,7 +137,7 @@ void LRUCacheShard::EraseUnRefEntries() {
   MutexLock l(&mutex_);
   while (lru_.next != &lru_) {
     LRUHandle* old = lru_.next;
-    // LRU list contains only elements which can be evicted
+    // LRU list contains only elements which can be evicted.
     assert(old->InCache() && !old->HasRefs());
     LRU_Remove(old);
     table_.Remove(old->key(), old->hash);
@@ -168,7 +167,7 @@ void LRUCacheShard::ApplyToSomeEntries(
   assert(average_entries_per_lock > 0);
   // Assuming we are called with same average_entries_per_lock repeatedly,
-  // this simplifies some logic (index_end will not overflow)
+  // this simplifies some logic (index_end will not overflow).
   assert(average_entries_per_lock < length || *state == 0);

   uint32_t index_begin = *state >> (32 - length_bits);
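
The `*state` cursor above encodes the resume point in the top bits of a 32-bit value. A hedged sketch of the segment arithmetic (simplified from the real function; assumes length_bits < 32):

    #include <cstdint>

    void NextSegmentSketch(uint32_t* state, int length_bits,
                           uint32_t average_entries_per_lock,
                           uint32_t* index_begin, uint32_t* index_end) {
      uint32_t table_size = uint32_t{1} << length_bits;
      *index_begin = *state >> (32 - length_bits);
      *index_end = *index_begin + average_entries_per_lock;
      if (*index_end >= table_size) {
        *index_end = table_size;  // final segment of this sweep
        *state = UINT32_MAX;      // mark the iteration complete
      } else {
        *state = *index_end << (32 - length_bits);  // resume point
      }
    }
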
@@ -274,7 +273,7 @@ void LRUCacheShard::EvictFromLRU(size_t charge,
                                  autovector<LRUHandle*>* deleted) {
   while ((usage_ + charge) > capacity_ && lru_.next != &lru_) {
     LRUHandle* old = lru_.next;
-    // LRU list contains only elements which can be evicted
+    // LRU list contains only elements which can be evicted.
     assert(old->InCache() && !old->HasRefs());
     LRU_Remove(old);
     table_.Remove(old->key(), old->hash);
@@ -295,8 +294,8 @@ void LRUCacheShard::SetCapacity(size_t capacity) {
     EvictFromLRU(0, &last_reference_list);
   }

-  // Try to insert the evicted entries into tiered cache
-  // Free the entries outside of mutex for performance reasons
+  // Try to insert the evicted entries into tiered cache.
+  // Free the entries outside of mutex for performance reasons.
   for (auto entry : last_reference_list) {
     if (secondary_cache_ && entry->IsSecondaryCacheCompatible() &&
         !entry->IsPromoted()) {
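
The "free outside of mutex" idiom recurs throughout this file. A hedged standalone sketch of the pattern (every helper name here is hypothetical):

    #include <mutex>
    #include <vector>

    struct Entry;                               // opaque, illustrative
    void EvictUntilFits(std::vector<Entry*>*);  // hypothetical: gathers victims
    void FreeEntry(Entry*);                     // hypothetical: runs the deleter
    std::mutex shard_mutex;

    void SetCapacityPattern() {
      std::vector<Entry*> last_reference_list;
      {
        std::lock_guard<std::mutex> l(shard_mutex);
        EvictUntilFits(&last_reference_list);  // unlink victims under the lock
      }
      // Deleters can be arbitrarily slow, so run them after unlocking.
      for (Entry* e : last_reference_list) {
        FreeEntry(e);
      }
    }
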
@@ -322,7 +321,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
     MutexLock l(&mutex_);

     // Free the space following strict LRU policy until enough space
-    // is freed or the lru list is empty
+    // is freed or the lru list is empty.
     EvictFromLRU(total_charge, &last_reference_list);

     if ((usage_ + total_charge) > capacity_ &&
@@ -349,7 +348,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
       assert(old->InCache());
       old->SetInCache(false);
       if (!old->HasRefs()) {
-        // old is on LRU because it's in cache and its reference count is 0
+        // old is on LRU because it's in cache and its reference count is 0.
         LRU_Remove(old);
         size_t old_total_charge =
             old->CalcTotalCharge(metadata_charge_policy_);
@@ -361,7 +360,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
       if (handle == nullptr) {
         LRU_Insert(e);
       } else {
-        // If caller already holds a ref, no need to take one here
+        // If caller already holds a ref, no need to take one here.
         if (!e->HasRefs()) {
           e->Ref();
         }
@@ -370,8 +369,8 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
     }
   }

-  // Try to insert the evicted entries into the secondary cache
-  // Free the entries here outside of mutex for performance reasons
+  // Try to insert the evicted entries into the secondary cache.
+  // Free the entries here outside of mutex for performance reasons.
   for (auto entry : last_reference_list) {
     if (secondary_cache_ && entry->IsSecondaryCacheCompatible() &&
         !entry->IsPromoted()) {
@@ -404,7 +403,7 @@ void LRUCacheShard::Promote(LRUHandle* e) {
     Status s = InsertItem(e, &handle, /*free_handle_on_fail=*/false);
     if (!s.ok()) {
       // Item is in memory, but not accounted against the cache capacity.
-      // When the handle is released, the item should get deleted
+      // When the handle is released, the item should get deleted.
       assert(!e->InCache());
     }
   } else {
@@ -437,8 +436,8 @@ Cache::Handle* LRUCacheShard::Lookup(
   }

   // If handle table lookup failed, then allocate a handle outside the
-  // mutex if we're going to lookup in the secondary cache
-  // Only support synchronous for now
+  // mutex if we're going to lookup in the secondary cache.
+  // Only support synchronous for now.
   // TODO: Support asynchronous lookup in secondary cache
   if (!e && secondary_cache_ && helper && helper->saveto_cb) {
     // For objects from the secondary cache, we expect the caller to provide
@@ -469,7 +468,7 @@ Cache::Handle* LRUCacheShard::Lookup(
       if (wait) {
         Promote(e);
         if (!e->value) {
-          // The secondary cache returned a handle, but the lookup failed
+          // The secondary cache returned a handle, but the lookup failed.
           e->Unref();
           e->Free();
           e = nullptr;
@@ -479,7 +478,7 @@ Cache::Handle* LRUCacheShard::Lookup(
         }
       } else {
         // If wait is false, we always return a handle and let the caller
-        // release the handle after checking for success or failure
+        // release the handle after checking for success or failure.
         e->SetIncomplete(true);
         // This may be slightly inaccurate, if the lookup eventually fails.
         // But the probability is very low.
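
Putting the last three hunks together, the two-tier lookup flow looks roughly like the sketch below. This is heavily simplified; every helper name here is hypothetical, not the real LRUCacheShard API:

    struct Handle;  // opaque, illustrative

    Handle* PrimaryLookup(const char* key);       // hypothetical helpers
    Handle* StartSecondaryRead(const char* key);
    void WaitForRead(Handle*);
    bool HasValue(Handle*);
    void ReleaseHandle(Handle*);
    void MarkIncomplete(Handle*);
    bool HasSecondaryCache();

    Handle* LookupSketch(const char* key, bool wait) {
      Handle* h = PrimaryLookup(key);
      if (h == nullptr && HasSecondaryCache()) {
        h = StartSecondaryRead(key);  // handle allocated outside the mutex
        if (wait) {
          WaitForRead(h);             // synchronous path, per the comment above
          if (!HasValue(h)) {
            ReleaseHandle(h);         // secondary cache had a slot but no value
            h = nullptr;
          }
        } else {
          MarkIncomplete(h);          // caller checks success later and releases
        }
      }
      return h;
    }
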
@@ -494,7 +493,7 @@ Cache::Handle* LRUCacheShard::Lookup(
 bool LRUCacheShard::Ref(Cache::Handle* h) {
   LRUHandle* e = reinterpret_cast<LRUHandle*>(h);
   MutexLock l(&mutex_);
-  // To create another reference - entry must be already externally referenced
+  // To create another reference - entry must be already externally referenced.
   assert(e->HasRefs());
   e->Ref();
   return true;
@@ -517,15 +516,15 @@ bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
     MutexLock l(&mutex_);
     last_reference = e->Unref();
     if (last_reference && e->InCache()) {
-      // The item is still in cache, and nobody else holds a reference to it
+      // The item is still in cache, and nobody else holds a reference to it.
       if (usage_ > capacity_ || erase_if_last_ref) {
-        // The LRU list must be empty since the cache is full
+        // The LRU list must be empty since the cache is full.
         assert(lru_.next == &lru_ || erase_if_last_ref);
-        // Take this opportunity and remove the item
+        // Take this opportunity and remove the item.
         table_.Remove(e->key(), e->hash);
         e->SetInCache(false);
       } else {
-        // Put the item back on the LRU list, and don't free it
+        // Put the item back on the LRU list, and don't free it.
         LRU_Insert(e);
         last_reference = false;
       }
@@ -542,7 +541,7 @@ bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
     }
   }

-  // Free the entry here outside of mutex for performance reasons
+  // Free the entry here outside of mutex for performance reasons.
   if (last_reference) {
     e->Free();
   }
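
The Release() decision tree these comments annotate, restated as a hedged self-contained sketch (all helper names hypothetical):

    #include <mutex>

    struct Entry;                // opaque, illustrative
    bool Unref(Entry*);          // hypothetical: true if last reference
    bool InCache(Entry*);
    bool OverCapacity();
    void RemoveFromTable(Entry*);
    void SetNotInCache(Entry*);
    void LruInsert(Entry*);
    void FreeEntry(Entry*);
    std::mutex release_mutex;

    bool ReleaseSketch(Entry* e, bool erase_if_last_ref) {
      bool last_reference;
      {
        std::lock_guard<std::mutex> l(release_mutex);
        last_reference = Unref(e);
        if (last_reference && InCache(e)) {
          if (OverCapacity() || erase_if_last_ref) {
            RemoveFromTable(e);      // evict: over capacity, or caller asked
            SetNotInCache(e);
          } else {
            LruInsert(e);            // keep: back onto the LRU list
            last_reference = false;  // it stays alive in the cache
          }
        }
      }
      if (last_reference) {
        FreeEntry(e);  // destruction outside the mutex, as the comment says
      }
      return last_reference;
    }
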
@@ -554,8 +553,8 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
                              void (*deleter)(const Slice& key, void* value),
                              const Cache::CacheItemHelper* helper,
                              Cache::Handle** handle, Cache::Priority priority) {
-  // Allocate the memory here outside of the mutex
-  // If the cache is full, we'll have to release it
+  // Allocate the memory here outside of the mutex.
+  // If the cache is full, we'll have to release it.
   // It shouldn't happen very often though.
   LRUHandle* e = reinterpret_cast<LRUHandle*>(
       new char[sizeof(LRUHandle) - 1 + key.size()]);
@@ -603,8 +602,8 @@ void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
     }
   }

-  // Free the entry here outside of mutex for performance reasons
-  // last_reference will only be true if e != nullptr
+  // Free the entry here outside of mutex for performance reasons.
+  // last_reference will only be true if e != nullptr.
   if (last_reference) {
     e->Free();
   }
@@ -705,7 +704,7 @@ uint32_t LRUCache::GetHash(Handle* handle) const {
 }

 void LRUCache::DisownData() {
-  // Leak data only if that won't generate an ASAN/valgrind warning
+  // Leak data only if that won't generate an ASAN/valgrind warning.
   if (!kMustFreeHeapAllocations) {
     shards_ = nullptr;
     num_shards_ = 0;
@@ -765,10 +764,10 @@ std::shared_ptr<Cache> NewLRUCache(
     CacheMetadataChargePolicy metadata_charge_policy,
     const std::shared_ptr<SecondaryCache>& secondary_cache) {
   if (num_shard_bits >= 20) {
-    return nullptr;  // the cache cannot be sharded into too many fine pieces
+    return nullptr;  // The cache cannot be sharded into too many fine pieces.
   }
   if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
-    // invalid high_pri_pool_ratio
+    // Invalid high_pri_pool_ratio
     return nullptr;
   }
   if (num_shard_bits < 0) {
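
A short usage sketch of the public NewLRUCache() factory validated above (parameter values are arbitrary examples; the simpler positional overload is assumed here):

    #include <memory>

    #include "rocksdb/cache.h"

    std::shared_ptr<rocksdb::Cache> MakeBlockCache() {
      // Returns nullptr if num_shard_bits >= 20 or high_pri_pool_ratio is
      // outside [0.0, 1.0], per the checks above.
      return rocksdb::NewLRUCache(/*capacity=*/64 << 20,
                                  /*num_shard_bits=*/6,
                                  /*strict_capacity_limit=*/false,
                                  /*high_pri_pool_ratio=*/0.5);
    }
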

cache/lru_cache.h

@@ -81,11 +81,11 @@ struct LRUHandle {
     IN_HIGH_PRI_POOL = (1 << 2),
     // Whether this entry has had any lookups (hits).
     HAS_HIT = (1 << 3),
-    // Can this be inserted into the secondary cache
+    // Can this be inserted into the secondary cache.
     IS_SECONDARY_CACHE_COMPATIBLE = (1 << 4),
-    // Is the handle still being read from a lower tier
+    // Is the handle still being read from a lower tier.
     IS_PENDING = (1 << 5),
-    // Has the item been promoted from a lower tier
+    // Has the item been promoted from a lower tier.
     IS_PROMOTED = (1 << 6),
   };
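
These are one-bit-per-property flags packed into a single integer. A hedged sketch of the accessor pattern (LRUHandle's real accessors are similar in spirit but not shown here):

    #include <cstdint>

    struct FlagsSketch {
      static constexpr uint8_t IS_PENDING = 1 << 5;  // mirrors the enum above
      uint8_t flags = 0;

      bool IsPending() const { return (flags & IS_PENDING) != 0; }
      void SetIsPending(bool pending) {
        if (pending) {
          flags |= IS_PENDING;   // set the bit
        } else {
          flags &= ~IS_PENDING;  // clear the bit
        }
      }
    };
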
@@ -208,7 +208,7 @@ struct LRUHandle {
     delete[] reinterpret_cast<char*>(this);
   }

-  // Calculate the memory usage by metadata
+  // Calculate the memory usage by metadata.
   inline size_t CalcTotalCharge(
       CacheMetadataChargePolicy metadata_charge_policy) {
     size_t meta_charge = 0;
@@ -216,7 +216,7 @@ struct LRUHandle {
 #ifdef ROCKSDB_MALLOC_USABLE_SIZE
       meta_charge += malloc_usable_size(static_cast<void*>(this));
 #else
-      // This is the size that is used when a new handle is created
+      // This is the size that is used when a new handle is created.
       meta_charge += sizeof(LRUHandle) - 1 + key_length;
 #endif
     }
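
In plain terms: the total charge is the caller-declared charge plus, when metadata is charged, the handle's own footprint. A hedged sketch of that arithmetic:

    #include <cstddef>

    size_t CalcTotalChargeSketch(size_t charge, size_t handle_bytes,
                                 bool charge_metadata) {
      // handle_bytes stands in for either malloc_usable_size(handle) or
      // sizeof(LRUHandle) - 1 + key_length, matching the #ifdef above.
      return charge + (charge_metadata ? handle_bytes : 0);
    }
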
@@ -272,10 +272,10 @@ class LRUHandleTable {
   // a linked list of cache entries that hash into the bucket.
   std::unique_ptr<LRUHandle*[]> list_;

-  // Number of elements currently in the table
+  // Number of elements currently in the table.
   uint32_t elems_;

-  // Set from max_upper_hash_bits (see constructor)
+  // Set from max_upper_hash_bits (see constructor).
   const int max_length_bits_;
 };
@@ -291,7 +291,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   // Separate from constructor so caller can easily make an array of LRUCache
   // if current usage is more than new capacity, the function will attempt to
-  // free the needed space
+  // free the needed space.
   virtual void SetCapacity(size_t capacity) override;

   // Set the flag to reject insertion if cache if full.
@@ -314,8 +314,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
     assert(helper);
     return Insert(key, hash, value, charge, nullptr, helper, handle, priority);
   }
-  // If helper_cb is null, the values of the following arguments don't
-  // matter
+  // If helper_cb is null, the values of the following arguments don't matter.
   virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash,
                                 const ShardedCache::CacheItemHelper* helper,
                                 const ShardedCache::CreateCallback& create_cb,
@@ -354,8 +353,8 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   void TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri);

-  // Retrieves number of elements in LRU, for unit test purpose only
-  // not threadsafe
+  // Retrieves number of elements in LRU, for unit test purpose only.
+  // Not threadsafe.
   size_t TEST_GetLRUSize();

   // Retrieves high pri pool ratio
@@ -365,7 +364,8 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   friend class LRUCache;
   // Insert an item into the hash table and, if handle is null, insert into
   // the LRU list. Older items are evicted as necessary. If the cache is full
-  // and free_handle_on_fail is true, the item is deleted and handle is set to.
+  // and free_handle_on_fail is true, the item is deleted and handle is set to
+  // nullptr.
   Status InsertItem(LRUHandle* item, Cache::Handle** handle,
                     bool free_handle_on_fail);
   Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
@@ -389,7 +389,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   // Free some space following strict LRU policy until enough space
   // to hold (usage_ + charge) is freed or the lru list is empty
   // This function is not thread safe - it needs to be executed while
-  // holding the mutex_
+  // holding the mutex_.
   void EvictFromLRU(size_t charge, autovector<LRUHandle*>* deleted);

   // Initialized before use.
@@ -429,10 +429,10 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   // ------------vvvvvvvvvvvvv-----------
   LRUHandleTable table_;

-  // Memory size for entries residing in the cache
+  // Memory size for entries residing in the cache.
   size_t usage_;

-  // Memory size for entries residing only in the LRU list
+  // Memory size for entries residing only in the LRU list.
   size_t lru_usage_;

   // mutex_ protects the following state.
@@ -467,9 +467,9 @@ class LRUCache
   virtual void DisownData() override;
   virtual void WaitAll(std::vector<Handle*>& handles) override;

-  // Retrieves number of elements in LRU, for unit test purpose only
+  // Retrieves number of elements in LRU, for unit test purpose only.
   size_t TEST_GetLRUSize();
-  // Retrieves high pri pool ratio
+  // Retrieves high pri pool ratio.
   double GetHighPriPoolRatio();

  private:

db/db_filesnapshot.cc

@@ -96,7 +96,7 @@ Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
       3);  // for CURRENT + MANIFEST + OPTIONS

   // create names of the live files. The names are not absolute
-  // paths, instead they are relative to dbname_;
+  // paths, instead they are relative to dbname_.
   for (const auto& table_file_number : live_table_files) {
     ret.emplace_back(MakeTableFileName("", table_file_number));
   }
@@ -166,7 +166,7 @@ Status DBImpl::GetCurrentWalFile(std::unique_ptr<LogFile>* current_log_file) {
 Status DBImpl::GetLiveFilesStorageInfo(
     const LiveFilesStorageInfoOptions& opts,
     std::vector<LiveFileStorageInfo>* files) {
-  // To avoid returning partial results, only move to ouput on success
+  // To avoid returning partial results, only move results to files on success.
   assert(files);
   files->clear();
   std::vector<LiveFileStorageInfo> results;
@@ -180,7 +180,7 @@ Status DBImpl::GetLiveFilesStorageInfo(
   if (opts.wal_size_for_flush == port::kMaxUint64) {
     flush_memtable = false;
   } else if (opts.wal_size_for_flush > 0) {
-    // If out standing log files are small, we skip the flush.
+    // If the outstanding log files are small, we skip the flush.
     s = GetSortedWalFiles(live_wal_files);
     if (!s.ok()) {
@@ -313,8 +313,7 @@ Status DBImpl::GetLiveFilesStorageInfo(
     info.relative_filename = kCurrentFileName;
     info.directory = GetName();
     info.file_type = kCurrentFile;
-    // CURRENT could be replaced so we have to record the contents we want
-    // for it
+    // CURRENT could be replaced so we have to record the contents as needed.
     info.replacement_contents = manifest_fname + "\n";
     info.size = manifest_fname.size() + 1;
     if (opts.include_checksum_info) {
@@ -356,7 +355,7 @@ Status DBImpl::GetLiveFilesStorageInfo(
   TEST_SYNC_POINT("CheckpointImpl::CreateCustomCheckpoint:AfterGetLive1");
   TEST_SYNC_POINT("CheckpointImpl::CreateCustomCheckpoint:AfterGetLive2");

-  // if we have more than one column family, we need to also get WAL files
+  // If we have more than one column family, we also need to get WAL files.
   if (s.ok()) {
     s = GetSortedWalFiles(live_wal_files);
   }
@@ -394,7 +393,7 @@ Status DBImpl::GetLiveFilesStorageInfo(
   }

   if (s.ok()) {
-    // Only move output on success
+    // Only move results to output on success.
     *files = std::move(results);
   }
   return s;
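
The discipline both comments in this file describe, as a hedged standalone sketch (`Info` and `CollectStep` are illustrative stand-ins):

    #include <utility>
    #include <vector>

    struct Info;                           // stands in for LiveFileStorageInfo
    bool CollectStep(std::vector<Info>*);  // hypothetical fallible step

    bool GetInfoSketch(std::vector<Info>* files) {
      files->clear();
      std::vector<Info> results;  // accumulate locally first
      bool ok = CollectStep(&results) && CollectStep(&results);
      if (ok) {
        *files = std::move(results);  // expose results only on full success
      }
      return ok;
    }
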

include/rocksdb/cache.h

@@ -52,9 +52,8 @@ struct LRUCacheOptions {
   // Capacity of the cache.
   size_t capacity = 0;

-  // Cache is sharded into 2^num_shard_bits shards,
-  // by hash of key. Refer to NewLRUCache for further
-  // information.
+  // Cache is sharded into 2^num_shard_bits shards, by hash of key.
+  // Refer to NewLRUCache for further information.
   int num_shard_bits = -1;

   // If strict_capacity_limit is set,
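
What "sharded into 2^num_shard_bits shards, by hash of key" amounts to, as a hedged sketch (one common scheme; the exact bit selection in the real ShardedCache may differ):

    #include <cstdint>

    uint32_t ShardOfSketch(uint32_t key_hash, int num_shard_bits) {
      // A few bits of the key hash pick one of 2^num_shard_bits shards; each
      // shard has its own mutex, hash table, and LRU list, cutting contention.
      return num_shard_bits > 0 ? key_hash >> (32 - num_shard_bits) : 0;
    }
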
@@ -91,7 +90,7 @@ struct LRUCacheOptions {
   CacheMetadataChargePolicy metadata_charge_policy =
       kDefaultCacheMetadataChargePolicy;

-  // A SecondaryCache instance to use a the non-volatile tier
+  // A SecondaryCache instance to use a the non-volatile tier.
   std::shared_ptr<SecondaryCache> secondary_cache;

   LRUCacheOptions() {}
@@ -328,8 +327,8 @@ class Cache {
   /**
    * Release a mapping returned by a previous Lookup(). A released entry might
-   * still remain in cache in case it is later looked up by others. If
-   * erase_if_last_ref is set then it also erase it from the cache if there is
+   * still remain in cache in case it is later looked up by others. If
+   * erase_if_last_ref is set then it also erases it from the cache if there is
    * no other reference to it. Erasing it should call the deleter function that
    * was provided when the entry was inserted.
    *
@@ -345,7 +344,7 @@ class Cache {
   // REQUIRES: handle must have been returned by a method on *this.
   virtual void* Value(Handle* handle) = 0;

-  // If the cache contains entry for key, erase it. Note that the
+  // If the cache contains the entry for the key, erase it. Note that the
   // underlying entry will be kept around until all existing handles
   // to it have been released.
   virtual void Erase(const Slice& key) = 0;
@@ -369,19 +368,19 @@ class Cache {
   // full capacity.
   virtual bool HasStrictCapacityLimit() const = 0;

-  // returns the maximum configured capacity of the cache
+  // Returns the maximum configured capacity of the cache
   virtual size_t GetCapacity() const = 0;

-  // returns the memory size for the entries residing in the cache.
+  // Returns the memory size for the entries residing in the cache.
   virtual size_t GetUsage() const = 0;

-  // returns the memory size for a specific entry in the cache.
+  // Returns the memory size for a specific entry in the cache.
   virtual size_t GetUsage(Handle* handle) const = 0;

-  // returns the memory size for the entries in use by the system
+  // Returns the memory size for the entries in use by the system
   virtual size_t GetPinnedUsage() const = 0;

-  // returns the charge for the specific entry in the cache.
+  // Returns the charge for the specific entry in the cache.
   virtual size_t GetCharge(Handle* handle) const = 0;

   // Returns the deleter for the specified entry. This might seem useless
@@ -440,9 +439,8 @@ class Cache {
   // The Insert and Lookup APIs below are intended to allow cached objects
   // to be demoted/promoted between the primary block cache and a secondary
   // cache. The secondary cache could be a non-volatile cache, and will
-  // likely store the object in a different representation more suitable
-  // for on disk storage. They rely on a per object CacheItemHelper to do
-  // the conversions.
+  // likely store the object in a different representation. They rely on a
+  // per object CacheItemHelper to do the conversions.
   // The secondary cache may persist across process and system restarts,
   // and may even be moved between hosts. Therefore, the cache key must
   // be repeatable across restarts/reboots, and globally unique if
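
For orientation, the per-object helper bundles roughly three callbacks. The shape below is illustrative only, with simplified signatures; it is not the real Cache::CacheItemHelper declaration:

    #include <cstddef>

    struct CacheItemHelperSketch {
      // How many bytes the object occupies once serialized.
      size_t (*size_cb)(void* obj);
      // Serialize a chunk of the object into a caller-provided buffer so the
      // secondary cache can store it on disk; returns false on failure here.
      bool (*saveto_cb)(void* obj, size_t offset, size_t length, void* out);
      // Destroy the in-memory object.
      void (*del_cb)(void* obj);
    };

A create callback supplied to Lookup() performs the reverse conversion when an object is promoted back from the secondary cache.
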

tools/db_bench_tool.cc

@@ -244,10 +244,9 @@ IF_ROCKSDB_LITE("",
     )
     "\tgetmergeoperands -- Insert lots of merge records which are a list of "
     "sorted ints for a key and then compare performance of lookup for another "
-    "key "
-    "by doing a Get followed by binary searching in the large sorted list vs "
-    "doing a GetMergeOperands and binary searching in the operands which are"
-    "sorted sub-lists. The MergeOperator used is sortlist.h\n"
+    "key by doing a Get followed by binary searching in the large sorted list "
+    "vs doing a GetMergeOperands and binary searching in the operands which "
+    "are sorted sub-lists. The MergeOperator used is sortlist.h\n"
     "\treadrandomoperands -- read random keys using `GetMergeOperands()`. An "
     "operation includes a rare but possible retry in case it got "
     "`Status::Incomplete()`. This happens upon encountering more keys than "
@@ -356,18 +355,14 @@ DEFINE_double(
     "Used in 'filluniquerandom' benchmark: for each write operation, "
     "we give a probability to perform an overwrite instead. The key used for "
     "the overwrite is randomly chosen from the last 'overwrite_window_size' "
-    "keys "
-    "previously inserted into the DB. "
+    "keys previously inserted into the DB. "
     "Valid overwrite_probability values: [0.0, 1.0].");

 DEFINE_uint32(overwrite_window_size, 1,
-              "Used in 'filluniquerandom' benchmark. For each write "
-              "operation, when "
-              "the overwrite_probability flag is set by the user, the key used "
-              "to perform "
-              "an overwrite is randomly chosen from the last "
-              "'overwrite_window_size' keys "
-              "previously inserted into the DB. "
+              "Used in 'filluniquerandom' benchmark. For each write operation,"
+              " when the overwrite_probability flag is set by the user, the "
+              "key used to perform an overwrite is randomly chosen from the "
+              "last 'overwrite_window_size' keys previously inserted into DB. "
               "Warning: large values can affect throughput. "
               "Valid overwrite_window_size values: [1, kMaxUint32].");
@@ -567,7 +562,7 @@ DEFINE_bool(use_lru_secondary_cache, false,
             "Use the LRUSecondaryCache as the secondary cache.");

 DEFINE_int64(lru_secondary_cache_size, 8 << 20,  // 8MB
-             "Number of bytes to use as a cache of data");
+             "Number of bytes to use as a cache of data.");

 DEFINE_int32(lru_secondary_cache_numshardbits, 6,
              "Number of shards for the block cache"
@@ -838,18 +833,15 @@ DEFINE_string(max_bytes_for_level_multiplier_additional, "",
 DEFINE_int32(level0_stop_writes_trigger,
              ROCKSDB_NAMESPACE::Options().level0_stop_writes_trigger,
-             "Number of files in level-0"
-             " that will trigger put stop.");
+             "Number of files in level-0 that will trigger put stop.");

 DEFINE_int32(level0_slowdown_writes_trigger,
              ROCKSDB_NAMESPACE::Options().level0_slowdown_writes_trigger,
-             "Number of files in level-0"
-             " that will slow down writes.");
+             "Number of files in level-0 that will slow down writes.");

 DEFINE_int32(level0_file_num_compaction_trigger,
              ROCKSDB_NAMESPACE::Options().level0_file_num_compaction_trigger,
-             "Number of files in level-0"
-             " when compactions start");
+             "Number of files in level-0 when compactions start.");

 DEFINE_uint64(periodic_compaction_seconds,
               ROCKSDB_NAMESPACE::Options().periodic_compaction_seconds,
@@ -913,8 +905,7 @@ DEFINE_int64(writes_per_range_tombstone, 0,
 DEFINE_int64(range_tombstone_width, 100, "Number of keys in tombstone's range");

 DEFINE_int64(max_num_range_tombstones, 0,
-             "Maximum number of range tombstones "
-             "to insert.");
+             "Maximum number of range tombstones to insert.");

 DEFINE_bool(expand_range_tombstones, false,
             "Expand range tombstone into sequential regular tombstones.");
@@ -1128,8 +1119,7 @@ DEFINE_bool(file_checksum, false,
 DEFINE_bool(rate_limit_auto_wal_flush, false,
             "When true use Env::IO_USER priority level to charge internal rate "
             "limiter for automatic WAL flush (`Options::manual_wal_flush` == "
-            "false) after the user "
-            "write operation");
+            "false) after the user write operation.");

 DEFINE_bool(async_io, false,
             "When set true, RocksDB does asynchronous reads for internal auto "
@@ -1218,8 +1208,7 @@ DEFINE_int32(table_cache_numshardbits, 4, "");
 #ifndef ROCKSDB_LITE
 DEFINE_string(env_uri, "",
-              "URI for registry Env lookup. Mutually exclusive"
-              " with --fs_uri");
+              "URI for registry Env lookup. Mutually exclusive with --fs_uri");

 DEFINE_string(fs_uri, "",
               "URI for registry Filesystem lookup. Mutually exclusive"
               " with --env_uri."
@@ -1313,8 +1302,7 @@ DEFINE_uint64(write_thread_slow_yield_usec, 3,
 DEFINE_uint64(rate_limiter_bytes_per_sec, 0, "Set options.rate_limiter value.");

 DEFINE_int64(rate_limiter_refill_period_us, 100 * 1000,
-             "Set refill period on "
-             "rate limiter.");
+             "Set refill period on rate limiter.");

 DEFINE_bool(rate_limiter_auto_tuned, false,
             "Enable dynamic adjustment of rate limit according to demand for "
@@ -1362,14 +1350,11 @@ DEFINE_double(keyrange_dist_d, 0.0,
               "f(x)=a*exp(b*x)+c*exp(d*x)");

 DEFINE_int64(keyrange_num, 1,
              "The number of key ranges that are in the same prefix "
-             "group, each prefix range will have its key access "
-             "distribution");
+             "group, each prefix range will have its key access distribution");

 DEFINE_double(key_dist_a, 0.0,
-              "The parameter 'a' of key access distribution model "
-              "f(x)=a*x^b");
+              "The parameter 'a' of key access distribution model f(x)=a*x^b");

 DEFINE_double(key_dist_b, 0.0,
-              "The parameter 'b' of key access distribution model "
-              "f(x)=a*x^b");
+              "The parameter 'b' of key access distribution model f(x)=a*x^b");

 DEFINE_double(value_theta, 0.0,
               "The parameter 'theta' of Generized Pareto Distribution "
               "f(x)=(1/sigma)*(1+k*(x-theta)/sigma)^-(1/k+1)");
@@ -1897,7 +1882,7 @@ struct DBWithColumnFamilies {
   }
 };

-// a class that reports stats to CSV file
+// A class that reports stats to CSV file.
 class ReporterAgent {
  public:
   ReporterAgent(Env* env, const std::string& fname,
@@ -2082,7 +2067,7 @@ class Stats {
     if (other.start_ < start_) start_ = other.start_;
     if (other.finish_ > finish_) finish_ = other.finish_;

-    // Just keep the messages from one thread
+    // Just keep the messages from one thread.
     if (message_.empty()) message_ = other.message_;
   }
@@ -2139,7 +2124,7 @@ class Stats {
   }

   void ResetLastOpTime() {
-    // Set to now to avoid latency from calls to SleepForMicroseconds
+    // Set to now to avoid latency from calls to SleepForMicroseconds.
     last_op_finish_ = clock_->NowMicros();
   }
@@ -2186,7 +2171,7 @@ class Stats {
     if (FLAGS_stats_interval_seconds &&
         usecs_since_last < (FLAGS_stats_interval_seconds * 1000000)) {
-      // Don't check again for this many operations
+      // Don't check again for this many operations.
       next_report_ += FLAGS_stats_interval;
     } else {
