From c88ff4ca76a6a24632cbdd834f621952a251d7a1 Mon Sep 17 00:00:00 2001 From: Igor Canadi Date: Tue, 17 Mar 2015 15:04:37 -0700 Subject: [PATCH] Deprecate removeScanCountLimit in NewLRUCache Summary: It is no longer used by the implementation, so we should also remove it from the public API. Test Plan: make check Reviewers: sdong Reviewed By: sdong Subscribers: dhruba, leveldb Differential Revision: https://reviews.facebook.net/D34971 --- HISTORY.md | 2 +- db/c.cc | 2 +- db/compaction_job_test.cc | 2 +- db/db_bench.cc | 5 +---- db/db_impl.cc | 3 +-- db/flush_job_test.cc | 2 +- db/repair.cc | 3 +-- db/version_set.cc | 5 ++--- db/wal_manager_test.cc | 2 +- include/rocksdb/cache.h | 13 ++----------- include/rocksdb/options.h | 10 ++-------- java/rocksjni/options.cc | 14 ++++++-------- util/cache.cc | 32 ++++---------------------------- util/cache_test.cc | 9 +++------ util/ldb_cmd.cc | 8 +++----- util/options.cc | 5 ----- util/options_helper.cc | 2 -- util/options_test.cc | 2 -- 18 files changed, 30 insertions(+), 91 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index ee8400312..ea3fe284c 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,7 +1,7 @@ # Rocksdb Change Log ### Unreleased Features -* Changed the LRU caching algorithm so that referenced blocks (by iterators) are never evicted +* Changed the LRU caching algorithm so that referenced blocks (by iterators) are never evicted. This change made the parameter removeScanCountLimit obsolete; as a result, NewLRUCache no longer takes three arguments. The table_cache_remove_scan_count_limit option is also removed. * By default we now optimize the compilation for the compilation platform (using -march=native). If you want to build portable binary, use 'PORTABLE=1' before the make command. * We now allow level-compaction to place files in different paths by specifying them in db_paths along with the target_size. diff --git a/db/c.cc b/db/c.cc index 3294ef2de..985c9fb1f 100644 --- a/db/c.cc +++ b/db/c.cc @@ -1613,7 +1613,7 @@ void rocksdb_options_set_table_cache_numshardbits( void rocksdb_options_set_table_cache_remove_scan_count_limit( rocksdb_options_t* opt, int v) { - opt->rep.table_cache_remove_scan_count_limit = v; + // this option is deprecated } void rocksdb_options_set_arena_block_size( diff --git a/db/compaction_job_test.cc b/db/compaction_job_test.cc index fb7ad2332..1f0bdc0fd 100644 --- a/db/compaction_job_test.cc +++ b/db/compaction_job_test.cc @@ -26,7 +26,7 @@ class CompactionJobTest : public testing::Test { : env_(Env::Default()), dbname_(test::TmpDir() + "/compaction_job_test"), mutable_cf_options_(Options(), ImmutableCFOptions(Options())), - table_cache_(NewLRUCache(50000, 16, 8)), + table_cache_(NewLRUCache(50000, 16)), write_buffer_(db_options_.db_write_buffer_size), versions_(new VersionSet(dbname_, &db_options_, env_options_, table_cache_.get(), &write_buffer_, diff --git a/db/db_bench.cc b/db/db_bench.cc index 5d3c54202..150ac4666 100644 --- a/db/db_bench.cc +++ b/db/db_bench.cc @@ -307,8 +307,6 @@ DEFINE_int32(cache_numshardbits, -1, "Number of shards for the block cache" " is 2 ** cache_numshardbits. Negative means use default settings." " This is applied only if FLAGS_cache_size is non-negative."); -DEFINE_int32(cache_remove_scan_count_limit, 32, ""); - DEFINE_bool(verify_checksum, false, "Verify checksum for every block read" " from storage"); @@ -1357,8 +1355,7 @@ class Benchmark { : cache_( FLAGS_cache_size >= 0 ? (FLAGS_cache_numshardbits >= 1 - ?
NewLRUCache(FLAGS_cache_size, FLAGS_cache_numshardbits, - FLAGS_cache_remove_scan_count_limit) + ? NewLRUCache(FLAGS_cache_size, FLAGS_cache_numshardbits) : NewLRUCache(FLAGS_cache_size)) : nullptr), compressed_cache_(FLAGS_compressed_cache_size >= 0 diff --git a/db/db_impl.cc b/db/db_impl.cc index eabdfcd15..5aef74569 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -240,8 +240,7 @@ DBImpl::DBImpl(const DBOptions& options, const std::string& dbname) 4194304 : db_options_.max_open_files - 10; // Reserve ten files or so for other uses and give the rest to TableCache. table_cache_ = - NewLRUCache(table_cache_size, db_options_.table_cache_numshardbits, - db_options_.table_cache_remove_scan_count_limit); + NewLRUCache(table_cache_size, db_options_.table_cache_numshardbits); versions_.reset(new VersionSet(dbname_, &db_options_, env_options_, table_cache_.get(), &write_buffer_, diff --git a/db/flush_job_test.cc b/db/flush_job_test.cc index dce1e6df8..e08d02ecc 100644 --- a/db/flush_job_test.cc +++ b/db/flush_job_test.cc @@ -25,7 +25,7 @@ class FlushJobTest : public testing::Test { FlushJobTest() : env_(Env::Default()), dbname_(test::TmpDir() + "/flush_job_test"), - table_cache_(NewLRUCache(50000, 16, 8)), + table_cache_(NewLRUCache(50000, 16)), write_buffer_(db_options_.db_write_buffer_size), versions_(new VersionSet(dbname_, &db_options_, env_options_, table_cache_.get(), &write_buffer_, diff --git a/db/repair.cc b/db/repair.cc index 1f3dfc184..158dcc9bc 100644 --- a/db/repair.cc +++ b/db/repair.cc @@ -69,8 +69,7 @@ class Repairer { raw_table_cache_( // TableCache can be small since we expect each table to be opened // once. - NewLRUCache(10, options_.table_cache_numshardbits, - options_.table_cache_remove_scan_count_limit)), + NewLRUCache(10, options_.table_cache_numshardbits)), next_file_number_(1) { table_cache_ = new TableCache(ioptions_, env_options_, raw_table_cache_.get()); diff --git a/db/version_set.cc b/db/version_set.cc index 61353c0cc..fa64a1d2e 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -2370,9 +2370,8 @@ Status VersionSet::ReduceNumberOfLevels(const std::string& dbname, } ColumnFamilyOptions cf_options(*options); - std::shared_ptr tc(NewLRUCache( - options->max_open_files - 10, options->table_cache_numshardbits, - options->table_cache_remove_scan_count_limit)); + std::shared_ptr tc(NewLRUCache(options->max_open_files - 10, + options->table_cache_numshardbits)); WriteController wc; WriteBuffer wb(options->db_write_buffer_size); VersionSet versions(dbname, options, env_options, tc.get(), &wb, &wc); diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc index ebde0dc5e..20b3f4b56 100644 --- a/db/wal_manager_test.cc +++ b/db/wal_manager_test.cc @@ -28,7 +28,7 @@ class WalManagerTest : public testing::Test { WalManagerTest() : env_(Env::Default()), dbname_(test::TmpDir() + "/wal_manager_test"), - table_cache_(NewLRUCache(50000, 16, 8)), + table_cache_(NewLRUCache(50000, 16)), write_buffer_(db_options_.db_write_buffer_size), current_log_number_(0) { DestroyDB(dbname_, Options()); diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h index a8a6f9b73..eb78ab009 100644 --- a/include/rocksdb/cache.h +++ b/include/rocksdb/cache.h @@ -34,20 +34,11 @@ class Cache; // Create a new cache with a fixed size capacity. The cache is sharded // to 2^numShardBits shards, by hash of the key. The total capacity -// is divided and evenly assigned to each shard. 
Inside each shard, -// the eviction is done in two passes: first try to free spaces by -// evicting entries that are among the most least used removeScanCountLimit -// entries and do not have reference other than by the cache itself, in -// the least-used order. If not enough space is freed, further free the -// entries in least used order. +// is divided and evenly assigned to each shard. // -// The functions without parameter numShardBits and/or removeScanCountLimit -// use default values. removeScanCountLimit's default value is 0, which -// means a strict LRU order inside each shard. +// The function without the numShardBits parameter uses a default value of 4. extern shared_ptr<Cache> NewLRUCache(size_t capacity); extern shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits); -extern shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits, - int removeScanCountLimit); class Cache { public: diff --git a/include/rocksdb/options.h b/include/rocksdb/options.h index 088f5f598..469f9682b 100644 --- a/include/rocksdb/options.h +++ b/include/rocksdb/options.h @@ -897,14 +897,8 @@ struct DBOptions { // Number of shards used for table cache. int table_cache_numshardbits; - // During data eviction of table's LRU cache, it would be inefficient - // to strictly follow LRU because this piece of memory will not really - // be released unless its refcount falls to zero. Instead, make two - // passes: the first pass will release items with refcount = 1, - // and if not enough space releases after scanning the number of - // elements specified by this parameter, we will remove items in LRU - // order. - int table_cache_remove_scan_count_limit; + // DEPRECATED + // int table_cache_remove_scan_count_limit; // The following two fields affect how archived logs will be deleted. // 1.
If both set to 0, logs will be deleted asap and will not get into diff --git a/java/rocksjni/options.cc b/java/rocksjni/options.cc index 8826566c6..07aadeab2 100644 --- a/java/rocksjni/options.cc +++ b/java/rocksjni/options.cc @@ -702,8 +702,8 @@ void Java_org_rocksdb_Options_setTableCacheNumshardbits( */ jint Java_org_rocksdb_Options_tableCacheRemoveScanCountLimit( JNIEnv* env, jobject jobj, jlong jhandle) { - return reinterpret_cast( - jhandle)->table_cache_remove_scan_count_limit; + // deprecated + return 0; } /* @@ -713,8 +713,7 @@ jint Java_org_rocksdb_Options_tableCacheRemoveScanCountLimit( */ void Java_org_rocksdb_Options_setTableCacheRemoveScanCountLimit( JNIEnv* env, jobject jobj, jlong jhandle, jint limit) { - reinterpret_cast( - jhandle)->table_cache_remove_scan_count_limit = static_cast(limit); + // deprecated } /* @@ -3383,8 +3382,7 @@ jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits( */ void Java_org_rocksdb_DBOptions_setTableCacheRemoveScanCountLimit( JNIEnv* env, jobject jobj, jlong jhandle, jint limit) { - reinterpret_cast( - jhandle)->table_cache_remove_scan_count_limit = static_cast(limit); + // deprecated } /* @@ -3394,8 +3392,8 @@ void Java_org_rocksdb_DBOptions_setTableCacheRemoveScanCountLimit( */ jint Java_org_rocksdb_DBOptions_tableCacheRemoveScanCountLimit( JNIEnv* env, jobject jobj, jlong jhandle) { - return reinterpret_cast( - jhandle)->table_cache_remove_scan_count_limit; + // deprecated + return 0; } /* diff --git a/util/cache.cc b/util/cache.cc index 366624fbb..a4a97a75b 100644 --- a/util/cache.cc +++ b/util/cache.cc @@ -193,9 +193,6 @@ class LRUCache { // Separate from constructor so caller can easily make an array of LRUCache void SetCapacity(size_t capacity) { capacity_ = capacity; } - void SetRemoveScanCountLimit(uint32_t remove_scan_count_limit) { - remove_scan_count_limit_ = remove_scan_count_limit; - } // Like Cache methods, but with an extra "hash" parameter. Cache::Handle* Insert(const Slice& key, uint32_t hash, @@ -224,7 +221,6 @@ class LRUCache { // Initialized before use. size_t capacity_; - uint32_t remove_scan_count_limit_; // mutex_ protects the following state. // We don't count mutex_ as the cache's internal state so semantically we @@ -426,7 +422,6 @@ void LRUCache::Erase(const Slice& key, uint32_t hash) { } static int kNumShardBits = 4; // default values, can be overridden -static int kRemoveScanCountLimit = 0; // default values, can be overridden class ShardedLRUCache : public Cache { private: @@ -445,28 +440,16 @@ class ShardedLRUCache : public Cache { return (num_shard_bits_ > 0) ? 
(hash >> (32 - num_shard_bits_)) : 0; } - void init(size_t capacity, int numbits, int removeScanCountLimit) { - num_shard_bits_ = numbits; - capacity_ = capacity; + public: + ShardedLRUCache(size_t capacity, int num_shard_bits) + : last_id_(0), num_shard_bits_(num_shard_bits), capacity_(capacity) { int num_shards = 1 << num_shard_bits_; shards_ = new LRUCache[num_shards]; const size_t per_shard = (capacity + (num_shards - 1)) / num_shards; for (int s = 0; s < num_shards; s++) { shards_[s].SetCapacity(per_shard); - shards_[s].SetRemoveScanCountLimit(removeScanCountLimit); } } - - public: - explicit ShardedLRUCache(size_t capacity) - : last_id_(0) { - init(capacity, kNumShardBits, kRemoveScanCountLimit); - } - ShardedLRUCache(size_t capacity, int num_shard_bits, - int removeScanCountLimit) - : last_id_(0) { - init(capacity, num_shard_bits, removeScanCountLimit); - } virtual ~ShardedLRUCache() { delete[] shards_; } @@ -526,17 +509,10 @@ shared_ptr NewLRUCache(size_t capacity) { } shared_ptr NewLRUCache(size_t capacity, int num_shard_bits) { - return NewLRUCache(capacity, num_shard_bits, kRemoveScanCountLimit); -} - -shared_ptr NewLRUCache(size_t capacity, int num_shard_bits, - int removeScanCountLimit) { if (num_shard_bits >= 20) { return nullptr; // the cache cannot be sharded into too many fine pieces } - return std::make_shared(capacity, - num_shard_bits, - removeScanCountLimit); + return std::make_shared(capacity, num_shard_bits); } } // namespace rocksdb diff --git a/util/cache_test.cc b/util/cache_test.cc index 72880ffa5..834973659 100644 --- a/util/cache_test.cc +++ b/util/cache_test.cc @@ -43,11 +43,9 @@ class CacheTest : public testing::Test { static const int kCacheSize = 1000; static const int kNumShardBits = 4; - static const int kRemoveScanCountLimit = 16; static const int kCacheSize2 = 100; static const int kNumShardBits2 = 2; - static const int kRemoveScanCountLimit2 = 200; std::vector deleted_keys_; std::vector deleted_values_; @@ -55,9 +53,8 @@ class CacheTest : public testing::Test { shared_ptr cache2_; CacheTest() : - cache_(NewLRUCache(kCacheSize, kNumShardBits, kRemoveScanCountLimit)), - cache2_(NewLRUCache(kCacheSize2, kNumShardBits2, - kRemoveScanCountLimit2)) { + cache_(NewLRUCache(kCacheSize, kNumShardBits)), + cache2_(NewLRUCache(kCacheSize2, kNumShardBits2)) { current_ = this; } @@ -116,7 +113,7 @@ void dumbDeleter(const Slice& key, void* value) { } TEST_F(CacheTest, UsageTest) { // cache is shared_ptr and will be automatically cleaned up. const uint64_t kCapacity = 100000; - auto cache = NewLRUCache(kCapacity, 8, 200); + auto cache = NewLRUCache(kCapacity, 8); size_t usage = 0; const char* value = "abcdef"; diff --git a/util/ldb_cmd.cc b/util/ldb_cmd.cc index 585bdd786..440d8c3eb 100644 --- a/util/ldb_cmd.cc +++ b/util/ldb_cmd.cc @@ -525,9 +525,8 @@ void DumpManifestFile(std::string file, bool verbose, bool hex) { Options options; EnvOptions sopt; std::string dbname("dummy"); - std::shared_ptr tc( - NewLRUCache(options.max_open_files - 10, options.table_cache_numshardbits, - options.table_cache_remove_scan_count_limit)); + std::shared_ptr tc(NewLRUCache(options.max_open_files - 10, + options.table_cache_numshardbits)); // Notice we are using the default options not through SanitizeOptions(), // if VersionSet::DumpManifest() depends on any option done by // SanitizeOptions(), we need to initialize it manually. 
@@ -1134,8 +1133,7 @@ Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt, int* levels) { EnvOptions soptions; std::shared_ptr tc( - NewLRUCache(opt.max_open_files - 10, opt.table_cache_numshardbits, - opt.table_cache_remove_scan_count_limit)); + NewLRUCache(opt.max_open_files - 10, opt.table_cache_numshardbits)); const InternalKeyComparator cmp(opt.comparator); WriteController wc; WriteBuffer wb(opt.db_write_buffer_size); diff --git a/util/options.cc b/util/options.cc index 2c126d028..3b47c3696 100644 --- a/util/options.cc +++ b/util/options.cc @@ -238,7 +238,6 @@ DBOptions::DBOptions() keep_log_file_num(1000), max_manifest_file_size(std::numeric_limits::max()), table_cache_numshardbits(4), - table_cache_remove_scan_count_limit(16), WAL_ttl_seconds(0), WAL_size_limit_MB(0), manifest_preallocation_size(4 * 1024 * 1024), @@ -282,8 +281,6 @@ DBOptions::DBOptions(const Options& options) keep_log_file_num(options.keep_log_file_num), max_manifest_file_size(options.max_manifest_file_size), table_cache_numshardbits(options.table_cache_numshardbits), - table_cache_remove_scan_count_limit( - options.table_cache_remove_scan_count_limit), WAL_ttl_seconds(options.WAL_ttl_seconds), WAL_size_limit_MB(options.WAL_size_limit_MB), manifest_preallocation_size(options.manifest_preallocation_size), @@ -330,8 +327,6 @@ void DBOptions::Dump(Logger* log) const { wal_dir.c_str()); Log(log, " Options.table_cache_numshardbits: %d", table_cache_numshardbits); - Log(log, " Options.table_cache_remove_scan_count_limit: %d", - table_cache_remove_scan_count_limit); Log(log, " Options.delete_obsolete_files_period_micros: %" PRIu64, delete_obsolete_files_period_micros); Log(log, " Options.max_background_compactions: %d", diff --git a/util/options_helper.cc b/util/options_helper.cc index 683ea9a01..7f78ee2a3 100644 --- a/util/options_helper.cc +++ b/util/options_helper.cc @@ -509,8 +509,6 @@ bool ParseDBOption(const std::string& name, const std::string& value, new_options->max_manifest_file_size = ParseUint64(value); } else if (name == "table_cache_numshardbits") { new_options->table_cache_numshardbits = ParseInt(value); - } else if (name == "table_cache_remove_scan_count_limit") { - new_options->table_cache_remove_scan_count_limit = ParseInt(value); } else if (name == "WAL_ttl_seconds") { new_options->WAL_ttl_seconds = ParseUint64(value); } else if (name == "WAL_size_limit_MB") { diff --git a/util/options_test.cc b/util/options_test.cc index 3b41a8ec3..2688cd256 100644 --- a/util/options_test.cc +++ b/util/options_test.cc @@ -159,7 +159,6 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) { {"keep_log_file_num", "39"}, {"max_manifest_file_size", "40"}, {"table_cache_numshardbits", "41"}, - {"table_cache_remove_scan_count_limit", "42"}, {"WAL_ttl_seconds", "43"}, {"WAL_size_limit_MB", "44"}, {"manifest_preallocation_size", "45"}, @@ -266,7 +265,6 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) { ASSERT_EQ(new_db_opt.keep_log_file_num, 39U); ASSERT_EQ(new_db_opt.max_manifest_file_size, static_cast(40)); ASSERT_EQ(new_db_opt.table_cache_numshardbits, 41); - ASSERT_EQ(new_db_opt.table_cache_remove_scan_count_limit, 42); ASSERT_EQ(new_db_opt.WAL_ttl_seconds, static_cast(43)); ASSERT_EQ(new_db_opt.WAL_size_limit_MB, static_cast(44)); ASSERT_EQ(new_db_opt.manifest_preallocation_size, 45U);
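Usage after this change (an illustrative sketch, not part of the patch): the public API keeps only the one- and two-argument NewLRUCache overloads, and DBOptions no longer has table_cache_remove_scan_count_limit. The snippet below shows how a caller might build a block cache against the headers at this revision; the capacity, shard-bit count, block-based-table wiring, and DB path are made-up example values.

#include <memory>

#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

int main() {
  // Two-argument overload: capacity plus numShardBits. The former third
  // argument (removeScanCountLimit) is gone; within each shard eviction is
  // strict LRU, and blocks referenced by iterators are never evicted.
  std::shared_ptr<rocksdb::Cache> block_cache =
      rocksdb::NewLRUCache(64 << 20 /* capacity, bytes */, 6 /* numShardBits */);

  // Example wiring of the cache into a block-based table factory.
  rocksdb::BlockBasedTableOptions table_options;
  table_options.block_cache = block_cache;

  rocksdb::Options options;
  options.create_if_missing = true;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));

  // The table cache is still sharded via table_cache_numshardbits; the
  // removed table_cache_remove_scan_count_limit field is simply dropped.
  options.table_cache_numshardbits = 4;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/newlrucache_example", &db);
  if (s.ok()) {
    delete db;
  }
  return 0;
}

Callers that previously passed a removeScanCountLimit third argument can drop it; the C function rocksdb_options_set_table_cache_remove_scan_count_limit and the Java TableCacheRemoveScanCountLimit bindings are kept as no-ops in this patch, so existing callers continue to compile.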