From ef8ed3681c85dc0e4970b9a8ec7b8db4685e2e23 Mon Sep 17 00:00:00 2001
From: sdong
Date: Mon, 30 Nov 2015 11:41:53 -0800
Subject: [PATCH] Fix DBTest.SuggestCompactRangeTest for disable jemalloc case

Summary: DBTest.SuggestCompactRangeTest fails when jemalloc is disabled,
including in ASAN and valgrind builds. The failure is caused by the skip list
improvement, which allocates nodes of different sizes for new records. Fix it
by using a special mem table that triggers a flush by number of entries, so
that the behavior is consistent for all allocators.

Test Plan: Run the test with both DISABLE_JEMALLOC=1 and DISABLE_JEMALLOC=0.

Reviewers: anthony, rven, yhchiang, kradhakrishnan, igor, IslamAbdelRahman

Reviewed By: IslamAbdelRahman

Subscribers: leveldb, dhruba

Differential Revision: https://reviews.facebook.net/D51423
---
 db/db_test.cc      |  4 ++-
 db/db_test_util.cc |  4 ++-
 db/db_test_util.h  | 80 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 86 insertions(+), 2 deletions(-)

diff --git a/db/db_test.cc b/db/db_test.cc
index e6ebd7054..208bc1580 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -8626,10 +8626,12 @@ TEST_F(DBTest, SuggestCompactRangeTest) {
   };
 
   Options options = CurrentOptions();
+  options.memtable_factory.reset(
+      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
   options.compaction_style = kCompactionStyleLevel;
   options.compaction_filter_factory.reset(
       new CompactionFilterFactoryGetContext());
-  options.write_buffer_size = 100 << 10;
+  options.write_buffer_size = 200 << 10;
   options.arena_block_size = 4 << 10;
   options.level0_file_num_compaction_trigger = 4;
   options.num_levels = 4;
diff --git a/db/db_test_util.cc b/db/db_test_util.cc
index 85a3f90df..0e29cb2e4 100644
--- a/db/db_test_util.cc
+++ b/db/db_test_util.cc
@@ -855,8 +855,10 @@ void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) {
   }
 }
 
+const int DBTestBase::kNumKeysByGenerateNewRandomFile = 51;
+
 void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) {
-  for (int i = 0; i < 51; i++) {
+  for (int i = 0; i < kNumKeysByGenerateNewRandomFile; i++) {
     ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 2000)));
   }
   ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 200)));
diff --git a/db/db_test_util.h b/db/db_test_util.h
index af5b53710..b335cf184 100644
--- a/db/db_test_util.h
+++ b/db/db_test_util.h
@@ -118,6 +118,84 @@ struct OptionsOverride {
 
 }  // namespace anon
 
+// A hacky skip list mem table that triggers flush after number of entries.
+class SpecialMemTableRep : public MemTableRep {
+ public:
+  explicit SpecialMemTableRep(MemTableAllocator* allocator,
+                              MemTableRep* memtable, int num_entries_flush)
+      : MemTableRep(allocator),
+        memtable_(memtable),
+        num_entries_flush_(num_entries_flush),
+        num_entries_(0) {}
+
+  virtual KeyHandle Allocate(const size_t len, char** buf) override {
+    return memtable_->Allocate(len, buf);
+  }
+
+  // Insert key into the list.
+  // REQUIRES: nothing that compares equal to key is currently in the list.
+  virtual void Insert(KeyHandle handle) override {
+    memtable_->Insert(handle);
+    num_entries_++;
+  }
+
+  // Returns true iff an entry that compares equal to key is in the list.
+  virtual bool Contains(const char* key) const override {
+    return memtable_->Contains(key);
+  }
+
+  virtual size_t ApproximateMemoryUsage() override {
+    // Return a high memory usage when number of entries exceeds the threshold
+    // to trigger a flush.
+    return (num_entries_ < num_entries_flush_) ? 0 : 1024 * 1024 * 1024;
+  }
+
+  virtual void Get(const LookupKey& k, void* callback_args,
+                   bool (*callback_func)(void* arg,
+                                         const char* entry)) override {
+    memtable_->Get(k, callback_args, callback_func);
+  }
+
+  uint64_t ApproximateNumEntries(const Slice& start_ikey,
+                                 const Slice& end_ikey) override {
+    return memtable_->ApproximateNumEntries(start_ikey, end_ikey);
+  }
+
+  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override {
+    return memtable_->GetIterator(arena);
+  }
+
+  virtual ~SpecialMemTableRep() override {}
+
+ private:
+  unique_ptr<MemTableRep> memtable_;
+  int num_entries_flush_;
+  int num_entries_;
+};
+
+// The factory for the hacky skip list mem table that triggers flush after
+// number of entries exceeds a threshold.
+class SpecialSkipListFactory : public MemTableRepFactory {
+ public:
+  // After number of inserts exceeds `num_entries_flush` in a mem table,
+  // trigger flush.
+  explicit SpecialSkipListFactory(int num_entries_flush)
+      : num_entries_flush_(num_entries_flush) {}
+
+  virtual MemTableRep* CreateMemTableRep(
+      const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
+      const SliceTransform* transform, Logger* logger) {
+    return new SpecialMemTableRep(
+        allocator, factory_.CreateMemTableRep(compare, allocator, transform, 0),
+        num_entries_flush_);
+  }
+  virtual const char* Name() const override { return "SkipListFactory"; }
+
+ private:
+  SkipListFactory factory_;
+  int num_entries_flush_;
+};
+
 // Special Env used to delay background operations
 class SpecialEnv : public EnvWrapper {
  public:
@@ -631,6 +709,8 @@ class DBTestBase : public testing::Test {
   void GenerateNewFile(int fd, Random* rnd, int* key_idx, bool nowait = false);
 
+  static const int kNumKeysByGenerateNewRandomFile;
+
   void GenerateNewRandomFile(Random* rnd, bool nowait = false);
 
   std::string IterStatus(Iterator* iter);
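
Editor's note (not part of the patch): the sketch below illustrates how a test
can plug SpecialSkipListFactory into Options so that memtable flushes are
driven by entry count rather than by allocator-dependent memory usage, which
is the mechanism the Summary describes. It is a minimal sketch under stated
assumptions, not code from this commit: the fixture name, test name, and final
assertion are hypothetical; SpecialSkipListFactory, DBTestBase,
kNumKeysByGenerateNewRandomFile, GenerateNewRandomFile, and
NumTableFilesAtLevel come from this patch or from the existing test utilities.

// Minimal sketch, assuming it is built and linked like the other db_test
// targets. Everything named *Sketch* is hypothetical.
#include "db/db_test_util.h"
#include "port/stack_trace.h"

namespace rocksdb {

class EntryCountFlushSketchTest : public DBTestBase {
 public:
  EntryCountFlushSketchTest() : DBTestBase("/entry_count_flush_sketch") {}
};

TEST_F(EntryCountFlushSketchTest, FlushAfterFixedNumberOfEntries) {
  Options options = CurrentOptions();
  // Flush after kNumKeysByGenerateNewRandomFile inserts instead of after a
  // byte threshold, so the behavior does not depend on how the allocator
  // (jemalloc or not) sizes skip list nodes.
  options.memtable_factory.reset(new SpecialSkipListFactory(
      DBTestBase::kNumKeysByGenerateNewRandomFile));
  // Keep the byte-based trigger high enough that it never fires first.
  options.write_buffer_size = 200 << 10;
  DestroyAndReopen(options);

  Random rnd(301);
  // Each call writes kNumKeysByGenerateNewRandomFile ~2KB values; the
  // wrapping SpecialMemTableRep then reports ~1GB of usage, so a flush is
  // scheduled and waited for before the call returns.
  GenerateNewRandomFile(&rnd);
  GenerateNewRandomFile(&rnd);
  ASSERT_GE(NumTableFilesAtLevel(0), 1);
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  rocksdb::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

This mirrors the design choice in the db_test.cc hunk: write_buffer_size is
raised to 200KB so it acts only as a safety net, while the entry-count
threshold in SpecialSkipListFactory is what actually decides when each file is
cut, keeping the number of files produced per GenerateNewRandomFile call the
same for all allocators.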