@@ -71,19 +71,19 @@ class AtomicCounter {
 // Special Env used to delay background operations
 class SpecialEnv : public EnvWrapper {
  public:
-  // sstable Sync() calls are blocked while this pointer is non-NULL.
+  // sstable Sync() calls are blocked while this pointer is non-nullptr.
   port::AtomicPointer delay_sstable_sync_;

-  // Simulate no-space errors while this pointer is non-NULL.
+  // Simulate no-space errors while this pointer is non-nullptr.
   port::AtomicPointer no_space_;

-  // Simulate non-writable file system while this pointer is non-NULL
+  // Simulate non-writable file system while this pointer is non-nullptr
   port::AtomicPointer non_writable_;

-  // Force sync of manifest files to fail while this pointer is non-NULL
+  // Force sync of manifest files to fail while this pointer is non-nullptr
   port::AtomicPointer manifest_sync_error_;

-  // Force write to manifest files to fail while this pointer is non-NULL
+  // Force write to manifest files to fail while this pointer is non-nullptr
   port::AtomicPointer manifest_write_error_;

   bool count_random_reads_;
@@ -92,12 +92,12 @@ class SpecialEnv : public EnvWrapper {
   anon::AtomicCounter sleep_counter_;

   explicit SpecialEnv(Env* base) : EnvWrapper(base) {
-    delay_sstable_sync_.Release_Store(NULL);
-    no_space_.Release_Store(NULL);
-    non_writable_.Release_Store(NULL);
+    delay_sstable_sync_.Release_Store(nullptr);
+    no_space_.Release_Store(nullptr);
+    non_writable_.Release_Store(nullptr);
     count_random_reads_ = false;
-    manifest_sync_error_.Release_Store(NULL);
-    manifest_write_error_.Release_Store(NULL);
+    manifest_sync_error_.Release_Store(nullptr);
+    manifest_write_error_.Release_Store(nullptr);
   }

   Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r) {
@@ -112,7 +112,7 @@ class SpecialEnv : public EnvWrapper {
             base_(std::move(base)) {
       }
       Status Append(const Slice& data) {
-        if (env_->no_space_.Acquire_Load() != NULL) {
+        if (env_->no_space_.Acquire_Load() != nullptr) {
           // Drop writes on the floor
           return Status::OK();
         } else {
@@ -122,7 +122,7 @@ class SpecialEnv : public EnvWrapper {
       Status Close() { return base_->Close(); }
       Status Flush() { return base_->Flush(); }
       Status Sync() {
-        while (env_->delay_sstable_sync_.Acquire_Load() != NULL) {
+        while (env_->delay_sstable_sync_.Acquire_Load() != nullptr) {
           env_->SleepForMicroseconds(100000);
         }
         return base_->Sync();
@@ -136,7 +136,7 @@ class SpecialEnv : public EnvWrapper {
       ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
           : env_(env), base_(std::move(b)) { }
       Status Append(const Slice& data) {
-        if (env_->manifest_write_error_.Acquire_Load() != NULL) {
+        if (env_->manifest_write_error_.Acquire_Load() != nullptr) {
           return Status::IOError("simulated writer error");
         } else {
           return base_->Append(data);
@@ -145,7 +145,7 @@ class SpecialEnv : public EnvWrapper {
       Status Close() { return base_->Close(); }
       Status Flush() { return base_->Flush(); }
       Status Sync() {
-        if (env_->manifest_sync_error_.Acquire_Load() != NULL) {
+        if (env_->manifest_sync_error_.Acquire_Load() != nullptr) {
           return Status::IOError("simulated sync error");
         } else {
           return base_->Sync();
@@ -153,15 +153,15 @@ class SpecialEnv : public EnvWrapper {
       }
     };

-    if (non_writable_.Acquire_Load() != NULL) {
+    if (non_writable_.Acquire_Load() != nullptr) {
       return Status::IOError("simulated write error");
     }

     Status s = target()->NewWritableFile(f, r);
     if (s.ok()) {
-      if (strstr(f.c_str(), ".sst") != NULL) {
+      if (strstr(f.c_str(), ".sst") != nullptr) {
         r->reset(new SSTableFile(this, std::move(*r)));
-      } else if (strstr(f.c_str(), "MANIFEST") != NULL) {
+      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
         r->reset(new ManifestFile(this, std::move(*r)));
       }
     }
@@ -227,7 +227,7 @@ class DBTest {
     filter_policy_ = NewBloomFilterPolicy(10);
     dbname_ = test::TmpDir() + "/db_test";
     DestroyDB(dbname_, Options());
-    db_ = NULL;
+    db_ = nullptr;
     Reopen();
   }

@@ -278,18 +278,18 @@ class DBTest {
     return reinterpret_cast<DBImpl*>(db_);
   }

-  void Reopen(Options* options = NULL) {
+  void Reopen(Options* options = nullptr) {
     ASSERT_OK(TryReopen(options));
   }

   void Close() {
     delete db_;
-    db_ = NULL;
+    db_ = nullptr;
   }

-  void DestroyAndReopen(Options* options = NULL) {
+  void DestroyAndReopen(Options* options = nullptr) {
     delete db_;
-    db_ = NULL;
+    db_ = nullptr;
     DestroyDB(dbname_, Options());
     ASSERT_OK(TryReopen(options));
   }
@@ -300,9 +300,9 @@ class DBTest {

   Status TryReopen(Options* options) {
     delete db_;
-    db_ = NULL;
+    db_ = nullptr;
     Options opts;
-    if (options != NULL) {
+    if (options != nullptr) {
       opts = *options;
     } else {
       opts = CurrentOptions();
@@ -321,7 +321,7 @@ class DBTest {
     return db_->Delete(WriteOptions(), k);
   }

-  std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
+  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
     ReadOptions options;
     options.snapshot = snapshot;
     std::string result;
@@ -508,7 +508,7 @@ class DBTest {

 TEST(DBTest, Empty) {
   do {
-    ASSERT_TRUE(db_ != NULL);
+    ASSERT_TRUE(db_ != nullptr);
     ASSERT_EQ("NOT_FOUND", Get("foo"));
   } while (ChangeOptions());
 }
@@ -605,7 +605,7 @@ TEST(DBTest, GetFromImmutableLayer) {
     Put("k1", std::string(100000, 'x'));             // Fill memtable
     Put("k2", std::string(100000, 'y'));             // Trigger compaction
     ASSERT_EQ("v1", Get("foo"));
-    env_->delay_sstable_sync_.Release_Store(NULL);   // Release sync calls
+    env_->delay_sstable_sync_.Release_Store(nullptr);   // Release sync calls
   } while (ChangeOptions());
 }

@@ -699,7 +699,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
   }

   // Step 2: clear level 1 if necessary.
-  dbfull()->TEST_CompactRange(1, NULL, NULL);
+  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
   ASSERT_EQ(NumTableFilesAtLevel(0), 1);
   ASSERT_EQ(NumTableFilesAtLevel(1), 0);
   ASSERT_EQ(NumTableFilesAtLevel(2), 1);
@@ -1144,7 +1144,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {

   // Reopening moves updates to level-0
   Reopen(&options);
-  dbfull()->TEST_CompactRange(0, NULL, NULL);
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);

   ASSERT_EQ(NumTableFilesAtLevel(0), 0);
   ASSERT_GT(NumTableFilesAtLevel(1), 1);
@@ -1318,20 +1318,20 @@ static int cfilter_count;
 static std::string NEW_VALUE = "NewValue";
 static bool keep_filter(void* arg, int level, const Slice& key,
                         const Slice& value, Slice** new_value) {
-  assert(arg == NULL);
+  assert(arg == nullptr);
   cfilter_count++;
   return false;
 }
 static bool delete_filter(void*argv, int level, const Slice& key,
                           const Slice& value, Slice** new_value) {
-  assert(argv == NULL);
+  assert(argv == nullptr);
   cfilter_count++;
   return true;
 }
 static bool change_filter(void*argv, int level, const Slice& key,
                           const Slice& value, Slice** new_value) {
   assert(argv == (void*)100);
-  assert(new_value != NULL);
+  assert(new_value != nullptr);
   *new_value = new Slice(NEW_VALUE);
   return false;
 }
@@ -1360,10 +1360,10 @@ TEST(DBTest, CompactionFilter) {
   // the compaction is each level invokes the filter for
   // all the keys in that level.
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, NULL, NULL);
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
   ASSERT_EQ(cfilter_count, 100000);
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, NULL, NULL);
+  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
   ASSERT_EQ(cfilter_count, 100000);

   ASSERT_EQ(NumTableFilesAtLevel(0), 0);
@@ -1407,10 +1407,10 @@ TEST(DBTest, CompactionFilter) {
   // means that all keys should pass at least once
   // via the compaction filter
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, NULL, NULL);
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
   ASSERT_EQ(cfilter_count, 100000);
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, NULL, NULL);
+  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
   ASSERT_EQ(cfilter_count, 100000);
   ASSERT_EQ(NumTableFilesAtLevel(0), 0);
   ASSERT_EQ(NumTableFilesAtLevel(1), 0);
@@ -1438,10 +1438,10 @@ TEST(DBTest, CompactionFilter) {
   // verify that at the end of the compaction process,
   // nothing is left.
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, NULL, NULL);
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
   ASSERT_EQ(cfilter_count, 100000);
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, NULL, NULL);
+  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
   ASSERT_EQ(cfilter_count, 0);
   ASSERT_EQ(NumTableFilesAtLevel(0), 0);
   ASSERT_EQ(NumTableFilesAtLevel(1), 0);
@@ -1500,8 +1500,8 @@ TEST(DBTest, CompactionFilterWithValueChange) {

   // push all files to lower levels
   dbfull()->TEST_CompactMemTable();
-  dbfull()->TEST_CompactRange(0, NULL, NULL);
-  dbfull()->TEST_CompactRange(1, NULL, NULL);
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+  dbfull()->TEST_CompactRange(1, nullptr, nullptr);

   // re-write all data again
   for (int i = 0; i < 100001; i++) {
@@ -1513,8 +1513,8 @@ TEST(DBTest, CompactionFilterWithValueChange) {
   // push all files to lower levels. This should
   // invoke the compaction filter for all 100000 keys.
   dbfull()->TEST_CompactMemTable();
-  dbfull()->TEST_CompactRange(0, NULL, NULL);
-  dbfull()->TEST_CompactRange(1, NULL, NULL);
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+  dbfull()->TEST_CompactRange(1, nullptr, nullptr);

   // verify that all keys now have the new value that
   // was set by the compaction process.
@@ -1549,7 +1549,7 @@ TEST(DBTest, SparseMerge) {
   }
   Put("C", "vc");
   dbfull()->TEST_CompactMemTable();
-  dbfull()->TEST_CompactRange(0, NULL, NULL);
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);

   // Make sparse update
   Put("A",    "va2");
@@ -1560,9 +1560,9 @@ TEST(DBTest, SparseMerge) {
   // Compactions should not cause us to create a situation where
   // a file overlaps too much data at the next level.
   ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
-  dbfull()->TEST_CompactRange(0, NULL, NULL);
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
   ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
-  dbfull()->TEST_CompactRange(1, NULL, NULL);
+  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
   ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
 }

@@ -1660,7 +1660,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {

       ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));

-      dbfull()->TEST_CompactRange(0, NULL, NULL);
+      dbfull()->TEST_CompactRange(0, nullptr, nullptr);
     }
   } while (ChangeOptions());
 }
@@ -1736,11 +1736,11 @@ TEST(DBTest, HiddenValuesAreRemoved) {
     db_->ReleaseSnapshot(snapshot);
     ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
     Slice x("x");
-    dbfull()->TEST_CompactRange(0, NULL, &x);
+    dbfull()->TEST_CompactRange(0, nullptr, &x);
     ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
     ASSERT_EQ(NumTableFilesAtLevel(0), 0);
     ASSERT_GE(NumTableFilesAtLevel(1), 1);
-    dbfull()->TEST_CompactRange(1, NULL, &x);
+    dbfull()->TEST_CompactRange(1, nullptr, &x);
     ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");

     ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
@@ -1773,7 +1773,7 @@ TEST(DBTest, CompactBetweenSnapshots) {
   // After a compaction, "second", "third" and "fifth" should
   // be removed
   FillLevels("a", "z");
-  dbfull()->CompactRange(NULL, NULL);
+  dbfull()->CompactRange(nullptr, nullptr);
   ASSERT_EQ("sixth", Get("foo"));
   ASSERT_EQ("fourth", Get("foo", snapshot2));
   ASSERT_EQ("first", Get("foo", snapshot1));
@@ -1782,7 +1782,7 @@ TEST(DBTest, CompactBetweenSnapshots) {
   // after we release the snapshot1, only two values left
   db_->ReleaseSnapshot(snapshot1);
   FillLevels("a", "z");
-  dbfull()->CompactRange(NULL, NULL);
+  dbfull()->CompactRange(nullptr, nullptr);

   // We have only one valid snapshot snapshot2. Since snapshot1 is
   // not valid anymore, "first" should be removed by a compaction.
@@ -1793,7 +1793,7 @@ TEST(DBTest, CompactBetweenSnapshots) {
   // after we release the snapshot2, only one value should be left
   db_->ReleaseSnapshot(snapshot2);
   FillLevels("a", "z");
-  dbfull()->CompactRange(NULL, NULL);
+  dbfull()->CompactRange(nullptr, nullptr);
   ASSERT_EQ("sixth", Get("foo"));
   ASSERT_EQ(AllEntriesFor("foo"), "[ sixth ]");

@@ -1819,11 +1819,11 @@ TEST(DBTest, DeletionMarkers1) {
   ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
   Slice z("z");
-  dbfull()->TEST_CompactRange(last-2, NULL, &z);
+  dbfull()->TEST_CompactRange(last-2, nullptr, &z);
   // DEL eliminated, but v1 remains because we aren't compacting that level
   // (DEL can be eliminated because v2 hides v1).
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
-  dbfull()->TEST_CompactRange(last-1, NULL, NULL);
+  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
   // Merging last-1 w/ last, so we are the base level for "foo", so
   // DEL is removed.  (as is v1).
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
@@ -1846,10 +1846,10 @@ TEST(DBTest, DeletionMarkers2) {
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
   ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
-  dbfull()->TEST_CompactRange(last-2, NULL, NULL);
+  dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
   // DEL kept: "last" file overlaps
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
-  dbfull()->TEST_CompactRange(last-1, NULL, NULL);
+  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
   // Merging last-1 w/ last, so we are the base level for "foo", so
   // DEL is removed.  (as is v1).
   ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
@@ -1883,8 +1883,8 @@ TEST(DBTest, OverlapInLevel0) {
   ASSERT_EQ("2,1,1", FilesPerLevel());

   // Compact away the placeholder files we created initially
-  dbfull()->TEST_CompactRange(1, NULL, NULL);
-  dbfull()->TEST_CompactRange(2, NULL, NULL);
+  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
+  dbfull()->TEST_CompactRange(2, nullptr, nullptr);
   ASSERT_EQ("2", FilesPerLevel());

   // Do a memtable compaction.  Before bug-fix, the compaction would
@@ -1993,7 +1993,7 @@ TEST(DBTest, CustomComparator) {
   Options new_options = CurrentOptions();
   new_options.create_if_missing = true;
   new_options.comparator = &cmp;
-  new_options.filter_policy = NULL;     // Cannot use bloom filters
+  new_options.filter_policy = nullptr;  // Cannot use bloom filters
   new_options.write_buffer_size = 1000;  // Compact more often
   DestroyAndReopen(&new_options);
   ASSERT_OK(Put("[10]", "ten"));
@@ -2048,7 +2048,7 @@ TEST(DBTest, ManualCompaction) {
   // Compact all
   MakeTables(1, "a", "z");
   ASSERT_EQ("0,1,2", FilesPerLevel());
-  db_->CompactRange(NULL, NULL);
+  db_->CompactRange(nullptr, nullptr);
   ASSERT_EQ("0,0,1", FilesPerLevel());
 }

@@ -2057,38 +2057,38 @@ TEST(DBTest, DBOpen_Options) {
   DestroyDB(dbname, Options());

   // Does not exist, and create_if_missing == false: error
-  DB* db = NULL;
+  DB* db = nullptr;
   Options opts;
   opts.create_if_missing = false;
   Status s = DB::Open(opts, dbname, &db);
-  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != NULL);
-  ASSERT_TRUE(db == NULL);
+  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
+  ASSERT_TRUE(db == nullptr);

   // Does not exist, and create_if_missing == true: OK
   opts.create_if_missing = true;
   s = DB::Open(opts, dbname, &db);
   ASSERT_OK(s);
-  ASSERT_TRUE(db != NULL);
+  ASSERT_TRUE(db != nullptr);

   delete db;
-  db = NULL;
+  db = nullptr;

   // Does exist, and error_if_exists == true: error
   opts.create_if_missing = false;
   opts.error_if_exists = true;
   s = DB::Open(opts, dbname, &db);
-  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != NULL);
-  ASSERT_TRUE(db == NULL);
+  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
+  ASSERT_TRUE(db == nullptr);

   // Does exist, and error_if_exists == false: OK
   opts.create_if_missing = true;
   opts.error_if_exists = false;
   s = DB::Open(opts, dbname, &db);
   ASSERT_OK(s);
-  ASSERT_TRUE(db != NULL);
+  ASSERT_TRUE(db != nullptr);

   delete db;
-  db = NULL;
+  db = nullptr;
 }

 TEST(DBTest, DBOpen_Change_NumLevels) {
@@ -2096,22 +2096,22 @@ TEST(DBTest, DBOpen_Change_NumLevels) {
   DestroyDB(dbname, Options());
   Options opts;
   Status s;
-  DB* db = NULL;
+  DB* db = nullptr;
   opts.create_if_missing = true;
   s = DB::Open(opts, dbname, &db);
   ASSERT_OK(s);
-  ASSERT_TRUE(db != NULL);
+  ASSERT_TRUE(db != nullptr);
   db->Put(WriteOptions(), "a", "123");
   db->Put(WriteOptions(), "b", "234");
-  db->CompactRange(NULL, NULL);
+  db->CompactRange(nullptr, nullptr);
   delete db;
-  db = NULL;
+  db = nullptr;

   opts.create_if_missing = false;
   opts.num_levels = 2;
   s = DB::Open(opts, dbname, &db);
-  ASSERT_TRUE(strstr(s.ToString().c_str(), "Corruption") != NULL);
-  ASSERT_TRUE(db == NULL);
+  ASSERT_TRUE(strstr(s.ToString().c_str(), "Corruption") != nullptr);
+  ASSERT_TRUE(db == nullptr);
 }

 TEST(DBTest, DestroyDBMetaDatabase) {
@@ -2127,16 +2127,16 @@ TEST(DBTest, DestroyDBMetaDatabase) {
   // Setup databases
   Options opts;
   opts.create_if_missing = true;
-  DB* db = NULL;
+  DB* db = nullptr;
   ASSERT_OK(DB::Open(opts, dbname, &db));
   delete db;
-  db = NULL;
+  db = nullptr;
   ASSERT_OK(DB::Open(opts, metadbname, &db));
   delete db;
-  db = NULL;
+  db = nullptr;
   ASSERT_OK(DB::Open(opts, metametadbname, &db));
   delete db;
-  db = NULL;
+  db = nullptr;

   // Delete databases
   DestroyDB(dbname, Options());
@@ -2163,10 +2163,10 @@ TEST(DBTest, NoSpace) {
   env_->sleep_counter_.Reset();
   for (int i = 0; i < 5; i++) {
     for (int level = 0; level < dbfull()->NumberLevels()-1; level++) {
-      dbfull()->TEST_CompactRange(level, NULL, NULL);
+      dbfull()->TEST_CompactRange(level, nullptr, nullptr);
     }
   }
-  env_->no_space_.Release_Store(NULL);
+  env_->no_space_.Release_Store(nullptr);
   ASSERT_LT(CountFiles(), num_files + 3);

   // Check that compaction attempts slept after errors
@@ -2190,7 +2190,7 @@ TEST(DBTest, NonWritableFileSystem)
     }
   }
   ASSERT_GT(errors, 0);
-  env_->non_writable_.Release_Store(NULL);
+  env_->non_writable_.Release_Store(nullptr);
 }

 TEST(DBTest, ManifestWriteError) {
@@ -2224,11 +2224,11 @@ TEST(DBTest, ManifestWriteError) {

     // Merging compaction (will fail)
     error_type->Release_Store(env_);
-    dbfull()->TEST_CompactRange(last, NULL, NULL);  // Should fail
+    dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
     ASSERT_EQ("bar", Get("foo"));

     // Recovery: should not lose data
-    error_type->Release_Store(NULL);
+    error_type->Release_Store(nullptr);
     Reopen(&options);
     ASSERT_EQ("bar", Get("foo"));
   }
@@ -2286,7 +2286,7 @@ TEST(DBTest, BloomFilter) {
   fprintf(stderr, "%d missing => %d reads\n", N, reads);
   ASSERT_LE(reads, 3*N/100);

-  env_->delay_sstable_sync_.Release_Store(NULL);
+  env_->delay_sstable_sync_.Release_Store(nullptr);
   Close();
   delete options.filter_policy;
 }
@@ -2554,7 +2554,7 @@ TEST(DBTest, ReadCompaction) {
     options.max_open_files = 20; // only 10 file in file-cache
     options.target_file_size_base = 512;
     options.write_buffer_size = 64 * 1024;
-    options.filter_policy = NULL;
+    options.filter_policy = nullptr;
     options.block_size = 4096;
     options.block_cache = NewLRUCache(0);  // Prevent cache hits

@@ -2569,8 +2569,8 @@ TEST(DBTest, ReadCompaction) {

     // clear level 0 and 1 if necessary.
     dbfull()->TEST_CompactMemTable();
-    dbfull()->TEST_CompactRange(0, NULL, NULL);
-    dbfull()->TEST_CompactRange(1, NULL, NULL);
+    dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
     ASSERT_EQ(NumTableFilesAtLevel(0), 0);
     ASSERT_EQ(NumTableFilesAtLevel(1), 0);

@@ -2636,7 +2636,7 @@ static void MTThreadBody(void* arg) {
   Random rnd(1000 + id);
   std::string value;
   char valbuf[1500];
-  while (t->state->stop.Acquire_Load() == NULL) {
+  while (t->state->stop.Acquire_Load() == nullptr) {
     t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));

     int key = rnd.Uniform(kNumKeys);
@@ -2699,7 +2699,7 @@ TEST(DBTest, MultiThreaded) {
   // Stop the threads and wait for them to finish
   mt.stop.Release_Store(&mt);
   for (int id = 0; id < kNumThreads; id++) {
-    while (mt.thread_done[id].Acquire_Load() == NULL) {
+    while (mt.thread_done[id].Acquire_Load() == nullptr) {
       env_->SleepForMicroseconds(100000);
     }
   }
@@ -2730,7 +2730,7 @@ class ModelDB: public DB {
     return Status::NotFound(key);
   }
   virtual Iterator* NewIterator(const ReadOptions& options) {
-    if (options.snapshot == NULL) {
+    if (options.snapshot == nullptr) {
       KVMap* saved = new KVMap;
       *saved = map_;
       return new ModelIter(saved, true);
@@ -2909,8 +2909,8 @@ TEST(DBTest, Randomized) {
   do {
     ModelDB model(CurrentOptions());
     const int N = 10000;
-    const Snapshot* model_snap = NULL;
-    const Snapshot* db_snap = NULL;
+    const Snapshot* model_snap = nullptr;
+    const Snapshot* db_snap = nullptr;
     std::string k, v;
     for (int step = 0; step < N; step++) {
       if (step % 100 == 0) {
@@ -2955,23 +2955,23 @@ TEST(DBTest, Randomized) {
     }

     if ((step % 100) == 0) {
-      ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));
+      ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
       ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
       // Save a snapshot from each DB this time that we'll use next
       // time we compare things, to make sure the current state is
       // preserved with the snapshot
-      if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
-      if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
+      if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
+      if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);

       Reopen();
-      ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));
+      ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));

       model_snap = model.GetSnapshot();
       db_snap = db_->GetSnapshot();
     }
   }
-  if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
-  if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
+  if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
+  if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
   } while (ChangeOptions());
 }

@@ -2985,15 +2985,15 @@ void BM_LogAndApply(int iters, int num_base_files) {
   std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
   DestroyDB(dbname, Options());

-  DB* db = NULL;
+  DB* db = nullptr;
   Options opts;
   opts.create_if_missing = true;
   Status s = DB::Open(opts, dbname, &db);
   ASSERT_OK(s);
-  ASSERT_TRUE(db != NULL);
+  ASSERT_TRUE(db != nullptr);

   delete db;
-  db = NULL;
+  db = nullptr;

   Env* env = Env::Default();

@@ -3002,7 +3002,7 @@ void BM_LogAndApply(int iters, int num_base_files) {

   InternalKeyComparator cmp(BytewiseComparator());
   Options options;
-  VersionSet vset(dbname, &options, NULL, &cmp);
+  VersionSet vset(dbname, &options, nullptr, &cmp);
   ASSERT_OK(vset.Recover());
   VersionEdit vbase(vset.NumberLevels());
   uint64_t fnum = 1;