Codemod NULL to nullptr

Summary:
Scripted codemod of NULL to nullptr in:
* include/leveldb/
* db/
* table/
* util/

Test Plan: make all check

Reviewers: dhruba, emayanke

Reviewed By: emayanke

CC: leveldb

Differential Revision: https://reviews.facebook.net/D9003
main
Abhishek Kona 11 years ago
parent e45c7a8444
commit c41f1e995c
  1. 16
      db/corruption_test.cc
  2. 204
      db/db_test.cc
  3. 2
      db/log_reader.cc
  4. 2
      db/log_reader.h
  5. 6
      db/memtablelist.cc
  6. 4
      db/memtablelist.h
  7. 6
      db/repair.cc
  8. 36
      db/skiplist.h
  9. 4
      db/skiplist_test.cc
  10. 8
      db/table_cache.h
  11. 2
      db/transaction_log_iterator_impl.cc
  12. 8
      db/version_edit.cc
  13. 79
      db/version_set.cc
  14. 28
      db/version_set.h
  15. 52
      db/version_set_test.cc
  16. 2
      include/leveldb/cache.h
  17. 8
      include/leveldb/db.h
  18. 10
      include/leveldb/env.h
  19. 30
      include/leveldb/options.h
  20. 12
      include/leveldb/status.h
  21. 2
      include/leveldb/table.h
  22. 16
      table/block.cc
  23. 4
      table/filter_block.cc
  24. 4
      table/format.cc
  25. 14
      table/iterator.cc
  26. 8
      table/iterator_wrapper.h
  27. 8
      table/merger.cc
  28. 30
      table/table.cc
  29. 12
      table/table_builder.cc
  30. 36
      table/table_test.cc
  31. 33
      table/two_level_iterator.cc
  32. 2
      util/arena.cc
  33. 20
      util/cache.cc
  34. 4
      util/cache_test.cc
  35. 12
      util/coding.cc
  36. 2
      util/coding.h
  37. 18
      util/coding_test.cc
  38. 4
      util/crc32c.cc
  39. 56
      util/env_hdfs.cc
  40. 61
      util/env_posix.cc
  41. 6
      util/env_test.cc
  42. 12
      util/options.cc
  43. 2
      util/posix_logger.h
  44. 2
      util/status.cc
  45. 10
      util/testharness.cc

@ -38,7 +38,7 @@ class CorruptionTest {
dbname_ = test::TmpDir() + "/db_test"; dbname_ = test::TmpDir() + "/db_test";
DestroyDB(dbname_, options_); DestroyDB(dbname_, options_);
db_ = NULL; db_ = nullptr;
options_.create_if_missing = true; options_.create_if_missing = true;
Reopen(); Reopen();
options_.create_if_missing = false; options_.create_if_missing = false;
@ -49,22 +49,22 @@ class CorruptionTest {
DestroyDB(dbname_, Options()); DestroyDB(dbname_, Options());
} }
Status TryReopen(Options* options = NULL) { Status TryReopen(Options* options = nullptr) {
delete db_; delete db_;
db_ = NULL; db_ = nullptr;
Options opt = (options ? *options : options_); Options opt = (options ? *options : options_);
opt.env = &env_; opt.env = &env_;
opt.block_cache = tiny_cache_; opt.block_cache = tiny_cache_;
return DB::Open(opt, dbname_, &db_); return DB::Open(opt, dbname_, &db_);
} }
void Reopen(Options* options = NULL) { void Reopen(Options* options = nullptr) {
ASSERT_OK(TryReopen(options)); ASSERT_OK(TryReopen(options));
} }
void RepairDB() { void RepairDB() {
delete db_; delete db_;
db_ = NULL; db_ = nullptr;
ASSERT_OK(::leveldb::RepairDB(dbname_, options_)); ASSERT_OK(::leveldb::RepairDB(dbname_, options_));
} }
@ -228,8 +228,8 @@ TEST(CorruptionTest, TableFile) {
Build(100); Build(100);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable(); dbi->TEST_CompactMemTable();
dbi->TEST_CompactRange(0, NULL, NULL); dbi->TEST_CompactRange(0, nullptr, nullptr);
dbi->TEST_CompactRange(1, NULL, NULL); dbi->TEST_CompactRange(1, nullptr, nullptr);
Corrupt(kTableFile, 100, 1); Corrupt(kTableFile, 100, 1);
Check(99, 99); Check(99, 99);
@ -277,7 +277,7 @@ TEST(CorruptionTest, CorruptedDescriptor) {
ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello")); ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable(); dbi->TEST_CompactMemTable();
dbi->TEST_CompactRange(0, NULL, NULL); dbi->TEST_CompactRange(0, nullptr, nullptr);
Corrupt(kDescriptorFile, 0, 1000); Corrupt(kDescriptorFile, 0, 1000);
Status s = TryReopen(); Status s = TryReopen();

@ -71,19 +71,19 @@ class AtomicCounter {
// Special Env used to delay background operations // Special Env used to delay background operations
class SpecialEnv : public EnvWrapper { class SpecialEnv : public EnvWrapper {
public: public:
// sstable Sync() calls are blocked while this pointer is non-NULL. // sstable Sync() calls are blocked while this pointer is non-nullptr.
port::AtomicPointer delay_sstable_sync_; port::AtomicPointer delay_sstable_sync_;
// Simulate no-space errors while this pointer is non-NULL. // Simulate no-space errors while this pointer is non-nullptr.
port::AtomicPointer no_space_; port::AtomicPointer no_space_;
// Simulate non-writable file system while this pointer is non-NULL // Simulate non-writable file system while this pointer is non-nullptr
port::AtomicPointer non_writable_; port::AtomicPointer non_writable_;
// Force sync of manifest files to fail while this pointer is non-NULL // Force sync of manifest files to fail while this pointer is non-nullptr
port::AtomicPointer manifest_sync_error_; port::AtomicPointer manifest_sync_error_;
// Force write to manifest files to fail while this pointer is non-NULL // Force write to manifest files to fail while this pointer is non-nullptr
port::AtomicPointer manifest_write_error_; port::AtomicPointer manifest_write_error_;
bool count_random_reads_; bool count_random_reads_;
@ -92,12 +92,12 @@ class SpecialEnv : public EnvWrapper {
anon::AtomicCounter sleep_counter_; anon::AtomicCounter sleep_counter_;
explicit SpecialEnv(Env* base) : EnvWrapper(base) { explicit SpecialEnv(Env* base) : EnvWrapper(base) {
delay_sstable_sync_.Release_Store(NULL); delay_sstable_sync_.Release_Store(nullptr);
no_space_.Release_Store(NULL); no_space_.Release_Store(nullptr);
non_writable_.Release_Store(NULL); non_writable_.Release_Store(nullptr);
count_random_reads_ = false; count_random_reads_ = false;
manifest_sync_error_.Release_Store(NULL); manifest_sync_error_.Release_Store(nullptr);
manifest_write_error_.Release_Store(NULL); manifest_write_error_.Release_Store(nullptr);
} }
Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r) { Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r) {
@ -112,7 +112,7 @@ class SpecialEnv : public EnvWrapper {
base_(std::move(base)) { base_(std::move(base)) {
} }
Status Append(const Slice& data) { Status Append(const Slice& data) {
if (env_->no_space_.Acquire_Load() != NULL) { if (env_->no_space_.Acquire_Load() != nullptr) {
// Drop writes on the floor // Drop writes on the floor
return Status::OK(); return Status::OK();
} else { } else {
@ -122,7 +122,7 @@ class SpecialEnv : public EnvWrapper {
Status Close() { return base_->Close(); } Status Close() { return base_->Close(); }
Status Flush() { return base_->Flush(); } Status Flush() { return base_->Flush(); }
Status Sync() { Status Sync() {
while (env_->delay_sstable_sync_.Acquire_Load() != NULL) { while (env_->delay_sstable_sync_.Acquire_Load() != nullptr) {
env_->SleepForMicroseconds(100000); env_->SleepForMicroseconds(100000);
} }
return base_->Sync(); return base_->Sync();
@ -136,7 +136,7 @@ class SpecialEnv : public EnvWrapper {
ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b) ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
: env_(env), base_(std::move(b)) { } : env_(env), base_(std::move(b)) { }
Status Append(const Slice& data) { Status Append(const Slice& data) {
if (env_->manifest_write_error_.Acquire_Load() != NULL) { if (env_->manifest_write_error_.Acquire_Load() != nullptr) {
return Status::IOError("simulated writer error"); return Status::IOError("simulated writer error");
} else { } else {
return base_->Append(data); return base_->Append(data);
@ -145,7 +145,7 @@ class SpecialEnv : public EnvWrapper {
Status Close() { return base_->Close(); } Status Close() { return base_->Close(); }
Status Flush() { return base_->Flush(); } Status Flush() { return base_->Flush(); }
Status Sync() { Status Sync() {
if (env_->manifest_sync_error_.Acquire_Load() != NULL) { if (env_->manifest_sync_error_.Acquire_Load() != nullptr) {
return Status::IOError("simulated sync error"); return Status::IOError("simulated sync error");
} else { } else {
return base_->Sync(); return base_->Sync();
@ -153,15 +153,15 @@ class SpecialEnv : public EnvWrapper {
} }
}; };
if (non_writable_.Acquire_Load() != NULL) { if (non_writable_.Acquire_Load() != nullptr) {
return Status::IOError("simulated write error"); return Status::IOError("simulated write error");
} }
Status s = target()->NewWritableFile(f, r); Status s = target()->NewWritableFile(f, r);
if (s.ok()) { if (s.ok()) {
if (strstr(f.c_str(), ".sst") != NULL) { if (strstr(f.c_str(), ".sst") != nullptr) {
r->reset(new SSTableFile(this, std::move(*r))); r->reset(new SSTableFile(this, std::move(*r)));
} else if (strstr(f.c_str(), "MANIFEST") != NULL) { } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
r->reset(new ManifestFile(this, std::move(*r))); r->reset(new ManifestFile(this, std::move(*r)));
} }
} }
@ -227,7 +227,7 @@ class DBTest {
filter_policy_ = NewBloomFilterPolicy(10); filter_policy_ = NewBloomFilterPolicy(10);
dbname_ = test::TmpDir() + "/db_test"; dbname_ = test::TmpDir() + "/db_test";
DestroyDB(dbname_, Options()); DestroyDB(dbname_, Options());
db_ = NULL; db_ = nullptr;
Reopen(); Reopen();
} }
@ -278,18 +278,18 @@ class DBTest {
return reinterpret_cast<DBImpl*>(db_); return reinterpret_cast<DBImpl*>(db_);
} }
void Reopen(Options* options = NULL) { void Reopen(Options* options = nullptr) {
ASSERT_OK(TryReopen(options)); ASSERT_OK(TryReopen(options));
} }
void Close() { void Close() {
delete db_; delete db_;
db_ = NULL; db_ = nullptr;
} }
void DestroyAndReopen(Options* options = NULL) { void DestroyAndReopen(Options* options = nullptr) {
delete db_; delete db_;
db_ = NULL; db_ = nullptr;
DestroyDB(dbname_, Options()); DestroyDB(dbname_, Options());
ASSERT_OK(TryReopen(options)); ASSERT_OK(TryReopen(options));
} }
@ -300,9 +300,9 @@ class DBTest {
Status TryReopen(Options* options) { Status TryReopen(Options* options) {
delete db_; delete db_;
db_ = NULL; db_ = nullptr;
Options opts; Options opts;
if (options != NULL) { if (options != nullptr) {
opts = *options; opts = *options;
} else { } else {
opts = CurrentOptions(); opts = CurrentOptions();
@ -321,7 +321,7 @@ class DBTest {
return db_->Delete(WriteOptions(), k); return db_->Delete(WriteOptions(), k);
} }
std::string Get(const std::string& k, const Snapshot* snapshot = NULL) { std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
ReadOptions options; ReadOptions options;
options.snapshot = snapshot; options.snapshot = snapshot;
std::string result; std::string result;
@ -508,7 +508,7 @@ class DBTest {
TEST(DBTest, Empty) { TEST(DBTest, Empty) {
do { do {
ASSERT_TRUE(db_ != NULL); ASSERT_TRUE(db_ != nullptr);
ASSERT_EQ("NOT_FOUND", Get("foo")); ASSERT_EQ("NOT_FOUND", Get("foo"));
} while (ChangeOptions()); } while (ChangeOptions());
} }
@ -605,7 +605,7 @@ TEST(DBTest, GetFromImmutableLayer) {
Put("k1", std::string(100000, 'x')); // Fill memtable Put("k1", std::string(100000, 'x')); // Fill memtable
Put("k2", std::string(100000, 'y')); // Trigger compaction Put("k2", std::string(100000, 'y')); // Trigger compaction
ASSERT_EQ("v1", Get("foo")); ASSERT_EQ("v1", Get("foo"));
env_->delay_sstable_sync_.Release_Store(NULL); // Release sync calls env_->delay_sstable_sync_.Release_Store(nullptr); // Release sync calls
} while (ChangeOptions()); } while (ChangeOptions());
} }
@ -699,7 +699,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
} }
// Step 2: clear level 1 if necessary. // Step 2: clear level 1 if necessary.
dbfull()->TEST_CompactRange(1, NULL, NULL); dbfull()->TEST_CompactRange(1, nullptr, nullptr);
ASSERT_EQ(NumTableFilesAtLevel(0), 1); ASSERT_EQ(NumTableFilesAtLevel(0), 1);
ASSERT_EQ(NumTableFilesAtLevel(1), 0); ASSERT_EQ(NumTableFilesAtLevel(1), 0);
ASSERT_EQ(NumTableFilesAtLevel(2), 1); ASSERT_EQ(NumTableFilesAtLevel(2), 1);
@ -1144,7 +1144,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {
// Reopening moves updates to level-0 // Reopening moves updates to level-0
Reopen(&options); Reopen(&options);
dbfull()->TEST_CompactRange(0, NULL, NULL); dbfull()->TEST_CompactRange(0, nullptr, nullptr);
ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_GT(NumTableFilesAtLevel(1), 1); ASSERT_GT(NumTableFilesAtLevel(1), 1);
@ -1318,20 +1318,20 @@ static int cfilter_count;
static std::string NEW_VALUE = "NewValue"; static std::string NEW_VALUE = "NewValue";
static bool keep_filter(void* arg, int level, const Slice& key, static bool keep_filter(void* arg, int level, const Slice& key,
const Slice& value, Slice** new_value) { const Slice& value, Slice** new_value) {
assert(arg == NULL); assert(arg == nullptr);
cfilter_count++; cfilter_count++;
return false; return false;
} }
static bool delete_filter(void*argv, int level, const Slice& key, static bool delete_filter(void*argv, int level, const Slice& key,
const Slice& value, Slice** new_value) { const Slice& value, Slice** new_value) {
assert(argv == NULL); assert(argv == nullptr);
cfilter_count++; cfilter_count++;
return true; return true;
} }
static bool change_filter(void*argv, int level, const Slice& key, static bool change_filter(void*argv, int level, const Slice& key,
const Slice& value, Slice** new_value) { const Slice& value, Slice** new_value) {
assert(argv == (void*)100); assert(argv == (void*)100);
assert(new_value != NULL); assert(new_value != nullptr);
*new_value = new Slice(NEW_VALUE); *new_value = new Slice(NEW_VALUE);
return false; return false;
} }
@ -1360,10 +1360,10 @@ TEST(DBTest, CompactionFilter) {
// the compaction is each level invokes the filter for // the compaction is each level invokes the filter for
// all the keys in that level. // all the keys in that level.
cfilter_count = 0; cfilter_count = 0;
dbfull()->TEST_CompactRange(0, NULL, NULL); dbfull()->TEST_CompactRange(0, nullptr, nullptr);
ASSERT_EQ(cfilter_count, 100000); ASSERT_EQ(cfilter_count, 100000);
cfilter_count = 0; cfilter_count = 0;
dbfull()->TEST_CompactRange(1, NULL, NULL); dbfull()->TEST_CompactRange(1, nullptr, nullptr);
ASSERT_EQ(cfilter_count, 100000); ASSERT_EQ(cfilter_count, 100000);
ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_EQ(NumTableFilesAtLevel(0), 0);
@ -1407,10 +1407,10 @@ TEST(DBTest, CompactionFilter) {
// means that all keys should pass at least once // means that all keys should pass at least once
// via the compaction filter // via the compaction filter
cfilter_count = 0; cfilter_count = 0;
dbfull()->TEST_CompactRange(0, NULL, NULL); dbfull()->TEST_CompactRange(0, nullptr, nullptr);
ASSERT_EQ(cfilter_count, 100000); ASSERT_EQ(cfilter_count, 100000);
cfilter_count = 0; cfilter_count = 0;
dbfull()->TEST_CompactRange(1, NULL, NULL); dbfull()->TEST_CompactRange(1, nullptr, nullptr);
ASSERT_EQ(cfilter_count, 100000); ASSERT_EQ(cfilter_count, 100000);
ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_EQ(NumTableFilesAtLevel(1), 0); ASSERT_EQ(NumTableFilesAtLevel(1), 0);
@ -1438,10 +1438,10 @@ TEST(DBTest, CompactionFilter) {
// verify that at the end of the compaction process, // verify that at the end of the compaction process,
// nothing is left. // nothing is left.
cfilter_count = 0; cfilter_count = 0;
dbfull()->TEST_CompactRange(0, NULL, NULL); dbfull()->TEST_CompactRange(0, nullptr, nullptr);
ASSERT_EQ(cfilter_count, 100000); ASSERT_EQ(cfilter_count, 100000);
cfilter_count = 0; cfilter_count = 0;
dbfull()->TEST_CompactRange(1, NULL, NULL); dbfull()->TEST_CompactRange(1, nullptr, nullptr);
ASSERT_EQ(cfilter_count, 0); ASSERT_EQ(cfilter_count, 0);
ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_EQ(NumTableFilesAtLevel(1), 0); ASSERT_EQ(NumTableFilesAtLevel(1), 0);
@ -1500,8 +1500,8 @@ TEST(DBTest, CompactionFilterWithValueChange) {
// push all files to lower levels // push all files to lower levels
dbfull()->TEST_CompactMemTable(); dbfull()->TEST_CompactMemTable();
dbfull()->TEST_CompactRange(0, NULL, NULL); dbfull()->TEST_CompactRange(0, nullptr, nullptr);
dbfull()->TEST_CompactRange(1, NULL, NULL); dbfull()->TEST_CompactRange(1, nullptr, nullptr);
// re-write all data again // re-write all data again
for (int i = 0; i < 100001; i++) { for (int i = 0; i < 100001; i++) {
@ -1513,8 +1513,8 @@ TEST(DBTest, CompactionFilterWithValueChange) {
// push all files to lower levels. This should // push all files to lower levels. This should
// invoke the compaction filter for all 100000 keys. // invoke the compaction filter for all 100000 keys.
dbfull()->TEST_CompactMemTable(); dbfull()->TEST_CompactMemTable();
dbfull()->TEST_CompactRange(0, NULL, NULL); dbfull()->TEST_CompactRange(0, nullptr, nullptr);
dbfull()->TEST_CompactRange(1, NULL, NULL); dbfull()->TEST_CompactRange(1, nullptr, nullptr);
// verify that all keys now have the new value that // verify that all keys now have the new value that
// was set by the compaction process. // was set by the compaction process.
@ -1549,7 +1549,7 @@ TEST(DBTest, SparseMerge) {
} }
Put("C", "vc"); Put("C", "vc");
dbfull()->TEST_CompactMemTable(); dbfull()->TEST_CompactMemTable();
dbfull()->TEST_CompactRange(0, NULL, NULL); dbfull()->TEST_CompactRange(0, nullptr, nullptr);
// Make sparse update // Make sparse update
Put("A", "va2"); Put("A", "va2");
@ -1560,9 +1560,9 @@ TEST(DBTest, SparseMerge) {
// Compactions should not cause us to create a situation where // Compactions should not cause us to create a situation where
// a file overlaps too much data at the next level. // a file overlaps too much data at the next level.
ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576); ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
dbfull()->TEST_CompactRange(0, NULL, NULL); dbfull()->TEST_CompactRange(0, nullptr, nullptr);
ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576); ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
dbfull()->TEST_CompactRange(1, NULL, NULL); dbfull()->TEST_CompactRange(1, nullptr, nullptr);
ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576); ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
} }
@ -1660,7 +1660,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000)); ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));
dbfull()->TEST_CompactRange(0, NULL, NULL); dbfull()->TEST_CompactRange(0, nullptr, nullptr);
} }
} while (ChangeOptions()); } while (ChangeOptions());
} }
@ -1736,11 +1736,11 @@ TEST(DBTest, HiddenValuesAreRemoved) {
db_->ReleaseSnapshot(snapshot); db_->ReleaseSnapshot(snapshot);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
Slice x("x"); Slice x("x");
dbfull()->TEST_CompactRange(0, NULL, &x); dbfull()->TEST_CompactRange(0, nullptr, &x);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_GE(NumTableFilesAtLevel(1), 1); ASSERT_GE(NumTableFilesAtLevel(1), 1);
dbfull()->TEST_CompactRange(1, NULL, &x); dbfull()->TEST_CompactRange(1, nullptr, &x);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000)); ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
@ -1773,7 +1773,7 @@ TEST(DBTest, CompactBetweenSnapshots) {
// After a compaction, "second", "third" and "fifth" should // After a compaction, "second", "third" and "fifth" should
// be removed // be removed
FillLevels("a", "z"); FillLevels("a", "z");
dbfull()->CompactRange(NULL, NULL); dbfull()->CompactRange(nullptr, nullptr);
ASSERT_EQ("sixth", Get("foo")); ASSERT_EQ("sixth", Get("foo"));
ASSERT_EQ("fourth", Get("foo", snapshot2)); ASSERT_EQ("fourth", Get("foo", snapshot2));
ASSERT_EQ("first", Get("foo", snapshot1)); ASSERT_EQ("first", Get("foo", snapshot1));
@ -1782,7 +1782,7 @@ TEST(DBTest, CompactBetweenSnapshots) {
// after we release the snapshot1, only two values left // after we release the snapshot1, only two values left
db_->ReleaseSnapshot(snapshot1); db_->ReleaseSnapshot(snapshot1);
FillLevels("a", "z"); FillLevels("a", "z");
dbfull()->CompactRange(NULL, NULL); dbfull()->CompactRange(nullptr, nullptr);
// We have only one valid snapshot snapshot2. Since snapshot1 is // We have only one valid snapshot snapshot2. Since snapshot1 is
// not valid anymore, "first" should be removed by a compaction. // not valid anymore, "first" should be removed by a compaction.
@ -1793,7 +1793,7 @@ TEST(DBTest, CompactBetweenSnapshots) {
// after we release the snapshot2, only one value should be left // after we release the snapshot2, only one value should be left
db_->ReleaseSnapshot(snapshot2); db_->ReleaseSnapshot(snapshot2);
FillLevels("a", "z"); FillLevels("a", "z");
dbfull()->CompactRange(NULL, NULL); dbfull()->CompactRange(nullptr, nullptr);
ASSERT_EQ("sixth", Get("foo")); ASSERT_EQ("sixth", Get("foo"));
ASSERT_EQ(AllEntriesFor("foo"), "[ sixth ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ sixth ]");
@ -1819,11 +1819,11 @@ TEST(DBTest, DeletionMarkers1) {
ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
Slice z("z"); Slice z("z");
dbfull()->TEST_CompactRange(last-2, NULL, &z); dbfull()->TEST_CompactRange(last-2, nullptr, &z);
// DEL eliminated, but v1 remains because we aren't compacting that level // DEL eliminated, but v1 remains because we aren't compacting that level
// (DEL can be eliminated because v2 hides v1). // (DEL can be eliminated because v2 hides v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
dbfull()->TEST_CompactRange(last-1, NULL, NULL); dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
// Merging last-1 w/ last, so we are the base level for "foo", so // Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1). // DEL is removed. (as is v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
@ -1846,10 +1846,10 @@ TEST(DBTest, DeletionMarkers2) {
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
dbfull()->TEST_CompactRange(last-2, NULL, NULL); dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
// DEL kept: "last" file overlaps // DEL kept: "last" file overlaps
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
dbfull()->TEST_CompactRange(last-1, NULL, NULL); dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
// Merging last-1 w/ last, so we are the base level for "foo", so // Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1). // DEL is removed. (as is v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ ]"); ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
@ -1883,8 +1883,8 @@ TEST(DBTest, OverlapInLevel0) {
ASSERT_EQ("2,1,1", FilesPerLevel()); ASSERT_EQ("2,1,1", FilesPerLevel());
// Compact away the placeholder files we created initially // Compact away the placeholder files we created initially
dbfull()->TEST_CompactRange(1, NULL, NULL); dbfull()->TEST_CompactRange(1, nullptr, nullptr);
dbfull()->TEST_CompactRange(2, NULL, NULL); dbfull()->TEST_CompactRange(2, nullptr, nullptr);
ASSERT_EQ("2", FilesPerLevel()); ASSERT_EQ("2", FilesPerLevel());
// Do a memtable compaction. Before bug-fix, the compaction would // Do a memtable compaction. Before bug-fix, the compaction would
@ -1993,7 +1993,7 @@ TEST(DBTest, CustomComparator) {
Options new_options = CurrentOptions(); Options new_options = CurrentOptions();
new_options.create_if_missing = true; new_options.create_if_missing = true;
new_options.comparator = &cmp; new_options.comparator = &cmp;
new_options.filter_policy = NULL; // Cannot use bloom filters new_options.filter_policy = nullptr; // Cannot use bloom filters
new_options.write_buffer_size = 1000; // Compact more often new_options.write_buffer_size = 1000; // Compact more often
DestroyAndReopen(&new_options); DestroyAndReopen(&new_options);
ASSERT_OK(Put("[10]", "ten")); ASSERT_OK(Put("[10]", "ten"));
@ -2048,7 +2048,7 @@ TEST(DBTest, ManualCompaction) {
// Compact all // Compact all
MakeTables(1, "a", "z"); MakeTables(1, "a", "z");
ASSERT_EQ("0,1,2", FilesPerLevel()); ASSERT_EQ("0,1,2", FilesPerLevel());
db_->CompactRange(NULL, NULL); db_->CompactRange(nullptr, nullptr);
ASSERT_EQ("0,0,1", FilesPerLevel()); ASSERT_EQ("0,0,1", FilesPerLevel());
} }
@ -2057,38 +2057,38 @@ TEST(DBTest, DBOpen_Options) {
DestroyDB(dbname, Options()); DestroyDB(dbname, Options());
// Does not exist, and create_if_missing == false: error // Does not exist, and create_if_missing == false: error
DB* db = NULL; DB* db = nullptr;
Options opts; Options opts;
opts.create_if_missing = false; opts.create_if_missing = false;
Status s = DB::Open(opts, dbname, &db); Status s = DB::Open(opts, dbname, &db);
ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != NULL); ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
ASSERT_TRUE(db == NULL); ASSERT_TRUE(db == nullptr);
// Does not exist, and create_if_missing == true: OK // Does not exist, and create_if_missing == true: OK
opts.create_if_missing = true; opts.create_if_missing = true;
s = DB::Open(opts, dbname, &db); s = DB::Open(opts, dbname, &db);
ASSERT_OK(s); ASSERT_OK(s);
ASSERT_TRUE(db != NULL); ASSERT_TRUE(db != nullptr);
delete db; delete db;
db = NULL; db = nullptr;
// Does exist, and error_if_exists == true: error // Does exist, and error_if_exists == true: error
opts.create_if_missing = false; opts.create_if_missing = false;
opts.error_if_exists = true; opts.error_if_exists = true;
s = DB::Open(opts, dbname, &db); s = DB::Open(opts, dbname, &db);
ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != NULL); ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
ASSERT_TRUE(db == NULL); ASSERT_TRUE(db == nullptr);
// Does exist, and error_if_exists == false: OK // Does exist, and error_if_exists == false: OK
opts.create_if_missing = true; opts.create_if_missing = true;
opts.error_if_exists = false; opts.error_if_exists = false;
s = DB::Open(opts, dbname, &db); s = DB::Open(opts, dbname, &db);
ASSERT_OK(s); ASSERT_OK(s);
ASSERT_TRUE(db != NULL); ASSERT_TRUE(db != nullptr);
delete db; delete db;
db = NULL; db = nullptr;
} }
TEST(DBTest, DBOpen_Change_NumLevels) { TEST(DBTest, DBOpen_Change_NumLevels) {
@ -2096,22 +2096,22 @@ TEST(DBTest, DBOpen_Change_NumLevels) {
DestroyDB(dbname, Options()); DestroyDB(dbname, Options());
Options opts; Options opts;
Status s; Status s;
DB* db = NULL; DB* db = nullptr;
opts.create_if_missing = true; opts.create_if_missing = true;
s = DB::Open(opts, dbname, &db); s = DB::Open(opts, dbname, &db);
ASSERT_OK(s); ASSERT_OK(s);
ASSERT_TRUE(db != NULL); ASSERT_TRUE(db != nullptr);
db->Put(WriteOptions(), "a", "123"); db->Put(WriteOptions(), "a", "123");
db->Put(WriteOptions(), "b", "234"); db->Put(WriteOptions(), "b", "234");
db->CompactRange(NULL, NULL); db->CompactRange(nullptr, nullptr);
delete db; delete db;
db = NULL; db = nullptr;
opts.create_if_missing = false; opts.create_if_missing = false;
opts.num_levels = 2; opts.num_levels = 2;
s = DB::Open(opts, dbname, &db); s = DB::Open(opts, dbname, &db);
ASSERT_TRUE(strstr(s.ToString().c_str(), "Corruption") != NULL); ASSERT_TRUE(strstr(s.ToString().c_str(), "Corruption") != nullptr);
ASSERT_TRUE(db == NULL); ASSERT_TRUE(db == nullptr);
} }
TEST(DBTest, DestroyDBMetaDatabase) { TEST(DBTest, DestroyDBMetaDatabase) {
@ -2127,16 +2127,16 @@ TEST(DBTest, DestroyDBMetaDatabase) {
// Setup databases // Setup databases
Options opts; Options opts;
opts.create_if_missing = true; opts.create_if_missing = true;
DB* db = NULL; DB* db = nullptr;
ASSERT_OK(DB::Open(opts, dbname, &db)); ASSERT_OK(DB::Open(opts, dbname, &db));
delete db; delete db;
db = NULL; db = nullptr;
ASSERT_OK(DB::Open(opts, metadbname, &db)); ASSERT_OK(DB::Open(opts, metadbname, &db));
delete db; delete db;
db = NULL; db = nullptr;
ASSERT_OK(DB::Open(opts, metametadbname, &db)); ASSERT_OK(DB::Open(opts, metametadbname, &db));
delete db; delete db;
db = NULL; db = nullptr;
// Delete databases // Delete databases
DestroyDB(dbname, Options()); DestroyDB(dbname, Options());
@ -2163,10 +2163,10 @@ TEST(DBTest, NoSpace) {
env_->sleep_counter_.Reset(); env_->sleep_counter_.Reset();
for (int i = 0; i < 5; i++) { for (int i = 0; i < 5; i++) {
for (int level = 0; level < dbfull()->NumberLevels()-1; level++) { for (int level = 0; level < dbfull()->NumberLevels()-1; level++) {
dbfull()->TEST_CompactRange(level, NULL, NULL); dbfull()->TEST_CompactRange(level, nullptr, nullptr);
} }
} }
env_->no_space_.Release_Store(NULL); env_->no_space_.Release_Store(nullptr);
ASSERT_LT(CountFiles(), num_files + 3); ASSERT_LT(CountFiles(), num_files + 3);
// Check that compaction attempts slept after errors // Check that compaction attempts slept after errors
@ -2190,7 +2190,7 @@ TEST(DBTest, NonWritableFileSystem)
} }
} }
ASSERT_GT(errors, 0); ASSERT_GT(errors, 0);
env_->non_writable_.Release_Store(NULL); env_->non_writable_.Release_Store(nullptr);
} }
TEST(DBTest, ManifestWriteError) { TEST(DBTest, ManifestWriteError) {
@ -2224,11 +2224,11 @@ TEST(DBTest, ManifestWriteError) {
// Merging compaction (will fail) // Merging compaction (will fail)
error_type->Release_Store(env_); error_type->Release_Store(env_);
dbfull()->TEST_CompactRange(last, NULL, NULL); // Should fail dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail
ASSERT_EQ("bar", Get("foo")); ASSERT_EQ("bar", Get("foo"));
// Recovery: should not lose data // Recovery: should not lose data
error_type->Release_Store(NULL); error_type->Release_Store(nullptr);
Reopen(&options); Reopen(&options);
ASSERT_EQ("bar", Get("foo")); ASSERT_EQ("bar", Get("foo"));
} }
@ -2286,7 +2286,7 @@ TEST(DBTest, BloomFilter) {
fprintf(stderr, "%d missing => %d reads\n", N, reads); fprintf(stderr, "%d missing => %d reads\n", N, reads);
ASSERT_LE(reads, 3*N/100); ASSERT_LE(reads, 3*N/100);
env_->delay_sstable_sync_.Release_Store(NULL); env_->delay_sstable_sync_.Release_Store(nullptr);
Close(); Close();
delete options.filter_policy; delete options.filter_policy;
} }
@ -2554,7 +2554,7 @@ TEST(DBTest, ReadCompaction) {
options.max_open_files = 20; // only 10 file in file-cache options.max_open_files = 20; // only 10 file in file-cache
options.target_file_size_base = 512; options.target_file_size_base = 512;
options.write_buffer_size = 64 * 1024; options.write_buffer_size = 64 * 1024;
options.filter_policy = NULL; options.filter_policy = nullptr;
options.block_size = 4096; options.block_size = 4096;
options.block_cache = NewLRUCache(0); // Prevent cache hits options.block_cache = NewLRUCache(0); // Prevent cache hits
@ -2569,8 +2569,8 @@ TEST(DBTest, ReadCompaction) {
// clear level 0 and 1 if necessary. // clear level 0 and 1 if necessary.
dbfull()->TEST_CompactMemTable(); dbfull()->TEST_CompactMemTable();
dbfull()->TEST_CompactRange(0, NULL, NULL); dbfull()->TEST_CompactRange(0, nullptr, nullptr);
dbfull()->TEST_CompactRange(1, NULL, NULL); dbfull()->TEST_CompactRange(1, nullptr, nullptr);
ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_EQ(NumTableFilesAtLevel(1), 0); ASSERT_EQ(NumTableFilesAtLevel(1), 0);
@ -2636,7 +2636,7 @@ static void MTThreadBody(void* arg) {
Random rnd(1000 + id); Random rnd(1000 + id);
std::string value; std::string value;
char valbuf[1500]; char valbuf[1500];
while (t->state->stop.Acquire_Load() == NULL) { while (t->state->stop.Acquire_Load() == nullptr) {
t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter)); t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));
int key = rnd.Uniform(kNumKeys); int key = rnd.Uniform(kNumKeys);
@ -2699,7 +2699,7 @@ TEST(DBTest, MultiThreaded) {
// Stop the threads and wait for them to finish // Stop the threads and wait for them to finish
mt.stop.Release_Store(&mt); mt.stop.Release_Store(&mt);
for (int id = 0; id < kNumThreads; id++) { for (int id = 0; id < kNumThreads; id++) {
while (mt.thread_done[id].Acquire_Load() == NULL) { while (mt.thread_done[id].Acquire_Load() == nullptr) {
env_->SleepForMicroseconds(100000); env_->SleepForMicroseconds(100000);
} }
} }
@ -2730,7 +2730,7 @@ class ModelDB: public DB {
return Status::NotFound(key); return Status::NotFound(key);
} }
virtual Iterator* NewIterator(const ReadOptions& options) { virtual Iterator* NewIterator(const ReadOptions& options) {
if (options.snapshot == NULL) { if (options.snapshot == nullptr) {
KVMap* saved = new KVMap; KVMap* saved = new KVMap;
*saved = map_; *saved = map_;
return new ModelIter(saved, true); return new ModelIter(saved, true);
@ -2909,8 +2909,8 @@ TEST(DBTest, Randomized) {
do { do {
ModelDB model(CurrentOptions()); ModelDB model(CurrentOptions());
const int N = 10000; const int N = 10000;
const Snapshot* model_snap = NULL; const Snapshot* model_snap = nullptr;
const Snapshot* db_snap = NULL; const Snapshot* db_snap = nullptr;
std::string k, v; std::string k, v;
for (int step = 0; step < N; step++) { for (int step = 0; step < N; step++) {
if (step % 100 == 0) { if (step % 100 == 0) {
@ -2955,23 +2955,23 @@ TEST(DBTest, Randomized) {
} }
if ((step % 100) == 0) { if ((step % 100) == 0) {
ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL)); ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap)); ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
// Save a snapshot from each DB this time that we'll use next // Save a snapshot from each DB this time that we'll use next
// time we compare things, to make sure the current state is // time we compare things, to make sure the current state is
// preserved with the snapshot // preserved with the snapshot
if (model_snap != NULL) model.ReleaseSnapshot(model_snap); if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
if (db_snap != NULL) db_->ReleaseSnapshot(db_snap); if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
Reopen(); Reopen();
ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL)); ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
model_snap = model.GetSnapshot(); model_snap = model.GetSnapshot();
db_snap = db_->GetSnapshot(); db_snap = db_->GetSnapshot();
} }
} }
if (model_snap != NULL) model.ReleaseSnapshot(model_snap); if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
if (db_snap != NULL) db_->ReleaseSnapshot(db_snap); if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
} while (ChangeOptions()); } while (ChangeOptions());
} }
@ -2985,15 +2985,15 @@ void BM_LogAndApply(int iters, int num_base_files) {
std::string dbname = test::TmpDir() + "/leveldb_test_benchmark"; std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
DestroyDB(dbname, Options()); DestroyDB(dbname, Options());
DB* db = NULL; DB* db = nullptr;
Options opts; Options opts;
opts.create_if_missing = true; opts.create_if_missing = true;
Status s = DB::Open(opts, dbname, &db); Status s = DB::Open(opts, dbname, &db);
ASSERT_OK(s); ASSERT_OK(s);
ASSERT_TRUE(db != NULL); ASSERT_TRUE(db != nullptr);
delete db; delete db;
db = NULL; db = nullptr;
Env* env = Env::Default(); Env* env = Env::Default();
@ -3002,7 +3002,7 @@ void BM_LogAndApply(int iters, int num_base_files) {
InternalKeyComparator cmp(BytewiseComparator()); InternalKeyComparator cmp(BytewiseComparator());
Options options; Options options;
VersionSet vset(dbname, &options, NULL, &cmp); VersionSet vset(dbname, &options, nullptr, &cmp);
ASSERT_OK(vset.Recover()); ASSERT_OK(vset.Recover());
VersionEdit vbase(vset.NumberLevels()); VersionEdit vbase(vset.NumberLevels());
uint64_t fnum = 1; uint64_t fnum = 1;

@ -170,7 +170,7 @@ void Reader::ReportCorruption(size_t bytes, const char* reason) {
} }
void Reader::ReportDrop(size_t bytes, const Status& reason) { void Reader::ReportDrop(size_t bytes, const Status& reason) {
if (reporter_ != NULL && if (reporter_ != nullptr &&
end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) { end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) {
reporter_->Corruption(bytes, reason); reporter_->Corruption(bytes, reason);
} }

@ -34,7 +34,7 @@ class Reader {
// Create a reader that will return log records from "*file". // Create a reader that will return log records from "*file".
// "*file" must remain live while this Reader is in use. // "*file" must remain live while this Reader is in use.
// //
// If "reporter" is non-NULL, it is notified whenever some data is // If "reporter" is non-nullptr, it is notified whenever some data is
// dropped due to a detected corruption. "*reporter" must remain // dropped due to a detected corruption. "*reporter" must remain
// live while this Reader is in use. // live while this Reader is in use.
// //

@ -45,7 +45,7 @@ int MemTableList::size() {
// not yet started. // not yet started.
bool MemTableList::IsFlushPending() { bool MemTableList::IsFlushPending() {
if (num_flush_not_started_ > 0) { if (num_flush_not_started_ > 0) {
assert(imm_flush_needed.NoBarrier_Load() != NULL); assert(imm_flush_needed.NoBarrier_Load() != nullptr);
return true; return true;
} }
return false; return false;
@ -61,13 +61,13 @@ MemTable* MemTableList::PickMemtableToFlush() {
assert(!m->flush_completed_); assert(!m->flush_completed_);
num_flush_not_started_--; num_flush_not_started_--;
if (num_flush_not_started_ == 0) { if (num_flush_not_started_ == 0) {
imm_flush_needed.Release_Store(NULL); imm_flush_needed.Release_Store(nullptr);
} }
m->flush_in_progress_ = true; // flushing will start very soon m->flush_in_progress_ = true; // flushing will start very soon
return m; return m;
} }
} }
return NULL; return nullptr;
} }
// Record a successful flush in the manifest file // Record a successful flush in the manifest file

@ -30,11 +30,11 @@ class MemTableList {
// A list of memtables. // A list of memtables.
MemTableList() : size_(0), num_flush_not_started_(0), MemTableList() : size_(0), num_flush_not_started_(0),
commit_in_progress_(false) { commit_in_progress_(false) {
imm_flush_needed.Release_Store(NULL); imm_flush_needed.Release_Store(nullptr);
} }
~MemTableList() {}; ~MemTableList() {};
// so that backgrund threads can detect non-NULL pointer to // so that backgrund threads can detect non-nullptr pointer to
// determine whether this is anything more to start flushing. // determine whether this is anything more to start flushing.
port::AtomicPointer imm_flush_needed; port::AtomicPointer imm_flush_needed;

@ -219,7 +219,7 @@ class Repairer {
status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta); status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
delete iter; delete iter;
mem->Unref(); mem->Unref();
mem = NULL; mem = nullptr;
if (status.ok()) { if (status.ok()) {
if (meta.file_size > 0) { if (meta.file_size > 0) {
table_numbers_.push_back(meta.number); table_numbers_.push_back(meta.number);
@ -353,14 +353,14 @@ class Repairer {
// dir/lost/foo // dir/lost/foo
const char* slash = strrchr(fname.c_str(), '/'); const char* slash = strrchr(fname.c_str(), '/');
std::string new_dir; std::string new_dir;
if (slash != NULL) { if (slash != nullptr) {
new_dir.assign(fname.data(), slash - fname.data()); new_dir.assign(fname.data(), slash - fname.data());
} }
new_dir.append("/lost"); new_dir.append("/lost");
env_->CreateDir(new_dir); // Ignore error env_->CreateDir(new_dir); // Ignore error
std::string new_file = new_dir; std::string new_file = new_dir;
new_file.append("/"); new_file.append("/");
new_file.append((slash == NULL) ? fname.c_str() : slash + 1); new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
Status s = env_->RenameFile(fname, new_file); Status s = env_->RenameFile(fname, new_file);
Log(options_.info_log, "Archiving %s: %s\n", Log(options_.info_log, "Archiving %s: %s\n",
fname.c_str(), s.ToString().c_str()); fname.c_str(), s.ToString().c_str());

@ -127,9 +127,9 @@ class SkipList {
bool KeyIsAfterNode(const Key& key, Node* n) const; bool KeyIsAfterNode(const Key& key, Node* n) const;
// Return the earliest node that comes at or after key. // Return the earliest node that comes at or after key.
// Return NULL if there is no such node. // Return nullptr if there is no such node.
// //
// If prev is non-NULL, fills prev[level] with pointer to previous // If prev is non-nullptr, fills prev[level] with pointer to previous
// node at "level" for every level in [0..max_height_-1]. // node at "level" for every level in [0..max_height_-1].
Node* FindGreaterOrEqual(const Key& key, Node** prev) const; Node* FindGreaterOrEqual(const Key& key, Node** prev) const;
@ -194,12 +194,12 @@ SkipList<Key,Comparator>::NewNode(const Key& key, int height) {
template<typename Key, class Comparator> template<typename Key, class Comparator>
inline SkipList<Key,Comparator>::Iterator::Iterator(const SkipList* list) { inline SkipList<Key,Comparator>::Iterator::Iterator(const SkipList* list) {
list_ = list; list_ = list;
node_ = NULL; node_ = nullptr;
} }
template<typename Key, class Comparator> template<typename Key, class Comparator>
inline bool SkipList<Key,Comparator>::Iterator::Valid() const { inline bool SkipList<Key,Comparator>::Iterator::Valid() const {
return node_ != NULL; return node_ != nullptr;
} }
template<typename Key, class Comparator> template<typename Key, class Comparator>
@ -221,13 +221,13 @@ inline void SkipList<Key,Comparator>::Iterator::Prev() {
assert(Valid()); assert(Valid());
node_ = list_->FindLessThan(node_->key); node_ = list_->FindLessThan(node_->key);
if (node_ == list_->head_) { if (node_ == list_->head_) {
node_ = NULL; node_ = nullptr;
} }
} }
template<typename Key, class Comparator> template<typename Key, class Comparator>
inline void SkipList<Key,Comparator>::Iterator::Seek(const Key& target) { inline void SkipList<Key,Comparator>::Iterator::Seek(const Key& target) {
node_ = list_->FindGreaterOrEqual(target, NULL); node_ = list_->FindGreaterOrEqual(target, nullptr);
} }
template<typename Key, class Comparator> template<typename Key, class Comparator>
@ -239,7 +239,7 @@ template<typename Key, class Comparator>
inline void SkipList<Key,Comparator>::Iterator::SeekToLast() { inline void SkipList<Key,Comparator>::Iterator::SeekToLast() {
node_ = list_->FindLast(); node_ = list_->FindLast();
if (node_ == list_->head_) { if (node_ == list_->head_) {
node_ = NULL; node_ = nullptr;
} }
} }
@ -258,8 +258,8 @@ int SkipList<Key,Comparator>::RandomHeight() {
template<typename Key, class Comparator> template<typename Key, class Comparator>
bool SkipList<Key,Comparator>::KeyIsAfterNode(const Key& key, Node* n) const { bool SkipList<Key,Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
// NULL n is considered infinite // nullptr n is considered infinite
return (n != NULL) && (compare_(n->key, key) < 0); return (n != nullptr) && (compare_(n->key, key) < 0);
} }
template<typename Key, class Comparator> template<typename Key, class Comparator>
@ -282,7 +282,7 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr
// Keep searching in this list // Keep searching in this list
x = next; x = next;
} else { } else {
if (prev != NULL) prev[level] = x; if (prev != nullptr) prev[level] = x;
if (level == 0) { if (level == 0) {
return next; return next;
} else { } else {
@ -301,7 +301,7 @@ SkipList<Key,Comparator>::FindLessThan(const Key& key) const {
while (true) { while (true) {
assert(x == head_ || compare_(x->key, key) < 0); assert(x == head_ || compare_(x->key, key) < 0);
Node* next = x->Next(level); Node* next = x->Next(level);
if (next == NULL || compare_(next->key, key) >= 0) { if (next == nullptr || compare_(next->key, key) >= 0) {
if (level == 0) { if (level == 0) {
return x; return x;
} else { } else {
@ -321,7 +321,7 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast()
int level = GetMaxHeight() - 1; int level = GetMaxHeight() - 1;
while (true) { while (true) {
Node* next = x->Next(level); Node* next = x->Next(level);
if (next == NULL) { if (next == nullptr) {
if (level == 0) { if (level == 0) {
return x; return x;
} else { } else {
@ -342,7 +342,7 @@ SkipList<Key,Comparator>::SkipList(Comparator cmp, Arena* arena)
max_height_(reinterpret_cast<void*>(1)), max_height_(reinterpret_cast<void*>(1)),
rnd_(0xdeadbeef) { rnd_(0xdeadbeef) {
for (int i = 0; i < kMaxHeight; i++) { for (int i = 0; i < kMaxHeight; i++) {
head_->SetNext(i, NULL); head_->SetNext(i, nullptr);
prev_[i] = head_; prev_[i] = head_;
} }
} }
@ -354,7 +354,7 @@ void SkipList<Key,Comparator>::Insert(const Key& key) {
Node* x = FindGreaterOrEqual(key, prev_); Node* x = FindGreaterOrEqual(key, prev_);
// Our data structure does not allow duplicate insertion // Our data structure does not allow duplicate insertion
assert(x == NULL || !Equal(key, x->key)); assert(x == nullptr || !Equal(key, x->key));
int height = RandomHeight(); int height = RandomHeight();
if (height > GetMaxHeight()) { if (height > GetMaxHeight()) {
@ -366,9 +366,9 @@ void SkipList<Key,Comparator>::Insert(const Key& key) {
// It is ok to mutate max_height_ without any synchronization // It is ok to mutate max_height_ without any synchronization
// with concurrent readers. A concurrent reader that observes // with concurrent readers. A concurrent reader that observes
// the new value of max_height_ will see either the old value of // the new value of max_height_ will see either the old value of
// new level pointers from head_ (NULL), or a new value set in // new level pointers from head_ (nullptr), or a new value set in
// the loop below. In the former case the reader will // the loop below. In the former case the reader will
// immediately drop to the next level since NULL sorts after all // immediately drop to the next level since nullptr sorts after all
// keys. In the latter case the reader will use the new node. // keys. In the latter case the reader will use the new node.
max_height_.NoBarrier_Store(reinterpret_cast<void*>(height)); max_height_.NoBarrier_Store(reinterpret_cast<void*>(height));
} }
@ -385,8 +385,8 @@ void SkipList<Key,Comparator>::Insert(const Key& key) {
template<typename Key, class Comparator> template<typename Key, class Comparator>
bool SkipList<Key,Comparator>::Contains(const Key& key) const { bool SkipList<Key,Comparator>::Contains(const Key& key) const {
Node* x = FindGreaterOrEqual(key, NULL); Node* x = FindGreaterOrEqual(key, nullptr);
if (x != NULL && Equal(key, x->key)) { if (x != nullptr && Equal(key, x->key)) {
return true; return true;
} else { } else {
return false; return false;

@ -308,7 +308,7 @@ class TestState {
explicit TestState(int s) explicit TestState(int s)
: seed_(s), : seed_(s),
quit_flag_(NULL), quit_flag_(nullptr),
state_(STARTING), state_(STARTING),
state_cv_(&mu_) {} state_cv_(&mu_) {}
@ -360,7 +360,7 @@ static void RunConcurrent(int run) {
for (int i = 0; i < kSize; i++) { for (int i = 0; i < kSize; i++) {
state.t_.WriteStep(&rnd); state.t_.WriteStep(&rnd);
} }
state.quit_flag_.Release_Store(&state); // Any non-NULL arg will do state.quit_flag_.Release_Store(&state); // Any non-nullptr arg will do
state.Wait(TestState::DONE); state.Wait(TestState::DONE);
} }
} }

@ -25,15 +25,15 @@ class TableCache {
// Return an iterator for the specified file number (the corresponding // Return an iterator for the specified file number (the corresponding
// file length must be exactly "file_size" bytes). If "tableptr" is // file length must be exactly "file_size" bytes). If "tableptr" is
// non-NULL, also sets "*tableptr" to point to the Table object // non-nullptr, also sets "*tableptr" to point to the Table object
// underlying the returned iterator, or NULL if no Table object underlies // underlying the returned iterator, or nullptr if no Table object underlies
// the returned iterator. The returned "*tableptr" object is owned by // the returned iterator. The returned "*tableptr" object is owned by
// the cache and should not be deleted, and is valid for as long as the // the cache and should not be deleted, and is valid for as long as the
// returned iterator is live. // returned iterator is live.
Iterator* NewIterator(const ReadOptions& options, Iterator* NewIterator(const ReadOptions& options,
uint64_t file_number, uint64_t file_number,
uint64_t file_size, uint64_t file_size,
Table** tableptr = NULL); Table** tableptr = nullptr);
// If a seek to internal key "k" in specified file finds an entry, // If a seek to internal key "k" in specified file finds an entry,
// call (*handle_result)(arg, found_key, found_value). // call (*handle_result)(arg, found_key, found_value).
@ -55,7 +55,7 @@ class TableCache {
std::shared_ptr<Cache> cache_; std::shared_ptr<Cache> cache_;
Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**, Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**,
bool* tableIO = NULL); bool* tableIO = nullptr);
}; };
} // namespace leveldb } // namespace leveldb

@ -15,7 +15,7 @@ TransactionLogIteratorImpl::TransactionLogIteratorImpl(
started_(false), started_(false),
isValid_(true), isValid_(true),
currentFileIndex_(0) { currentFileIndex_(0) {
assert(files_ != NULL); assert(files_ != nullptr);
} }
LogReporter LogReporter

@ -112,7 +112,7 @@ bool VersionEdit::GetLevel(Slice* input, int* level, const char** msg) {
Status VersionEdit::DecodeFrom(const Slice& src) { Status VersionEdit::DecodeFrom(const Slice& src) {
Clear(); Clear();
Slice input = src; Slice input = src;
const char* msg = NULL; const char* msg = nullptr;
uint32_t tag; uint32_t tag;
// Temporary storage for parsing // Temporary storage for parsing
@ -122,7 +122,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
Slice str; Slice str;
InternalKey key; InternalKey key;
while (msg == NULL && GetVarint32(&input, &tag)) { while (msg == nullptr && GetVarint32(&input, &tag)) {
switch (tag) { switch (tag) {
case kComparator: case kComparator:
if (GetLengthPrefixedSlice(&input, &str)) { if (GetLengthPrefixedSlice(&input, &str)) {
@ -207,12 +207,12 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
} }
} }
if (msg == NULL && !input.empty()) { if (msg == nullptr && !input.empty()) {
msg = "invalid tag"; msg = "invalid tag";
} }
Status result; Status result;
if (msg != NULL) { if (msg != nullptr) {
result = Status::Corruption("VersionEdit", msg); result = Status::Corruption("VersionEdit", msg);
} }
return result; return result;

@ -72,15 +72,15 @@ int FindFile(const InternalKeyComparator& icmp,
static bool AfterFile(const Comparator* ucmp, static bool AfterFile(const Comparator* ucmp,
const Slice* user_key, const FileMetaData* f) { const Slice* user_key, const FileMetaData* f) {
// NULL user_key occurs before all keys and is therefore never after *f // nullptr user_key occurs before all keys and is therefore never after *f
return (user_key != NULL && return (user_key != nullptr &&
ucmp->Compare(*user_key, f->largest.user_key()) > 0); ucmp->Compare(*user_key, f->largest.user_key()) > 0);
} }
static bool BeforeFile(const Comparator* ucmp, static bool BeforeFile(const Comparator* ucmp,
const Slice* user_key, const FileMetaData* f) { const Slice* user_key, const FileMetaData* f) {
// NULL user_key occurs after all keys and is therefore never before *f // nullptr user_key occurs after all keys and is therefore never before *f
return (user_key != NULL && return (user_key != nullptr &&
ucmp->Compare(*user_key, f->smallest.user_key()) < 0); ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
} }
@ -107,7 +107,7 @@ bool SomeFileOverlapsRange(
// Binary search over file list // Binary search over file list
uint32_t index = 0; uint32_t index = 0;
if (smallest_user_key != NULL) { if (smallest_user_key != nullptr) {
// Find the earliest possible internal key for smallest_user_key // Find the earliest possible internal key for smallest_user_key
InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek); InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
index = FindFile(icmp, files, small.Encode()); index = FindFile(icmp, files, small.Encode());
@ -256,7 +256,7 @@ Version::Version(VersionSet* vset, uint64_t version_number)
: vset_(vset), next_(this), prev_(this), refs_(0), : vset_(vset), next_(this), prev_(this), refs_(0),
files_by_size_(vset->NumberLevels()), files_by_size_(vset->NumberLevels()),
next_file_to_compact_by_size_(vset->NumberLevels()), next_file_to_compact_by_size_(vset->NumberLevels()),
file_to_compact_(NULL), file_to_compact_(nullptr),
file_to_compact_level_(-1), file_to_compact_level_(-1),
compaction_score_(vset->NumberLevels()), compaction_score_(vset->NumberLevels()),
compaction_level_(vset->NumberLevels()), compaction_level_(vset->NumberLevels()),
@ -274,9 +274,9 @@ Status Version::Get(const ReadOptions& options,
const Comparator* ucmp = vset_->icmp_.user_comparator(); const Comparator* ucmp = vset_->icmp_.user_comparator();
Status s; Status s;
stats->seek_file = NULL; stats->seek_file = nullptr;
stats->seek_file_level = -1; stats->seek_file_level = -1;
FileMetaData* last_file_read = NULL; FileMetaData* last_file_read = nullptr;
int last_file_read_level = -1; int last_file_read_level = -1;
// We can search level-by-level since entries never hop across // We can search level-by-level since entries never hop across
@ -310,13 +310,13 @@ Status Version::Get(const ReadOptions& options,
// Binary search to find earliest index whose largest key >= ikey. // Binary search to find earliest index whose largest key >= ikey.
uint32_t index = FindFile(vset_->icmp_, files_[level], ikey); uint32_t index = FindFile(vset_->icmp_, files_[level], ikey);
if (index >= num_files) { if (index >= num_files) {
files = NULL; files = nullptr;
num_files = 0; num_files = 0;
} else { } else {
tmp2 = files[index]; tmp2 = files[index];
if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) { if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) {
// All of "tmp2" is past any data for user_key // All of "tmp2" is past any data for user_key
files = NULL; files = nullptr;
num_files = 0; num_files = 0;
} else { } else {
files = &tmp2; files = &tmp2;
@ -341,7 +341,7 @@ Status Version::Get(const ReadOptions& options,
return s; return s;
} }
if (last_file_read != NULL && stats->seek_file == NULL) { if (last_file_read != nullptr && stats->seek_file == nullptr) {
// We have had more than one seek for this read. Charge the 1st file. // We have had more than one seek for this read. Charge the 1st file.
stats->seek_file = last_file_read; stats->seek_file = last_file_read;
stats->seek_file_level = last_file_read_level; stats->seek_file_level = last_file_read_level;
@ -379,9 +379,9 @@ Status Version::Get(const ReadOptions& options,
bool Version::UpdateStats(const GetStats& stats) { bool Version::UpdateStats(const GetStats& stats) {
FileMetaData* f = stats.seek_file; FileMetaData* f = stats.seek_file;
if (f != NULL) { if (f != nullptr) {
f->allowed_seeks--; f->allowed_seeks--;
if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) { if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) {
file_to_compact_ = f; file_to_compact_ = f;
file_to_compact_level_ = stats.seek_file_level; file_to_compact_level_ = stats.seek_file_level;
return true; return true;
@ -454,17 +454,17 @@ void Version::GetOverlappingInputs(
int* file_index) { int* file_index) {
inputs->clear(); inputs->clear();
Slice user_begin, user_end; Slice user_begin, user_end;
if (begin != NULL) { if (begin != nullptr) {
user_begin = begin->user_key(); user_begin = begin->user_key();
} }
if (end != NULL) { if (end != nullptr) {
user_end = end->user_key(); user_end = end->user_key();
} }
if (file_index) { if (file_index) {
*file_index = -1; *file_index = -1;
} }
const Comparator* user_cmp = vset_->icmp_.user_comparator(); const Comparator* user_cmp = vset_->icmp_.user_comparator();
if (begin != NULL && end != NULL && level > 0) { if (begin != nullptr && end != nullptr && level > 0) {
GetOverlappingInputsBinarySearch(level, user_begin, user_end, inputs, GetOverlappingInputsBinarySearch(level, user_begin, user_end, inputs,
hint_index, file_index); hint_index, file_index);
return; return;
@ -473,20 +473,21 @@ void Version::GetOverlappingInputs(
FileMetaData* f = files_[level][i++]; FileMetaData* f = files_[level][i++];
const Slice file_start = f->smallest.user_key(); const Slice file_start = f->smallest.user_key();
const Slice file_limit = f->largest.user_key(); const Slice file_limit = f->largest.user_key();
if (begin != NULL && user_cmp->Compare(file_limit, user_begin) < 0) { if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
// "f" is completely before specified range; skip it // "f" is completely before specified range; skip it
} else if (end != NULL && user_cmp->Compare(file_start, user_end) > 0) { } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
// "f" is completely after specified range; skip it // "f" is completely after specified range; skip it
} else { } else {
inputs->push_back(f); inputs->push_back(f);
if (level == 0) { if (level == 0) {
// Level-0 files may overlap each other. So check if the newly // Level-0 files may overlap each other. So check if the newly
// added file has expanded the range. If so, restart search. // added file has expanded the range. If so, restart search.
if (begin != NULL && user_cmp->Compare(file_start, user_begin) < 0) { if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
user_begin = file_start; user_begin = file_start;
inputs->clear(); inputs->clear();
i = 0; i = 0;
} else if (end != NULL && user_cmp->Compare(file_limit, user_end) > 0) { } else if (end != nullptr
&& user_cmp->Compare(file_limit, user_end) > 0) {
user_end = file_limit; user_end = file_limit;
inputs->clear(); inputs->clear();
i = 0; i = 0;
@ -897,7 +898,7 @@ VersionSet::VersionSet(const std::string& dbname,
prev_log_number_(0), prev_log_number_(0),
num_levels_(options_->num_levels), num_levels_(options_->num_levels),
dummy_versions_(this), dummy_versions_(this),
current_(NULL), current_(nullptr),
compactions_in_progress_(options_->num_levels), compactions_in_progress_(options_->num_levels),
current_version_number_(0), current_version_number_(0),
last_observed_manifest_size_(0) { last_observed_manifest_size_(0) {
@ -934,7 +935,7 @@ void VersionSet::AppendVersion(Version* v) {
// Make "v" current // Make "v" current
assert(v->refs_ == 0); assert(v->refs_ == 0);
assert(v != current_); assert(v != current_);
if (current_ != NULL) { if (current_ != nullptr) {
assert(current_->refs_ > 0); assert(current_->refs_ > 0);
current_->Unref(); current_->Unref();
} }
@ -1562,7 +1563,7 @@ bool VersionSet::ManifestContains(const std::string& record) const {
Log(options_->info_log, "ManifestContains: %s\n", s.ToString().c_str()); Log(options_->info_log, "ManifestContains: %s\n", s.ToString().c_str());
return false; return false;
} }
log::Reader reader(std::move(file), NULL, true/*checksum*/, 0); log::Reader reader(std::move(file), nullptr, true/*checksum*/, 0);
Slice r; Slice r;
std::string scratch; std::string scratch;
bool result = false; bool result = false;
@ -1599,7 +1600,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
Table* tableptr; Table* tableptr;
Iterator* iter = table_cache_->NewIterator( Iterator* iter = table_cache_->NewIterator(
ReadOptions(), files[i]->number, files[i]->file_size, &tableptr); ReadOptions(), files[i]->number, files[i]->file_size, &tableptr);
if (tableptr != NULL) { if (tableptr != nullptr) {
result += tableptr->ApproximateOffsetOf(ikey.Encode()); result += tableptr->ApproximateOffsetOf(ikey.Encode());
} }
delete iter; delete iter;
@ -1827,14 +1828,14 @@ uint64_t VersionSet::SizeBeingCompacted(int level) {
} }
Compaction* VersionSet::PickCompactionBySize(int level, double score) { Compaction* VersionSet::PickCompactionBySize(int level, double score) {
Compaction* c = NULL; Compaction* c = nullptr;
// level 0 files are overlapping. So we cannot pick more // level 0 files are overlapping. So we cannot pick more
// than one concurrent compactions at this level. This // than one concurrent compactions at this level. This
// could be made better by looking at key-ranges that are // could be made better by looking at key-ranges that are
// being compacted at level 0. // being compacted at level 0.
if (level == 0 && compactions_in_progress_[level].size() == 1) { if (level == 0 && compactions_in_progress_[level].size() == 1) {
return NULL; return nullptr;
} }
assert(level >= 0); assert(level >= 0);
@ -1890,7 +1891,7 @@ Compaction* VersionSet::PickCompactionBySize(int level, double score) {
if (c->inputs_[0].empty()) { if (c->inputs_[0].empty()) {
delete c; delete c;
c = NULL; c = nullptr;
} }
// store where to start the iteration in the next call to PickCompaction // store where to start the iteration in the next call to PickCompaction
@ -1900,7 +1901,7 @@ Compaction* VersionSet::PickCompactionBySize(int level, double score) {
} }
Compaction* VersionSet::PickCompaction() { Compaction* VersionSet::PickCompaction() {
Compaction* c = NULL; Compaction* c = nullptr;
int level = -1; int level = -1;
// compute the compactions needed. It is better to do it here // compute the compactions needed. It is better to do it here
@ -1917,14 +1918,14 @@ Compaction* VersionSet::PickCompaction() {
level = current_->compaction_level_[i]; level = current_->compaction_level_[i];
if ((current_->compaction_score_[i] >= 1)) { if ((current_->compaction_score_[i] >= 1)) {
c = PickCompactionBySize(level, current_->compaction_score_[i]); c = PickCompactionBySize(level, current_->compaction_score_[i]);
if (c != NULL) { if (c != nullptr) {
break; break;
} }
} }
} }
// Find compactions needed by seeks // Find compactions needed by seeks
if (c == NULL && (current_->file_to_compact_ != NULL)) { if (c == nullptr && (current_->file_to_compact_ != nullptr)) {
level = current_->file_to_compact_level_; level = current_->file_to_compact_level_;
// Only allow one level 0 compaction at a time. // Only allow one level 0 compaction at a time.
@ -1935,8 +1936,8 @@ Compaction* VersionSet::PickCompaction() {
} }
} }
if (c == NULL) { if (c == nullptr) {
return NULL; return nullptr;
} }
c->input_version_ = current_; c->input_version_ = current_;
@ -1957,7 +1958,7 @@ Compaction* VersionSet::PickCompaction() {
if (ParentRangeInCompaction(&smallest, &largest, if (ParentRangeInCompaction(&smallest, &largest,
level, &c->parent_index_)) { level, &c->parent_index_)) {
delete c; delete c;
return NULL; return nullptr;
} }
assert(!c->inputs_[0].empty()); assert(!c->inputs_[0].empty());
} }
@ -2010,7 +2011,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
if (!c->inputs_[1].empty()) { if (!c->inputs_[1].empty()) {
std::vector<FileMetaData*> expanded0; std::vector<FileMetaData*> expanded0;
current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0, current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0,
c->base_index_, NULL); c->base_index_, nullptr);
const int64_t inputs0_size = TotalFileSize(c->inputs_[0]); const int64_t inputs0_size = TotalFileSize(c->inputs_[0]);
const int64_t inputs1_size = TotalFileSize(c->inputs_[1]); const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
const int64_t expanded0_size = TotalFileSize(expanded0); const int64_t expanded0_size = TotalFileSize(expanded0);
@ -2073,7 +2074,7 @@ Compaction* VersionSet::CompactRange(
std::vector<FileMetaData*> inputs; std::vector<FileMetaData*> inputs;
current_->GetOverlappingInputs(level, begin, end, &inputs); current_->GetOverlappingInputs(level, begin, end, &inputs);
if (inputs.empty()) { if (inputs.empty()) {
return NULL; return nullptr;
} }
// Avoid compacting too much in one shot in case the range is large. // Avoid compacting too much in one shot in case the range is large.
@ -2109,7 +2110,7 @@ Compaction::Compaction(int level, uint64_t target_file_size,
: level_(level), : level_(level),
max_output_file_size_(target_file_size), max_output_file_size_(target_file_size),
maxGrandParentOverlapBytes_(max_grandparent_overlap_bytes), maxGrandParentOverlapBytes_(max_grandparent_overlap_bytes),
input_version_(NULL), input_version_(nullptr),
number_levels_(number_levels), number_levels_(number_levels),
seek_compaction_(seek_compaction), seek_compaction_(seek_compaction),
grandparent_index_(0), grandparent_index_(0),
@ -2128,7 +2129,7 @@ Compaction::Compaction(int level, uint64_t target_file_size,
Compaction::~Compaction() { Compaction::~Compaction() {
delete[] level_ptrs_; delete[] level_ptrs_;
delete edit_; delete edit_;
if (input_version_ != NULL) { if (input_version_ != nullptr) {
input_version_->Unref(); input_version_->Unref();
} }
} }
@ -2210,9 +2211,9 @@ void Compaction::MarkFilesBeingCompacted(bool value) {
} }
void Compaction::ReleaseInputs() { void Compaction::ReleaseInputs() {
if (input_version_ != NULL) { if (input_version_ != nullptr) {
input_version_->Unref(); input_version_->Unref();
input_version_ = NULL; input_version_ = nullptr;
} }
} }

@ -47,8 +47,8 @@ extern int FindFile(const InternalKeyComparator& icmp,
// Returns true iff some file in "files" overlaps the user key range // Returns true iff some file in "files" overlaps the user key range
// [*smallest,*largest]. // [*smallest,*largest].
// smallest==NULL represents a key smaller than all keys in the DB. // smallest==nullptr represents a key smaller than all keys in the DB.
// largest==NULL represents a key largest than all keys in the DB. // largest==nullptr represents a key largest than all keys in the DB.
// REQUIRES: If disjoint_sorted_files, files[] contains disjoint ranges // REQUIRES: If disjoint_sorted_files, files[] contains disjoint ranges
// in sorted order. // in sorted order.
extern bool SomeFileOverlapsRange( extern bool SomeFileOverlapsRange(
@ -87,24 +87,24 @@ class Version {
void GetOverlappingInputs( void GetOverlappingInputs(
int level, int level,
const InternalKey* begin, // NULL means before all keys const InternalKey* begin, // nullptr means before all keys
const InternalKey* end, // NULL means after all keys const InternalKey* end, // nullptr means after all keys
std::vector<FileMetaData*>* inputs, std::vector<FileMetaData*>* inputs,
int hint_index = -1, // index of overlap file int hint_index = -1, // index of overlap file
int* file_index = NULL); // return index of overlap file int* file_index = nullptr); // return index of overlap file
void GetOverlappingInputsBinarySearch( void GetOverlappingInputsBinarySearch(
int level, int level,
const Slice& begin, // NULL means before all keys const Slice& begin, // nullptr means before all keys
const Slice& end, // NULL means after all keys const Slice& end, // nullptr means after all keys
std::vector<FileMetaData*>* inputs, std::vector<FileMetaData*>* inputs,
int hint_index, // index of overlap file int hint_index, // index of overlap file
int* file_index); // return index of overlap file int* file_index); // return index of overlap file
void ExtendOverlappingInputs( void ExtendOverlappingInputs(
int level, int level,
const Slice& begin, // NULL means before all keys const Slice& begin, // nullptr means before all keys
const Slice& end, // NULL means after all keys const Slice& end, // nullptr means after all keys
std::vector<FileMetaData*>* inputs, std::vector<FileMetaData*>* inputs,
int index); // start extending from this index int index); // start extending from this index
@ -272,13 +272,13 @@ class VersionSet {
int NumberLevels() const { return num_levels_; } int NumberLevels() const { return num_levels_; }
// Pick level and inputs for a new compaction. // Pick level and inputs for a new compaction.
// Returns NULL if there is no compaction to be done. // Returns nullptr if there is no compaction to be done.
// Otherwise returns a pointer to a heap-allocated object that // Otherwise returns a pointer to a heap-allocated object that
// describes the compaction. Caller should delete the result. // describes the compaction. Caller should delete the result.
Compaction* PickCompaction(); Compaction* PickCompaction();
// Return a compaction object for compacting the range [begin,end] in // Return a compaction object for compacting the range [begin,end] in
// the specified level. Returns NULL if there is nothing in that // the specified level. Returns nullptr if there is nothing in that
// level that overlaps the specified range. Caller should delete // level that overlaps the specified range. Caller should delete
// the result. // the result.
Compaction* CompactRange( Compaction* CompactRange(
@ -306,7 +306,7 @@ class VersionSet {
} }
// Returns true iff some level needs a compaction. // Returns true iff some level needs a compaction.
bool NeedsCompaction() const { bool NeedsCompaction() const {
return ((current_->file_to_compact_ != NULL) || return ((current_->file_to_compact_ != nullptr) ||
NeedsSizeCompaction()); NeedsSizeCompaction());
} }
@ -345,9 +345,9 @@ class VersionSet {
const uint64_t ManifestFileSize() { return current_->offset_manifest_file_; } const uint64_t ManifestFileSize() { return current_->offset_manifest_file_; }
// For the specfied level, pick a compaction. // For the specfied level, pick a compaction.
// Returns NULL if there is no compaction to be done. // Returns nullptr if there is no compaction to be done.
// If level is 0 and there is already a compaction on that level, this // If level is 0 and there is already a compaction on that level, this
// function will return NULL. // function will return nullptr.
Compaction* PickCompactionBySize(int level, double score); Compaction* PickCompactionBySize(int level, double score);
// Free up the files that were participated in a compaction // Free up the files that were participated in a compaction

@ -40,20 +40,20 @@ class FindFileTest {
bool Overlaps(const char* smallest, const char* largest) { bool Overlaps(const char* smallest, const char* largest) {
InternalKeyComparator cmp(BytewiseComparator()); InternalKeyComparator cmp(BytewiseComparator());
Slice s(smallest != NULL ? smallest : ""); Slice s(smallest != nullptr ? smallest : "");
Slice l(largest != NULL ? largest : ""); Slice l(largest != nullptr ? largest : "");
return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, files_, return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, files_,
(smallest != NULL ? &s : NULL), (smallest != nullptr ? &s : nullptr),
(largest != NULL ? &l : NULL)); (largest != nullptr ? &l : nullptr));
} }
}; };
TEST(FindFileTest, Empty) { TEST(FindFileTest, Empty) {
ASSERT_EQ(0, Find("foo")); ASSERT_EQ(0, Find("foo"));
ASSERT_TRUE(! Overlaps("a", "z")); ASSERT_TRUE(! Overlaps("a", "z"));
ASSERT_TRUE(! Overlaps(NULL, "z")); ASSERT_TRUE(! Overlaps(nullptr, "z"));
ASSERT_TRUE(! Overlaps("a", NULL)); ASSERT_TRUE(! Overlaps("a", nullptr));
ASSERT_TRUE(! Overlaps(NULL, NULL)); ASSERT_TRUE(! Overlaps(nullptr, nullptr));
} }
TEST(FindFileTest, Single) { TEST(FindFileTest, Single) {
@ -78,12 +78,12 @@ TEST(FindFileTest, Single) {
ASSERT_TRUE(Overlaps("q", "q")); ASSERT_TRUE(Overlaps("q", "q"));
ASSERT_TRUE(Overlaps("q", "q1")); ASSERT_TRUE(Overlaps("q", "q1"));
ASSERT_TRUE(! Overlaps(NULL, "j")); ASSERT_TRUE(! Overlaps(nullptr, "j"));
ASSERT_TRUE(! Overlaps("r", NULL)); ASSERT_TRUE(! Overlaps("r", nullptr));
ASSERT_TRUE(Overlaps(NULL, "p")); ASSERT_TRUE(Overlaps(nullptr, "p"));
ASSERT_TRUE(Overlaps(NULL, "p1")); ASSERT_TRUE(Overlaps(nullptr, "p1"));
ASSERT_TRUE(Overlaps("q", NULL)); ASSERT_TRUE(Overlaps("q", nullptr));
ASSERT_TRUE(Overlaps(NULL, NULL)); ASSERT_TRUE(Overlaps(nullptr, nullptr));
} }
@ -130,19 +130,19 @@ TEST(FindFileTest, MultipleNullBoundaries) {
Add("200", "250"); Add("200", "250");
Add("300", "350"); Add("300", "350");
Add("400", "450"); Add("400", "450");
ASSERT_TRUE(! Overlaps(NULL, "149")); ASSERT_TRUE(! Overlaps(nullptr, "149"));
ASSERT_TRUE(! Overlaps("451", NULL)); ASSERT_TRUE(! Overlaps("451", nullptr));
ASSERT_TRUE(Overlaps(NULL, NULL)); ASSERT_TRUE(Overlaps(nullptr, nullptr));
ASSERT_TRUE(Overlaps(NULL, "150")); ASSERT_TRUE(Overlaps(nullptr, "150"));
ASSERT_TRUE(Overlaps(NULL, "199")); ASSERT_TRUE(Overlaps(nullptr, "199"));
ASSERT_TRUE(Overlaps(NULL, "200")); ASSERT_TRUE(Overlaps(nullptr, "200"));
ASSERT_TRUE(Overlaps(NULL, "201")); ASSERT_TRUE(Overlaps(nullptr, "201"));
ASSERT_TRUE(Overlaps(NULL, "400")); ASSERT_TRUE(Overlaps(nullptr, "400"));
ASSERT_TRUE(Overlaps(NULL, "800")); ASSERT_TRUE(Overlaps(nullptr, "800"));
ASSERT_TRUE(Overlaps("100", NULL)); ASSERT_TRUE(Overlaps("100", nullptr));
ASSERT_TRUE(Overlaps("200", NULL)); ASSERT_TRUE(Overlaps("200", nullptr));
ASSERT_TRUE(Overlaps("449", NULL)); ASSERT_TRUE(Overlaps("449", nullptr));
ASSERT_TRUE(Overlaps("450", NULL)); ASSERT_TRUE(Overlaps("450", nullptr));
} }
TEST(FindFileTest, OverlapSequenceChecks) { TEST(FindFileTest, OverlapSequenceChecks) {

@ -56,7 +56,7 @@ class Cache {
virtual Handle* Insert(const Slice& key, void* value, size_t charge, virtual Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)) = 0; void (*deleter)(const Slice& key, void* value)) = 0;
// If the cache has no mapping for "key", returns NULL. // If the cache has no mapping for "key", returns nullptr.
// //
// Else return a handle that corresponds to the mapping. The caller // Else return a handle that corresponds to the mapping. The caller
// must call this->Release(handle) when the returned mapping is no // must call this->Release(handle) when the returned mapping is no

@ -53,7 +53,7 @@ class DB {
// Open the database with the specified "name". // Open the database with the specified "name".
// Stores a pointer to a heap-allocated database in *dbptr and returns // Stores a pointer to a heap-allocated database in *dbptr and returns
// OK on success. // OK on success.
// Stores NULL in *dbptr and returns a non-OK status on error. // Stores nullptr in *dbptr and returns a non-OK status on error.
// Caller should delete *dbptr when it is no longer needed. // Caller should delete *dbptr when it is no longer needed.
static Status Open(const Options& options, static Status Open(const Options& options,
const std::string& name, const std::string& name,
@ -149,10 +149,10 @@ class DB {
// needed to access the data. This operation should typically only // needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation. // be invoked by users who understand the underlying implementation.
// //
// begin==NULL is treated as a key before all keys in the database. // begin==nullptr is treated as a key before all keys in the database.
// end==NULL is treated as a key after all keys in the database. // end==nullptr is treated as a key after all keys in the database.
// Therefore the following call will compact the entire database: // Therefore the following call will compact the entire database:
// db->CompactRange(NULL, NULL); // db->CompactRange(nullptr, nullptr);
virtual void CompactRange(const Slice* begin, const Slice* end) = 0; virtual void CompactRange(const Slice* begin, const Slice* end) = 0;
// Number of levels used for this DB. // Number of levels used for this DB.

@ -46,7 +46,7 @@ class Env {
// Create a brand new sequentially-readable file with the specified name. // Create a brand new sequentially-readable file with the specified name.
// On success, stores a pointer to the new file in *result and returns OK. // On success, stores a pointer to the new file in *result and returns OK.
// On failure stores NULL in *result and returns non-OK. If the file does // On failure stores nullptr in *result and returns non-OK. If the file does
// not exist, returns a non-OK status. // not exist, returns a non-OK status.
// //
// The returned file will only be accessed by one thread at a time. // The returned file will only be accessed by one thread at a time.
@ -55,7 +55,7 @@ class Env {
// Create a brand new random access read-only file with the // Create a brand new random access read-only file with the
// specified name. On success, stores a pointer to the new file in // specified name. On success, stores a pointer to the new file in
// *result and returns OK. On failure stores NULL in *result and // *result and returns OK. On failure stores nullptr in *result and
// returns non-OK. If the file does not exist, returns a non-OK // returns non-OK. If the file does not exist, returns a non-OK
// status. // status.
// //
@ -66,7 +66,7 @@ class Env {
// Create an object that writes to a new file with the specified // Create an object that writes to a new file with the specified
// name. Deletes any existing file with the same name and creates a // name. Deletes any existing file with the same name and creates a
// new file. On success, stores a pointer to the new file in // new file. On success, stores a pointer to the new file in
// *result and returns OK. On failure stores NULL in *result and // *result and returns OK. On failure stores nullptr in *result and
// returns non-OK. // returns non-OK.
// //
// The returned file will only be accessed by one thread at a time. // The returned file will only be accessed by one thread at a time.
@ -106,7 +106,7 @@ class Env {
const std::string& target) = 0; const std::string& target) = 0;
// Lock the specified file. Used to prevent concurrent access to // Lock the specified file. Used to prevent concurrent access to
// the same db by multiple processes. On failure, stores NULL in // the same db by multiple processes. On failure, stores nullptr in
// *lock and returns non-OK. // *lock and returns non-OK.
// //
// On success, stores a pointer to the object that represents the // On success, stores a pointer to the object that represents the
@ -363,7 +363,7 @@ class FileLock {
void operator=(const FileLock&); void operator=(const FileLock&);
}; };
// Log the specified data to *info_log if info_log is non-NULL. // Log the specified data to *info_log if info_log is non-nullptr.
extern void Log(const shared_ptr<Logger>& info_log, const char* format, ...) extern void Log(const shared_ptr<Logger>& info_log, const char* format, ...)
# if defined(__GNUC__) || defined(__clang__) # if defined(__GNUC__) || defined(__clang__)
__attribute__((__format__ (__printf__, 2, 3))) __attribute__((__format__ (__printf__, 2, 3)))

@ -85,9 +85,9 @@ struct Options {
Env* env; Env* env;
// Any internal progress/error information generated by the db will // Any internal progress/error information generated by the db will
// be written to info_log if it is non-NULL, or to a file stored // be written to info_log if it is non-nullptr, or to a file stored
// in the same directory as the DB contents if info_log is NULL. // in the same directory as the DB contents if info_log is nullptr.
// Default: NULL // Default: nullptr
shared_ptr<Logger> info_log; shared_ptr<Logger> info_log;
// ------------------- // -------------------
@ -122,9 +122,9 @@ struct Options {
// Control over blocks (user data is stored in a set of blocks, and // Control over blocks (user data is stored in a set of blocks, and
// a block is the unit of reading from disk). // a block is the unit of reading from disk).
// If non-NULL, use the specified cache for blocks. // If non-NULL use the specified cache for blocks.
// If NULL, leveldb will automatically create and use an 8MB internal cache. // If NULL, leveldb will automatically create and use an 8MB internal cache.
// Default: NULL // Default: nullptr
shared_ptr<Cache> block_cache; shared_ptr<Cache> block_cache;
// Approximate size of user data packed per block. Note that the // Approximate size of user data packed per block. Note that the
@ -163,8 +163,8 @@ struct Options {
// are cases where most lower levels would like to quick compression // are cases where most lower levels would like to quick compression
// algorithm while the higher levels (which have more data) use // algorithm while the higher levels (which have more data) use
// compression algorithms that have better compression but could // compression algorithms that have better compression but could
// be slower. This array, if non NULL, should have an entry for // be slower. This array, if non nullptr, should have an entry for
// each level of the database. This array, if non NULL, overides the // each level of the database. This array, if non nullptr, overides the
// value specified in the previous field 'compression'. The caller is // value specified in the previous field 'compression'. The caller is
// reponsible for allocating memory and initializing the values in it // reponsible for allocating memory and initializing the values in it
// before invoking Open(). The caller is responsible for freeing this // before invoking Open(). The caller is responsible for freeing this
@ -176,11 +176,11 @@ struct Options {
//different options for compression algorithms //different options for compression algorithms
CompressionOptions compression_opts; CompressionOptions compression_opts;
// If non-NULL, use the specified filter policy to reduce disk reads. // If non-nullptr, use the specified filter policy to reduce disk reads.
// Many applications will benefit from passing the result of // Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here. // NewBloomFilterPolicy() here.
// //
// Default: NULL // Default: nullptr
const FilterPolicy* filter_policy; const FilterPolicy* filter_policy;
// Number of levels for this database // Number of levels for this database
@ -331,7 +331,7 @@ struct Options {
// Disable block cache. If this is set to false, // Disable block cache. If this is set to false,
// then no block cache should be used, and the block_cache should // then no block cache should be used, and the block_cache should
// point to a NULL object. // point to a nullptr object.
bool no_block_cache; bool no_block_cache;
// Number of shards used for table cache. // Number of shards used for table cache.
@ -394,21 +394,21 @@ struct ReadOptions {
// Default: true // Default: true
bool fill_cache; bool fill_cache;
// If "snapshot" is non-NULL, read as of the supplied snapshot // If "snapshot" is non-nullptr, read as of the supplied snapshot
// (which must belong to the DB that is being read and which must // (which must belong to the DB that is being read and which must
// not have been released). If "snapshot" is NULL, use an impliicit // not have been released). If "snapshot" is nullptr, use an impliicit
// snapshot of the state at the beginning of this read operation. // snapshot of the state at the beginning of this read operation.
// Default: NULL // Default: nullptr
const Snapshot* snapshot; const Snapshot* snapshot;
ReadOptions() ReadOptions()
: verify_checksums(false), : verify_checksums(false),
fill_cache(true), fill_cache(true),
snapshot(NULL) { snapshot(nullptr) {
} }
ReadOptions(bool cksum, bool cache) : ReadOptions(bool cksum, bool cache) :
verify_checksums(cksum), fill_cache(cache), verify_checksums(cksum), fill_cache(cache),
snapshot(NULL) { snapshot(nullptr) {
} }
}; };

@ -21,7 +21,7 @@ namespace leveldb {
class Status { class Status {
public: public:
// Create a success status. // Create a success status.
Status() : state_(NULL) { } Status() : state_(nullptr) { }
~Status() { delete[] state_; } ~Status() { delete[] state_; }
// Copy the specified status. // Copy the specified status.
@ -49,7 +49,7 @@ class Status {
} }
// Returns true iff the status indicates success. // Returns true iff the status indicates success.
bool ok() const { return (state_ == NULL); } bool ok() const { return (state_ == nullptr); }
// Returns true iff the status indicates a NotFound error. // Returns true iff the status indicates a NotFound error.
bool IsNotFound() const { return code() == kNotFound; } bool IsNotFound() const { return code() == kNotFound; }
@ -71,7 +71,7 @@ class Status {
std::string ToString() const; std::string ToString() const;
private: private:
// OK status has a NULL state_. Otherwise, state_ is a new[] array // OK status has a nullptr state_. Otherwise, state_ is a new[] array
// of the following form: // of the following form:
// state_[0..3] == length of message // state_[0..3] == length of message
// state_[4] == code // state_[4] == code
@ -88,7 +88,7 @@ class Status {
}; };
Code code() const { Code code() const {
return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]); return (state_ == nullptr) ? kOk : static_cast<Code>(state_[4]);
} }
Status(Code code, const Slice& msg, const Slice& msg2); Status(Code code, const Slice& msg, const Slice& msg2);
@ -96,14 +96,14 @@ class Status {
}; };
inline Status::Status(const Status& s) { inline Status::Status(const Status& s) {
state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
} }
inline void Status::operator=(const Status& s) { inline void Status::operator=(const Status& s) {
// The following condition catches both aliasing (when this == &s), // The following condition catches both aliasing (when this == &s),
// and the common case where both s and *this are ok. // and the common case where both s and *this are ok.
if (state_ != s.state_) { if (state_ != s.state_) {
delete[] state_; delete[] state_;
state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
} }
} }

@ -33,7 +33,7 @@ class Table {
// If successful, returns ok and sets "*table" to the newly opened // If successful, returns ok and sets "*table" to the newly opened
// table. The client should delete "*table" when no longer needed. // table. The client should delete "*table" when no longer needed.
// If there was an error while initializing the table, sets "*table" // If there was an error while initializing the table, sets "*table"
// to NULL and returns a non-ok status. Does not take ownership of // to nullptr and returns a non-ok status. Does not take ownership of
// "*source", but the client must ensure that "source" remains live // "*source", but the client must ensure that "source" remains live
// for the duration of the returned table's lifetime. // for the duration of the returned table's lifetime.
// //

@ -47,13 +47,13 @@ Block::~Block() {
// and the length of the value in "*shared", "*non_shared", and // and the length of the value in "*shared", "*non_shared", and
// "*value_length", respectively. Will not derefence past "limit". // "*value_length", respectively. Will not derefence past "limit".
// //
// If any errors are detected, returns NULL. Otherwise, returns a // If any errors are detected, returns nullptr. Otherwise, returns a
// pointer to the key delta (just past the three decoded values). // pointer to the key delta (just past the three decoded values).
static inline const char* DecodeEntry(const char* p, const char* limit, static inline const char* DecodeEntry(const char* p, const char* limit,
uint32_t* shared, uint32_t* shared,
uint32_t* non_shared, uint32_t* non_shared,
uint32_t* value_length) { uint32_t* value_length) {
if (limit - p < 3) return NULL; if (limit - p < 3) return nullptr;
*shared = reinterpret_cast<const unsigned char*>(p)[0]; *shared = reinterpret_cast<const unsigned char*>(p)[0];
*non_shared = reinterpret_cast<const unsigned char*>(p)[1]; *non_shared = reinterpret_cast<const unsigned char*>(p)[1];
*value_length = reinterpret_cast<const unsigned char*>(p)[2]; *value_length = reinterpret_cast<const unsigned char*>(p)[2];
@ -61,13 +61,13 @@ static inline const char* DecodeEntry(const char* p, const char* limit,
// Fast path: all three values are encoded in one byte each // Fast path: all three values are encoded in one byte each
p += 3; p += 3;
} else { } else {
if ((p = GetVarint32Ptr(p, limit, shared)) == NULL) return NULL; if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
if ((p = GetVarint32Ptr(p, limit, non_shared)) == NULL) return NULL; if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
if ((p = GetVarint32Ptr(p, limit, value_length)) == NULL) return NULL; if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr;
} }
if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) { if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) {
return NULL; return nullptr;
} }
return p; return p;
} }
@ -173,7 +173,7 @@ class Block::Iter : public Iterator {
const char* key_ptr = DecodeEntry(data_ + region_offset, const char* key_ptr = DecodeEntry(data_ + region_offset,
data_ + restarts_, data_ + restarts_,
&shared, &non_shared, &value_length); &shared, &non_shared, &value_length);
if (key_ptr == NULL || (shared != 0)) { if (key_ptr == nullptr || (shared != 0)) {
CorruptionError(); CorruptionError();
return; return;
} }
@ -236,7 +236,7 @@ class Block::Iter : public Iterator {
// Decode next entry // Decode next entry
uint32_t shared, non_shared, value_length; uint32_t shared, non_shared, value_length;
p = DecodeEntry(p, limit, &shared, &non_shared, &value_length); p = DecodeEntry(p, limit, &shared, &non_shared, &value_length);
if (p == NULL || key_.size() < shared) { if (p == nullptr || key_.size() < shared) {
CorruptionError(); CorruptionError();
return false; return false;
} else { } else {

@ -78,8 +78,8 @@ void FilterBlockBuilder::GenerateFilter() {
FilterBlockReader::FilterBlockReader(const FilterPolicy* policy, FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
const Slice& contents) const Slice& contents)
: policy_(policy), : policy_(policy),
data_(NULL), data_(nullptr),
offset_(NULL), offset_(nullptr),
num_(0), num_(0),
base_lg_(0) { base_lg_(0) {
size_t n = contents.size(); size_t n = contents.size();

@ -42,7 +42,7 @@ void Footer::EncodeTo(std::string* dst) const {
} }
Status Footer::DecodeFrom(Slice* input) { Status Footer::DecodeFrom(Slice* input) {
assert(input != NULL); assert(input != nullptr);
assert(input->size() >= kEncodedLength); assert(input->size() >= kEncodedLength);
const char* magic_ptr = input->data() + kEncodedLength - 8; const char* magic_ptr = input->data() + kEncodedLength - 8;
@ -101,7 +101,7 @@ Status ReadBlock(RandomAccessFile* file,
} }
} }
char* ubuf = NULL; char* ubuf = nullptr;
int decompress_size = 0; int decompress_size = 0;
switch (data[n]) { switch (data[n]) {
case kNoCompression: case kNoCompression:

@ -7,14 +7,14 @@
namespace leveldb { namespace leveldb {
Iterator::Iterator() { Iterator::Iterator() {
cleanup_.function = NULL; cleanup_.function = nullptr;
cleanup_.next = NULL; cleanup_.next = nullptr;
} }
Iterator::~Iterator() { Iterator::~Iterator() {
if (cleanup_.function != NULL) { if (cleanup_.function != nullptr) {
(*cleanup_.function)(cleanup_.arg1, cleanup_.arg2); (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2);
for (Cleanup* c = cleanup_.next; c != NULL; ) { for (Cleanup* c = cleanup_.next; c != nullptr; ) {
(*c->function)(c->arg1, c->arg2); (*c->function)(c->arg1, c->arg2);
Cleanup* next = c->next; Cleanup* next = c->next;
delete c; delete c;
@ -24,9 +24,9 @@ Iterator::~Iterator() {
} }
void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) { void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
assert(func != NULL); assert(func != nullptr);
Cleanup* c; Cleanup* c;
if (cleanup_.function == NULL) { if (cleanup_.function == nullptr) {
c = &cleanup_; c = &cleanup_;
} else { } else {
c = new Cleanup; c = new Cleanup;
@ -41,7 +41,7 @@ void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
namespace { namespace {
class EmptyIterator : public Iterator { class EmptyIterator : public Iterator {
public: public:
EmptyIterator(const Status& s) : status_(s) { } explicit EmptyIterator(const Status& s) : status_(s) { }
virtual bool Valid() const { return false; } virtual bool Valid() const { return false; }
virtual void Seek(const Slice& target) { } virtual void Seek(const Slice& target) { }
virtual void SeekToFirst() { } virtual void SeekToFirst() { }

@ -13,8 +13,8 @@ namespace leveldb {
// cache locality. // cache locality.
class IteratorWrapper { class IteratorWrapper {
public: public:
IteratorWrapper(): iter_(NULL), valid_(false) { } IteratorWrapper(): iter_(nullptr), valid_(false) { }
explicit IteratorWrapper(Iterator* iter): iter_(NULL) { explicit IteratorWrapper(Iterator* iter): iter_(nullptr) {
Set(iter); Set(iter);
} }
~IteratorWrapper() { delete iter_; } ~IteratorWrapper() { delete iter_; }
@ -25,7 +25,7 @@ class IteratorWrapper {
void Set(Iterator* iter) { void Set(Iterator* iter) {
delete iter_; delete iter_;
iter_ = iter; iter_ = iter;
if (iter_ == NULL) { if (iter_ == nullptr) {
valid_ = false; valid_ = false;
} else { } else {
Update(); Update();
@ -37,7 +37,7 @@ class IteratorWrapper {
bool Valid() const { return valid_; } bool Valid() const { return valid_; }
Slice key() const { assert(Valid()); return key_; } Slice key() const { assert(Valid()); return key_; }
Slice value() const { assert(Valid()); return iter_->value(); } Slice value() const { assert(Valid()); return iter_->value(); }
// Methods below require iter() != NULL // Methods below require iter() != nullptr
Status status() const { assert(iter_); return iter_->status(); } Status status() const { assert(iter_); return iter_->status(); }
void Next() { assert(iter_); iter_->Next(); Update(); } void Next() { assert(iter_); iter_->Next(); Update(); }
void Prev() { assert(iter_); iter_->Prev(); Update(); } void Prev() { assert(iter_); iter_->Prev(); Update(); }

@ -19,7 +19,7 @@ class MergingIterator : public Iterator {
: comparator_(comparator), : comparator_(comparator),
children_(new IteratorWrapper[n]), children_(new IteratorWrapper[n]),
n_(n), n_(n),
current_(NULL), current_(nullptr),
direction_(kForward), direction_(kForward),
maxHeap_(NewMaxIterHeap(comparator_)), maxHeap_(NewMaxIterHeap(comparator_)),
minHeap_ (NewMinIterHeap(comparator_)) { minHeap_ (NewMinIterHeap(comparator_)) {
@ -38,7 +38,7 @@ class MergingIterator : public Iterator {
} }
virtual bool Valid() const { virtual bool Valid() const {
return (current_ != NULL); return (current_ != nullptr);
} }
virtual void SeekToFirst() { virtual void SeekToFirst() {
@ -189,7 +189,7 @@ class MergingIterator : public Iterator {
void MergingIterator::FindSmallest() { void MergingIterator::FindSmallest() {
if (minHeap_.empty()) { if (minHeap_.empty()) {
current_ = NULL; current_ = nullptr;
} else { } else {
current_ = minHeap_.top(); current_ = minHeap_.top();
assert(current_->Valid()); assert(current_->Valid());
@ -199,7 +199,7 @@ void MergingIterator::FindSmallest() {
void MergingIterator::FindLargest() { void MergingIterator::FindLargest() {
if (maxHeap_.empty()) { if (maxHeap_.empty()) {
current_ = NULL; current_ = nullptr;
} else { } else {
current_ = maxHeap_.top(); current_ = maxHeap_.top();
assert(current_->Valid()); assert(current_->Valid());

@ -89,7 +89,7 @@ Status Table::Open(const Options& options,
// Read the index block // Read the index block
BlockContents contents; BlockContents contents;
Block* index_block = NULL; Block* index_block = nullptr;
if (s.ok()) { if (s.ok()) {
s = ReadBlock(file.get(), ReadOptions(), footer.index_handle(), &contents); s = ReadBlock(file.get(), ReadOptions(), footer.index_handle(), &contents);
if (s.ok()) { if (s.ok()) {
@ -106,8 +106,8 @@ Status Table::Open(const Options& options,
rep->metaindex_handle = footer.metaindex_handle(); rep->metaindex_handle = footer.metaindex_handle();
rep->index_block = index_block; rep->index_block = index_block;
SetupCacheKeyPrefix(rep); SetupCacheKeyPrefix(rep);
rep->filter_data = NULL; rep->filter_data = nullptr;
rep->filter = NULL; rep->filter = nullptr;
table->reset(new Table(rep)); table->reset(new Table(rep));
(*table)->ReadMeta(footer); (*table)->ReadMeta(footer);
} else { } else {
@ -118,7 +118,7 @@ Status Table::Open(const Options& options,
} }
void Table::ReadMeta(const Footer& footer) { void Table::ReadMeta(const Footer& footer) {
if (rep_->options.filter_policy == NULL) { if (rep_->options.filter_policy == nullptr) {
return; // Do not need any metadata return; // Do not need any metadata
} }
@ -192,8 +192,8 @@ Iterator* Table::BlockReader(void* arg,
Table* table = reinterpret_cast<Table*>(arg); Table* table = reinterpret_cast<Table*>(arg);
Cache* block_cache = table->rep_->options.block_cache.get(); Cache* block_cache = table->rep_->options.block_cache.get();
Statistics* const statistics = table->rep_->options.statistics; Statistics* const statistics = table->rep_->options.statistics;
Block* block = NULL; Block* block = nullptr;
Cache::Handle* cache_handle = NULL; Cache::Handle* cache_handle = nullptr;
BlockHandle handle; BlockHandle handle;
Slice input = index_value; Slice input = index_value;
@ -203,7 +203,7 @@ Iterator* Table::BlockReader(void* arg,
if (s.ok()) { if (s.ok()) {
BlockContents contents; BlockContents contents;
if (block_cache != NULL) { if (block_cache != nullptr) {
char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length]; char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
const size_t cache_key_prefix_size = table->rep_->cache_key_prefix_size; const size_t cache_key_prefix_size = table->rep_->cache_key_prefix_size;
assert(cache_key_prefix_size != 0); assert(cache_key_prefix_size != 0);
@ -214,7 +214,7 @@ Iterator* Table::BlockReader(void* arg,
handle.offset()); handle.offset());
Slice key(cache_key, static_cast<size_t>(end-cache_key)); Slice key(cache_key, static_cast<size_t>(end-cache_key));
cache_handle = block_cache->Lookup(key); cache_handle = block_cache->Lookup(key);
if (cache_handle != NULL) { if (cache_handle != nullptr) {
block = reinterpret_cast<Block*>(block_cache->Value(cache_handle)); block = reinterpret_cast<Block*>(block_cache->Value(cache_handle));
RecordTick(statistics, BLOCK_CACHE_HIT); RecordTick(statistics, BLOCK_CACHE_HIT);
@ -227,7 +227,7 @@ Iterator* Table::BlockReader(void* arg,
key, block, block->size(), &DeleteCachedBlock); key, block, block->size(), &DeleteCachedBlock);
} }
} }
if (didIO != NULL) { if (didIO != nullptr) {
*didIO = true; // we did some io from storage *didIO = true; // we did some io from storage
} }
@ -238,17 +238,17 @@ Iterator* Table::BlockReader(void* arg,
if (s.ok()) { if (s.ok()) {
block = new Block(contents); block = new Block(contents);
} }
if (didIO != NULL) { if (didIO != nullptr) {
*didIO = true; // we did some io from storage *didIO = true; // we did some io from storage
} }
} }
} }
Iterator* iter; Iterator* iter;
if (block != NULL) { if (block != nullptr) {
iter = block->NewIterator(table->rep_->options.comparator); iter = block->NewIterator(table->rep_->options.comparator);
if (cache_handle == NULL) { if (cache_handle == nullptr) {
iter->RegisterCleanup(&DeleteBlock, block, NULL); iter->RegisterCleanup(&DeleteBlock, block, nullptr);
} else { } else {
iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle); iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle);
} }
@ -261,7 +261,7 @@ Iterator* Table::BlockReader(void* arg,
Iterator* Table::BlockReader(void* arg, Iterator* Table::BlockReader(void* arg,
const ReadOptions& options, const ReadOptions& options,
const Slice& index_value) { const Slice& index_value) {
return BlockReader(arg, options, index_value, NULL); return BlockReader(arg, options, index_value, nullptr);
} }
Iterator* Table::NewIterator(const ReadOptions& options) const { Iterator* Table::NewIterator(const ReadOptions& options) const {
@ -280,7 +280,7 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
Slice handle_value = iiter->value(); Slice handle_value = iiter->value();
FilterBlockReader* filter = rep_->filter; FilterBlockReader* filter = rep_->filter;
BlockHandle handle; BlockHandle handle;
if (filter != NULL && if (filter != nullptr &&
handle.DecodeFrom(&handle_value).ok() && handle.DecodeFrom(&handle_value).ok() &&
!filter->KeyMayMatch(handle.offset(), k)) { !filter->KeyMayMatch(handle.offset(), k)) {
// Not found // Not found

@ -53,7 +53,7 @@ struct TableBuilder::Rep {
index_block(&index_block_options), index_block(&index_block_options),
num_entries(0), num_entries(0),
closed(false), closed(false),
filter_block(opt.filter_policy == NULL ? NULL filter_block(opt.filter_policy == nullptr ? nullptr
: new FilterBlockBuilder(opt.filter_policy)), : new FilterBlockBuilder(opt.filter_policy)),
pending_index_entry(false) { pending_index_entry(false) {
index_block_options.block_restart_interval = 1; index_block_options.block_restart_interval = 1;
@ -63,7 +63,7 @@ struct TableBuilder::Rep {
TableBuilder::TableBuilder(const Options& options, WritableFile* file, TableBuilder::TableBuilder(const Options& options, WritableFile* file,
int level) int level)
: rep_(new Rep(options, file)), level_(level) { : rep_(new Rep(options, file)), level_(level) {
if (rep_->filter_block != NULL) { if (rep_->filter_block != nullptr) {
rep_->filter_block->StartBlock(0); rep_->filter_block->StartBlock(0);
} }
} }
@ -107,7 +107,7 @@ void TableBuilder::Add(const Slice& key, const Slice& value) {
r->pending_index_entry = false; r->pending_index_entry = false;
} }
if (r->filter_block != NULL) { if (r->filter_block != nullptr) {
r->filter_block->AddKey(key); r->filter_block->AddKey(key);
} }
@ -132,7 +132,7 @@ void TableBuilder::Flush() {
r->pending_index_entry = true; r->pending_index_entry = true;
r->status = r->file->Flush(); r->status = r->file->Flush();
} }
if (r->filter_block != NULL) { if (r->filter_block != nullptr) {
r->filter_block->StartBlock(r->offset); r->filter_block->StartBlock(r->offset);
} }
} }
@ -249,7 +249,7 @@ Status TableBuilder::Finish() {
BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle; BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle;
// Write filter block // Write filter block
if (ok() && r->filter_block != NULL) { if (ok() && r->filter_block != nullptr) {
WriteRawBlock(r->filter_block->Finish(), kNoCompression, WriteRawBlock(r->filter_block->Finish(), kNoCompression,
&filter_block_handle); &filter_block_handle);
} }
@ -257,7 +257,7 @@ Status TableBuilder::Finish() {
// Write metaindex block // Write metaindex block
if (ok()) { if (ok()) {
BlockBuilder meta_index_block(&r->options); BlockBuilder meta_index_block(&r->options);
if (r->filter_block != NULL) { if (r->filter_block != nullptr) {
// Add mapping from "filter.Name" to location of filter data // Add mapping from "filter.Name" to location of filter data
std::string key = "filter."; std::string key = "filter.";
key.append(r->options.filter_policy->Name()); key.append(r->options.filter_policy->Name());

@ -80,7 +80,7 @@ struct STLLessThan {
const Comparator* cmp; const Comparator* cmp;
STLLessThan() : cmp(BytewiseComparator()) { } STLLessThan() : cmp(BytewiseComparator()) { }
STLLessThan(const Comparator* c) : cmp(c) { } explicit STLLessThan(const Comparator* c) : cmp(c) { }
bool operator()(const std::string& a, const std::string& b) const { bool operator()(const std::string& a, const std::string& b) const {
return cmp->Compare(Slice(a), Slice(b)) < 0; return cmp->Compare(Slice(a), Slice(b)) < 0;
} }
@ -184,7 +184,7 @@ class Constructor {
virtual const KVMap& data() { return data_; } virtual const KVMap& data() { return data_; }
virtual DB* db() const { return NULL; } // Overridden in DBConstructor virtual DB* db() const { return nullptr; } // Overridden in DBConstructor
private: private:
KVMap data_; KVMap data_;
@ -195,13 +195,13 @@ class BlockConstructor: public Constructor {
explicit BlockConstructor(const Comparator* cmp) explicit BlockConstructor(const Comparator* cmp)
: Constructor(cmp), : Constructor(cmp),
comparator_(cmp), comparator_(cmp),
block_(NULL) { } block_(nullptr) { }
~BlockConstructor() { ~BlockConstructor() {
delete block_; delete block_;
} }
virtual Status FinishImpl(const Options& options, const KVMap& data) { virtual Status FinishImpl(const Options& options, const KVMap& data) {
delete block_; delete block_;
block_ = NULL; block_ = nullptr;
BlockBuilder builder(&options); BlockBuilder builder(&options);
for (KVMap::const_iterator it = data.begin(); for (KVMap::const_iterator it = data.begin();
@ -232,7 +232,7 @@ class BlockConstructor: public Constructor {
class TableConstructor: public Constructor { class TableConstructor: public Constructor {
public: public:
TableConstructor(const Comparator* cmp) explicit TableConstructor(const Comparator* cmp)
: Constructor(cmp) { : Constructor(cmp) {
} }
~TableConstructor() { ~TableConstructor() {
@ -377,7 +377,7 @@ class DBConstructor: public Constructor {
explicit DBConstructor(const Comparator* cmp) explicit DBConstructor(const Comparator* cmp)
: Constructor(cmp), : Constructor(cmp),
comparator_(cmp) { comparator_(cmp) {
db_ = NULL; db_ = nullptr;
NewDB(); NewDB();
} }
~DBConstructor() { ~DBConstructor() {
@ -385,7 +385,7 @@ class DBConstructor: public Constructor {
} }
virtual Status FinishImpl(const Options& options, const KVMap& data) { virtual Status FinishImpl(const Options& options, const KVMap& data) {
delete db_; delete db_;
db_ = NULL; db_ = nullptr;
NewDB(); NewDB();
for (KVMap::const_iterator it = data.begin(); for (KVMap::const_iterator it = data.begin();
it != data.end(); it != data.end();
@ -491,25 +491,25 @@ static std::vector<TestArgs> Generate_Arg_List()
for(int i =0; i < test_type_len; i++) for(int i =0; i < test_type_len; i++)
for (int j =0; j < reverse_compare_len; j++) for (int j =0; j < reverse_compare_len; j++)
for (int k =0; k < restart_interval_len; k++) for (int k =0; k < restart_interval_len; k++)
for (unsigned int n =0; n < compression_types.size(); n++) { for (unsigned int n =0; n < compression_types.size(); n++) {
TestArgs one_arg; TestArgs one_arg;
one_arg.type = test_type[i]; one_arg.type = test_type[i];
one_arg.reverse_compare = reverse_compare[j]; one_arg.reverse_compare = reverse_compare[j];
one_arg.restart_interval = restart_interval[k]; one_arg.restart_interval = restart_interval[k];
one_arg.compression = compression_types[n]; one_arg.compression = compression_types[n];
ret.push_back(one_arg); ret.push_back(one_arg);
} }
return ret; return ret;
} }
class Harness { class Harness {
public: public:
Harness() : constructor_(NULL) { } Harness() : constructor_(nullptr) { }
void Init(const TestArgs& args) { void Init(const TestArgs& args) {
delete constructor_; delete constructor_;
constructor_ = NULL; constructor_ = nullptr;
options_ = Options(); options_ = Options();
options_.block_restart_interval = args.restart_interval; options_.block_restart_interval = args.restart_interval;
@ -706,7 +706,7 @@ class Harness {
} }
} }
// Returns NULL if not running against a DB // Returns nullptr if not running against a DB
DB* db() const { return constructor_->db(); } DB* db() const { return constructor_->db(); }
private: private:

@ -46,7 +46,7 @@ class TwoLevelIterator: public Iterator {
// It'd be nice if status() returned a const Status& instead of a Status // It'd be nice if status() returned a const Status& instead of a Status
if (!index_iter_.status().ok()) { if (!index_iter_.status().ok()) {
return index_iter_.status(); return index_iter_.status();
} else if (data_iter_.iter() != NULL && !data_iter_.status().ok()) { } else if (data_iter_.iter() != nullptr && !data_iter_.status().ok()) {
return data_iter_.status(); return data_iter_.status();
} else { } else {
return status_; return status_;
@ -67,8 +67,8 @@ class TwoLevelIterator: public Iterator {
const ReadOptions options_; const ReadOptions options_;
Status status_; Status status_;
IteratorWrapper index_iter_; IteratorWrapper index_iter_;
IteratorWrapper data_iter_; // May be NULL IteratorWrapper data_iter_; // May be nullptr
// If data_iter_ is non-NULL, then "data_block_handle_" holds the // If data_iter_ is non-nullptr, then "data_block_handle_" holds the
// "index_value" passed to block_function_ to create the data_iter_. // "index_value" passed to block_function_ to create the data_iter_.
std::string data_block_handle_; std::string data_block_handle_;
}; };
@ -82,7 +82,7 @@ TwoLevelIterator::TwoLevelIterator(
arg_(arg), arg_(arg),
options_(options), options_(options),
index_iter_(index_iter), index_iter_(index_iter),
data_iter_(NULL) { data_iter_(nullptr) {
} }
TwoLevelIterator::~TwoLevelIterator() { TwoLevelIterator::~TwoLevelIterator() {
@ -91,21 +91,21 @@ TwoLevelIterator::~TwoLevelIterator() {
void TwoLevelIterator::Seek(const Slice& target) { void TwoLevelIterator::Seek(const Slice& target) {
index_iter_.Seek(target); index_iter_.Seek(target);
InitDataBlock(); InitDataBlock();
if (data_iter_.iter() != NULL) data_iter_.Seek(target); if (data_iter_.iter() != nullptr) data_iter_.Seek(target);
SkipEmptyDataBlocksForward(); SkipEmptyDataBlocksForward();
} }
void TwoLevelIterator::SeekToFirst() { void TwoLevelIterator::SeekToFirst() {
index_iter_.SeekToFirst(); index_iter_.SeekToFirst();
InitDataBlock(); InitDataBlock();
if (data_iter_.iter() != NULL) data_iter_.SeekToFirst(); if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst();
SkipEmptyDataBlocksForward(); SkipEmptyDataBlocksForward();
} }
void TwoLevelIterator::SeekToLast() { void TwoLevelIterator::SeekToLast() {
index_iter_.SeekToLast(); index_iter_.SeekToLast();
InitDataBlock(); InitDataBlock();
if (data_iter_.iter() != NULL) data_iter_.SeekToLast(); if (data_iter_.iter() != nullptr) data_iter_.SeekToLast();
SkipEmptyDataBlocksBackward(); SkipEmptyDataBlocksBackward();
} }
@ -123,42 +123,43 @@ void TwoLevelIterator::Prev() {
void TwoLevelIterator::SkipEmptyDataBlocksForward() { void TwoLevelIterator::SkipEmptyDataBlocksForward() {
while (data_iter_.iter() == NULL || !data_iter_.Valid()) { while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
// Move to next block // Move to next block
if (!index_iter_.Valid()) { if (!index_iter_.Valid()) {
SetDataIterator(NULL); SetDataIterator(nullptr);
return; return;
} }
index_iter_.Next(); index_iter_.Next();
InitDataBlock(); InitDataBlock();
if (data_iter_.iter() != NULL) data_iter_.SeekToFirst(); if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst();
} }
} }
void TwoLevelIterator::SkipEmptyDataBlocksBackward() { void TwoLevelIterator::SkipEmptyDataBlocksBackward() {
while (data_iter_.iter() == NULL || !data_iter_.Valid()) { while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
// Move to next block // Move to next block
if (!index_iter_.Valid()) { if (!index_iter_.Valid()) {
SetDataIterator(NULL); SetDataIterator(nullptr);
return; return;
} }
index_iter_.Prev(); index_iter_.Prev();
InitDataBlock(); InitDataBlock();
if (data_iter_.iter() != NULL) data_iter_.SeekToLast(); if (data_iter_.iter() != nullptr) data_iter_.SeekToLast();
} }
} }
void TwoLevelIterator::SetDataIterator(Iterator* data_iter) { void TwoLevelIterator::SetDataIterator(Iterator* data_iter) {
if (data_iter_.iter() != NULL) SaveError(data_iter_.status()); if (data_iter_.iter() != nullptr) SaveError(data_iter_.status());
data_iter_.Set(data_iter); data_iter_.Set(data_iter);
} }
void TwoLevelIterator::InitDataBlock() { void TwoLevelIterator::InitDataBlock() {
if (!index_iter_.Valid()) { if (!index_iter_.Valid()) {
SetDataIterator(NULL); SetDataIterator(nullptr);
} else { } else {
Slice handle = index_iter_.value(); Slice handle = index_iter_.value();
if (data_iter_.iter() != NULL && handle.compare(data_block_handle_) == 0) { if (data_iter_.iter() != nullptr
&& handle.compare(data_block_handle_) == 0) {
// data_iter_ is already constructed with this iterator, so // data_iter_ is already constructed with this iterator, so
// no need to change anything // no need to change anything
} else { } else {

@ -11,7 +11,7 @@ static const int kBlockSize = 4096;
Arena::Arena() { Arena::Arena() {
blocks_memory_ = 0; blocks_memory_ = 0;
alloc_ptr_ = NULL; // First allocation will allocate a block alloc_ptr_ = nullptr; // First allocation will allocate a block
alloc_bytes_remaining_ = 0; alloc_bytes_remaining_ = 0;
} }

@ -52,7 +52,7 @@ struct LRUHandle {
// 4.4.3's builtin hashtable. // 4.4.3's builtin hashtable.
class HandleTable { class HandleTable {
public: public:
HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); } HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); }
~HandleTable() { delete[] list_; } ~HandleTable() { delete[] list_; }
LRUHandle* Lookup(const Slice& key, uint32_t hash) { LRUHandle* Lookup(const Slice& key, uint32_t hash) {
@ -62,9 +62,9 @@ class HandleTable {
LRUHandle* Insert(LRUHandle* h) { LRUHandle* Insert(LRUHandle* h) {
LRUHandle** ptr = FindPointer(h->key(), h->hash); LRUHandle** ptr = FindPointer(h->key(), h->hash);
LRUHandle* old = *ptr; LRUHandle* old = *ptr;
h->next_hash = (old == NULL ? NULL : old->next_hash); h->next_hash = (old == nullptr ? nullptr : old->next_hash);
*ptr = h; *ptr = h;
if (old == NULL) { if (old == nullptr) {
++elems_; ++elems_;
if (elems_ > length_) { if (elems_ > length_) {
// Since each cache entry is fairly large, we aim for a small // Since each cache entry is fairly large, we aim for a small
@ -78,7 +78,7 @@ class HandleTable {
LRUHandle* Remove(const Slice& key, uint32_t hash) { LRUHandle* Remove(const Slice& key, uint32_t hash) {
LRUHandle** ptr = FindPointer(key, hash); LRUHandle** ptr = FindPointer(key, hash);
LRUHandle* result = *ptr; LRUHandle* result = *ptr;
if (result != NULL) { if (result != nullptr) {
*ptr = result->next_hash; *ptr = result->next_hash;
--elems_; --elems_;
} }
@ -97,7 +97,7 @@ class HandleTable {
// pointer to the trailing slot in the corresponding linked list. // pointer to the trailing slot in the corresponding linked list.
LRUHandle** FindPointer(const Slice& key, uint32_t hash) { LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
LRUHandle** ptr = &list_[hash & (length_ - 1)]; LRUHandle** ptr = &list_[hash & (length_ - 1)];
while (*ptr != NULL && while (*ptr != nullptr &&
((*ptr)->hash != hash || key != (*ptr)->key())) { ((*ptr)->hash != hash || key != (*ptr)->key())) {
ptr = &(*ptr)->next_hash; ptr = &(*ptr)->next_hash;
} }
@ -114,7 +114,7 @@ class HandleTable {
uint32_t count = 0; uint32_t count = 0;
for (uint32_t i = 0; i < length_; i++) { for (uint32_t i = 0; i < length_; i++) {
LRUHandle* h = list_[i]; LRUHandle* h = list_[i];
while (h != NULL) { while (h != nullptr) {
LRUHandle* next = h->next_hash; LRUHandle* next = h->next_hash;
uint32_t hash = h->hash; uint32_t hash = h->hash;
LRUHandle** ptr = &new_list[hash & (new_length - 1)]; LRUHandle** ptr = &new_list[hash & (new_length - 1)];
@ -211,7 +211,7 @@ void LRUCache::LRU_Append(LRUHandle* e) {
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) { Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_); MutexLock l(&mutex_);
LRUHandle* e = table_.Lookup(key, hash); LRUHandle* e = table_.Lookup(key, hash);
if (e != NULL) { if (e != nullptr) {
e->refs++; e->refs++;
LRU_Remove(e); LRU_Remove(e);
LRU_Append(e); LRU_Append(e);
@ -242,7 +242,7 @@ Cache::Handle* LRUCache::Insert(
usage_ += charge; usage_ += charge;
LRUHandle* old = table_.Insert(e); LRUHandle* old = table_.Insert(e);
if (old != NULL) { if (old != nullptr) {
LRU_Remove(old); LRU_Remove(old);
Unref(old); Unref(old);
} }
@ -260,7 +260,7 @@ Cache::Handle* LRUCache::Insert(
void LRUCache::Erase(const Slice& key, uint32_t hash) { void LRUCache::Erase(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_); MutexLock l(&mutex_);
LRUHandle* e = table_.Remove(key, hash); LRUHandle* e = table_.Remove(key, hash);
if (e != NULL) { if (e != nullptr) {
LRU_Remove(e); LRU_Remove(e);
Unref(e); Unref(e);
} }
@ -344,7 +344,7 @@ shared_ptr<Cache> NewLRUCache(size_t capacity) {
shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits) { shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits) {
if (numShardBits >= 20) { if (numShardBits >= 20) {
return NULL; // the cache cannot be sharded into too many fine pieces return nullptr; // the cache cannot be sharded into too many fine pieces
} }
return std::make_shared<ShardedLRUCache>(capacity, numShardBits); return std::make_shared<ShardedLRUCache>(capacity, numShardBits);
} }

@ -46,8 +46,8 @@ class CacheTest {
int Lookup(int key) { int Lookup(int key) {
Cache::Handle* handle = cache_->Lookup(EncodeKey(key)); Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
const int r = (handle == NULL) ? -1 : DecodeValue(cache_->Value(handle)); const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle));
if (handle != NULL) { if (handle != nullptr) {
cache_->Release(handle); cache_->Release(handle);
} }
return r; return r;

@ -127,14 +127,14 @@ const char* GetVarint32PtrFallback(const char* p,
return reinterpret_cast<const char*>(p); return reinterpret_cast<const char*>(p);
} }
} }
return NULL; return nullptr;
} }
bool GetVarint32(Slice* input, uint32_t* value) { bool GetVarint32(Slice* input, uint32_t* value) {
const char* p = input->data(); const char* p = input->data();
const char* limit = p + input->size(); const char* limit = p + input->size();
const char* q = GetVarint32Ptr(p, limit, value); const char* q = GetVarint32Ptr(p, limit, value);
if (q == NULL) { if (q == nullptr) {
return false; return false;
} else { } else {
*input = Slice(q, limit - q); *input = Slice(q, limit - q);
@ -156,14 +156,14 @@ const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
return reinterpret_cast<const char*>(p); return reinterpret_cast<const char*>(p);
} }
} }
return NULL; return nullptr;
} }
bool GetVarint64(Slice* input, uint64_t* value) { bool GetVarint64(Slice* input, uint64_t* value) {
const char* p = input->data(); const char* p = input->data();
const char* limit = p + input->size(); const char* limit = p + input->size();
const char* q = GetVarint64Ptr(p, limit, value); const char* q = GetVarint64Ptr(p, limit, value);
if (q == NULL) { if (q == nullptr) {
return false; return false;
} else { } else {
*input = Slice(q, limit - q); *input = Slice(q, limit - q);
@ -175,8 +175,8 @@ const char* GetLengthPrefixedSlice(const char* p, const char* limit,
Slice* result) { Slice* result) {
uint32_t len; uint32_t len;
p = GetVarint32Ptr(p, limit, &len); p = GetVarint32Ptr(p, limit, &len);
if (p == NULL) return NULL; if (p == nullptr) return nullptr;
if (p + len > limit) return NULL; if (p + len > limit) return nullptr;
*result = Slice(p, len); *result = Slice(p, len);
return p + len; return p + len;
} }

@ -37,7 +37,7 @@ extern bool GetLengthPrefixedSlice(Slice* input, Slice* result);
// Pointer-based variants of GetVarint... These either store a value // Pointer-based variants of GetVarint... These either store a value
// in *v and return a pointer just past the parsed value, or return // in *v and return a pointer just past the parsed value, or return
// NULL on error. These routines only look at bytes in the range // nullptr on error. These routines only look at bytes in the range
// [p..limit-1] // [p..limit-1]
extern const char* GetVarint32Ptr(const char* p,const char* limit, uint32_t* v); extern const char* GetVarint32Ptr(const char* p,const char* limit, uint32_t* v);
extern const char* GetVarint64Ptr(const char* p,const char* limit, uint64_t* v); extern const char* GetVarint64Ptr(const char* p,const char* limit, uint64_t* v);

@ -88,7 +88,7 @@ TEST(Coding, Varint32) {
uint32_t actual; uint32_t actual;
const char* start = p; const char* start = p;
p = GetVarint32Ptr(p, limit, &actual); p = GetVarint32Ptr(p, limit, &actual);
ASSERT_TRUE(p != NULL); ASSERT_TRUE(p != nullptr);
ASSERT_EQ(expected, actual); ASSERT_EQ(expected, actual);
ASSERT_EQ(VarintLength(actual), p - start); ASSERT_EQ(VarintLength(actual), p - start);
} }
@ -123,7 +123,7 @@ TEST(Coding, Varint64) {
uint64_t actual; uint64_t actual;
const char* start = p; const char* start = p;
p = GetVarint64Ptr(p, limit, &actual); p = GetVarint64Ptr(p, limit, &actual);
ASSERT_TRUE(p != NULL); ASSERT_TRUE(p != nullptr);
ASSERT_EQ(values[i], actual); ASSERT_EQ(values[i], actual);
ASSERT_EQ(VarintLength(actual), p - start); ASSERT_EQ(VarintLength(actual), p - start);
} }
@ -135,7 +135,7 @@ TEST(Coding, Varint32Overflow) {
uint32_t result; uint32_t result;
std::string input("\x81\x82\x83\x84\x85\x11"); std::string input("\x81\x82\x83\x84\x85\x11");
ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result) ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
== NULL); == nullptr);
} }
TEST(Coding, Varint32Truncation) { TEST(Coding, Varint32Truncation) {
@ -144,9 +144,10 @@ TEST(Coding, Varint32Truncation) {
PutVarint32(&s, large_value); PutVarint32(&s, large_value);
uint32_t result; uint32_t result;
for (unsigned int len = 0; len < s.size() - 1; len++) { for (unsigned int len = 0; len < s.size() - 1; len++) {
ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == NULL); ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
} }
ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != NULL); ASSERT_TRUE(
GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
ASSERT_EQ(large_value, result); ASSERT_EQ(large_value, result);
} }
@ -154,7 +155,7 @@ TEST(Coding, Varint64Overflow) {
uint64_t result; uint64_t result;
std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11"); std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result) ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
== NULL); == nullptr);
} }
TEST(Coding, Varint64Truncation) { TEST(Coding, Varint64Truncation) {
@ -163,9 +164,10 @@ TEST(Coding, Varint64Truncation) {
PutVarint64(&s, large_value); PutVarint64(&s, large_value);
uint64_t result; uint64_t result;
for (unsigned int len = 0; len < s.size() - 1; len++) { for (unsigned int len = 0; len < s.size() - 1; len++) {
ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == NULL); ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
} }
ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != NULL); ASSERT_TRUE(
GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
ASSERT_EQ(large_value, result); ASSERT_EQ(large_value, result);
} }

@ -328,14 +328,14 @@ static bool isSSE42() {
} }
typedef void (*Function)(uint64_t*, uint8_t const**); typedef void (*Function)(uint64_t*, uint8_t const**);
static Function func = NULL; static Function func = nullptr;
static inline Function Choose_CRC32() { static inline Function Choose_CRC32() {
return isSSE42() ? Fast_CRC32 : Slow_CRC32; return isSSE42() ? Fast_CRC32 : Slow_CRC32;
} }
static inline void CRC32(uint64_t* l, uint8_t const **p) { static inline void CRC32(uint64_t* l, uint8_t const **p) {
if (func != NULL) { if (func != nullptr) {
return func(l, p); return func(l, p);
} }
func = Choose_CRC32(); func = Choose_CRC32();

@ -49,7 +49,7 @@ static Status IOError(const std::string& context, int err_number) {
// assume that there is one global logger for now. It is not thread-safe, // assume that there is one global logger for now. It is not thread-safe,
// but need not be because the logger is initialized at db-open time. // but need not be because the logger is initialized at db-open time.
static Logger* mylog = NULL; static Logger* mylog = nullptr;
// Used for reading a file from HDFS. It implements both sequential-read // Used for reading a file from HDFS. It implements both sequential-read
// access methods as well as random read access methods. // access methods as well as random read access methods.
@ -61,7 +61,7 @@ class HdfsReadableFile: virtual public SequentialFile, virtual public RandomAcce
public: public:
HdfsReadableFile(hdfsFS fileSys, const std::string& fname) HdfsReadableFile(hdfsFS fileSys, const std::string& fname)
: fileSys_(fileSys), filename_(fname), hfile_(NULL) { : fileSys_(fileSys), filename_(fname), hfile_(nullptr) {
Log(mylog, "[hdfs] HdfsReadableFile opening file %s\n", Log(mylog, "[hdfs] HdfsReadableFile opening file %s\n",
filename_.c_str()); filename_.c_str());
hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_RDONLY, 0, 0, 0); hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_RDONLY, 0, 0, 0);
@ -75,11 +75,11 @@ class HdfsReadableFile: virtual public SequentialFile, virtual public RandomAcce
hdfsCloseFile(fileSys_, hfile_); hdfsCloseFile(fileSys_, hfile_);
Log(mylog, "[hdfs] HdfsReadableFile closed file %s\n", Log(mylog, "[hdfs] HdfsReadableFile closed file %s\n",
filename_.c_str()); filename_.c_str());
hfile_ = NULL; hfile_ = nullptr;
} }
bool isValid() { bool isValid() {
return hfile_ != NULL; return hfile_ != nullptr;
} }
// sequential access, read data at current offset in file // sequential access, read data at current offset in file
@ -149,7 +149,7 @@ class HdfsReadableFile: virtual public SequentialFile, virtual public RandomAcce
Log(mylog, "[hdfs] HdfsReadableFile fileSize %s\n", filename_.c_str()); Log(mylog, "[hdfs] HdfsReadableFile fileSize %s\n", filename_.c_str());
hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, filename_.c_str()); hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, filename_.c_str());
tOffset size = 0L; tOffset size = 0L;
if (pFileInfo != NULL) { if (pFileInfo != nullptr) {
size = pFileInfo->mSize; size = pFileInfo->mSize;
hdfsFreeFileInfo(pFileInfo, 1); hdfsFreeFileInfo(pFileInfo, 1);
} else { } else {
@ -169,25 +169,25 @@ class HdfsWritableFile: public WritableFile {
public: public:
HdfsWritableFile(hdfsFS fileSys, const std::string& fname) HdfsWritableFile(hdfsFS fileSys, const std::string& fname)
: fileSys_(fileSys), filename_(fname) , hfile_(NULL) { : fileSys_(fileSys), filename_(fname) , hfile_(nullptr) {
Log(mylog, "[hdfs] HdfsWritableFile opening %s\n", filename_.c_str()); Log(mylog, "[hdfs] HdfsWritableFile opening %s\n", filename_.c_str());
hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_WRONLY, 0, 0, 0); hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_WRONLY, 0, 0, 0);
Log(mylog, "[hdfs] HdfsWritableFile opened %s\n", filename_.c_str()); Log(mylog, "[hdfs] HdfsWritableFile opened %s\n", filename_.c_str());
assert(hfile_ != NULL); assert(hfile_ != nullptr);
} }
virtual ~HdfsWritableFile() { virtual ~HdfsWritableFile() {
if (hfile_ != NULL) { if (hfile_ != nullptr) {
Log(mylog, "[hdfs] HdfsWritableFile closing %s\n", filename_.c_str()); Log(mylog, "[hdfs] HdfsWritableFile closing %s\n", filename_.c_str());
hdfsCloseFile(fileSys_, hfile_); hdfsCloseFile(fileSys_, hfile_);
Log(mylog, "[hdfs] HdfsWritableFile closed %s\n", filename_.c_str()); Log(mylog, "[hdfs] HdfsWritableFile closed %s\n", filename_.c_str());
hfile_ = NULL; hfile_ = nullptr;
} }
} }
// If the file was successfully created, then this returns true. // If the file was successfully created, then this returns true.
// Otherwise returns false. // Otherwise returns false.
bool isValid() { bool isValid() {
return hfile_ != NULL; return hfile_ != nullptr;
} }
// The name of the file, mostly needed for debug logging. // The name of the file, mostly needed for debug logging.
@ -238,7 +238,7 @@ class HdfsWritableFile: public WritableFile {
return IOError(filename_, errno); return IOError(filename_, errno);
} }
Log(mylog, "[hdfs] HdfsWritableFile closed %s\n", filename_.c_str()); Log(mylog, "[hdfs] HdfsWritableFile closed %s\n", filename_.c_str());
hfile_ = NULL; hfile_ = nullptr;
return Status::OK(); return Status::OK();
} }
}; };
@ -260,8 +260,8 @@ class HdfsLogger : public Logger {
Log(mylog, "[hdfs] HdfsLogger closed %s\n", Log(mylog, "[hdfs] HdfsLogger closed %s\n",
file_->getName().c_str()); file_->getName().c_str());
delete file_; delete file_;
if (mylog != NULL && mylog == this) { if (mylog != nullptr && mylog == this) {
mylog = NULL; mylog = nullptr;
} }
} }
@ -285,7 +285,7 @@ class HdfsLogger : public Logger {
char* limit = base + bufsize; char* limit = base + bufsize;
struct timeval now_tv; struct timeval now_tv;
gettimeofday(&now_tv, NULL); gettimeofday(&now_tv, nullptr);
const time_t seconds = now_tv.tv_sec; const time_t seconds = now_tv.tv_sec;
struct tm t; struct tm t;
localtime_r(&seconds, &t); localtime_r(&seconds, &t);
@ -341,8 +341,8 @@ class HdfsLogger : public Logger {
Status HdfsEnv::NewSequentialFile(const std::string& fname, Status HdfsEnv::NewSequentialFile(const std::string& fname,
SequentialFile** result) { SequentialFile** result) {
HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname); HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname);
if (f == NULL) { if (f == nullptr) {
*result = NULL; *result = nullptr;
return IOError(fname, errno); return IOError(fname, errno);
} }
*result = dynamic_cast<SequentialFile*>(f); *result = dynamic_cast<SequentialFile*>(f);
@ -353,8 +353,8 @@ Status HdfsEnv::NewSequentialFile(const std::string& fname,
Status HdfsEnv::NewRandomAccessFile(const std::string& fname, Status HdfsEnv::NewRandomAccessFile(const std::string& fname,
RandomAccessFile** result) { RandomAccessFile** result) {
HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname); HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname);
if (f == NULL) { if (f == nullptr) {
*result = NULL; *result = nullptr;
return IOError(fname, errno); return IOError(fname, errno);
} }
*result = dynamic_cast<RandomAccessFile*>(f); *result = dynamic_cast<RandomAccessFile*>(f);
@ -366,8 +366,8 @@ Status HdfsEnv::NewWritableFile(const std::string& fname,
WritableFile** result) { WritableFile** result) {
Status s; Status s;
HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname); HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname);
if (f == NULL || !f->isValid()) { if (f == nullptr || !f->isValid()) {
*result = NULL; *result = nullptr;
return IOError(fname, errno); return IOError(fname, errno);
} }
*result = dynamic_cast<WritableFile*>(f); *result = dynamic_cast<WritableFile*>(f);
@ -394,11 +394,11 @@ Status HdfsEnv::GetChildren(const std::string& path,
for(int i = 0; i < numEntries; i++) { for(int i = 0; i < numEntries; i++) {
char* pathname = pHdfsFileInfo[i].mName; char* pathname = pHdfsFileInfo[i].mName;
char* filename = rindex(pathname, '/'); char* filename = rindex(pathname, '/');
if (filename != NULL) { if (filename != nullptr) {
result->push_back(filename+1); result->push_back(filename+1);
} }
} }
if (pHdfsFileInfo != NULL) { if (pHdfsFileInfo != nullptr) {
hdfsFreeFileInfo(pHdfsFileInfo, numEntries); hdfsFreeFileInfo(pHdfsFileInfo, numEntries);
} }
} else { } else {
@ -448,7 +448,7 @@ Status HdfsEnv::DeleteDir(const std::string& name) {
Status HdfsEnv::GetFileSize(const std::string& fname, uint64_t* size) { Status HdfsEnv::GetFileSize(const std::string& fname, uint64_t* size) {
*size = 0L; *size = 0L;
hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, fname.c_str()); hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, fname.c_str());
if (pFileInfo != NULL) { if (pFileInfo != nullptr) {
*size = pFileInfo->mSize; *size = pFileInfo->mSize;
hdfsFreeFileInfo(pFileInfo, 1); hdfsFreeFileInfo(pFileInfo, 1);
return Status::OK(); return Status::OK();
@ -459,7 +459,7 @@ Status HdfsEnv::GetFileSize(const std::string& fname, uint64_t* size) {
Status HdfsEnv::GetFileModificationTime(const std::string& fname, Status HdfsEnv::GetFileModificationTime(const std::string& fname,
uint64_t* time) { uint64_t* time) {
hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, fname.c_str()); hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, fname.c_str());
if (pFileInfo != NULL) { if (pFileInfo != nullptr) {
*time = static_cast<uint64_t>(pFileInfo->mLastMod); *time = static_cast<uint64_t>(pFileInfo->mLastMod);
hdfsFreeFileInfo(pFileInfo, 1); hdfsFreeFileInfo(pFileInfo, 1);
return Status::OK(); return Status::OK();
@ -482,7 +482,7 @@ Status HdfsEnv::RenameFile(const std::string& src, const std::string& target) {
Status HdfsEnv::LockFile(const std::string& fname, FileLock** lock) { Status HdfsEnv::LockFile(const std::string& fname, FileLock** lock) {
// there isn's a very good way to atomically check and create // there isn's a very good way to atomically check and create
// a file via libhdfs // a file via libhdfs
*lock = NULL; *lock = nullptr;
return Status::OK(); return Status::OK();
} }
@ -493,13 +493,13 @@ Status HdfsEnv::UnlockFile(FileLock* lock) {
Status HdfsEnv::NewLogger(const std::string& fname, Status HdfsEnv::NewLogger(const std::string& fname,
shared_ptr<Logger>* result) { shared_ptr<Logger>* result) {
HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname); HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname);
if (f == NULL || !f->isValid()) { if (f == nullptr || !f->isValid()) {
*result = NULL; *result = nullptr;
return IOError(fname, errno); return IOError(fname, errno);
} }
HdfsLogger* h = new HdfsLogger(f, &HdfsEnv::gettid); HdfsLogger* h = new HdfsLogger(f, &HdfsEnv::gettid);
*result = h; *result = h;
if (mylog == NULL) { if (mylog == nullptr) {
// mylog = h; // uncomment this for detailed logging // mylog = h; // uncomment this for detailed logging
} }
return Status::OK(); return Status::OK();

@ -202,7 +202,7 @@ class PosixMmapFile : public WritableFile {
bool UnmapCurrentRegion() { bool UnmapCurrentRegion() {
bool result = true; bool result = true;
if (base_ != NULL) { if (base_ != nullptr) {
if (last_sync_ < limit_) { if (last_sync_ < limit_) {
// Defer syncing this data until next Sync() call, if any // Defer syncing this data until next Sync() call, if any
pending_sync_ = true; pending_sync_ = true;
@ -211,10 +211,10 @@ class PosixMmapFile : public WritableFile {
result = false; result = false;
} }
file_offset_ += limit_ - base_; file_offset_ += limit_ - base_;
base_ = NULL; base_ = nullptr;
limit_ = NULL; limit_ = nullptr;
last_sync_ = NULL; last_sync_ = nullptr;
dst_ = NULL; dst_ = nullptr;
// Increase the amount we map the next time, but capped at 1MB // Increase the amount we map the next time, but capped at 1MB
if (map_size_ < (1<<20)) { if (map_size_ < (1<<20)) {
@ -225,11 +225,11 @@ class PosixMmapFile : public WritableFile {
} }
bool MapNewRegion() { bool MapNewRegion() {
assert(base_ == NULL); assert(base_ == nullptr);
if (ftruncate(fd_, file_offset_ + map_size_) < 0) { if (ftruncate(fd_, file_offset_ + map_size_) < 0) {
return false; return false;
} }
void* ptr = mmap(NULL, map_size_, PROT_READ | PROT_WRITE, MAP_SHARED, void* ptr = mmap(nullptr, map_size_, PROT_READ | PROT_WRITE, MAP_SHARED,
fd_, file_offset_); fd_, file_offset_);
if (ptr == MAP_FAILED) { if (ptr == MAP_FAILED) {
return false; return false;
@ -247,10 +247,10 @@ class PosixMmapFile : public WritableFile {
fd_(fd), fd_(fd),
page_size_(page_size), page_size_(page_size),
map_size_(Roundup(65536, page_size)), map_size_(Roundup(65536, page_size)),
base_(NULL), base_(nullptr),
limit_(NULL), limit_(nullptr),
dst_(NULL), dst_(nullptr),
last_sync_(NULL), last_sync_(nullptr),
file_offset_(0), file_offset_(0),
pending_sync_(false) { pending_sync_(false) {
assert((page_size & (page_size - 1)) == 0); assert((page_size & (page_size - 1)) == 0);
@ -306,8 +306,8 @@ class PosixMmapFile : public WritableFile {
} }
fd_ = -1; fd_ = -1;
base_ = NULL; base_ = nullptr;
limit_ = NULL; limit_ = nullptr;
return s; return s;
} }
@ -569,8 +569,8 @@ class PosixEnv : public Env {
unique_ptr<SequentialFile>* result) { unique_ptr<SequentialFile>* result) {
result->reset(); result->reset();
FILE* f = fopen(fname.c_str(), "r"); FILE* f = fopen(fname.c_str(), "r");
if (f == NULL) { if (f == nullptr) {
*result = NULL; *result = nullptr;
return IOError(fname, errno); return IOError(fname, errno);
} else { } else {
result->reset(new PosixSequentialFile(fname, f)); result->reset(new PosixSequentialFile(fname, f));
@ -592,7 +592,7 @@ class PosixEnv : public Env {
uint64_t size; uint64_t size;
s = GetFileSize(fname, &size); s = GetFileSize(fname, &size);
if (s.ok()) { if (s.ok()) {
void* base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0); void* base = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
if (base != MAP_FAILED) { if (base != MAP_FAILED) {
result->reset(new PosixMmapReadableFile(fname, base, size)); result->reset(new PosixMmapReadableFile(fname, base, size));
} else { } else {
@ -631,11 +631,11 @@ class PosixEnv : public Env {
std::vector<std::string>* result) { std::vector<std::string>* result) {
result->clear(); result->clear();
DIR* d = opendir(dir.c_str()); DIR* d = opendir(dir.c_str());
if (d == NULL) { if (d == nullptr) {
return IOError(dir, errno); return IOError(dir, errno);
} }
struct dirent* entry; struct dirent* entry;
while ((entry = readdir(d)) != NULL) { while ((entry = readdir(d)) != nullptr) {
result->push_back(entry->d_name); result->push_back(entry->d_name);
} }
closedir(d); closedir(d);
@ -710,7 +710,7 @@ class PosixEnv : public Env {
} }
virtual Status LockFile(const std::string& fname, FileLock** lock) { virtual Status LockFile(const std::string& fname, FileLock** lock) {
*lock = NULL; *lock = nullptr;
Status result; Status result;
int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644); int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644);
if (fd < 0) { if (fd < 0) {
@ -766,7 +766,7 @@ class PosixEnv : public Env {
virtual Status NewLogger(const std::string& fname, virtual Status NewLogger(const std::string& fname,
shared_ptr<Logger>* result) { shared_ptr<Logger>* result) {
FILE* f = fopen(fname.c_str(), "w"); FILE* f = fopen(fname.c_str(), "w");
if (f == NULL) { if (f == nullptr) {
result->reset(); result->reset();
return IOError(fname, errno); return IOError(fname, errno);
} else { } else {
@ -777,7 +777,7 @@ class PosixEnv : public Env {
virtual uint64_t NowMicros() { virtual uint64_t NowMicros() {
struct timeval tv; struct timeval tv;
gettimeofday(&tv, NULL); gettimeofday(&tv, nullptr);
return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec; return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
} }
@ -797,7 +797,7 @@ class PosixEnv : public Env {
} }
virtual Status GetCurrentTime(int64_t* unix_time) { virtual Status GetCurrentTime(int64_t* unix_time) {
time_t ret = time(NULL); time_t ret = time(nullptr);
if (ret == (time_t) -1) { if (ret == (time_t) -1) {
return IOError("GetCurrentTime", errno); return IOError("GetCurrentTime", errno);
} }
@ -814,7 +814,7 @@ class PosixEnv : public Env {
char the_path[256]; char the_path[256];
char* ret = getcwd(the_path, 256); char* ret = getcwd(the_path, 256);
if (ret == NULL) { if (ret == nullptr) {
return Status::IOError(strerror(errno)); return Status::IOError(strerror(errno));
} }
@ -872,7 +872,7 @@ class PosixEnv : public Env {
void BGThread(); void BGThread();
static void* BGThreadWrapper(void* arg) { static void* BGThreadWrapper(void* arg) {
reinterpret_cast<PosixEnv*>(arg)->BGThread(); reinterpret_cast<PosixEnv*>(arg)->BGThread();
return NULL; return nullptr;
} }
size_t page_size_; size_t page_size_;
@ -893,8 +893,8 @@ PosixEnv::PosixEnv() : page_size_(getpagesize()),
started_bgthread_(0), started_bgthread_(0),
num_threads_(1), num_threads_(1),
queue_size_(0) { queue_size_(0) {
PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL)); PthreadCall("mutex_init", pthread_mutex_init(&mu_, nullptr));
PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL)); PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, nullptr));
bgthread_.resize(num_threads_); bgthread_.resize(num_threads_);
} }
@ -905,7 +905,10 @@ void PosixEnv::Schedule(void (*function)(void*), void* arg) {
for (; started_bgthread_ < num_threads_; started_bgthread_++) { for (; started_bgthread_ < num_threads_; started_bgthread_++) {
PthreadCall( PthreadCall(
"create thread", "create thread",
pthread_create(&bgthread_[started_bgthread_], NULL, &PosixEnv::BGThreadWrapper, this)); pthread_create(&bgthread_[started_bgthread_],
nullptr,
&PosixEnv::BGThreadWrapper,
this));
fprintf(stdout, "Created bg thread 0x%lx\n", bgthread_[started_bgthread_]); fprintf(stdout, "Created bg thread 0x%lx\n", bgthread_[started_bgthread_]);
} }
@ -949,7 +952,7 @@ static void* StartThreadWrapper(void* arg) {
StartThreadState* state = reinterpret_cast<StartThreadState*>(arg); StartThreadState* state = reinterpret_cast<StartThreadState*>(arg);
state->user_function(state->arg); state->user_function(state->arg);
delete state; delete state;
return NULL; return nullptr;
} }
void PosixEnv::StartThread(void (*function)(void* arg), void* arg) { void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
@ -958,7 +961,7 @@ void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
state->user_function = function; state->user_function = function;
state->arg = arg; state->arg = arg;
PthreadCall("start thread", PthreadCall("start thread",
pthread_create(&t, NULL, &StartThreadWrapper, state)); pthread_create(&t, nullptr, &StartThreadWrapper, state));
} }
} // namespace } // namespace

@ -28,14 +28,14 @@ static void SetBool(void* ptr) {
} }
TEST(EnvPosixTest, RunImmediately) { TEST(EnvPosixTest, RunImmediately) {
port::AtomicPointer called (NULL); port::AtomicPointer called (nullptr);
env_->Schedule(&SetBool, &called); env_->Schedule(&SetBool, &called);
Env::Default()->SleepForMicroseconds(kDelayMicros); Env::Default()->SleepForMicroseconds(kDelayMicros);
ASSERT_TRUE(called.NoBarrier_Load() != NULL); ASSERT_TRUE(called.NoBarrier_Load() != nullptr);
} }
TEST(EnvPosixTest, RunMany) { TEST(EnvPosixTest, RunMany) {
port::AtomicPointer last_id (NULL); port::AtomicPointer last_id (nullptr);
struct CB { struct CB {
port::AtomicPointer* last_id_ptr; // Pointer to shared slot port::AtomicPointer* last_id_ptr; // Pointer to shared slot

@ -19,14 +19,14 @@ Options::Options()
error_if_exists(false), error_if_exists(false),
paranoid_checks(false), paranoid_checks(false),
env(Env::Default()), env(Env::Default()),
info_log(NULL), info_log(nullptr),
write_buffer_size(4<<20), write_buffer_size(4<<20),
max_write_buffer_number(2), max_write_buffer_number(2),
max_open_files(1000), max_open_files(1000),
block_size(4096), block_size(4096),
block_restart_interval(16), block_restart_interval(16),
compression(kSnappyCompression), compression(kSnappyCompression),
filter_policy(NULL), filter_policy(nullptr),
num_levels(7), num_levels(7),
level0_file_num_compaction_trigger(4), level0_file_num_compaction_trigger(4),
level0_slowdown_writes_trigger(8), level0_slowdown_writes_trigger(8),
@ -39,7 +39,7 @@ Options::Options()
expanded_compaction_factor(25), expanded_compaction_factor(25),
source_compaction_factor(1), source_compaction_factor(1),
max_grandparent_overlap_factor(10), max_grandparent_overlap_factor(10),
statistics(NULL), statistics(nullptr),
disableDataSync(false), disableDataSync(false),
use_fsync(false), use_fsync(false),
db_stats_log_interval(1800), db_stats_log_interval(1800),
@ -54,8 +54,8 @@ Options::Options()
max_manifest_file_size(std::numeric_limits<uint64_t>::max()), max_manifest_file_size(std::numeric_limits<uint64_t>::max()),
no_block_cache(false), no_block_cache(false),
table_cache_numshardbits(4), table_cache_numshardbits(4),
compaction_filter_args(NULL), compaction_filter_args(nullptr),
CompactionFilter(NULL), CompactionFilter(nullptr),
disable_auto_compactions(false), disable_auto_compactions(false),
WAL_ttl_seconds(0), WAL_ttl_seconds(0),
manifest_preallocation_size(4 * 1024 * 1024) { manifest_preallocation_size(4 * 1024 * 1024) {
@ -90,7 +90,7 @@ Options::Dump(Logger* log) const
Log(log," Options.compression: %d", compression); Log(log," Options.compression: %d", compression);
} }
Log(log," Options.filter_policy: %s", Log(log," Options.filter_policy: %s",
filter_policy == NULL ? "NULL" : filter_policy->Name()); filter_policy == nullptr ? "nullptr" : filter_policy->Name());
Log(log," Options.num_levels: %d", num_levels); Log(log," Options.num_levels: %d", num_levels);
Log(log," Options.disableDataSync: %d", disableDataSync); Log(log," Options.disableDataSync: %d", disableDataSync);
Log(log," Options.use_fsync: %d", use_fsync); Log(log," Options.use_fsync: %d", use_fsync);

@ -56,7 +56,7 @@ class PosixLogger : public Logger {
char* limit = base + bufsize; char* limit = base + bufsize;
struct timeval now_tv; struct timeval now_tv;
gettimeofday(&now_tv, NULL); gettimeofday(&now_tv, nullptr);
const time_t seconds = now_tv.tv_sec; const time_t seconds = now_tv.tv_sec;
struct tm t; struct tm t;
localtime_r(&seconds, &t); localtime_r(&seconds, &t);

@ -34,7 +34,7 @@ Status::Status(Code code, const Slice& msg, const Slice& msg2) {
} }
std::string Status::ToString() const { std::string Status::ToString() const {
if (state_ == NULL) { if (state_ == nullptr) {
return "OK"; return "OK";
} else { } else {
char tmp[30]; char tmp[30];

@ -22,7 +22,7 @@ std::vector<Test>* tests;
} }
bool RegisterTest(const char* base, const char* name, void (*func)()) { bool RegisterTest(const char* base, const char* name, void (*func)()) {
if (tests == NULL) { if (tests == nullptr) {
tests = new std::vector<Test>; tests = new std::vector<Test>;
} }
Test t; Test t;
@ -37,14 +37,14 @@ int RunAllTests() {
const char* matcher = getenv("LEVELDB_TESTS"); const char* matcher = getenv("LEVELDB_TESTS");
int num = 0; int num = 0;
if (tests != NULL) { if (tests != nullptr) {
for (unsigned int i = 0; i < tests->size(); i++) { for (unsigned int i = 0; i < tests->size(); i++) {
const Test& t = (*tests)[i]; const Test& t = (*tests)[i];
if (matcher != NULL) { if (matcher != nullptr) {
std::string name = t.base; std::string name = t.base;
name.push_back('.'); name.push_back('.');
name.append(t.name); name.append(t.name);
if (strstr(name.c_str(), matcher) == NULL) { if (strstr(name.c_str(), matcher) == nullptr) {
continue; continue;
} }
} }
@ -66,7 +66,7 @@ std::string TmpDir() {
int RandomSeed() { int RandomSeed() {
const char* env = getenv("TEST_RANDOM_SEED"); const char* env = getenv("TEST_RANDOM_SEED");
int result = (env != NULL ? atoi(env) : 301); int result = (env != nullptr ? atoi(env) : 301);
if (result <= 0) { if (result <= 0) {
result = 301; result = 301;
} }

Loading…
Cancel
Save