Fix a number of object lifetime/ownership issues

Summary:
Replace manual memory management with std::unique_ptr in a
number of places. This is not exhaustive, but it fixes several leaks of
file handles and clarifies the ownership semantics of file handles
used by the log classes.

Test Plan: db_stress, make check

Reviewers: dhruba

Reviewed By: dhruba

CC: zshao, leveldb, heyongqiang

Differential Revision: https://reviews.facebook.net/D8043
main
Chip Turner 12 years ago
parent 88b79b24f3
commit 2fdf91a4f8
  1. 6
      db/builder.cc
  2. 15
      db/c.cc
  3. 3
      db/corruption_test.cc
  4. 8
      db/db_bench.cc
  5. 141
      db/db_impl.cc
  6. 8
      db/db_impl.h
  7. 41
      db/db_test.cc
  8. 6
      db/log_reader.cc
  9. 10
      db/log_reader.h
  10. 65
      db/log_test.cc
  11. 4
      db/log_writer.cc
  12. 10
      db/log_writer.h
  13. 25
      db/repair.cc
  14. 30
      db/table_cache.cc
  15. 2
      db/table_cache.h
  16. 33
      db/transaction_log_iterator_impl.cc
  17. 13
      db/transaction_log_iterator_impl.h
  18. 47
      db/version_set.cc
  19. 4
      db/version_set.h
  20. 13
      hdfs/env_hdfs.h
  21. 12
      helpers/memenv/memenv.cc
  22. 32
      helpers/memenv/memenv_test.cc
  23. 7
      include/leveldb/cache.h
  24. 30
      include/leveldb/env.h
  25. 9
      include/leveldb/options.h
  26. 7
      include/leveldb/table.h
  27. 25
      table/table.cc
  28. 18
      table/table_test.cc
  29. 3
      tools/db_stress.cc
  30. 6
      tools/sst_dump.cc
  31. 4
      util/auto_split_logger.h
  32. 8
      util/cache.cc
  33. 3
      util/cache_test.cc
  34. 20
      util/env.cc
  35. 5
      util/env_hdfs.cc
  36. 28
      util/env_posix.cc
  37. 4
      util/ldb_cmd.cc
  38. 8
      util/options.cc
  39. 4
      util/testutil.h

@ -26,13 +26,13 @@ Status BuildTable(const std::string& dbname,
std::string fname = TableFileName(dbname, meta->number);
if (iter->Valid()) {
WritableFile* file;
unique_ptr<WritableFile> file;
s = env->NewWritableFile(fname, &file);
if (!s.ok()) {
return s;
}
TableBuilder* builder = new TableBuilder(options, file, 0);
TableBuilder* builder = new TableBuilder(options, file.get(), 0);
meta->smallest.DecodeFrom(iter->key());
for (; iter->Valid(); iter->Next()) {
Slice key = iter->key();
@ -63,8 +63,6 @@ Status BuildTable(const std::string& dbname,
if (s.ok()) {
s = file->Close();
}
delete file;
file = NULL;
if (s.ok()) {
// Verify that the table is usable

@ -39,6 +39,8 @@ using leveldb::WritableFile;
using leveldb::WriteBatch;
using leveldb::WriteOptions;
using std::shared_ptr;
extern "C" {
struct leveldb_t { DB* rep; };
@ -48,12 +50,12 @@ struct leveldb_snapshot_t { const Snapshot* rep; };
struct leveldb_readoptions_t { ReadOptions rep; };
struct leveldb_writeoptions_t { WriteOptions rep; };
struct leveldb_options_t { Options rep; };
struct leveldb_cache_t { Cache* rep; };
struct leveldb_seqfile_t { SequentialFile* rep; };
struct leveldb_randomfile_t { RandomAccessFile* rep; };
struct leveldb_writablefile_t { WritableFile* rep; };
struct leveldb_logger_t { Logger* rep; };
struct leveldb_filelock_t { FileLock* rep; };
struct leveldb_logger_t { shared_ptr<Logger> rep; };
struct leveldb_cache_t { shared_ptr<Cache> rep; };
struct leveldb_comparator_t : public Comparator {
void* state_;
@ -421,7 +423,9 @@ void leveldb_options_set_env(leveldb_options_t* opt, leveldb_env_t* env) {
}
void leveldb_options_set_info_log(leveldb_options_t* opt, leveldb_logger_t* l) {
opt->rep.info_log = (l ? l->rep : NULL);
if (l) {
opt->rep.info_log = l->rep;
}
}
void leveldb_options_set_write_buffer_size(leveldb_options_t* opt, size_t s) {
@ -433,7 +437,9 @@ void leveldb_options_set_max_open_files(leveldb_options_t* opt, int n) {
}
void leveldb_options_set_cache(leveldb_options_t* opt, leveldb_cache_t* c) {
opt->rep.block_cache = c->rep;
if (c) {
opt->rep.block_cache = c->rep;
}
}
void leveldb_options_set_block_size(leveldb_options_t* opt, size_t s) {
@ -647,7 +653,6 @@ leveldb_cache_t* leveldb_cache_create_lru(size_t capacity) {
}
void leveldb_cache_destroy(leveldb_cache_t* cache) {
delete cache->rep;
delete cache;
}

@ -28,7 +28,7 @@ class CorruptionTest {
public:
test::ErrorEnv env_;
std::string dbname_;
Cache* tiny_cache_;
shared_ptr<Cache> tiny_cache_;
Options options_;
DB* db_;
@ -47,7 +47,6 @@ class CorruptionTest {
~CorruptionTest() {
delete db_;
DestroyDB(dbname_, Options());
delete tiny_cache_;
}
Status TryReopen(Options* options = NULL) {

@ -469,7 +469,7 @@ struct ThreadState {
class Benchmark {
private:
Cache* cache_;
shared_ptr<Cache> cache_;
const FilterPolicy* filter_policy_;
DB* db_;
long num_;
@ -655,7 +655,6 @@ class Benchmark {
~Benchmark() {
delete db_;
delete cache_;
delete filter_policy_;
}
@ -1276,14 +1275,13 @@ class Benchmark {
void HeapProfile() {
char fname[100];
snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, ++heap_counter_);
WritableFile* file;
unique_ptr<WritableFile> file;
Status s = FLAGS_env->NewWritableFile(fname, &file);
if (!s.ok()) {
fprintf(stderr, "%s\n", s.ToString().c_str());
return;
}
bool ok = port::GetHeapProfile(WriteToFile, file);
delete file;
bool ok = port::GetHeapProfile(WriteToFile, file.get());
if (!ok) {
fprintf(stderr, "heap profiling not supported\n");
FLAGS_env->DeleteFile(fname);

@ -48,18 +48,17 @@ static Status NewLogger(const std::string& dbname,
const std::string& db_log_dir,
Env* env,
size_t max_log_file_size,
Logger** logger) {
shared_ptr<Logger>* logger) {
std::string db_absolute_path;
env->GetAbsolutePath(dbname, &db_absolute_path);
if (max_log_file_size > 0) { // need to auto split the log file?
AutoSplitLogger<Logger>* auto_split_logger =
auto logger_ptr =
new AutoSplitLogger<Logger>(env, dbname, db_log_dir, max_log_file_size);
Status s = auto_split_logger->GetStatus();
logger->reset(logger_ptr);
Status s = logger_ptr->GetStatus();
if (!s.ok()) {
delete auto_split_logger;
} else {
*logger = auto_split_logger;
logger->reset();
}
return s;
} else {
@ -103,8 +102,8 @@ struct DBImpl::CompactionState {
std::list<uint64_t> allocated_file_numbers;
// State kept for output being generated
WritableFile* outfile;
TableBuilder* builder;
unique_ptr<WritableFile> outfile;
unique_ptr<TableBuilder> builder;
uint64_t total_bytes;
@ -112,8 +111,6 @@ struct DBImpl::CompactionState {
explicit CompactionState(Compaction* c)
: compaction(c),
outfile(NULL),
builder(NULL),
total_bytes(0) {
}
};
@ -178,14 +175,11 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
dbname, &internal_comparator_, &internal_filter_policy_, options)),
internal_filter_policy_(options.filter_policy),
owns_info_log_(options_.info_log != options.info_log),
owns_cache_(options_.block_cache != options.block_cache),
db_lock_(NULL),
shutting_down_(NULL),
bg_cv_(&mutex_),
mem_(new MemTable(internal_comparator_, NumberLevels())),
logfile_(NULL),
logfile_number_(0),
log_(NULL),
tmp_batch_(new WriteBatch),
bg_compaction_scheduled_(0),
bg_logstats_scheduled_(false),
@ -206,13 +200,13 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
stats_ = new CompactionStats[options.num_levels];
// Reserve ten files or so for other uses and give the rest to TableCache.
const int table_cache_size = options_.max_open_files - 10;
table_cache_ = new TableCache(dbname_, &options_, table_cache_size);
table_cache_.reset(new TableCache(dbname_, &options_, table_cache_size));
versions_ = new VersionSet(dbname_, &options_, table_cache_,
&internal_comparator_);
versions_.reset(new VersionSet(dbname_, &options_, table_cache_.get(),
&internal_comparator_));
dumpLeveldbBuildVersion(options_.info_log);
options_.Dump(options_.info_log);
dumpLeveldbBuildVersion(options_.info_log.get());
options_.Dump(options_.info_log.get());
#ifdef USE_SCRIBE
logger_ = new ScribeLogger("localhost", 1456);
@ -246,21 +240,11 @@ DBImpl::~DBImpl() {
env_->UnlockFile(db_lock_);
}
delete versions_;
if (mem_ != NULL) mem_->Unref();
imm_.UnrefAll();
delete tmp_batch_;
delete log_;
delete logfile_;
delete table_cache_;
delete[] stats_;
if (owns_info_log_) {
delete options_.info_log;
}
if (owns_cache_) {
delete options_.block_cache;
}
if (options_.compression_per_level != NULL) {
delete[] options_.compression_per_level;
}
@ -288,6 +272,10 @@ void DBImpl::TEST_Destroy_DBImpl() {
if (db_lock_ != NULL) {
env_->UnlockFile(db_lock_);
}
log_.reset();
versions_.reset();
table_cache_.reset();
}
uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
@ -302,21 +290,17 @@ Status DBImpl::NewDB() {
new_db.SetLastSequence(0);
const std::string manifest = DescriptorFileName(dbname_, 1);
WritableFile* file;
unique_ptr<WritableFile> file;
Status s = env_->NewWritableFile(manifest, &file);
if (!s.ok()) {
return s;
}
{
log::Writer log(file);
log::Writer log(std::move(file));
std::string record;
new_db.EncodeTo(&record);
s = log.AddRecord(record);
if (s.ok()) {
s = file->Close();
}
}
delete file;
if (s.ok()) {
// Make "CURRENT" file that points to the new manifest file.
s = SetCurrentFile(env_, dbname_, 1);
@ -628,7 +612,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
// Open the log file
std::string fname = LogFileName(dbname_, log_number);
SequentialFile* file;
unique_ptr<SequentialFile> file;
Status status = env_->NewSequentialFile(fname, &file);
if (!status.ok()) {
MaybeIgnoreError(&status);
@ -638,14 +622,14 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
// Create the log reader.
LogReporter reporter;
reporter.env = env_;
reporter.info_log = options_.info_log;
reporter.info_log = options_.info_log.get();
reporter.fname = fname.c_str();
reporter.status = (options_.paranoid_checks ? &status : NULL);
// We intentially make log::Reader do checksumming even if
// paranoid_checks==false so that corruptions cause entire commits
// to be skipped instead of propagating bad information (like overly
// large sequence numbers).
log::Reader reader(file, &reporter, true/*checksum*/,
log::Reader reader(std::move(file), &reporter, true/*checksum*/,
0/*initial_offset*/);
Log(options_.info_log, "Recovering log #%llu",
(unsigned long long) log_number);
@ -703,7 +687,6 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
}
if (mem != NULL && !external_table) mem->Unref();
delete file;
return status;
}
@ -720,7 +703,7 @@ Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
Status s;
{
mutex_.Unlock();
s = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
s = BuildTable(dbname_, env_, options_, table_cache_.get(), iter, &meta);
mutex_.Lock();
}
@ -766,7 +749,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
Status s;
{
mutex_.Unlock();
s = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
s = BuildTable(dbname_, env_, options_, table_cache_.get(), iter, &meta);
mutex_.Lock();
}
base->Unref();
@ -845,8 +828,9 @@ Status DBImpl::CompactMemTable(bool* madeProgress) {
}
// Replace immutable memtable with the generated Table
s = imm_.InstallMemtableFlushResults(m, versions_, s, &mutex_,
options_.info_log, file_number, pending_outputs_);
s = imm_.InstallMemtableFlushResults(
m, versions_.get(), s, &mutex_, options_.info_log.get(),
file_number, pending_outputs_);
if (s.ok()) {
if (madeProgress) {
@ -1009,7 +993,7 @@ Status DBImpl::ReadFirstLine(const std::string& fname,
}
};
SequentialFile* file;
unique_ptr<SequentialFile> file;
Status status = env_->NewSequentialFile(fname, &file);
if (!status.ok()) {
@ -1019,10 +1003,10 @@ Status DBImpl::ReadFirstLine(const std::string& fname,
LogReporter reporter;
reporter.env = env_;
reporter.info_log = options_.info_log;
reporter.info_log = options_.info_log.get();
reporter.fname = fname.c_str();
reporter.status = (options_.paranoid_checks ? &status : NULL);
log::Reader reader(file, &reporter, true/*checksum*/,
log::Reader reader(std::move(file), &reporter, true/*checksum*/,
0/*initial_offset*/);
std::string scratch;
Slice record;
@ -1243,7 +1227,7 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
}
}
Compaction* c = NULL;
unique_ptr<Compaction> c;
bool is_manual = (manual_compaction_ != NULL) &&
(manual_compaction_->in_progress == false);
InternalKey manual_end;
@ -1251,10 +1235,11 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
ManualCompaction* m = manual_compaction_;
assert(!m->in_progress);
m->in_progress = true; // another thread cannot pick up the same work
c = versions_->CompactRange(m->level, m->begin, m->end);
m->done = (c == NULL);
if (c != NULL) {
c.reset(versions_->CompactRange(m->level, m->begin, m->end));
if (c) {
manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
} else {
m->done = true;
}
Log(options_.info_log,
"Manual compaction at level-%d from %s .. %s; will stop at %s\n",
@ -1263,11 +1248,11 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
(m->end ? m->end->DebugString().c_str() : "(end)"),
(m->done ? "(end)" : manual_end.DebugString().c_str()));
} else if (!options_.disable_auto_compactions) {
c = versions_->PickCompaction();
c.reset(versions_->PickCompaction());
}
Status status;
if (c == NULL) {
if (!c) {
// Nothing to do
Log(options_.info_log, "Compaction nothing to do");
} else if (!is_manual && c->IsTrivialMove()) {
@ -1285,18 +1270,18 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
static_cast<unsigned long long>(f->file_size),
status.ToString().c_str(),
versions_->LevelSummary(&tmp));
versions_->ReleaseCompactionFiles(c, status);
versions_->ReleaseCompactionFiles(c.get(), status);
*madeProgress = true;
} else {
CompactionState* compact = new CompactionState(c);
CompactionState* compact = new CompactionState(c.get());
status = DoCompactionWork(compact);
CleanupCompaction(compact);
versions_->ReleaseCompactionFiles(c, status);
versions_->ReleaseCompactionFiles(c.get(), status);
c->ReleaseInputs();
FindObsoleteFiles(deletion_state);
*madeProgress = true;
}
delete c;
c.reset();
if (status.ok()) {
// Done
@ -1332,11 +1317,10 @@ void DBImpl::CleanupCompaction(CompactionState* compact) {
if (compact->builder != NULL) {
// May happen if we get a shutdown call in the middle of compaction
compact->builder->Abandon();
delete compact->builder;
compact->builder.reset();
} else {
assert(compact->outfile == NULL);
}
delete compact->outfile;
for (size_t i = 0; i < compact->outputs.size(); i++) {
const CompactionState::Output& out = compact->outputs[i];
pending_outputs_.erase(out.number);
@ -1397,8 +1381,8 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
std::string fname = TableFileName(dbname_, file_number);
Status s = env_->NewWritableFile(fname, &compact->outfile);
if (s.ok()) {
compact->builder = new TableBuilder(options_, compact->outfile,
compact->compaction->level() + 1);
compact->builder.reset(new TableBuilder(options_, compact->outfile.get(),
compact->compaction->level() + 1));
}
return s;
}
@ -1406,7 +1390,7 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
Iterator* input) {
assert(compact != NULL);
assert(compact->outfile != NULL);
assert(compact->outfile);
assert(compact->builder != NULL);
const uint64_t output_number = compact->current_output()->number;
@ -1423,8 +1407,7 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
const uint64_t current_bytes = compact->builder->FileSize();
compact->current_output()->file_size = current_bytes;
compact->total_bytes += current_bytes;
delete compact->builder;
compact->builder = NULL;
compact->builder.reset();
// Finish and check for file errors
if (s.ok() && !options_.disableDataSync) {
@ -1437,8 +1420,7 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
if (s.ok()) {
s = compact->outfile->Close();
}
delete compact->outfile;
compact->outfile = NULL;
compact->outfile.reset();
if (s.ok() && current_entries > 0) {
// Verify that the table is usable
@ -1537,7 +1519,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
assert(compact->builder == NULL);
assert(compact->outfile == NULL);
assert(!compact->outfile);
SequenceNumber visible_at_tip = 0;
SequenceNumber earliest_snapshot;
@ -1560,7 +1542,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
mutex_.Unlock();
const uint64_t start_micros = env_->NowMicros();
Iterator* input = versions_->MakeInputIterator(compact->compaction);
unique_ptr<Iterator> input(versions_->MakeInputIterator(compact->compaction));
input->SeekToFirst();
Status status;
ParsedInternalKey ikey;
@ -1587,7 +1569,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
Slice* compaction_filter_value = NULL;
if (compact->compaction->ShouldStopBefore(key) &&
compact->builder != NULL) {
status = FinishCompactionOutputFile(compact, input);
status = FinishCompactionOutputFile(compact, input.get());
if (!status.ok()) {
break;
}
@ -1690,7 +1672,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
// Close output file if it is big enough
if (compact->builder->FileSize() >=
compact->compaction->MaxOutputFileSize()) {
status = FinishCompactionOutputFile(compact, input);
status = FinishCompactionOutputFile(compact, input.get());
if (!status.ok()) {
break;
}
@ -1704,13 +1686,12 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
status = Status::IOError("Deleting DB during compaction");
}
if (status.ok() && compact->builder != NULL) {
status = FinishCompactionOutputFile(compact, input);
status = FinishCompactionOutputFile(compact, input.get());
}
if (status.ok()) {
status = input->status();
}
delete input;
input = NULL;
input.reset();
CompactionStats stats;
stats.micros = env_->NowMicros() - start_micros - imm_micros;
@ -1950,9 +1931,9 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
status = log_->AddRecord(WriteBatchInternal::Contents(updates));
if (status.ok() && options.sync) {
if (options_.use_fsync) {
status = logfile_->Fsync();
status = log_->file()->Fsync();
} else {
status = logfile_->Sync();
status = log_->file()->Sync();
}
}
}
@ -2117,18 +2098,15 @@ Status DBImpl::MakeRoomForWrite(bool force) {
DelayLoggingAndReset();
assert(versions_->PrevLogNumber() == 0);
uint64_t new_log_number = versions_->NewFileNumber();
WritableFile* lfile = NULL;
unique_ptr<WritableFile> lfile;
s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile);
if (!s.ok()) {
// Avoid chewing through file number space in a tight loop.
versions_->ReuseFileNumber(new_log_number);
versions_->ReuseFileNumber(new_log_number);
break;
}
delete log_;
delete logfile_;
logfile_ = lfile;
logfile_number_ = new_log_number;
log_ = new log::Writer(lfile);
log_.reset(new log::Writer(std::move(lfile)));
imm_.Add(mem_);
mem_ = new MemTable(internal_comparator_, NumberLevels());
mem_->Ref();
@ -2310,14 +2288,13 @@ Status DB::Open(const Options& options, const std::string& dbname,
s = impl->Recover(&edit); // Handles create_if_missing, error_if_exists
if (s.ok()) {
uint64_t new_log_number = impl->versions_->NewFileNumber();
WritableFile* lfile;
unique_ptr<WritableFile> lfile;
s = options.env->NewWritableFile(LogFileName(dbname, new_log_number),
&lfile);
if (s.ok()) {
edit.SetLogNumber(new_log_number);
impl->logfile_ = lfile;
impl->logfile_number_ = new_log_number;
impl->log_ = new log::Writer(lfile);
impl->log_.reset(new log::Writer(std::move(lfile)));
s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
}
if (s.ok()) {

@ -90,7 +90,7 @@ class DBImpl : public DB {
protected:
Env* const env_;
const std::string dbname_;
VersionSet* versions_;
unique_ptr<VersionSet> versions_;
const InternalKeyComparator internal_comparator_;
const Options options_; // options_.comparator == &internal_comparator_
@ -202,10 +202,9 @@ class DBImpl : public DB {
// Constant after construction
const InternalFilterPolicy internal_filter_policy_;
bool owns_info_log_;
bool owns_cache_;
// table_cache_ provides its own synchronization
TableCache* table_cache_;
unique_ptr<TableCache> table_cache_;
// Lock over the persistent DB state. Non-NULL iff successfully acquired.
FileLock* db_lock_;
@ -216,9 +215,8 @@ class DBImpl : public DB {
port::CondVar bg_cv_; // Signalled when background work finishes
MemTable* mem_;
MemTableList imm_; // Memtable that are not changing
WritableFile* logfile_;
uint64_t logfile_number_;
log::Writer* log_;
unique_ptr<log::Writer> log_;
std::string host_name_;

@ -100,18 +100,17 @@ class SpecialEnv : public EnvWrapper {
manifest_write_error_.Release_Store(NULL);
}
Status NewWritableFile(const std::string& f, WritableFile** r) {
Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r) {
class SSTableFile : public WritableFile {
private:
SpecialEnv* env_;
WritableFile* base_;
unique_ptr<WritableFile> base_;
public:
SSTableFile(SpecialEnv* env, WritableFile* base)
SSTableFile(SpecialEnv* env, unique_ptr<WritableFile>&& base)
: env_(env),
base_(base) {
base_(std::move(base)) {
}
~SSTableFile() { delete base_; }
Status Append(const Slice& data) {
if (env_->no_space_.Acquire_Load() != NULL) {
// Drop writes on the floor
@ -132,10 +131,10 @@ class SpecialEnv : public EnvWrapper {
class ManifestFile : public WritableFile {
private:
SpecialEnv* env_;
WritableFile* base_;
unique_ptr<WritableFile> base_;
public:
ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
~ManifestFile() { delete base_; }
ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
: env_(env), base_(std::move(b)) { }
Status Append(const Slice& data) {
if (env_->manifest_write_error_.Acquire_Load() != NULL) {
return Status::IOError("simulated writer error");
@ -161,24 +160,25 @@ class SpecialEnv : public EnvWrapper {
Status s = target()->NewWritableFile(f, r);
if (s.ok()) {
if (strstr(f.c_str(), ".sst") != NULL) {
*r = new SSTableFile(this, *r);
r->reset(new SSTableFile(this, std::move(*r)));
} else if (strstr(f.c_str(), "MANIFEST") != NULL) {
*r = new ManifestFile(this, *r);
r->reset(new ManifestFile(this, std::move(*r)));
}
}
return s;
}
Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
Status NewRandomAccessFile(const std::string& f,
unique_ptr<RandomAccessFile>* r) {
class CountingFile : public RandomAccessFile {
private:
RandomAccessFile* target_;
unique_ptr<RandomAccessFile> target_;
anon::AtomicCounter* counter_;
public:
CountingFile(RandomAccessFile* target, anon::AtomicCounter* counter)
: target_(target), counter_(counter) {
CountingFile(unique_ptr<RandomAccessFile>&& target,
anon::AtomicCounter* counter)
: target_(std::move(target)), counter_(counter) {
}
virtual ~CountingFile() { delete target_; }
virtual Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const {
counter_->Increment();
@ -188,7 +188,7 @@ class SpecialEnv : public EnvWrapper {
Status s = target()->NewRandomAccessFile(f, r);
if (s.ok() && count_random_reads_) {
*r = new CountingFile(*r, &random_read_counter_);
r->reset(new CountingFile(std::move(*r), &random_read_counter_));
}
return s;
}
@ -2199,7 +2199,6 @@ TEST(DBTest, BloomFilter) {
env_->delay_sstable_sync_.Release_Store(NULL);
Close();
delete options.block_cache;
delete options.filter_policy;
}
@ -2257,9 +2256,9 @@ TEST(DBTest, SnapshotFiles) {
}
}
}
SequentialFile* srcfile;
unique_ptr<SequentialFile> srcfile;
ASSERT_OK(env_->NewSequentialFile(src, &srcfile));
WritableFile* destfile;
unique_ptr<WritableFile> destfile;
ASSERT_OK(env_->NewWritableFile(dest, &destfile));
char buffer[4096];
@ -2271,8 +2270,6 @@ TEST(DBTest, SnapshotFiles) {
size -= slice.size();
}
ASSERT_OK(destfile->Close());
delete destfile;
delete srcfile;
}
// release file snapshot
@ -2519,7 +2516,6 @@ TEST(DBTest, ReadCompaction) {
ASSERT_TRUE(NumTableFilesAtLevel(0) < l1 ||
NumTableFilesAtLevel(1) < l2 ||
NumTableFilesAtLevel(2) < l3);
delete options.block_cache;
}
}
@ -2633,7 +2629,6 @@ class ModelDB: public DB {
};
explicit ModelDB(const Options& options): options_(options) { }
~ModelDB() { }
virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
return DB::Put(o, k, v);
}

@ -15,9 +15,9 @@ namespace log {
Reader::Reporter::~Reporter() {
}
Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
uint64_t initial_offset)
: file_(file),
Reader::Reader(unique_ptr<SequentialFile>&& file, Reporter* reporter,
bool checksum, uint64_t initial_offset)
: file_(std::move(file)),
reporter_(reporter),
checksum_(checksum),
backing_store_(new char[kBlockSize]),

@ -5,6 +5,7 @@
#ifndef STORAGE_LEVELDB_DB_LOG_READER_H_
#define STORAGE_LEVELDB_DB_LOG_READER_H_
#include <memory>
#include <stdint.h>
#include "db/log_format.h"
@ -14,6 +15,7 @@
namespace leveldb {
class SequentialFile;
using std::unique_ptr;
namespace log {
@ -40,8 +42,8 @@ class Reader {
//
// The Reader will start reading at the first record located at physical
// position >= initial_offset within the file.
Reader(SequentialFile* file, Reporter* reporter, bool checksum,
uint64_t initial_offset);
Reader(unique_ptr<SequentialFile>&& file, Reporter* reporter,
bool checksum, uint64_t initial_offset);
~Reader();
@ -57,8 +59,10 @@ class Reader {
// Undefined before the first call to ReadRecord.
uint64_t LastRecordOffset();
SequentialFile* file() { return file_.get(); }
private:
SequentialFile* const file_;
const unique_ptr<SequentialFile> file_;
Reporter* const reporter_;
bool const checksum_;
char* const backing_store_;

@ -100,8 +100,26 @@ class LogTest {
}
};
StringDest dest_;
StringSource source_;
std::string& dest_contents() {
auto dest = dynamic_cast<StringDest*>(writer_.file());
assert(dest);
return dest->contents_;
}
const std::string& dest_contents() const {
auto dest = dynamic_cast<const StringDest*>(writer_.file());
assert(dest);
return dest->contents_;
}
void reset_source_contents() {
auto src = dynamic_cast<StringSource*>(reader_.file());
assert(src);
src->contents_ = dest_contents();
}
unique_ptr<StringDest> dest_holder_;
unique_ptr<StringSource> source_holder_;
ReportCollector report_;
bool reading_;
Writer writer_;
@ -112,9 +130,11 @@ class LogTest {
static uint64_t initial_offset_last_record_offsets_[];
public:
LogTest() : reading_(false),
writer_(&dest_),
reader_(&source_, &report_, true/*checksum*/,
LogTest() : dest_holder_(new StringDest),
source_holder_(new StringSource),
reading_(false),
writer_(std::move(dest_holder_)),
reader_(std::move(source_holder_), &report_, true/*checksum*/,
0/*initial_offset*/) {
}
@ -124,13 +144,13 @@ class LogTest {
}
size_t WrittenBytes() const {
return dest_.contents_.size();
return dest_contents().size();
}
std::string Read() {
if (!reading_) {
reading_ = true;
source_.contents_ = Slice(dest_.contents_);
reset_source_contents();
}
std::string scratch;
Slice record;
@ -142,26 +162,27 @@ class LogTest {
}
void IncrementByte(int offset, int delta) {
dest_.contents_[offset] += delta;
dest_contents()[offset] += delta;
}
void SetByte(int offset, char new_byte) {
dest_.contents_[offset] = new_byte;
dest_contents()[offset] = new_byte;
}
void ShrinkSize(int bytes) {
dest_.contents_.resize(dest_.contents_.size() - bytes);
dest_contents().resize(dest_contents().size() - bytes);
}
void FixChecksum(int header_offset, int len) {
// Compute crc of type/len/data
uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len);
uint32_t crc = crc32c::Value(&dest_contents()[header_offset+6], 1 + len);
crc = crc32c::Mask(crc);
EncodeFixed32(&dest_.contents_[header_offset], crc);
EncodeFixed32(&dest_contents()[header_offset], crc);
}
void ForceError() {
source_.force_error_ = true;
auto src = dynamic_cast<StringSource*>(reader_.file());
src->force_error_ = true;
}
size_t DroppedBytes() const {
@ -192,22 +213,25 @@ class LogTest {
void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
WriteInitialOffsetLog();
reading_ = true;
source_.contents_ = Slice(dest_.contents_);
Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
WrittenBytes() + offset_past_end);
unique_ptr<StringSource> source(new StringSource);
source->contents_ = dest_contents();
unique_ptr<Reader> offset_reader(
new Reader(std::move(source), &report_, true/*checksum*/,
WrittenBytes() + offset_past_end));
Slice record;
std::string scratch;
ASSERT_TRUE(!offset_reader->ReadRecord(&record, &scratch));
delete offset_reader;
}
void CheckInitialOffsetRecord(uint64_t initial_offset,
int expected_record_offset) {
WriteInitialOffsetLog();
reading_ = true;
source_.contents_ = Slice(dest_.contents_);
Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
initial_offset);
unique_ptr<StringSource> source(new StringSource);
source->contents_ = dest_contents();
unique_ptr<Reader> offset_reader(
new Reader(std::move(source), &report_, true/*checksum*/,
initial_offset));
Slice record;
std::string scratch;
ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch));
@ -216,7 +240,6 @@ class LogTest {
ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset],
offset_reader->LastRecordOffset());
ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]);
delete offset_reader;
}
};

@ -12,8 +12,8 @@
namespace leveldb {
namespace log {
Writer::Writer(WritableFile* dest)
: dest_(dest),
Writer::Writer(unique_ptr<WritableFile>&& dest)
: dest_(std::move(dest)),
block_offset_(0) {
for (int i = 0; i <= kMaxRecordType; i++) {
char t = static_cast<char>(i);

@ -5,6 +5,7 @@
#ifndef STORAGE_LEVELDB_DB_LOG_WRITER_H_
#define STORAGE_LEVELDB_DB_LOG_WRITER_H_
#include <memory>
#include <stdint.h>
#include "db/log_format.h"
#include "leveldb/slice.h"
@ -14,6 +15,8 @@ namespace leveldb {
class WritableFile;
using std::unique_ptr;
namespace log {
class Writer {
@ -21,13 +24,16 @@ class Writer {
// Create a writer that will append data to "*dest".
// "*dest" must be initially empty.
// "*dest" must remain live while this Writer is in use.
explicit Writer(WritableFile* dest);
explicit Writer(unique_ptr<WritableFile>&& dest);
~Writer();
Status AddRecord(const Slice& slice);
WritableFile* file() { return dest_.get(); }
const WritableFile* file() const { return dest_.get(); }
private:
WritableFile* dest_;
unique_ptr<WritableFile> dest_;
int block_offset_; // Current offset in block
// crc32c values for all supported record types. These are

@ -50,8 +50,6 @@ class Repairer {
icmp_(options.comparator),
ipolicy_(options.filter_policy),
options_(SanitizeOptions(dbname, &icmp_, &ipolicy_, options)),
owns_info_log_(options_.info_log != options.info_log),
owns_cache_(options_.block_cache != options.block_cache),
next_file_number_(1) {
// TableCache can be small since we expect each table to be opened once.
table_cache_ = new TableCache(dbname_, &options_, 10);
@ -61,12 +59,6 @@ class Repairer {
~Repairer() {
delete table_cache_;
delete edit_;
if (owns_info_log_) {
delete options_.info_log;
}
if (owns_cache_) {
delete options_.block_cache;
}
}
Status Run() {
@ -104,7 +96,6 @@ class Repairer {
InternalKeyComparator const icmp_;
InternalFilterPolicy const ipolicy_;
Options const options_;
bool owns_info_log_;
bool owns_cache_;
TableCache* table_cache_;
VersionEdit* edit_;
@ -164,7 +155,7 @@ class Repairer {
Status ConvertLogToTable(uint64_t log) {
struct LogReporter : public log::Reader::Reporter {
Env* env;
Logger* info_log;
std::shared_ptr<Logger> info_log;
uint64_t lognum;
virtual void Corruption(size_t bytes, const Status& s) {
// We print error messages for corruption, but continue repairing.
@ -177,7 +168,7 @@ class Repairer {
// Open the log file
std::string logname = LogFileName(dbname_, log);
SequentialFile* lfile;
unique_ptr<SequentialFile> lfile;
Status status = env_->NewSequentialFile(logname, &lfile);
if (!status.ok()) {
return status;
@ -192,7 +183,7 @@ class Repairer {
// corruptions cause entire commits to be skipped instead of
// propagating bad information (like overly large sequence
// numbers).
log::Reader reader(lfile, &reporter, false/*do not checksum*/,
log::Reader reader(std::move(lfile), &reporter, false/*do not checksum*/,
0/*initial_offset*/);
// Read all the records and add to a memtable
@ -219,7 +210,6 @@ class Repairer {
status = Status::OK(); // Keep going with rest of file
}
}
delete lfile;
// Do not record a version edit for this conversion to a Table
// since ExtractMetaData() will also generate edits.
@ -304,7 +294,7 @@ class Repairer {
Status WriteDescriptor() {
std::string tmp = TempFileName(dbname_, 1);
WritableFile* file;
unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(tmp, &file);
if (!status.ok()) {
return status;
@ -331,16 +321,11 @@ class Repairer {
//fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
{
log::Writer log(file);
log::Writer log(std::move(file));
std::string record;
edit_->EncodeTo(&record);
status = log.AddRecord(record);
}
if (status.ok()) {
status = file->Close();
}
delete file;
file = NULL;
if (!status.ok()) {
env_->DeleteFile(tmp);

@ -13,17 +13,17 @@
namespace leveldb {
struct TableAndFile {
RandomAccessFile* file;
Table* table;
unique_ptr<RandomAccessFile> file;
unique_ptr<Table> table;
};
static class DBStatistics* dbstatistics;
static void DeleteEntry(const Slice& key, void* value) {
TableAndFile* tf = reinterpret_cast<TableAndFile*>(value);
delete tf->table;
delete tf->file;
dbstatistics ? dbstatistics->incNumFileCloses() : (void)0;
if (dbstatistics) {
dbstatistics->incNumFileCloses();
}
delete tf;
}
@ -44,7 +44,6 @@ TableCache::TableCache(const std::string& dbname,
}
TableCache::~TableCache() {
delete cache_;
}
Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
@ -60,24 +59,23 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
*tableIO = true; // we had to do IO from storage
}
std::string fname = TableFileName(dbname_, file_number);
RandomAccessFile* file = NULL;
Table* table = NULL;
unique_ptr<RandomAccessFile> file;
unique_ptr<Table> table;
s = env_->NewRandomAccessFile(fname, &file);
stats ? stats->incNumFileOpens() : (void)0;
if (s.ok()) {
s = Table::Open(*options_, file, file_size, &table);
s = Table::Open(*options_, std::move(file), file_size, &table);
}
if (!s.ok()) {
assert(table == NULL);
delete file;
stats ? stats->incNumFileErrors() : (void)0;
// We do not cache error results so that if the error is transient,
// or somebody repairs the file, we recover automatically.
} else {
TableAndFile* tf = new TableAndFile;
tf->file = file;
tf->table = table;
tf->file = std::move(file);
tf->table = std::move(table);
*handle = cache_->Insert(key, tf, 1, &DeleteEntry);
}
}
@ -98,9 +96,10 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
return NewErrorIterator(s);
}
Table* table = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
Table* table =
reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table.get();
Iterator* result = table->NewIterator(options);
result->RegisterCleanup(&UnrefEntry, cache_, handle);
result->RegisterCleanup(&UnrefEntry, cache_.get(), handle);
if (tableptr != NULL) {
*tableptr = table;
}
@ -117,7 +116,8 @@ Status TableCache::Get(const ReadOptions& options,
Cache::Handle* handle = NULL;
Status s = FindTable(file_number, file_size, &handle, tableIO);
if (s.ok()) {
Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
Table* t =
reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table.get();
s = t->InternalGet(options, k, arg, saver);
cache_->Release(handle);
}

@ -52,7 +52,7 @@ class TableCache {
Env* const env_;
const std::string dbname_;
const Options* options_;
Cache* cache_;
std::shared_ptr<Cache> cache_;
Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**,
bool* tableIO = NULL);

@ -14,22 +14,23 @@ TransactionLogIteratorImpl::TransactionLogIteratorImpl(
files_(files),
started_(false),
isValid_(true),
currentFileIndex_(0),
currentLogReader_(NULL) {
assert( files_ != NULL);
currentFileIndex_(0) {
assert(files_ != NULL);
}
LogReporter
TransactionLogIteratorImpl::NewLogReporter(const uint64_t logNumber) {
LogReporter reporter;
reporter.env = options_->env;
reporter.info_log = options_->info_log;
reporter.info_log = options_->info_log.get();
reporter.log_number = logNumber;
return reporter;
}
Status TransactionLogIteratorImpl::OpenLogFile(const LogFile& logFile,
SequentialFile** file) {
Status TransactionLogIteratorImpl::OpenLogFile(
const LogFile& logFile,
unique_ptr<SequentialFile>* file)
{
Env* env = options_->env;
if (logFile.type == kArchivedLogFile) {
std::string fname = ArchivedLogFileName(dbname_, logFile.logNumber);
@ -73,17 +74,18 @@ void TransactionLogIteratorImpl::Next() {
std::string scratch;
Slice record;
if (!started_) {
SequentialFile* file = NULL;
unique_ptr<SequentialFile> file;
Status status = OpenLogFile(currentLogFile, &file);
if (!status.ok()) {
isValid_ = false;
currentStatus_ = status;
return;
}
assert(file != NULL);
assert(file);
WriteBatch batch;
log::Reader* reader = new log::Reader(file, &reporter, true, 0);
assert(reader != NULL);
unique_ptr<log::Reader> reader(
new log::Reader(std::move(file), &reporter, true, 0));
assert(reader);
while (reader->ReadRecord(&record, &scratch)) {
if (record.size() < 12) {
reporter.Corruption(
@ -95,7 +97,7 @@ void TransactionLogIteratorImpl::Next() {
if (currentNum >= sequenceNumber_) {
isValid_ = true;
currentRecord_ = record;
currentLogReader_ = reader;
currentLogReader_ = std::move(reader);
break;
}
}
@ -108,7 +110,7 @@ void TransactionLogIteratorImpl::Next() {
started_ = true;
} else {
LOOK_NEXT_FILE:
assert(currentLogReader_ != NULL);
assert(currentLogReader_);
bool openNextFile = true;
while (currentLogReader_->ReadRecord(&record, &scratch)) {
if (record.size() < 12) {
@ -125,15 +127,16 @@ LOOK_NEXT_FILE:
if (openNextFile) {
if (currentFileIndex_ < files_->size() - 1) {
++currentFileIndex_;
delete currentLogReader_;
SequentialFile *file;
currentLogReader_.reset();
unique_ptr<SequentialFile> file;
Status status = OpenLogFile(files_->at(currentFileIndex_), &file);
if (!status.ok()) {
isValid_ = false;
currentStatus_ = status;
return;
}
currentLogReader_ = new log::Reader(file, &reporter, true, 0);
currentLogReader_.reset(
new log::Reader(std::move(file), &reporter, true, 0));
goto LOOK_NEXT_FILE;
} else {
// LOOKED AT FILES. WE ARE DONE HERE.

@ -26,14 +26,11 @@ struct LogReporter : public log::Reader::Reporter {
class TransactionLogIteratorImpl : public TransactionLogIterator {
public:
TransactionLogIteratorImpl(const std::string& dbname,
const Options* options,
SequenceNumber& seqNum,
std::vector<LogFile>* files);
const Options* options,
SequenceNumber& seqNum,
std::vector<LogFile>* files);
virtual ~TransactionLogIteratorImpl() {
// TODO move to cc file.
if (currentLogReader_ != NULL) {
delete currentLogReader_;
}
delete files_;
}
@ -55,8 +52,8 @@ class TransactionLogIteratorImpl : public TransactionLogIterator {
Status currentStatus_;
size_t currentFileIndex_;
Slice currentRecord_;
log::Reader* currentLogReader_;
Status OpenLogFile(const LogFile& logFile, SequentialFile** file);
unique_ptr<log::Reader> currentLogReader_;
Status OpenLogFile(const LogFile& logFile, unique_ptr<SequentialFile>* file);
LogReporter NewLogReporter(uint64_t logNumber);
};

@ -896,8 +896,6 @@ VersionSet::VersionSet(const std::string& dbname,
log_number_(0),
prev_log_number_(0),
num_levels_(options_->num_levels),
descriptor_file_(NULL),
descriptor_log_(NULL),
dummy_versions_(this),
current_(NULL),
compactions_in_progress_(options_->num_levels),
@ -914,8 +912,6 @@ VersionSet::~VersionSet() {
delete[] compact_pointer_;
delete[] max_file_size_;
delete[] level_max_bytes_;
delete descriptor_log_;
delete descriptor_file_;
}
void VersionSet::Init(int num_levels) {
@ -994,16 +990,17 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
manifest_file_number_ = NewFileNumber(); // Change manifest file no.
}
if (descriptor_log_ == NULL || new_descriptor_log) {
if (!descriptor_log_ || new_descriptor_log) {
// No reason to unlock *mu here since we only hit this path in the
// first call to LogAndApply (when opening the database).
assert(descriptor_file_ == NULL || new_descriptor_log);
assert(!descriptor_log_ || new_descriptor_log);
new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
edit->SetNextFile(next_file_number_);
s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
unique_ptr<WritableFile> descriptor_file;
s = env_->NewWritableFile(new_manifest_file, &descriptor_file);
if (s.ok()) {
descriptor_log_ = new log::Writer(descriptor_file_);
s = WriteSnapshot(descriptor_log_);
descriptor_log_.reset(new log::Writer(std::move(descriptor_file)));
s = WriteSnapshot(descriptor_log_.get());
}
}
@ -1029,9 +1026,9 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
}
if (s.ok()) {
if (options_->use_fsync) {
s = descriptor_file_->Fsync();
s = descriptor_log_->file()->Fsync();
} else {
s = descriptor_file_->Sync();
s = descriptor_log_->file()->Sync();
}
}
if (!s.ok()) {
@ -1052,7 +1049,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
}
// find offset in manifest file where this version is stored.
new_manifest_file_size = descriptor_file_->GetFileSize();
new_manifest_file_size = descriptor_log_->file()->GetFileSize();
mu->Lock();
// cache the manifest_file_size so that it can be used to rollover in the
@ -1072,10 +1069,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
v->GetVersionNumber());
delete v;
if (!new_manifest_file.empty()) {
delete descriptor_log_;
delete descriptor_file_;
descriptor_log_ = NULL;
descriptor_file_ = NULL;
descriptor_log_.reset();
env_->DeleteFile(new_manifest_file);
}
}
@ -1142,7 +1136,7 @@ Status VersionSet::Recover() {
current.c_str());
std::string dscname = dbname_ + "/" + current;
SequentialFile* file;
unique_ptr<SequentialFile> file;
s = env_->NewSequentialFile(dscname, &file);
if (!s.ok()) {
return s;
@ -1166,7 +1160,8 @@ Status VersionSet::Recover() {
{
LogReporter reporter;
reporter.status = &s;
log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
log::Reader reader(std::move(file), &reporter, true/*checksum*/,
0/*initial_offset*/);
Slice record;
std::string scratch;
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
@ -1206,8 +1201,7 @@ Status VersionSet::Recover() {
}
}
}
delete file;
file = NULL;
file.reset();
if (s.ok()) {
if (!have_next_file) {
@ -1260,7 +1254,7 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
};
// Open the specified manifest file.
SequentialFile* file;
unique_ptr<SequentialFile> file;
Status s = options.env->NewSequentialFile(dscname, &file);
if (!s.ok()) {
return s;
@ -1280,7 +1274,8 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
{
LogReporter reporter;
reporter.status = &s;
log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
log::Reader reader(std::move(file), &reporter, true/*checksum*/,
0/*initial_offset*/);
Slice record;
std::string scratch;
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
@ -1327,8 +1322,7 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
}
}
}
delete file;
file = NULL;
file.reset();
if (s.ok()) {
if (!have_next_file) {
@ -1561,13 +1555,13 @@ const char* VersionSet::LevelDataSizeSummary(
bool VersionSet::ManifestContains(const std::string& record) const {
std::string fname = DescriptorFileName(dbname_, manifest_file_number_);
Log(options_->info_log, "ManifestContains: checking %s\n", fname.c_str());
SequentialFile* file = NULL;
unique_ptr<SequentialFile> file;
Status s = env_->NewSequentialFile(fname, &file);
if (!s.ok()) {
Log(options_->info_log, "ManifestContains: %s\n", s.ToString().c_str());
return false;
}
log::Reader reader(file, NULL, true/*checksum*/, 0);
log::Reader reader(std::move(file), NULL, true/*checksum*/, 0);
Slice r;
std::string scratch;
bool result = false;
@ -1577,7 +1571,6 @@ bool VersionSet::ManifestContains(const std::string& record) const {
break;
}
}
delete file;
Log(options_->info_log, "ManifestContains: result = %d\n", result ? 1 : 0);
return result;
}

@ -16,6 +16,7 @@
#define STORAGE_LEVELDB_DB_VERSION_SET_H_
#include <map>
#include <memory>
#include <set>
#include <vector>
#include <deque>
@ -419,8 +420,7 @@ class VersionSet {
int num_levels_;
// Opened lazily
WritableFile* descriptor_file_;
log::Writer* descriptor_log_;
unique_ptr<log::Writer> descriptor_log_;
Version dummy_versions_; // Head of circular doubly-linked list of versions.
Version* current_; // == dummy_versions_.prev_

@ -235,13 +235,17 @@ class HdfsEnv : public Env {
}
virtual Status NewSequentialFile(const std::string& fname,
SequentialFile** result);
unique_ptr<SequentialFile>* result);
virtual Status NewRandomAccessFile(const std::string& fname,
RandomAccessFile** result){ return notsup;}
unique_ptr<RandomAccessFile>* result) {
return notsup;
}
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result){return notsup;}
unique_ptr<WritableFile>* result) {
return notsup;
}
virtual bool FileExists(const std::string& fname){return false;}
@ -269,7 +273,8 @@ class HdfsEnv : public Env {
virtual Status UnlockFile(FileLock* lock){return notsup;}
virtual Status NewLogger(const std::string& fname, Logger** result){return notsup;}
virtual Status NewLogger(const std::string& fname,
shared_ptr<Logger>* result){return notsup;}
virtual void Schedule( void (*function)(void* arg), void* arg) {}

@ -233,31 +233,31 @@ class InMemoryEnv : public EnvWrapper {
// Partial implementation of the Env interface.
virtual Status NewSequentialFile(const std::string& fname,
SequentialFile** result) {
unique_ptr<SequentialFile>* result) {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
*result = NULL;
return Status::IOError(fname, "File not found");
}
*result = new SequentialFileImpl(file_map_[fname]);
result->reset(new SequentialFileImpl(file_map_[fname]));
return Status::OK();
}
virtual Status NewRandomAccessFile(const std::string& fname,
RandomAccessFile** result) {
unique_ptr<RandomAccessFile>* result) {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
*result = NULL;
return Status::IOError(fname, "File not found");
}
*result = new RandomAccessFileImpl(file_map_[fname]);
result->reset(new RandomAccessFileImpl(file_map_[fname]));
return Status::OK();
}
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result) {
unique_ptr<WritableFile>* result) {
MutexLock lock(&mutex_);
if (file_map_.find(fname) != file_map_.end()) {
DeleteFileInternal(fname);
@ -267,7 +267,7 @@ class InMemoryEnv : public EnvWrapper {
file->Ref();
file_map_[fname] = file;
*result = new WritableFileImpl(file);
result->reset(new WritableFileImpl(file));
return Status::OK();
}

@ -8,6 +8,7 @@
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/testharness.h"
#include <memory>
#include <string>
#include <vector>
@ -27,7 +28,7 @@ class MemEnvTest {
TEST(MemEnvTest, Basics) {
uint64_t file_size;
WritableFile* writable_file;
unique_ptr<WritableFile> writable_file;
std::vector<std::string> children;
ASSERT_OK(env_->CreateDir("/dir"));
@ -40,7 +41,7 @@ TEST(MemEnvTest, Basics) {
// Create a file.
ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
delete writable_file;
writable_file.reset();
// Check that the file exists.
ASSERT_TRUE(env_->FileExists("/dir/f"));
@ -53,7 +54,7 @@ TEST(MemEnvTest, Basics) {
// Write to the file.
ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_OK(writable_file->Append("abc"));
delete writable_file;
writable_file.reset();
// Check for expected size.
ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
@ -68,8 +69,8 @@ TEST(MemEnvTest, Basics) {
ASSERT_EQ(3U, file_size);
// Check that opening non-existent file fails.
SequentialFile* seq_file;
RandomAccessFile* rand_file;
unique_ptr<SequentialFile> seq_file;
unique_ptr<RandomAccessFile> rand_file;
ASSERT_TRUE(!env_->NewSequentialFile("/dir/non_existent", &seq_file).ok());
ASSERT_TRUE(!seq_file);
ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file).ok());
@ -85,9 +86,9 @@ TEST(MemEnvTest, Basics) {
}
TEST(MemEnvTest, ReadWrite) {
WritableFile* writable_file;
SequentialFile* seq_file;
RandomAccessFile* rand_file;
unique_ptr<WritableFile> writable_file;
unique_ptr<SequentialFile> seq_file;
unique_ptr<RandomAccessFile> rand_file;
Slice result;
char scratch[100];
@ -96,7 +97,7 @@ TEST(MemEnvTest, ReadWrite) {
ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_OK(writable_file->Append("hello "));
ASSERT_OK(writable_file->Append("world"));
delete writable_file;
writable_file.reset();
// Read sequentially.
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
@ -110,7 +111,6 @@ TEST(MemEnvTest, ReadWrite) {
ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
ASSERT_OK(seq_file->Read(1000, &result, scratch));
ASSERT_EQ(0U, result.size());
delete seq_file;
// Random reads.
ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
@ -123,7 +123,6 @@ TEST(MemEnvTest, ReadWrite) {
// Too high offset.
ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok());
delete rand_file;
}
TEST(MemEnvTest, Locks) {
@ -139,14 +138,14 @@ TEST(MemEnvTest, Misc) {
ASSERT_OK(env_->GetTestDirectory(&test_dir));
ASSERT_TRUE(!test_dir.empty());
WritableFile* writable_file;
unique_ptr<WritableFile> writable_file;
ASSERT_OK(env_->NewWritableFile("/a/b", &writable_file));
// These are no-ops, but we test they return success.
ASSERT_OK(writable_file->Sync());
ASSERT_OK(writable_file->Flush());
ASSERT_OK(writable_file->Close());
delete writable_file;
writable_file.reset();
}
TEST(MemEnvTest, LargeWrite) {
@ -158,13 +157,13 @@ TEST(MemEnvTest, LargeWrite) {
write_data.append(1, static_cast<char>(i));
}
WritableFile* writable_file;
unique_ptr<WritableFile> writable_file;
ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_OK(writable_file->Append("foo"));
ASSERT_OK(writable_file->Append(write_data));
delete writable_file;
writable_file.reset();
SequentialFile* seq_file;
unique_ptr<SequentialFile> seq_file;
Slice result;
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
@ -178,7 +177,6 @@ TEST(MemEnvTest, LargeWrite) {
read += result.size();
}
ASSERT_TRUE(write_data == read_data);
delete seq_file;
delete [] scratch;
}

@ -18,17 +18,20 @@
#ifndef STORAGE_LEVELDB_INCLUDE_CACHE_H_
#define STORAGE_LEVELDB_INCLUDE_CACHE_H_
#include <memory>
#include <stdint.h>
#include "leveldb/slice.h"
namespace leveldb {
using std::shared_ptr;
class Cache;
// Create a new cache with a fixed size capacity. This implementation
// of Cache uses a least-recently-used eviction policy.
extern Cache* NewLRUCache(size_t capacity);
extern Cache* NewLRUCache(size_t capacity, int numShardBits);
extern shared_ptr<Cache> NewLRUCache(size_t capacity);
extern shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits);
class Cache {
public:

@ -15,6 +15,7 @@
#include <cstdarg>
#include <string>
#include <memory>
#include <vector>
#include <stdint.h>
#include "leveldb/status.h"
@ -28,6 +29,9 @@ class SequentialFile;
class Slice;
class WritableFile;
using std::unique_ptr;
using std::shared_ptr;
class Env {
public:
Env() { }
@ -47,7 +51,7 @@ class Env {
//
// The returned file will only be accessed by one thread at a time.
virtual Status NewSequentialFile(const std::string& fname,
SequentialFile** result) = 0;
unique_ptr<SequentialFile>* result) = 0;
// Create a brand new random access read-only file with the
// specified name. On success, stores a pointer to the new file in
@ -57,7 +61,7 @@ class Env {
//
// The returned file may be concurrently accessed by multiple threads.
virtual Status NewRandomAccessFile(const std::string& fname,
RandomAccessFile** result) = 0;
unique_ptr<RandomAccessFile>* result) = 0;
// Create an object that writes to a new file with the specified
// name. Deletes any existing file with the same name and creates a
@ -67,7 +71,7 @@ class Env {
//
// The returned file will only be accessed by one thread at a time.
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result) = 0;
unique_ptr<WritableFile>* result) = 0;
// Returns true iff the named file exists.
virtual bool FileExists(const std::string& fname) = 0;
@ -143,7 +147,8 @@ class Env {
virtual Status GetTestDirectory(std::string* path) = 0;
// Create and return a log file for storing informational messages.
virtual Status NewLogger(const std::string& fname, Logger** result) = 0;
virtual Status NewLogger(const std::string& fname,
shared_ptr<Logger>* result) = 0;
// Returns the number of micro-seconds since some fixed point in time. Only
// useful for computing deltas of time.
@ -288,6 +293,12 @@ class FileLock {
};
// Log the specified data to *info_log if info_log is non-NULL.
extern void Log(const shared_ptr<Logger>& info_log, const char* format, ...)
# if defined(__GNUC__) || defined(__clang__)
__attribute__((__format__ (__printf__, 2, 3)))
# endif
;
extern void Log(Logger* info_log, const char* format, ...)
# if defined(__GNUC__) || defined(__clang__)
__attribute__((__format__ (__printf__, 2, 3)))
@ -315,13 +326,15 @@ class EnvWrapper : public Env {
Env* target() const { return target_; }
// The following text is boilerplate that forwards all methods to target()
Status NewSequentialFile(const std::string& f, SequentialFile** r) {
Status NewSequentialFile(const std::string& f,
unique_ptr<SequentialFile>* r) {
return target_->NewSequentialFile(f, r);
}
Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
Status NewRandomAccessFile(const std::string& f,
unique_ptr<RandomAccessFile>* r) {
return target_->NewRandomAccessFile(f, r);
}
Status NewWritableFile(const std::string& f, WritableFile** r) {
Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r) {
return target_->NewWritableFile(f, r);
}
bool FileExists(const std::string& f) { return target_->FileExists(f); }
@ -359,7 +372,8 @@ class EnvWrapper : public Env {
virtual Status GetTestDirectory(std::string* path) {
return target_->GetTestDirectory(path);
}
virtual Status NewLogger(const std::string& fname, Logger** result) {
virtual Status NewLogger(const std::string& fname,
shared_ptr<Logger>* result) {
return target_->NewLogger(fname, result);
}
uint64_t NowMicros() {

@ -7,6 +7,7 @@
#include <stddef.h>
#include <string>
#include <memory>
#include <stdint.h>
#include "leveldb/slice.h"
@ -20,6 +21,8 @@ class Logger;
class Snapshot;
class Statistics;
using std::shared_ptr;
// DB contents are stored in a set of blocks, each of which holds a
// sequence of key,value pairs. Each block may be compressed before
// being stored in a file. The following enum describes which
@ -84,7 +87,7 @@ struct Options {
// be written to info_log if it is non-NULL, or to a file stored
// in the same directory as the DB contents if info_log is NULL.
// Default: NULL
Logger* info_log;
shared_ptr<Logger> info_log;
// -------------------
// Parameters that affect performance
@ -121,7 +124,7 @@ struct Options {
// If non-NULL, use the specified cache for blocks.
// If NULL, leveldb will automatically create and use an 8MB internal cache.
// Default: NULL
Cache* block_cache;
shared_ptr<Cache> block_cache;
// Approximate size of user data packed per block. Note that the
// block size specified here corresponds to uncompressed data. The
@ -326,7 +329,7 @@ struct Options {
// Create an Options object with default values for all fields.
Options();
void Dump(Logger * log) const;
void Dump(Logger* log) const;
// This method allows an application to modify/delete a key-value at
// the time of compaction. The compaction process invokes this

@ -5,6 +5,7 @@
#ifndef STORAGE_LEVELDB_INCLUDE_TABLE_H_
#define STORAGE_LEVELDB_INCLUDE_TABLE_H_
#include <memory>
#include <stdint.h>
#include "leveldb/iterator.h"
@ -18,6 +19,8 @@ class RandomAccessFile;
struct ReadOptions;
class TableCache;
using std::unique_ptr;
// A Table is a sorted map from strings to strings. Tables are
// immutable and persistent. A Table may be safely accessed from
// multiple threads without external synchronization.
@ -36,9 +39,9 @@ class Table {
//
// *file must remain live while this Table is in use.
static Status Open(const Options& options,
RandomAccessFile* file,
unique_ptr<RandomAccessFile>&& file,
uint64_t file_size,
Table** table);
unique_ptr<Table>* table);
~Table();

@ -27,7 +27,7 @@ struct Table::Rep {
Options options;
Status status;
RandomAccessFile* file;
unique_ptr<RandomAccessFile> file;
uint64_t cache_id;
FilterBlockReader* filter;
const char* filter_data;
@ -37,10 +37,10 @@ struct Table::Rep {
};
Status Table::Open(const Options& options,
RandomAccessFile* file,
unique_ptr<RandomAccessFile>&& file,
uint64_t size,
Table** table) {
*table = NULL;
unique_ptr<Table>* table) {
table->reset();
if (size < Footer::kEncodedLength) {
return Status::InvalidArgument("file is too short to be an sstable");
}
@ -66,7 +66,7 @@ Status Table::Open(const Options& options,
BlockContents contents;
Block* index_block = NULL;
if (s.ok()) {
s = ReadBlock(file, ReadOptions(), footer.index_handle(), &contents);
s = ReadBlock(file.get(), ReadOptions(), footer.index_handle(), &contents);
if (s.ok()) {
index_block = new Block(contents);
}
@ -77,13 +77,13 @@ Status Table::Open(const Options& options,
// ready to serve requests.
Rep* rep = new Table::Rep;
rep->options = options;
rep->file = file;
rep->file = std::move(file);
rep->metaindex_handle = footer.metaindex_handle();
rep->index_block = index_block;
rep->cache_id = (options.block_cache ? options.block_cache->NewId() : 0);
rep->filter_data = NULL;
rep->filter = NULL;
*table = new Table(rep);
table->reset(new Table(rep));
(*table)->ReadMeta(footer);
} else {
if (index_block) delete index_block;
@ -101,7 +101,8 @@ void Table::ReadMeta(const Footer& footer) {
// it is an empty block.
ReadOptions opt;
BlockContents contents;
if (!ReadBlock(rep_->file, opt, footer.metaindex_handle(), &contents).ok()) {
if (!ReadBlock(rep_->file.get(), opt, footer.metaindex_handle(),
&contents).ok()) {
// Do not propagate errors since meta info is not needed for operation
return;
}
@ -129,7 +130,7 @@ void Table::ReadFilter(const Slice& filter_handle_value) {
// requiring checksum verification in Table::Open.
ReadOptions opt;
BlockContents block;
if (!ReadBlock(rep_->file, opt, filter_handle, &block).ok()) {
if (!ReadBlock(rep_->file.get(), opt, filter_handle, &block).ok()) {
return;
}
if (block.heap_allocated) {
@ -164,7 +165,7 @@ Iterator* Table::BlockReader(void* arg,
const Slice& index_value,
bool* didIO) {
Table* table = reinterpret_cast<Table*>(arg);
Cache* block_cache = table->rep_->options.block_cache;
Cache* block_cache = table->rep_->options.block_cache.get();
Statistics* const statistics = table->rep_->options.statistics;
Block* block = NULL;
Cache::Handle* cache_handle = NULL;
@ -188,7 +189,7 @@ Iterator* Table::BlockReader(void* arg,
RecordTick(statistics, BLOCK_CACHE_HIT);
} else {
s = ReadBlock(table->rep_->file, options, handle, &contents);
s = ReadBlock(table->rep_->file.get(), options, handle, &contents);
if (s.ok()) {
block = new Block(contents);
if (contents.cachable && options.fill_cache) {
@ -203,7 +204,7 @@ Iterator* Table::BlockReader(void* arg,
RecordTick(statistics, BLOCK_CACHE_MISS);
}
} else {
s = ReadBlock(table->rep_->file, options, handle, &contents);
s = ReadBlock(table->rep_->file.get(), options, handle, &contents);
if (s.ok()) {
block = new Block(contents);
}

@ -221,8 +221,7 @@ class BlockConstructor: public Constructor {
class TableConstructor: public Constructor {
public:
TableConstructor(const Comparator* cmp)
: Constructor(cmp),
source_(NULL), table_(NULL) {
: Constructor(cmp) {
}
~TableConstructor() {
Reset();
@ -244,11 +243,12 @@ class TableConstructor: public Constructor {
ASSERT_EQ(sink.contents().size(), builder.FileSize());
// Open the table
source_ = new StringSource(sink.contents());
source_.reset(new StringSource(sink.contents()));
Options table_options;
table_options.comparator = options.comparator;
table_options.compression_opts = options.compression_opts;
return Table::Open(table_options, source_, sink.contents().size(), &table_);
return Table::Open(table_options, std::move(source_),
sink.contents().size(), &table_);
}
virtual Iterator* NewIterator() const {
@ -261,14 +261,12 @@ class TableConstructor: public Constructor {
private:
void Reset() {
delete table_;
delete source_;
table_ = NULL;
source_ = NULL;
table_.reset();
source_.reset();
}
StringSource* source_;
Table* table_;
unique_ptr<StringSource> source_;
unique_ptr<Table> table_;
TableConstructor();
};

@ -433,7 +433,6 @@ class StressTest {
~StressTest() {
delete db_;
delete cache_;
delete filter_policy_;
}
@ -757,7 +756,7 @@ class StressTest {
}
private:
Cache* cache_;
shared_ptr<Cache> cache_;
const FilterPolicy* filter_policy_;
DB* db_;
int num_times_reopened_;

@ -45,16 +45,16 @@ SstFileReader::SstFileReader(std::string file_path,
Status SstFileReader::ReadSequential(bool print_kv, uint64_t read_num)
{
Table* table;
unique_ptr<Table> table;
Options table_options;
RandomAccessFile* file = NULL;
unique_ptr<RandomAccessFile> file;
Status s = table_options.env->NewRandomAccessFile(file_name_, &file);
if(!s.ok()) {
return s;
}
uint64_t file_size;
table_options.env->GetFileSize(file_name_, &file_size);
s = Table::Open(table_options, file, file_size, &table);
s = Table::Open(table_options, std::move(file), file_size, &table);
if(!s.ok()) {
return s;
}

@ -26,7 +26,7 @@ class AutoSplitLogger : public Logger {
std::string db_log_dir_;
std::string db_absolute_path_;
Env* env_;
UnderlyingLogger* logger_;
shared_ptr<UnderlyingLogger> logger_;
const size_t MAX_LOG_FILE_SIZE;
Status status_;
@ -42,7 +42,6 @@ class AutoSplitLogger : public Logger {
log_fname_ = InfoLogFileName(dbname_, db_absolute_path_, db_log_dir_);
InitLogger();
}
~AutoSplitLogger() { delete logger_; }
virtual void Logv(const char* format, va_list ap) {
assert(GetStatus().ok());
@ -50,7 +49,6 @@ class AutoSplitLogger : public Logger {
logger_->Logv(format, ap);
// Check if the log file should be splitted.
if (logger_->GetLogFileSize() > MAX_LOG_FILE_SIZE) {
delete logger_;
std::string old_fname = OldInfoLogFileName(
dbname_, env_->NowMicros(), db_absolute_path_, db_log_dir_);
env_->RenameFile(log_fname_, old_fname);

@ -338,15 +338,15 @@ class ShardedLRUCache : public Cache {
} // end anonymous namespace
Cache* NewLRUCache(size_t capacity) {
return new ShardedLRUCache(capacity);
shared_ptr<Cache> NewLRUCache(size_t capacity) {
return std::make_shared<ShardedLRUCache>(capacity);
}
Cache* NewLRUCache(size_t capacity, int numShardBits) {
shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits) {
if (numShardBits >= 20) {
return NULL; // the cache cannot be sharded into too many fine pieces
}
return new ShardedLRUCache(capacity, numShardBits);
return std::make_shared<ShardedLRUCache>(capacity, numShardBits);
}
} // namespace leveldb

@ -35,14 +35,13 @@ class CacheTest {
static const int kCacheSize = 1000;
std::vector<int> deleted_keys_;
std::vector<int> deleted_values_;
Cache* cache_;
shared_ptr<Cache> cache_;
CacheTest() : cache_(NewLRUCache(kCacheSize)) {
current_ = this;
}
~CacheTest() {
delete cache_;
}
int Lookup(int key) {

@ -25,7 +25,16 @@ FileLock::~FileLock() {
}
void Log(Logger* info_log, const char* format, ...) {
if (info_log != NULL) {
if (info_log) {
va_list ap;
va_start(ap, format);
info_log->Logv(format, ap);
va_end(ap);
}
}
void Log(const shared_ptr<Logger>& info_log, const char* format, ...) {
if (info_log) {
va_list ap;
va_start(ap, format);
info_log->Logv(format, ap);
@ -36,7 +45,7 @@ void Log(Logger* info_log, const char* format, ...) {
static Status DoWriteStringToFile(Env* env, const Slice& data,
const std::string& fname,
bool should_sync) {
WritableFile* file;
unique_ptr<WritableFile> file;
Status s = env->NewWritableFile(fname, &file);
if (!s.ok()) {
return s;
@ -45,10 +54,6 @@ static Status DoWriteStringToFile(Env* env, const Slice& data,
if (s.ok() && should_sync) {
s = file->Sync();
}
if (s.ok()) {
s = file->Close();
}
delete file; // Will auto-close if we did not close above
if (!s.ok()) {
env->DeleteFile(fname);
}
@ -67,7 +72,7 @@ Status WriteStringToFileSync(Env* env, const Slice& data,
Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
data->clear();
SequentialFile* file;
unique_ptr<SequentialFile> file;
Status s = env->NewSequentialFile(fname, &file);
if (!s.ok()) {
return s;
@ -86,7 +91,6 @@ Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
}
}
delete[] space;
delete file;
return s;
}

@ -490,7 +490,8 @@ Status HdfsEnv::UnlockFile(FileLock* lock) {
return Status::OK();
}
Status HdfsEnv::NewLogger(const std::string& fname, Logger** result) {
Status HdfsEnv::NewLogger(const std::string& fname,
shared_ptr<Logger>* result) {
HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname);
if (f == NULL || !f->isValid()) {
*result = NULL;
@ -515,7 +516,7 @@ Status HdfsEnv::NewLogger(const std::string& fname, Logger** result) {
#include "hdfs/env_hdfs.h"
namespace leveldb {
Status HdfsEnv::NewSequentialFile(const std::string& fname,
SequentialFile** result) {
unique_ptr<SequentialFile>* result) {
return Status::NotSupported("Not compiled with hdfs support");
}
}

@ -510,20 +510,21 @@ class PosixEnv : public Env {
}
virtual Status NewSequentialFile(const std::string& fname,
SequentialFile** result) {
unique_ptr<SequentialFile>* result) {
result->reset();
FILE* f = fopen(fname.c_str(), "r");
if (f == NULL) {
*result = NULL;
return IOError(fname, errno);
} else {
*result = new PosixSequentialFile(fname, f);
result->reset(new PosixSequentialFile(fname, f));
return Status::OK();
}
}
virtual Status NewRandomAccessFile(const std::string& fname,
RandomAccessFile** result) {
*result = NULL;
unique_ptr<RandomAccessFile>* result) {
result->reset();
Status s;
int fd = open(fname.c_str(), O_RDONLY);
if (fd < 0) {
@ -537,30 +538,30 @@ class PosixEnv : public Env {
if (s.ok()) {
void* base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
if (base != MAP_FAILED) {
*result = new PosixMmapReadableFile(fname, base, size);
result->reset(new PosixMmapReadableFile(fname, base, size));
} else {
s = IOError(fname, errno);
}
}
close(fd);
} else {
*result = new PosixRandomAccessFile(fname, fd);
result->reset(new PosixRandomAccessFile(fname, fd));
}
return s;
}
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result) {
unique_ptr<WritableFile>* result) {
result->reset();
Status s;
const int fd = open(fname.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
if (fd < 0) {
*result = NULL;
s = IOError(fname, errno);
} else {
if (useMmapWrite) {
*result = new PosixMmapFile(fname, fd, page_size_);
result->reset(new PosixMmapFile(fname, fd, page_size_));
} else {
*result = new PosixWritableFile(fname, fd, 65536);
result->reset(new PosixWritableFile(fname, fd, 65536));
}
}
return s;
@ -706,13 +707,14 @@ class PosixEnv : public Env {
return thread_id;
}
virtual Status NewLogger(const std::string& fname, Logger** result) {
virtual Status NewLogger(const std::string& fname,
shared_ptr<Logger>* result) {
FILE* f = fopen(fname.c_str(), "w");
if (f == NULL) {
*result = NULL;
result->reset();
return IOError(fname, errno);
} else {
*result = new PosixLogger(f, &PosixEnv::gettid);
result->reset(new PosixLogger(f, &PosixEnv::gettid));
return Status::OK();
}
}

@ -606,7 +606,7 @@ void WALDumper::DoCommand() {
}
};
SequentialFile* file;
unique_ptr<SequentialFile> file;
Env* env_ = Env::Default();
Status status = env_->NewSequentialFile(wal_file_, &file);
if (!status.ok()) {
@ -614,7 +614,7 @@ void WALDumper::DoCommand() {
status.ToString());
} else {
StdErrReporter reporter;
log::Reader reader(file, &reporter, true, 0);
log::Reader reader(std::move(file), &reporter, true, 0);
std::string scratch;
WriteBatch batch;
Slice record;

@ -23,7 +23,6 @@ Options::Options()
write_buffer_size(4<<20),
max_write_buffer_number(2),
max_open_files(1000),
block_cache(NULL),
block_size(4096),
block_restart_interval(16),
compression(kSnappyCompression),
@ -61,19 +60,18 @@ Options::Options()
}
void
Options::Dump(
Logger * log) const
Options::Dump(Logger* log) const
{
Log(log," Options.comparator: %s", comparator->Name());
Log(log," Options.create_if_missing: %d", create_if_missing);
Log(log," Options.error_if_exists: %d", error_if_exists);
Log(log," Options.paranoid_checks: %d", paranoid_checks);
Log(log," Options.env: %p", env);
Log(log," Options.info_log: %p", info_log);
Log(log," Options.info_log: %p", info_log.get());
Log(log," Options.write_buffer_size: %zd", write_buffer_size);
Log(log," Options.max_write_buffer_number: %d", max_write_buffer_number);
Log(log," Options.max_open_files: %d", max_open_files);
Log(log," Options.block_cache: %p", block_cache);
Log(log," Options.block_cache: %p", block_cache.get());
if (block_cache) {
Log(log," Options.block_cache_size: %zd",
block_cache->GetCapacity());

@ -37,10 +37,10 @@ class ErrorEnv : public EnvWrapper {
num_writable_file_errors_(0) { }
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result) {
unique_ptr<WritableFile>* result) {
result->reset();
if (writable_file_error_) {
++num_writable_file_errors_;
*result = NULL;
return Status::IOError(fname, "fake error");
}
return target()->NewWritableFile(fname, result);

Loading…
Cancel
Save