From 588bca2020cc554fa750924fdc33a1707f06a293 Mon Sep 17 00:00:00 2001 From: Igor Canadi Date: Tue, 15 Apr 2014 13:39:26 -0700 Subject: [PATCH] RocksDBLite Summary: Introducing RocksDBLite! Removes all the non-essential features and reduces the binary size. This effort should help our adoption on mobile. Binary size when compiling for IOS (`TARGET_OS=IOS m static_lib`) is down to 9MB from 15MB (without stripping) Test Plan: compiles :) Reviewers: dhruba, haobo, ljin, sdong, yhchiang Reviewed By: yhchiang CC: leveldb Differential Revision: https://reviews.facebook.net/D17835 --- Makefile | 5 + ROCKSDB_LITE.md | 20 + build_tools/build_detect_platform | 2 +- db/c.cc | 4 + db/db_filesnapshot.cc | 4 + db/db_impl.cc | 507 ++++++++++--------------- db/db_impl.h | 25 +- db/db_impl_debug.cc | 119 ++++++ db/db_stats_logger.cc | 5 +- db/repair.cc | 4 + db/tailing_iter.cc | 2 + db/tailing_iter.h | 2 + db/transaction_log_impl.cc | 4 +- db/transaction_log_impl.h | 4 +- db/version_set.cc | 2 + db/version_set.h | 11 +- include/rocksdb/db.h | 14 +- include/rocksdb/ldb_tool.h | 6 +- include/rocksdb/memtablerep.h | 23 +- include/rocksdb/options.h | 1 + include/rocksdb/table.h | 3 + include/utilities/backupable_db.h | 2 + include/utilities/geo_db.h | 2 + include/utilities/utility_db.h | 2 + table/plain_table_builder.cc | 2 + table/plain_table_builder.h | 2 + table/plain_table_factory.cc | 2 + table/plain_table_factory.h | 2 + table/plain_table_reader.cc | 2 + table/plain_table_reader.h | 2 + util/autovector.h | 8 +- util/blob_store.cc | 2 + util/blob_store.h | 2 + util/hash_linklist_rep.cc | 2 + util/hash_linklist_rep.h | 2 + util/hash_skiplist_rep.cc | 2 + util/hash_skiplist_rep.h | 2 + util/ldb_cmd.cc | 2 + util/ldb_tool.cc | 2 + util/logging.cc | 9 - util/logging.h | 4 - util/vectorrep.cc | 2 + utilities/backupable/backupable_db.cc | 4 + utilities/geodb/geodb_impl.cc | 4 + utilities/geodb/geodb_impl.h | 4 + utilities/redis/redis_list_exception.h | 2 + utilities/redis/redis_list_iterator.h | 4 +- utilities/redis/redis_lists.cc | 7 +- utilities/redis/redis_lists.h | 2 + utilities/ttl/db_ttl.cc | 2 + utilities/ttl/db_ttl.h | 2 + 51 files changed, 507 insertions(+), 348 deletions(-) create mode 100644 ROCKSDB_LITE.md create mode 100644 db/db_impl_debug.cc diff --git a/Makefile b/Makefile index a8f451b58..b57018c5c 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,12 @@ endif ifeq ($(MAKECMDGOALS),shared_lib) PLATFORM_SHARED_LDFLAGS=-fPIC +OPT += -DNDEBUG +endif +ifeq ($(MAKECMDGOALS),static_lib) +OPT += -DNDEBUG endif + #----------------------------------------------- # detect what platform we're building on diff --git a/ROCKSDB_LITE.md b/ROCKSDB_LITE.md new file mode 100644 index 000000000..e7e3752c8 --- /dev/null +++ b/ROCKSDB_LITE.md @@ -0,0 +1,20 @@ +# RocksDBLite + +RocksDBLite is a project focused on mobile use cases, which don't need a lot of fancy things we've built for server workloads and they are very sensitive to binary size. For that reason, we added a compile flag ROCKSDB_LITE that comments out a lot of the nonessential code and keeps the binary lean. 
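In code, the flag is an ordinary preprocessor guard around whole files or individual members; after this patch the IOS target in build_detect_platform defines it automatically. A minimal sketch of the pattern applied throughout this diff (the class name below is made up for illustration, not a RocksDB API):

    #ifndef ROCKSDB_LITE
    namespace rocksdb {
    // Everything inside the guard is compiled out when -DROCKSDB_LITE is set,
    // so it costs nothing in the mobile binary.
    class FancyServerOnlyFeature {
     public:
      void DoExpensiveServerSideWork();
    };
    }  // namespace rocksdb
    #endif  // ROCKSDB_LITE
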
+ +Some examples of the features disabled by ROCKSDB_LITE: +* compiled-in support for LDB tool +* No backupable DB +* No support for replication (which we provide in form of TrasactionalIterator) +* No advanced monitoring tools +* No special-purpose memtables that are highly optimized for specific use cases + +When adding a new big feature to RocksDB, please add ROCKSDB_LITE compile guard if: +* Nobody from mobile really needs your feature, +* Your feature is adding a lot of weight to the binary. + +Don't add ROCKSDB_LITE compile guard if: +* It would introduce a lot of code complexity. Compile guards make code harder to read. It's a trade-off. +* Your feature is not adding a lot of weight. + +If unsure, ask. :) diff --git a/build_tools/build_detect_platform b/build_tools/build_detect_platform index 94aafd62e..0e67eb622 100755 --- a/build_tools/build_detect_platform +++ b/build_tools/build_detect_platform @@ -100,7 +100,7 @@ case "$TARGET_OS" in ;; IOS) PLATFORM=IOS - COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX -DIOS_CROSS_COMPILE" + COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX -DIOS_CROSS_COMPILE -DROCKSDB_LITE" PLATFORM_SHARED_EXT=dylib PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name " CROSS_COMPILE=true diff --git a/db/c.cc b/db/c.cc index b566daf64..e3a0a29a0 100644 --- a/db/c.cc +++ b/db/c.cc @@ -7,6 +7,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE + #include "rocksdb/c.h" #include @@ -1467,3 +1469,5 @@ extern void rocksdb_livefiles_destroy( } } // end extern "C" + +#endif // ROCKSDB_LITE diff --git a/db/db_filesnapshot.cc b/db/db_filesnapshot.cc index 61a818465..1e1ec9757 100644 --- a/db/db_filesnapshot.cc +++ b/db/db_filesnapshot.cc @@ -7,6 +7,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#ifndef ROCKSDB_LITE + #define __STDC_FORMAT_MACROS #include #include @@ -166,3 +168,5 @@ Status DBImpl::GetSortedWalFiles(VectorLogPtr& files) { } } + +#endif // ROCKSDB_LITE diff --git a/db/db_impl.cc b/db/db_impl.cc index 4b2e97689..fb173b42d 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -464,10 +464,6 @@ DBImpl::~DBImpl() { LogFlush(options_.info_log); } -uint64_t DBImpl::TEST_Current_Manifest_FileNo() { - return versions_->ManifestFileNumber(); -} - Status DBImpl::NewDB() { VersionEdit new_db; new_db.SetLogNumber(0); @@ -771,6 +767,7 @@ void DBImpl::DeleteObsoleteFiles() { } } +#ifndef ROCKSDB_LITE // 1. Go through all archived files and // a. if ttl is enabled, delete outdated files // b. 
if archive size limit is enabled, delete empty files, @@ -895,6 +892,169 @@ void DBImpl::PurgeObsoleteWALFiles() { } } +Status DBImpl::GetSortedWalsOfType(const std::string& path, + VectorLogPtr& log_files, + WalFileType log_type) { + std::vector all_files; + const Status status = env_->GetChildren(path, &all_files); + if (!status.ok()) { + return status; + } + log_files.reserve(all_files.size()); + for (const auto& f : all_files) { + uint64_t number; + FileType type; + if (ParseFileName(f, &number, &type) && type == kLogFile) { + WriteBatch batch; + Status s = ReadFirstRecord(log_type, number, &batch); + if (!s.ok()) { + if (CheckWalFileExistsAndEmpty(log_type, number)) { + continue; + } + return s; + } + + uint64_t size_bytes; + s = env_->GetFileSize(LogFileName(path, number), &size_bytes); + if (!s.ok()) { + return s; + } + + log_files.push_back(std::move(unique_ptr( + new LogFileImpl(number, log_type, + WriteBatchInternal::Sequence(&batch), size_bytes)))); + } + } + CompareLogByPointer compare_log_files; + std::sort(log_files.begin(), log_files.end(), compare_log_files); + return status; +} + +Status DBImpl::RetainProbableWalFiles(VectorLogPtr& all_logs, + const SequenceNumber target) { + int64_t start = 0; // signed to avoid overflow when target is < first file. + int64_t end = static_cast(all_logs.size()) - 1; + // Binary Search. avoid opening all files. + while (end >= start) { + int64_t mid = start + (end - start) / 2; // Avoid overflow. + SequenceNumber current_seq_num = all_logs.at(mid)->StartSequence(); + if (current_seq_num == target) { + end = mid; + break; + } else if (current_seq_num < target) { + start = mid + 1; + } else { + end = mid - 1; + } + } + size_t start_index = std::max(0l, end); // end could be -ve. + // The last wal file is always included + all_logs.erase(all_logs.begin(), all_logs.begin() + start_index); + return Status::OK(); +} + +bool DBImpl::CheckWalFileExistsAndEmpty(const WalFileType type, + const uint64_t number) { + const std::string fname = (type == kAliveLogFile) + ? LogFileName(options_.wal_dir, number) + : ArchivedLogFileName(options_.wal_dir, number); + uint64_t file_size; + Status s = env_->GetFileSize(fname, &file_size); + return (s.ok() && (file_size == 0)); +} + +Status DBImpl::ReadFirstRecord(const WalFileType type, const uint64_t number, + WriteBatch* const result) { + if (type == kAliveLogFile) { + std::string fname = LogFileName(options_.wal_dir, number); + Status status = ReadFirstLine(fname, result); + if (status.ok() || env_->FileExists(fname)) { + // return OK or any error that is not caused non-existing file + return status; + } + + // check if the file got moved to archive. 
+ std::string archived_file = ArchivedLogFileName(options_.wal_dir, number); + Status s = ReadFirstLine(archived_file, result); + if (s.ok() || env_->FileExists(archived_file)) { + return s; + } + return Status::NotFound("Log File has been deleted: " + archived_file); + } else if (type == kArchivedLogFile) { + std::string fname = ArchivedLogFileName(options_.wal_dir, number); + Status status = ReadFirstLine(fname, result); + return status; + } + return Status::NotSupported("File Type Not Known: " + std::to_string(type)); +} + +Status DBImpl::ReadFirstLine(const std::string& fname, + WriteBatch* const batch) { + struct LogReporter : public log::Reader::Reporter { + Env* env; + Logger* info_log; + const char* fname; + + Status* status; + bool ignore_error; // true if options_.paranoid_checks==false + virtual void Corruption(size_t bytes, const Status& s) { + Log(info_log, "%s%s: dropping %d bytes; %s", + (this->ignore_error ? "(ignoring error) " : ""), fname, + static_cast(bytes), s.ToString().c_str()); + if (this->status->ok()) { + // only keep the first error + *this->status = s; + } + } + }; + + unique_ptr file; + Status status = env_->NewSequentialFile(fname, &file, storage_options_); + + if (!status.ok()) { + return status; + } + + LogReporter reporter; + reporter.env = env_; + reporter.info_log = options_.info_log.get(); + reporter.fname = fname.c_str(); + reporter.status = &status; + reporter.ignore_error = !options_.paranoid_checks; + log::Reader reader(std::move(file), &reporter, true /*checksum*/, + 0 /*initial_offset*/); + std::string scratch; + Slice record; + + if (reader.ReadRecord(&record, &scratch) && + (status.ok() || !options_.paranoid_checks)) { + if (record.size() < 12) { + reporter.Corruption(record.size(), + Status::Corruption("log record too small")); + // TODO read record's till the first no corrupt entry? + } else { + WriteBatchInternal::SetContents(batch, record); + return Status::OK(); + } + } + + // ReadRecord returns false on EOF, which is deemed as OK() by Reader + if (status.ok()) { + status = Status::Corruption("eof reached"); + } + return status; +} + +struct CompareLogByPointer { + bool operator()(const unique_ptr& a, const unique_ptr& b) { + LogFileImpl* a_impl = dynamic_cast(a.get()); + LogFileImpl* b_impl = dynamic_cast(b.get()); + return *a_impl < *b_impl; + } +}; + +#endif // ROCKSDB_LITE + Status DBImpl::Recover( const std::vector& column_families, bool read_only, bool error_if_log_file_exist) { @@ -1533,198 +1693,6 @@ SequenceNumber DBImpl::GetLatestSequenceNumber() const { return versions_->LastSequence(); } -Status DBImpl::GetUpdatesSince( - SequenceNumber seq, unique_ptr* iter, - const TransactionLogIterator::ReadOptions& read_options) { - - RecordTick(options_.statistics.get(), GET_UPDATES_SINCE_CALLS); - if (seq > versions_->LastSequence()) { - return Status::NotFound( - "Requested sequence not yet written in the db"); - } - // Get all sorted Wal Files. - // Do binary search and open files and find the seq number. 
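// RetainProbableWalFiles in this patch keeps only the WAL files that can
// contain sequence numbers >= the requested one: it binary-searches the
// sorted file list by StartSequence() and drops everything before the last
// file whose start sequence is <= the target. A minimal, self-contained
// sketch of the same logic over plain sequence numbers (RetainProbable and
// start_seqs are illustrative names, not RocksDB APIs):
#include <algorithm>
#include <cstdint>
#include <vector>

static void RetainProbable(std::vector<uint64_t>* start_seqs, uint64_t target) {
  int64_t start = 0;
  int64_t end = static_cast<int64_t>(start_seqs->size()) - 1;
  while (end >= start) {
    int64_t mid = start + (end - start) / 2;  // avoids overflow on huge lists
    uint64_t cur = (*start_seqs)[mid];
    if (cur == target) {
      end = mid;
      break;
    } else if (cur < target) {
      start = mid + 1;
    } else {
      end = mid - 1;
    }
  }
  // end is now the last file whose start sequence is <= target, or -1 if the
  // target precedes every file; keep that file and everything after it.
  int64_t first_kept = std::max<int64_t>(0, end);
  start_seqs->erase(start_seqs->begin(), start_seqs->begin() + first_kept);
}
// Example: with start sequences {10, 25, 40} and target 30, the search ends
// with end == 1, so 10 is dropped and 25, 40 are kept (25 may contain 30).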
- - std::unique_ptr wal_files(new VectorLogPtr); - Status s = GetSortedWalFiles(*wal_files); - if (!s.ok()) { - return s; - } - - s = RetainProbableWalFiles(*wal_files, seq); - if (!s.ok()) { - return s; - } - iter->reset(new TransactionLogIteratorImpl(options_.wal_dir, &options_, - read_options, storage_options_, - seq, std::move(wal_files), this)); - return (*iter)->status(); -} - -Status DBImpl::RetainProbableWalFiles(VectorLogPtr& all_logs, - const SequenceNumber target) { - long start = 0; // signed to avoid overflow when target is < first file. - long end = static_cast(all_logs.size()) - 1; - // Binary Search. avoid opening all files. - while (end >= start) { - long mid = start + (end - start) / 2; // Avoid overflow. - SequenceNumber current_seq_num = all_logs.at(mid)->StartSequence(); - if (current_seq_num == target) { - end = mid; - break; - } else if (current_seq_num < target) { - start = mid + 1; - } else { - end = mid - 1; - } - } - size_t start_index = std::max(0l, end); // end could be -ve. - // The last wal file is always included - all_logs.erase(all_logs.begin(), all_logs.begin() + start_index); - return Status::OK(); -} - -bool DBImpl::CheckWalFileExistsAndEmpty(const WalFileType type, - const uint64_t number) { - const std::string fname = (type == kAliveLogFile) ? - LogFileName(options_.wal_dir, number) : - ArchivedLogFileName(options_.wal_dir, number); - uint64_t file_size; - Status s = env_->GetFileSize(fname, &file_size); - return (s.ok() && (file_size == 0)); -} - -Status DBImpl::ReadFirstRecord(const WalFileType type, const uint64_t number, - WriteBatch* const result) { - - if (type == kAliveLogFile) { - std::string fname = LogFileName(options_.wal_dir, number); - Status status = ReadFirstLine(fname, result); - if (status.ok() || env_->FileExists(fname)) { - // return OK or any error that is not caused non-existing file - return status; - } - - // check if the file got moved to archive. - std::string archived_file = - ArchivedLogFileName(options_.wal_dir, number); - Status s = ReadFirstLine(archived_file, result); - if (s.ok() || env_->FileExists(archived_file)) { - return s; - } - return Status::NotFound("Log File has been deleted: " + archived_file); - } else if (type == kArchivedLogFile) { - std::string fname = ArchivedLogFileName(options_.wal_dir, number); - Status status = ReadFirstLine(fname, result); - return status; - } - return Status::NotSupported("File Type Not Known: " + std::to_string(type)); -} - -Status DBImpl::ReadFirstLine(const std::string& fname, - WriteBatch* const batch) { - struct LogReporter : public log::Reader::Reporter { - Env* env; - Logger* info_log; - const char* fname; - - Status* status; - bool ignore_error; // true if options_.paranoid_checks==false - virtual void Corruption(size_t bytes, const Status& s) { - Log(info_log, "%s%s: dropping %d bytes; %s", - (this->ignore_error ? 
"(ignoring error) " : ""), - fname, static_cast(bytes), s.ToString().c_str()); - if (this->status->ok()) { - // only keep the first error - *this->status = s; - } - } - }; - - unique_ptr file; - Status status = env_->NewSequentialFile(fname, &file, storage_options_); - - if (!status.ok()) { - return status; - } - - - LogReporter reporter; - reporter.env = env_; - reporter.info_log = options_.info_log.get(); - reporter.fname = fname.c_str(); - reporter.status = &status; - reporter.ignore_error = !options_.paranoid_checks; - log::Reader reader(std::move(file), &reporter, true/*checksum*/, - 0/*initial_offset*/); - std::string scratch; - Slice record; - - if (reader.ReadRecord(&record, &scratch) && - (status.ok() || !options_.paranoid_checks)) { - if (record.size() < 12) { - reporter.Corruption( - record.size(), Status::Corruption("log record too small")); - // TODO read record's till the first no corrupt entry? - } else { - WriteBatchInternal::SetContents(batch, record); - return Status::OK(); - } - } - - // ReadRecord returns false on EOF, which is deemed as OK() by Reader - if (status.ok()) { - status = Status::Corruption("eof reached"); - } - return status; -} - -struct CompareLogByPointer { - bool operator() (const unique_ptr& a, - const unique_ptr& b) { - LogFileImpl* a_impl = dynamic_cast(a.get()); - LogFileImpl* b_impl = dynamic_cast(b.get()); - return *a_impl < *b_impl; - } -}; - -Status DBImpl::GetSortedWalsOfType(const std::string& path, - VectorLogPtr& log_files, WalFileType log_type) { - std::vector all_files; - const Status status = env_->GetChildren(path, &all_files); - if (!status.ok()) { - return status; - } - log_files.reserve(all_files.size()); - for (const auto& f : all_files) { - uint64_t number; - FileType type; - if (ParseFileName(f, &number, &type) && type == kLogFile){ - - WriteBatch batch; - Status s = ReadFirstRecord(log_type, number, &batch); - if (!s.ok()) { - if (CheckWalFileExistsAndEmpty(log_type, number)) { - continue; - } - return s; - } - - uint64_t size_bytes; - s = env_->GetFileSize(LogFileName(path, number), &size_bytes); - if (!s.ok()) { - return s; - } - - log_files.push_back(std::move(unique_ptr(new LogFileImpl( - number, log_type, WriteBatchInternal::Sequence(&batch), size_bytes)))); - } - } - CompareLogByPointer compare_log_files; - std::sort(log_files.begin(), log_files.end(), compare_log_files); - return status; -} - Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level, int output_level, const Slice* begin, const Slice* end) { @@ -1798,23 +1766,6 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level, return manual.status; } -Status DBImpl::TEST_CompactRange(int level, const Slice* begin, - const Slice* end, - ColumnFamilyHandle* column_family) { - ColumnFamilyData* cfd; - if (column_family == nullptr) { - cfd = default_cf_handle_->cfd(); - } else { - auto cfh = reinterpret_cast(column_family); - cfd = cfh->cfd(); - } - int output_level = - (cfd->options()->compaction_style == kCompactionStyleUniversal) - ? 
level - : level + 1; - return RunManualCompaction(cfd, level, output_level, begin, end); -} - Status DBImpl::FlushMemTable(ColumnFamilyData* cfd, const FlushOptions& options) { // nullptr batch means just wait for earlier writes to be done @@ -1839,38 +1790,6 @@ Status DBImpl::WaitForFlushMemTable(ColumnFamilyData* cfd) { return s; } -Status DBImpl::TEST_FlushMemTable(bool wait) { - FlushOptions fo; - fo.wait = wait; - return FlushMemTable(default_cf_handle_->cfd(), fo); -} - -Status DBImpl::TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family) { - ColumnFamilyData* cfd; - if (column_family == nullptr) { - cfd = default_cf_handle_->cfd(); - } else { - auto cfh = reinterpret_cast(column_family); - cfd = cfh->cfd(); - } - return WaitForFlushMemTable(cfd); -} - -Status DBImpl::TEST_WaitForCompact() { - // Wait until the compaction completes - - // TODO: a bug here. This function actually does not necessarily - // wait for compact. It actually waits for scheduled compaction - // OR flush to finish. - - MutexLock l(&mutex_); - while ((bg_compaction_scheduled_ || bg_flush_scheduled_) && - bg_error_.ok()) { - bg_cv_.Wait(); - } - return bg_error_; -} - void DBImpl::MaybeScheduleFlushOrCompaction() { mutex_.AssertHeld(); bg_schedule_needed_ = false; @@ -2026,16 +1945,6 @@ void DBImpl::BackgroundCallFlush() { } } - -void DBImpl::TEST_PurgeObsoleteteWAL() { - PurgeObsoleteWALFiles(); -} - -uint64_t DBImpl::TEST_GetLevel0TotalSize() { - MutexLock l(&mutex_); - return default_cf_handle_->cfd()->current()->NumLevelBytes(0); -} - void DBImpl::BackgroundCallCompaction() { bool madeProgress = false; DeletionState deletion_state(true); @@ -3222,36 +3131,6 @@ ColumnFamilyHandle* DBImpl::DefaultColumnFamily() const { return default_cf_handle_; } -Iterator* DBImpl::TEST_NewInternalIterator(ColumnFamilyHandle* column_family) { - ColumnFamilyData* cfd; - if (column_family == nullptr) { - cfd = default_cf_handle_->cfd(); - } else { - auto cfh = reinterpret_cast(column_family); - cfd = cfh->cfd(); - } - - mutex_.Lock(); - SuperVersion* super_version = cfd->GetSuperVersion()->Ref(); - mutex_.Unlock(); - ReadOptions roptions; - roptions.prefix_seek = true; - return NewInternalIterator(roptions, cfd, super_version); -} - -int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes( - ColumnFamilyHandle* column_family) { - ColumnFamilyData* cfd; - if (column_family == nullptr) { - cfd = default_cf_handle_->cfd(); - } else { - auto cfh = reinterpret_cast(column_family); - cfd = cfh->cfd(); - } - MutexLock l(&mutex_); - return cfd->current()->MaxNextLevelOverlappingBytes(); -} - Status DBImpl::Get(const ReadOptions& options, ColumnFamilyHandle* column_family, const Slice& key, std::string* value) { @@ -3590,7 +3469,12 @@ Iterator* DBImpl::NewIterator(const ReadOptions& options, Iterator* iter; if (options.tailing) { +#ifdef ROCKSDB_LITE + // not supported in lite version + return nullptr; +#else iter = new TailingIterator(env_, this, options, cfd); +#endif } else { SequenceNumber latest_snapshot = versions_->LastSequence(); SuperVersion* sv = nullptr; @@ -3642,10 +3526,15 @@ Status DBImpl::NewIterators( } if (options.tailing) { +#ifdef ROCKSDB_LITE + return Status::InvalidArgument( + "Tailing interator not supported in RocksDB lite"); +#else for (auto cfh : column_families) { auto cfd = reinterpret_cast(cfh)->cfd(); iterators->push_back(new TailingIterator(env_, this, options, cfd)); } +#endif } else { for (size_t i = 0; i < column_families.size(); ++i) { auto cfh = reinterpret_cast(column_families[i]); @@ -4132,6 +4021,7 
@@ Status DBImpl::MakeRoomForWrite(ColumnFamilyData* cfd, bool force) { return s; } +#ifndef ROCKSDB_LITE Status DBImpl::GetPropertiesOfAllTables(ColumnFamilyHandle* column_family, TablePropertiesCollection* props) { auto cfh = reinterpret_cast(column_family); @@ -4152,6 +4042,7 @@ Status DBImpl::GetPropertiesOfAllTables(ColumnFamilyHandle* column_family, return s; } +#endif // ROCKSDB_LITE const std::string& DBImpl::GetName() const { return dbname_; @@ -4211,6 +4102,34 @@ inline void DBImpl::DelayLoggingAndReset() { } } +#ifndef ROCKSDB_LITE +Status DBImpl::GetUpdatesSince( + SequenceNumber seq, unique_ptr* iter, + const TransactionLogIterator::ReadOptions& read_options) { + + RecordTick(options_.statistics.get(), GET_UPDATES_SINCE_CALLS); + if (seq > versions_->LastSequence()) { + return Status::NotFound("Requested sequence not yet written in the db"); + } + // Get all sorted Wal Files. + // Do binary search and open files and find the seq number. + + std::unique_ptr wal_files(new VectorLogPtr); + Status s = GetSortedWalFiles(*wal_files); + if (!s.ok()) { + return s; + } + + s = RetainProbableWalFiles(*wal_files, seq); + if (!s.ok()) { + return s; + } + iter->reset(new TransactionLogIteratorImpl(options_.wal_dir, &options_, + read_options, storage_options_, + seq, std::move(wal_files), this)); + return (*iter)->status(); +} + Status DBImpl::DeleteFile(std::string name) { uint64_t number; FileType type; @@ -4294,6 +4213,7 @@ void DBImpl::GetLiveFilesMetaData(std::vector* metadata) { MutexLock l(&mutex_); versions_->GetLiveFilesMetaData(metadata); } +#endif // ROCKSDB_LITE Status DBImpl::CheckConsistency() { mutex_.AssertHeld(); @@ -4322,23 +4242,6 @@ Status DBImpl::CheckConsistency() { } } -void DBImpl::TEST_GetFilesMetaData( - ColumnFamilyHandle* column_family, - std::vector>* metadata) { - auto cfh = reinterpret_cast(column_family); - auto cfd = cfh->cfd(); - MutexLock l(&mutex_); - metadata->resize(NumberLevels()); - for (int level = 0; level < NumberLevels(); level++) { - const std::vector& files = cfd->current()->files_[level]; - - (*metadata)[level].clear(); - for (const auto& f : files) { - (*metadata)[level].push_back(*f); - } - } -} - Status DBImpl::GetDbIdentity(std::string& identity) { std::string idfilename = IdentityFileName(dbname_); unique_ptr idfile; diff --git a/db/db_impl.h b/db/db_impl.h index e0413a748..ecf30e1a9 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -115,6 +115,10 @@ class DBImpl : public DB { using DB::Flush; virtual Status Flush(const FlushOptions& options, ColumnFamilyHandle* column_family); + + virtual SequenceNumber GetLatestSequenceNumber() const; + +#ifndef ROCKSDB_LITE virtual Status DisableFileDeletions(); virtual Status EnableFileDeletions(bool force); // All the returned filenames start with "/" @@ -122,7 +126,7 @@ class DBImpl : public DB { uint64_t* manifest_file_size, bool flush_memtable = true); virtual Status GetSortedWalFiles(VectorLogPtr& files); - virtual SequenceNumber GetLatestSequenceNumber() const; + virtual Status GetUpdatesSince( SequenceNumber seq_number, unique_ptr* iter, const TransactionLogIterator::ReadOptions& @@ -130,6 +134,7 @@ class DBImpl : public DB { virtual Status DeleteFile(std::string name); virtual void GetLiveFilesMetaData(std::vector* metadata); +#endif // ROCKSDB_LITE // checks if all live files exist on file system and that their file sizes // match to our in-memory records @@ -141,7 +146,9 @@ class DBImpl : public DB { int output_level, const Slice* begin, const Slice* end); +#ifndef NDEBUG // Extra methods (for 
testing) that are not in the public DB interface + // Implemented in db_impl_debug.cc // Compact any files in the named level that overlap [*begin, *end] Status TEST_CompactRange(int level, const Slice* begin, const Slice* end, @@ -184,6 +191,8 @@ class DBImpl : public DB { void TEST_GetFilesMetaData(ColumnFamilyHandle* column_family, std::vector>* metadata); +#endif // NDEBUG + // needed for CleanupIteratorState struct DeletionState { inline bool HaveSomethingToDelete() const { @@ -270,7 +279,9 @@ class DBImpl : public DB { private: friend class DB; friend class InternalStats; +#ifndef ROCKSDB_LITE friend class TailingIterator; +#endif friend struct SuperVersion; struct CompactionState; struct Writer; @@ -326,8 +337,11 @@ class DBImpl : public DB { Status WaitForFlushMemTable(ColumnFamilyData* cfd); void MaybeScheduleLogDBDeployStats(); + +#ifndef ROCKSDB_LITE static void BGLogDBDeployStats(void* db); void LogDBDeployStats(); +#endif // ROCKSDB_LITE void MaybeScheduleFlushOrCompaction(); static void BGWorkCompaction(void* db); @@ -375,6 +389,12 @@ class DBImpl : public DB { void AllocateCompactionOutputFileNumbers(CompactionState* compact); void ReleaseCompactionUnusedFileNumbers(CompactionState* compact); +#ifdef ROCKSDB_LITE + void PurgeObsoleteWALFiles() { + // this function is used for archiving WAL files. we don't need this in + // ROCKSDB_LITE + } +#else void PurgeObsoleteWALFiles(); Status GetSortedWalsOfType(const std::string& path, @@ -394,6 +414,7 @@ class DBImpl : public DB { WriteBatch* const result); Status ReadFirstLine(const std::string& fname, WriteBatch* const batch); +#endif // ROCKSDB_LITE void PrintStatistics(); @@ -540,10 +561,12 @@ class DBImpl : public DB { void InstallSuperVersion(ColumnFamilyData* cfd, DeletionState& deletion_state); +#ifndef ROCKSDB_LITE using DB::GetPropertiesOfAllTables; virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family, TablePropertiesCollection* props) override; +#endif // ROCKSDB_LITE // Function that Get and KeyMayExist call with no_io true or false // Note: 'value_found' from KeyMayExist propagates here diff --git a/db/db_impl_debug.cc b/db/db_impl_debug.cc new file mode 100644 index 000000000..1d0b70d26 --- /dev/null +++ b/db/db_impl_debug.cc @@ -0,0 +1,119 @@ +// Copyright (c) 2013, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
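// The db_impl.h changes above rely on two guard patterns: debug-only TEST_
// helpers sit behind #ifndef NDEBUG (with their definitions collected in this
// new db_impl_debug.cc), while features dropped from the lite build keep
// their call sites compiling by degrading to inline no-ops under
// ROCKSDB_LITE. A condensed, hypothetical sketch of the header side
// (ExampleImpl and ArchiveWal are illustrative names, not RocksDB APIs):
#include <cstdint>

class ExampleImpl {
 public:
#ifndef NDEBUG
  // Only built when asserts are on; defined in a *_debug.cc file.
  uint64_t TEST_CurrentFileNumber();
#endif  // NDEBUG

#ifdef ROCKSDB_LITE
  // Compiled out in lite mode: the symbol stays, the body does nothing.
  void ArchiveWal() {}
#else
  void ArchiveWal();  // full implementation, excluded from the lite binary
#endif  // ROCKSDB_LITE
};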
+ +#ifndef NDEBUG + +#include "db/db_impl.h" + +void DBImpl::TEST_PurgeObsoleteteWAL() { PurgeObsoleteWALFiles(); } + +uint64_t DBImpl::TEST_GetLevel0TotalSize() { + MutexLock l(&mutex_); + return default_cf_handle_->cfd()->current()->NumLevelBytes(0); +} + +Iterator* DBImpl::TEST_NewInternalIterator(ColumnFamilyHandle* column_family) { + ColumnFamilyData* cfd; + if (column_family == nullptr) { + cfd = default_cf_handle_->cfd(); + } else { + auto cfh = reinterpret_cast(column_family); + cfd = cfh->cfd(); + } + + mutex_.Lock(); + SuperVersion* super_version = cfd->GetSuperVersion()->Ref(); + mutex_.Unlock(); + ReadOptions roptions; + roptions.prefix_seek = true; + return NewInternalIterator(roptions, cfd, super_version); +} + +int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes( + ColumnFamilyHandle* column_family) { + ColumnFamilyData* cfd; + if (column_family == nullptr) { + cfd = default_cf_handle_->cfd(); + } else { + auto cfh = reinterpret_cast(column_family); + cfd = cfh->cfd(); + } + MutexLock l(&mutex_); + return cfd->current()->MaxNextLevelOverlappingBytes(); +} + +void DBImpl::TEST_GetFilesMetaData( + ColumnFamilyHandle* column_family, + std::vector>* metadata) { + auto cfh = reinterpret_cast(column_family); + auto cfd = cfh->cfd(); + MutexLock l(&mutex_); + metadata->resize(NumberLevels()); + for (int level = 0; level < NumberLevels(); level++) { + const std::vector& files = cfd->current()->files_[level]; + + (*metadata)[level].clear(); + for (const auto& f : files) { + (*metadata)[level].push_back(*f); + } + } +} + +uint64_t DBImpl::TEST_Current_Manifest_FileNo() { + return versions_->ManifestFileNumber(); +} + +Status DBImpl::TEST_CompactRange(int level, const Slice* begin, + const Slice* end, + ColumnFamilyHandle* column_family) { + ColumnFamilyData* cfd; + if (column_family == nullptr) { + cfd = default_cf_handle_->cfd(); + } else { + auto cfh = reinterpret_cast(column_family); + cfd = cfh->cfd(); + } + int output_level = + (cfd->options()->compaction_style == kCompactionStyleUniversal) + ? level + : level + 1; + return RunManualCompaction(cfd, level, output_level, begin, end); +} + +Status DBImpl::TEST_FlushMemTable(bool wait) { + FlushOptions fo; + fo.wait = wait; + return FlushMemTable(default_cf_handle_->cfd(), fo); +} + +Status DBImpl::TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family) { + ColumnFamilyData* cfd; + if (column_family == nullptr) { + cfd = default_cf_handle_->cfd(); + } else { + auto cfh = reinterpret_cast(column_family); + cfd = cfh->cfd(); + } + return WaitForFlushMemTable(cfd); +} + +Status DBImpl::TEST_WaitForCompact() { + // Wait until the compaction completes + + // TODO: a bug here. This function actually does not necessarily + // wait for compact. It actually waits for scheduled compaction + // OR flush to finish. + + MutexLock l(&mutex_); + while ((bg_compaction_scheduled_ || bg_flush_scheduled_) && bg_error_.ok()) { + bg_cv_.Wait(); + } + return bg_error_; +} +#endif // NDEBUG diff --git a/db/db_stats_logger.cc b/db/db_stats_logger.cc index 46918d4e7..288e1bf80 100644 --- a/db/db_stats_logger.cc +++ b/db/db_stats_logger.cc @@ -20,7 +20,8 @@ namespace rocksdb { void DBImpl::MaybeScheduleLogDBDeployStats() { - +// we did say maybe +#ifndef ROCKSDB_LITE // There is a lock in the actual logger. 
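// The stats-logger change here uses a third variant of the same idea: the
// guard wraps the function body rather than the declaration, so the member
// still exists in lite builds but compiles to a no-op. In miniature
// (MaybeDoServerOnlyWork is an illustrative name, not a RocksDB API):
void MaybeDoServerOnlyWork() {
#ifndef ROCKSDB_LITE
  // the real work goes here; in lite builds the function is an empty shell
#endif  // ROCKSDB_LITE
}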
if (!logger_ || options_.db_stats_log_interval < 0 || host_name_.empty()) { @@ -89,6 +90,6 @@ void DBImpl::LogDBDeployStats() { bg_logstats_scheduled_ = false; bg_cv_.SignalAll(); mutex_.Unlock(); +#endif } - } diff --git a/db/repair.cc b/db/repair.cc index ab2850523..c154c04ac 100644 --- a/db/repair.cc +++ b/db/repair.cc @@ -29,6 +29,8 @@ // Store per-table metadata (smallest, largest, largest-seq#, ...) // in the table's meta section to speed up ScanTable. +#ifndef ROCKSDB_LITE + #include "db/builder.h" #include "db/db_impl.h" #include "db/dbformat.h" @@ -396,3 +398,5 @@ Status RepairDB(const std::string& dbname, const Options& options) { } } // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/db/tailing_iter.cc b/db/tailing_iter.cc index f22dcc3f8..41d2b225a 100644 --- a/db/tailing_iter.cc +++ b/db/tailing_iter.cc @@ -3,6 +3,7 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. +#ifndef ROCKSDB_LITE #include "db/tailing_iter.h" #include @@ -217,3 +218,4 @@ void TailingIterator::SeekImmutable(const Slice& target) { } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/db/tailing_iter.h b/db/tailing_iter.h index 7ae8f9d9b..a66a85bc5 100644 --- a/db/tailing_iter.h +++ b/db/tailing_iter.h @@ -2,6 +2,7 @@ // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. +#ifndef ROCKSDB_LITE #pragma once #include @@ -92,3 +93,4 @@ class TailingIterator : public Iterator { }; } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/db/transaction_log_impl.cc b/db/transaction_log_impl.cc index 0394855c3..82e58f148 100644 --- a/db/transaction_log_impl.cc +++ b/db/transaction_log_impl.cc @@ -2,7 +2,8 @@ // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -// + +#ifndef ROCKSDB_LITE #include "db/transaction_log_impl.h" #include "db/write_batch_internal.h" @@ -257,3 +258,4 @@ Status TransactionLogIteratorImpl::OpenLogReader(const LogFile* logFile) { return Status::OK(); } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/db/transaction_log_impl.h b/db/transaction_log_impl.h index 98e4e26b4..319b01cb1 100644 --- a/db/transaction_log_impl.h +++ b/db/transaction_log_impl.h @@ -2,7 +2,8 @@ // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
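// The TransactionLogIterator implementation guarded in these
// transaction_log_impl hunks backs DB::GetUpdatesSince(), the replication
// hook that RocksDBLite drops. A rough sketch of a consumer in a full
// (non-lite) build; the Valid()/Next()/GetBatch() calls and the defaulted
// ReadOptions argument reflect the TransactionLogIterator interface as I
// recall it and should be checked against include/rocksdb/transaction_log.h:
#ifndef ROCKSDB_LITE
#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/transaction_log.h"

rocksdb::Status TailUpdates(rocksdb::DB* db, rocksdb::SequenceNumber from) {
  std::unique_ptr<rocksdb::TransactionLogIterator> iter;
  rocksdb::Status s = db->GetUpdatesSince(from, &iter);
  if (!s.ok()) {
    return s;  // e.g. NotFound when `from` is newer than the last sequence
  }
  while (iter->Valid()) {
    auto batch = iter->GetBatch();
    // batch.sequence is the first sequence number covered by
    // batch.writeBatchPtr; ship it to a replica or apply it here.
    iter->Next();
  }
  return iter->status();
}
#endif  // ROCKSDB_LITE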
-// + +#ifndef ROCKSDB_LITE #pragma once #include @@ -116,3 +117,4 @@ class TransactionLogIteratorImpl : public TransactionLogIterator { Status OpenLogReader(const LogFile* file); }; } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/db/version_set.cc b/db/version_set.cc index 2c3121752..12a8f670b 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -2170,6 +2170,7 @@ Status VersionSet::ListColumnFamilies(std::vector* column_families, return s; } +#ifndef ROCKSDB_LITE Status VersionSet::ReduceNumberOfLevels(const std::string& dbname, const Options* options, const EnvOptions& storage_options, @@ -2430,6 +2431,7 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname, return s; } +#endif // ROCKSDB_LITE void VersionSet::MarkFileNumberUsed(uint64_t number) { if (next_file_number_ <= number) { diff --git a/db/version_set.h b/db/version_set.h index 05b391dca..fd3e5c893 100644 --- a/db/version_set.h +++ b/db/version_set.h @@ -319,6 +319,7 @@ class VersionSet { static Status ListColumnFamilies(std::vector* column_families, const std::string& dbname, Env* env); +#ifndef ROCKSDB_LITE // Try to reduce the number of levels. This call is valid when // only one level from the new max level to the old // max level containing files. @@ -333,6 +334,12 @@ class VersionSet { const EnvOptions& storage_options, int new_levels); + // printf contents (for debugging) + Status DumpManifest(Options& options, std::string& manifestFileName, + bool verbose, bool hex = false); + +#endif // ROCKSDB_LITE + // Return the current manifest file number uint64_t ManifestFileNumber() const { return manifest_file_number_; } @@ -393,10 +400,6 @@ class VersionSet { // "key" as of version "v". uint64_t ApproximateOffsetOf(Version* v, const InternalKey& key); - // printf contents (for debugging) - Status DumpManifest(Options& options, std::string& manifestFileName, - bool verbose, bool hex = false); - // Return the size of the current manifest file uint64_t ManifestFileSize() const { return manifest_file_size_; } diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h index acf1633ea..4a90d2e82 100644 --- a/include/rocksdb/db.h +++ b/include/rocksdb/db.h @@ -381,6 +381,11 @@ class DB { return Flush(options, DefaultColumnFamily()); } + // The sequence number of the most recent transaction. + virtual SequenceNumber GetLatestSequenceNumber() const = 0; + +#ifndef ROCKSDB_LITE + // Prevent file deletions. Compactions will continue to occur, // but no obsolete files will be deleted. Calling this multiple // times have the same effect as calling it once. @@ -422,9 +427,6 @@ class DB { // Retrieve the sorted list of all wal files with earliest file first virtual Status GetSortedWalFiles(VectorLogPtr& files) = 0; - // The sequence number of the most recent transaction. - virtual SequenceNumber GetLatestSequenceNumber() const = 0; - // Sets iter to an iterator that is positioned at a write-batch containing // seq_number. If the sequence number is non existent, it returns an iterator // at the first available seq_no after the requested seq_no @@ -447,6 +449,8 @@ class DB { // and end key virtual void GetLiveFilesMetaData(std::vector* metadata) {} +#endif // ROCKSDB_LITE + // Sets the globally unique ID created at database creation time by invoking // Env::GenerateUniqueId(), in identity. 
Returns Status::OK if identity could // be set properly @@ -455,11 +459,13 @@ class DB { // Returns default column family handle virtual ColumnFamilyHandle* DefaultColumnFamily() const = 0; +#ifndef ROCKSDB_LITE virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family, TablePropertiesCollection* props) = 0; Status GetPropertiesOfAllTables(TablePropertiesCollection* props) { return GetPropertiesOfAllTables(DefaultColumnFamily(), props); } +#endif // ROCKSDB_LITE private: // No copying allowed @@ -471,11 +477,13 @@ class DB { // Be very careful using this method. Status DestroyDB(const std::string& name, const Options& options); +#ifndef ROCKSDB_LITE // If a DB cannot be opened, you may attempt to call this method to // resurrect as much of the contents of the database as possible. // Some data may be lost, so be careful when calling this function // on a database that contains important information. Status RepairDB(const std::string& dbname, const Options& options); +#endif } // namespace rocksdb diff --git a/include/rocksdb/ldb_tool.h b/include/rocksdb/ldb_tool.h index a46b6a758..46bacc806 100644 --- a/include/rocksdb/ldb_tool.h +++ b/include/rocksdb/ldb_tool.h @@ -2,8 +2,8 @@ // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#ifndef STORAGE_ROCKSDB_INCLUDE_LDB_TOOL_H -#define STORAGE_ROCKSDB_INCLUDE_LDB_TOOL_H +#ifndef ROCKSDB_LITE +#pragma once #include "rocksdb/options.h" namespace rocksdb { @@ -15,4 +15,4 @@ class LDBTool { } // namespace rocksdb -#endif // STORAGE_ROCKSDB_INCLUDE_LDB_TOOL_H +#endif // ROCKSDB_LITE diff --git a/include/rocksdb/memtablerep.h b/include/rocksdb/memtablerep.h index 05f1aebca..0d251a9a6 100644 --- a/include/rocksdb/memtablerep.h +++ b/include/rocksdb/memtablerep.h @@ -177,6 +177,16 @@ class MemTableRepFactory { virtual const char* Name() const = 0; }; +// This uses a skip list to store keys. It is the default. +class SkipListFactory : public MemTableRepFactory { + public: + virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&, + Arena*, + const SliceTransform*) override; + virtual const char* Name() const override { return "SkipListFactory"; } +}; + +#ifndef ROCKSDB_LITE // This creates MemTableReps that are backed by an std::vector. On iteration, // the vector is sorted. This is useful for workloads where iteration is very // rare and writes are generally not issued after reads begin. @@ -198,17 +208,6 @@ class VectorRepFactory : public MemTableRepFactory { } }; -// This uses a skip list to store keys. It is the default. -class SkipListFactory : public MemTableRepFactory { - public: - virtual MemTableRep* CreateMemTableRep( - const MemTableRep::KeyComparator&, Arena*, - const SliceTransform*) override; - virtual const char* Name() const override { - return "SkipListFactory"; - } -}; - // This class contains a fixed array of buckets, each // pointing to a skiplist (null if the bucket is empty). 
// bucket_count: number of fixed array buckets @@ -227,4 +226,6 @@ extern MemTableRepFactory* NewHashSkipListRepFactory( extern MemTableRepFactory* NewHashLinkListRepFactory( size_t bucket_count = 50000); +#endif // ROCKSDB_LITE + } // namespace rocksdb diff --git a/include/rocksdb/options.h b/include/rocksdb/options.h index 4ed2d6538..7f1bf39a9 100644 --- a/include/rocksdb/options.h +++ b/include/rocksdb/options.h @@ -850,6 +850,7 @@ struct ReadOptions { // added data) and is optimized for sequential reads. It will return records // that were inserted into the database after the creation of the iterator. // Default: false + // Not supported in ROCKSDB_LITE mode! bool tailing; ReadOptions() diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h index 1016bcf14..b50007a32 100644 --- a/include/rocksdb/table.h +++ b/include/rocksdb/table.h @@ -81,6 +81,7 @@ struct BlockBasedTablePropertyNames { extern TableFactory* NewBlockBasedTableFactory( const BlockBasedTableOptions& table_options = BlockBasedTableOptions()); +#ifndef ROCKSDB_LITE // -- Plain Table with prefix-only seek // For this factory, you need to set Options.prefix_extrator properly to make it // work. Look-up will starts with prefix hash lookup for key prefix. Inside the @@ -120,6 +121,8 @@ extern TableFactory* NewTotalOrderPlainTableFactory( uint32_t user_key_len = kPlainTableVariableLength, int bloom_bits_per_key = 0, size_t index_sparseness = 16); +#endif // ROCKSDB_LITE + // A base class for table factories. class TableFactory { public: diff --git a/include/utilities/backupable_db.h b/include/utilities/backupable_db.h index 8f85e0614..80f82154d 100644 --- a/include/utilities/backupable_db.h +++ b/include/utilities/backupable_db.h @@ -7,6 +7,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #pragma once #include "utilities/stackable_db.h" #include "rocksdb/env.h" @@ -210,3 +211,4 @@ class RestoreBackupableDB { }; } // rocksdb namespace +#endif // ROCKSDB_LITE diff --git a/include/utilities/geo_db.h b/include/utilities/geo_db.h index 8b3e44b06..87ff5e6a0 100644 --- a/include/utilities/geo_db.h +++ b/include/utilities/geo_db.h @@ -4,6 +4,7 @@ // of patent rights can be found in the PATENTS file in the same directory. // +#ifndef ROCKSDB_LITE #pragma once #include #include @@ -101,3 +102,4 @@ class GeoDB : public StackableDB { }; } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/include/utilities/utility_db.h b/include/utilities/utility_db.h index 1a7a269d1..ddec2b0b4 100644 --- a/include/utilities/utility_db.h +++ b/include/utilities/utility_db.h @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #pragma once #include "stackable_db.h" @@ -48,3 +49,4 @@ class UtilityDB { }; } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/table/plain_table_builder.cc b/table/plain_table_builder.cc index fad7b4558..57bfd3bcf 100644 --- a/table/plain_table_builder.cc +++ b/table/plain_table_builder.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
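// Several of the interfaces guarded in the memtablerep.h, options.h and
// table.h hunks above are things an application opts into through Options /
// ReadOptions. A sketch of lite-aware configuration using only factories
// named in this patch; the memtable_factory and table_factory members are
// the standard Options fields, not shown in this diff, and the particular
// fallback choices are illustrative rather than a recommendation:
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

rocksdb::Options MakeLiteAwareOptions() {
  rocksdb::Options options;
  // SkipListFactory remains available (and is the default) in RocksDBLite.
  options.memtable_factory.reset(new rocksdb::SkipListFactory());
#ifndef ROCKSDB_LITE
  // Special-purpose memtables and PlainTable are server-only features.
  options.memtable_factory.reset(rocksdb::NewHashLinkListRepFactory(50000));
  options.table_factory.reset(rocksdb::NewTotalOrderPlainTableFactory());
#endif  // ROCKSDB_LITE
  return options;
}

rocksdb::ReadOptions MakeReadOptions() {
  rocksdb::ReadOptions ro;
#ifndef ROCKSDB_LITE
  // Tailing iterators are likewise "Not supported in ROCKSDB_LITE mode!".
  ro.tailing = true;
#endif  // ROCKSDB_LITE
  return ro;
}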
+#ifndef ROCKSDB_LITE #include "table/plain_table_builder.h" #include @@ -205,3 +206,4 @@ uint64_t PlainTableBuilder::FileSize() const { } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/table/plain_table_builder.h b/table/plain_table_builder.h index 4e0150cd5..7bc388bdf 100644 --- a/table/plain_table_builder.h +++ b/table/plain_table_builder.h @@ -5,6 +5,7 @@ // IndexedTable is a simple table format for UNIT TEST ONLY. It is not built // as production quality. +#ifndef ROCKSDB_LITE #pragma once #include #include "rocksdb/options.h" @@ -80,3 +81,4 @@ private: } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/table/plain_table_factory.cc b/table/plain_table_factory.cc index 16ee24eb4..4e844687d 100644 --- a/table/plain_table_factory.cc +++ b/table/plain_table_factory.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #include "table/plain_table_factory.h" #include @@ -46,3 +47,4 @@ extern TableFactory* NewTotalOrderPlainTableFactory(uint32_t user_key_len, } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/table/plain_table_factory.h b/table/plain_table_factory.h index a0a7fbe6f..b23620785 100644 --- a/table/plain_table_factory.h +++ b/table/plain_table_factory.h @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #pragma once #include #include @@ -83,3 +84,4 @@ class PlainTableFactory : public TableFactory { }; } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/table/plain_table_reader.cc b/table/plain_table_reader.cc index 436d13bf3..ac0505a45 100644 --- a/table/plain_table_reader.cc +++ b/table/plain_table_reader.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #include "table/plain_table_reader.h" #include @@ -745,3 +746,4 @@ Status PlainTableIterator::status() const { } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/table/plain_table_reader.h b/table/plain_table_reader.h index ac2cb8744..debb88372 100644 --- a/table/plain_table_reader.h +++ b/table/plain_table_reader.h @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #pragma once #include #include @@ -255,3 +256,4 @@ class PlainTableReader: public TableReader { void operator=(const TableReader&) = delete; }; } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/util/autovector.h b/util/autovector.h index 812a61795..212073e78 100644 --- a/util/autovector.h +++ b/util/autovector.h @@ -12,6 +12,10 @@ namespace rocksdb { +#ifdef ROCKSDB_LITE +template +class autovector : public std::vector {}; +#else // A vector that leverages pre-allocated stack-based array to achieve better // performance for array with small amount of items. 
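// In lite mode the autovector hunk above swaps the whole stack-backed
// small-vector implementation for a forwarding alias, trading the inline
// storage optimization for less template code. Reconstructed with its
// template parameters (which the diff rendering above has stripped); the
// default inline capacity of 8 matches the full template as I recall it and
// should be checked against util/autovector.h:
#ifdef ROCKSDB_LITE
#include <cstddef>
#include <vector>

namespace rocksdb {

// Lite-mode autovector: behaves exactly like std::vector.
template <class T, size_t kSize = 8>
class autovector : public std::vector<T> {};

}  // namespace rocksdb
#endif  // ROCKSDB_LITE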
// @@ -299,5 +303,5 @@ autovector& autovector::assign(const autovector& other) { return *this; } - -} // rocksdb +#endif // ROCKSDB_LITE +} // namespace rocksdb diff --git a/util/blob_store.cc b/util/blob_store.cc index 76230679f..daaf4bc02 100644 --- a/util/blob_store.cc +++ b/util/blob_store.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #include "util/blob_store.h" namespace rocksdb { @@ -266,3 +267,4 @@ Status BlobStore::CreateNewBucket() { } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/util/blob_store.h b/util/blob_store.h index 0a81d01df..ce8633740 100644 --- a/util/blob_store.h +++ b/util/blob_store.h @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #pragma once #include "rocksdb/env.h" #include "rocksdb/status.h" @@ -159,3 +160,4 @@ class BlobStore { }; } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/util/hash_linklist_rep.cc b/util/hash_linklist_rep.cc index 441f5c993..9e77afa3e 100644 --- a/util/hash_linklist_rep.cc +++ b/util/hash_linklist_rep.cc @@ -4,6 +4,7 @@ // of patent rights can be found in the PATENTS file in the same directory. // +#ifndef ROCKSDB_LITE #include "util/hash_linklist_rep.h" #include "rocksdb/memtablerep.h" @@ -484,3 +485,4 @@ MemTableRepFactory* NewHashLinkListRepFactory(size_t bucket_count) { } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/util/hash_linklist_rep.h b/util/hash_linklist_rep.h index 11fb7467f..f1ab5d560 100644 --- a/util/hash_linklist_rep.h +++ b/util/hash_linklist_rep.h @@ -6,6 +6,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #pragma once #include "rocksdb/slice_transform.h" #include "rocksdb/memtablerep.h" @@ -32,3 +33,4 @@ class HashLinkListRepFactory : public MemTableRepFactory { }; } +#endif // ROCKSDB_LITE diff --git a/util/hash_skiplist_rep.cc b/util/hash_skiplist_rep.cc index 230fae957..e27ec5949 100644 --- a/util/hash_skiplist_rep.cc +++ b/util/hash_skiplist_rep.cc @@ -4,6 +4,7 @@ // of patent rights can be found in the PATENTS file in the same directory. // +#ifndef ROCKSDB_LITE #include "util/hash_skiplist_rep.h" #include "rocksdb/memtablerep.h" @@ -339,3 +340,4 @@ MemTableRepFactory* NewHashSkipListRepFactory( } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/util/hash_skiplist_rep.h b/util/hash_skiplist_rep.h index abf4a68cd..16903c684 100644 --- a/util/hash_skiplist_rep.h +++ b/util/hash_skiplist_rep.h @@ -6,6 +6,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #pragma once #include "rocksdb/slice_transform.h" #include "rocksdb/memtablerep.h" @@ -39,3 +40,4 @@ class HashSkipListRepFactory : public MemTableRepFactory { }; } +#endif // ROCKSDB_LITE diff --git a/util/ldb_cmd.cc b/util/ldb_cmd.cc index 8ed8014f2..98e7bf086 100644 --- a/util/ldb_cmd.cc +++ b/util/ldb_cmd.cc @@ -3,6 +3,7 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
// +#ifndef ROCKSDB_LITE #include "util/ldb_cmd.h" #include "db/dbformat.h" @@ -1834,3 +1835,4 @@ void CheckConsistencyCommand::DoCommand() { } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/util/ldb_tool.cc b/util/ldb_tool.cc index 134547b19..8439b63f9 100644 --- a/util/ldb_tool.cc +++ b/util/ldb_tool.cc @@ -3,6 +3,7 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. // +#ifndef ROCKSDB_LITE #include "rocksdb/ldb_tool.h" #include "util/ldb_cmd.h" @@ -103,3 +104,4 @@ void LDBTool::Run(int argc, char** argv, Options options) { } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/util/logging.cc b/util/logging.cc index 697341349..02e356001 100644 --- a/util/logging.cc +++ b/util/logging.cc @@ -50,15 +50,6 @@ std::string EscapeString(const Slice& value) { return r; } -bool ConsumeChar(Slice* in, char c) { - if (!in->empty() && (*in)[0] == c) { - in->remove_prefix(1); - return true; - } else { - return false; - } -} - bool ConsumeDecimalNumber(Slice* in, uint64_t* val) { uint64_t v = 0; int digits = 0; diff --git a/util/logging.h b/util/logging.h index 411c83beb..d8ce45efc 100644 --- a/util/logging.h +++ b/util/logging.h @@ -35,10 +35,6 @@ extern std::string NumberToString(uint64_t num); // Escapes any non-printable characters found in "value". extern std::string EscapeString(const Slice& value); -// If *in starts with "c", advances *in past the first character and -// returns true. Otherwise, returns false. -extern bool ConsumeChar(Slice* in, char c); - // Parse a human-readable number from "*in" into *value. On success, // advances "*in" past the consumed number and sets "*val" to the // numeric value. Otherwise, returns false and leaves *in in an diff --git a/util/vectorrep.cc b/util/vectorrep.cc index 14e7c9f91..c7f9cca2a 100644 --- a/util/vectorrep.cc +++ b/util/vectorrep.cc @@ -3,6 +3,7 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. // +#ifndef ROCKSDB_LITE #include "rocksdb/memtablerep.h" #include @@ -278,3 +279,4 @@ MemTableRep* VectorRepFactory::CreateMemTableRep( return new VectorRep(compare, arena, count_); } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc index 32c3b8481..26ffcb456 100644 --- a/utilities/backupable/backupable_db.cc +++ b/utilities/backupable/backupable_db.cc @@ -7,6 +7,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE + #include "utilities/backupable_db.h" #include "db/filename.h" #include "util/coding.h" @@ -1173,3 +1175,5 @@ Status RestoreBackupableDB::DeleteBackup(BackupID backup_id) { } } // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/utilities/geodb/geodb_impl.cc b/utilities/geodb/geodb_impl.cc index 095ecf8ab..065e5ca35 100644 --- a/utilities/geodb/geodb_impl.cc +++ b/utilities/geodb/geodb_impl.cc @@ -3,6 +3,8 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
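// The ldb shell excluded in the util/ldb_cmd.cc and util/ldb_tool.cc hunks
// above is normally embedded through the small LDBTool wrapper. A sketch of
// the usual entry point, guarded the same way this patch guards the tool
// itself; the Run() signature is taken from util/ldb_tool.cc above:
#ifndef ROCKSDB_LITE
#include "rocksdb/ldb_tool.h"

int main(int argc, char** argv) {
  rocksdb::LDBTool tool;
  tool.Run(argc, argv, rocksdb::Options());
  return 0;
}
#else
int main() { return 1; }  // ldb is not part of the lite build
#endif  // ROCKSDB_LITE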
// +#ifndef ROCKSDB_LITE + #include "utilities/geodb/geodb_impl.h" #define __STDC_FORMAT_MACROS @@ -425,3 +427,5 @@ void GeoDBImpl::QuadKeyToTile(std::string quadkey, Tile* tile, } } } // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/utilities/geodb/geodb_impl.h b/utilities/geodb/geodb_impl.h index 376a211c6..4ee42ad29 100644 --- a/utilities/geodb/geodb_impl.h +++ b/utilities/geodb/geodb_impl.h @@ -4,6 +4,8 @@ // of patent rights can be found in the PATENTS file in the same directory. // +#ifndef ROCKSDB_LITE + #pragma once #include #include @@ -185,3 +187,5 @@ class GeoDBImpl : public GeoDB { }; } // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/utilities/redis/redis_list_exception.h b/utilities/redis/redis_list_exception.h index d409095a6..0b0f37616 100644 --- a/utilities/redis/redis_list_exception.h +++ b/utilities/redis/redis_list_exception.h @@ -5,6 +5,7 @@ * Copyright 2013 Facebook */ +#ifndef ROCKSDB_LITE #pragma once #include @@ -18,3 +19,4 @@ class RedisListException: public std::exception { }; } // namespace rocksdb +#endif diff --git a/utilities/redis/redis_list_iterator.h b/utilities/redis/redis_list_iterator.h index d57f8ac94..b776ada24 100644 --- a/utilities/redis/redis_list_iterator.h +++ b/utilities/redis/redis_list_iterator.h @@ -1,3 +1,4 @@ +// Copyright 2013 Facebook /** * RedisListIterator: * An abstraction over the "list" concept (e.g.: for redis lists). @@ -34,9 +35,9 @@ * - n bytes of data: the actual data. * * @author Deon Nicholas (dnicholas@fb.com) - * Copyright 2013 Facebook */ +#ifndef ROCKSDB_LITE #pragma once #include @@ -306,3 +307,4 @@ class RedisListIterator { }; } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/utilities/redis/redis_lists.cc b/utilities/redis/redis_lists.cc index 50c544a3a..2b38a2da4 100644 --- a/utilities/redis/redis_lists.cc +++ b/utilities/redis/redis_lists.cc @@ -1,3 +1,4 @@ +// Copyright 2013 Facebook /** * A (persistent) Redis API built using the rocksdb backend. * Implements Redis Lists as described on: http://redis.io/commands#list @@ -18,9 +19,9 @@ * wouldn't have to read and re-write the entire list. * * @author Deon Nicholas (dnicholas@fb.com) - * Copyright 2013 Facebook */ +#ifndef ROCKSDB_LITE #include "redis_lists.h" #include @@ -547,5 +548,5 @@ int RedisLists::Insert(const std::string& key, const std::string& pivot, return it.Length(); } - -} +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/utilities/redis/redis_lists.h b/utilities/redis/redis_lists.h index 8c149bc43..6c8b9551e 100644 --- a/utilities/redis/redis_lists.h +++ b/utilities/redis/redis_lists.h @@ -8,6 +8,7 @@ * Copyright 2013 Facebook */ +#ifndef ROCKSDB_LITE #pragma once #include @@ -104,3 +105,4 @@ class RedisLists { }; } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/utilities/ttl/db_ttl.cc b/utilities/ttl/db_ttl.cc index 21626bec2..ba5d1241a 100644 --- a/utilities/ttl/db_ttl.cc +++ b/utilities/ttl/db_ttl.cc @@ -1,6 +1,7 @@ // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
+#ifndef ROCKSDB_LITE #include "utilities/ttl/db_ttl.h" #include "db/filename.h" @@ -215,3 +216,4 @@ Iterator* DBWithTTL::NewIterator(const ReadOptions& opts, } } // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/utilities/ttl/db_ttl.h b/utilities/ttl/db_ttl.h index 90194d21f..3ed6f7ea3 100644 --- a/utilities/ttl/db_ttl.h +++ b/utilities/ttl/db_ttl.h @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef ROCKSDB_LITE #pragma once #include #include @@ -328,3 +329,4 @@ class TtlMergeOperator : public MergeOperator { }; } +#endif // ROCKSDB_LITE