diff --git a/HISTORY.md b/HISTORY.md
index 5650d0e11..0e2eb9cee 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,6 +1,6 @@
 # Rocksdb Change Log

-### Unrelease
+### Unreleased
 ### New Features
 * Support Multiple DB paths in universal style compactions
 * Add feature of storing plain table index and bloom filter in SST file.
@@ -8,6 +8,7 @@
 ### Public API changes
 * DBOptions.db_paths now is a vector of a DBPath structure which indicates both of path and target size
 * NewPlainTableFactory instead of bunch of parameters now accepts PlainTableOptions, which is defined in include/rocksdb/table.h
+* Moved include/utilities/*.h to include/rocksdb/utilities/*.h

 ## 3.3.0 (7/10/2014)
diff --git a/db/merge_test.cc b/db/merge_test.cc
index 9bdf54332..bd8da5a87 100644
--- a/db/merge_test.cc
+++ b/db/merge_test.cc
@@ -12,12 +12,12 @@
 #include "rocksdb/db.h"
 #include "rocksdb/env.h"
 #include "rocksdb/merge_operator.h"
+#include "rocksdb/utilities/db_ttl.h"
 #include "db/dbformat.h"
 #include "db/db_impl.h"
 #include "db/write_batch_internal.h"
 #include "utilities/merge_operators.h"
 #include "util/testharness.h"
-#include "utilities/db_ttl.h"

 using namespace std;
 using namespace rocksdb;
diff --git a/include/rocksdb/utilities/backupable_db.h b/include/rocksdb/utilities/backupable_db.h
new file mode 100644
index 000000000..78365769d
--- /dev/null
+++ b/include/rocksdb/utilities/backupable_db.h
@@ -0,0 +1,252 @@
+// Copyright (c) 2013, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+
+#define __STDC_FORMAT_MACROS
+#include
+#include
+#include
+#include
+
+#include "rocksdb/utilities/stackable_db.h"
+
+#include "rocksdb/env.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+struct BackupableDBOptions {
+  // Where to keep the backup files. Has to be different than dbname_
+  // Best to set this to dbname_ + "/backups"
+  // Required
+  std::string backup_dir;
+
+  // Backup Env object. It will be used for backup file I/O. If it's
+  // nullptr, backups will be written out using DBs Env. If it's
+  // non-nullptr, backup's I/O will be performed using this object.
+  // If you want to have backups on HDFS, use HDFS Env here!
+  // Default: nullptr
+  Env* backup_env;
+
+  // If share_table_files == true, backup will assume that table files with
+  // same name have the same contents. This enables incremental backups and
+  // avoids unnecessary data copies.
+  // If share_table_files == false, each backup will be on its own and will
+  // not share any data with other backups.
+  // default: true
+  bool share_table_files;
+
+  // Backup info and error messages will be written to info_log
+  // if non-nullptr.
+  // Default: nullptr
+  Logger* info_log;
+
+  // If sync == true, we can guarantee you'll get consistent backup even
+  // on a machine crash/reboot. Backup process is slower with sync enabled.
+  // If sync == false, we don't guarantee anything on machine reboot. However,
+  // chances are some of the backups are consistent.
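For downstream code, the HISTORY.md entry and the merge_test.cc hunk above amount to an include-path change; the old headers are kept later in this diff only as deprecation shims. A minimal before/after sketch, with one header chosen for illustration:

    // Before this change (the old path still works, but now emits a #warning
    // and forwards to the new location):
    #include "utilities/backupable_db.h"
    // After this change:
    #include "rocksdb/utilities/backupable_db.h"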
+ // Default: true + bool sync; + + // If true, it will delete whatever backups there are already + // Default: false + bool destroy_old_data; + + // If false, we won't backup log files. This option can be useful for backing + // up in-memory databases where log file are persisted, but table files are in + // memory. + // Default: true + bool backup_log_files; + + // Max bytes that can be transferred in a second during backup. + // If 0, go as fast as you can + // Default: 0 + uint64_t backup_rate_limit; + + // Max bytes that can be transferred in a second during restore. + // If 0, go as fast as you can + // Default: 0 + uint64_t restore_rate_limit; + + // Only used if share_table_files is set to true. If true, will consider that + // backups can come from different databases, hence a sst is not uniquely + // identifed by its name, but by the triple (file name, crc32, file length) + // Default: false + // Note: this is an experimental option, and you'll need to set it manually + // *turn it on only if you know what you're doing* + bool share_files_with_checksum; + + void Dump(Logger* logger) const; + + explicit BackupableDBOptions(const std::string& _backup_dir, + Env* _backup_env = nullptr, + bool _share_table_files = true, + Logger* _info_log = nullptr, bool _sync = true, + bool _destroy_old_data = false, + bool _backup_log_files = true, + uint64_t _backup_rate_limit = 0, + uint64_t _restore_rate_limit = 0) + : backup_dir(_backup_dir), + backup_env(_backup_env), + share_table_files(_share_table_files), + info_log(_info_log), + sync(_sync), + destroy_old_data(_destroy_old_data), + backup_log_files(_backup_log_files), + backup_rate_limit(_backup_rate_limit), + restore_rate_limit(_restore_rate_limit), + share_files_with_checksum(false) { + assert(share_table_files || !share_files_with_checksum); + } +}; + +struct RestoreOptions { + // If true, restore won't overwrite the existing log files in wal_dir. It will + // also move all log files from archive directory to wal_dir. Use this option + // in combination with BackupableDBOptions::backup_log_files = false for + // persisting in-memory databases. + // Default: false + bool keep_log_files; + + explicit RestoreOptions(bool _keep_log_files = false) + : keep_log_files(_keep_log_files) {} +}; + +typedef uint32_t BackupID; + +struct BackupInfo { + BackupID backup_id; + int64_t timestamp; + uint64_t size; + + BackupInfo() {} + BackupInfo(BackupID _backup_id, int64_t _timestamp, uint64_t _size) + : backup_id(_backup_id), timestamp(_timestamp), size(_size) {} +}; + +class BackupEngineReadOnly { + public: + virtual ~BackupEngineReadOnly() {} + + static BackupEngineReadOnly* NewReadOnlyBackupEngine( + Env* db_env, const BackupableDBOptions& options); + + // You can GetBackupInfo safely, even with other BackupEngine performing + // backups on the same directory + virtual void GetBackupInfo(std::vector* backup_info) = 0; + + // Restoring DB from backup is NOT safe when there is another BackupEngine + // running that might call DeleteBackup() or PurgeOldBackups(). It is caller's + // responsibility to synchronize the operation, i.e. 
don't delete the backup + // when you're restoring from it + virtual Status RestoreDBFromBackup( + BackupID backup_id, const std::string& db_dir, const std::string& wal_dir, + const RestoreOptions& restore_options = RestoreOptions()) = 0; + virtual Status RestoreDBFromLatestBackup( + const std::string& db_dir, const std::string& wal_dir, + const RestoreOptions& restore_options = RestoreOptions()) = 0; +}; + +// Please see the documentation in BackupableDB and RestoreBackupableDB +class BackupEngine { + public: + virtual ~BackupEngine() {} + + static BackupEngine* NewBackupEngine(Env* db_env, + const BackupableDBOptions& options); + + virtual Status CreateNewBackup(DB* db, bool flush_before_backup = false) = 0; + virtual Status PurgeOldBackups(uint32_t num_backups_to_keep) = 0; + virtual Status DeleteBackup(BackupID backup_id) = 0; + virtual void StopBackup() = 0; + + virtual void GetBackupInfo(std::vector* backup_info) = 0; + virtual Status RestoreDBFromBackup( + BackupID backup_id, const std::string& db_dir, const std::string& wal_dir, + const RestoreOptions& restore_options = RestoreOptions()) = 0; + virtual Status RestoreDBFromLatestBackup( + const std::string& db_dir, const std::string& wal_dir, + const RestoreOptions& restore_options = RestoreOptions()) = 0; +}; + +// Stack your DB with BackupableDB to be able to backup the DB +class BackupableDB : public StackableDB { + public: + // BackupableDBOptions have to be the same as the ones used in a previous + // incarnation of the DB + // + // BackupableDB ownes the pointer `DB* db` now. You should not delete it or + // use it after the invocation of BackupableDB + BackupableDB(DB* db, const BackupableDBOptions& options); + virtual ~BackupableDB(); + + // Captures the state of the database in the latest backup + // NOT a thread safe call + Status CreateNewBackup(bool flush_before_backup = false); + // Returns info about backups in backup_info + void GetBackupInfo(std::vector* backup_info); + // deletes old backups, keeping latest num_backups_to_keep alive + Status PurgeOldBackups(uint32_t num_backups_to_keep); + // deletes a specific backup + Status DeleteBackup(BackupID backup_id); + // Call this from another thread if you want to stop the backup + // that is currently happening. It will return immediatelly, will + // not wait for the backup to stop. + // The backup will stop ASAP and the call to CreateNewBackup will + // return Status::Incomplete(). It will not clean up after itself, but + // the state will remain consistent. The state will be cleaned up + // next time you create BackupableDB or RestoreBackupableDB. + void StopBackup(); + + private: + BackupEngine* backup_engine_; +}; + +// Use this class to access information about backups and restore from them +class RestoreBackupableDB { + public: + RestoreBackupableDB(Env* db_env, const BackupableDBOptions& options); + ~RestoreBackupableDB(); + + // Returns info about backups in backup_info + void GetBackupInfo(std::vector* backup_info); + + // restore from backup with backup_id + // IMPORTANT -- if options_.share_table_files == true and you restore DB + // from some backup that is not the latest, and you start creating new + // backups from the new DB, they will probably fail + // + // Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3. + // If you add new data to the DB and try creating a new backup now, the + // database will diverge from backups 4 and 5 and the new backup will fail. 
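As an illustration of the classes declared above, here is a minimal sketch of creating a backup with BackupableDB and restoring it with RestoreBackupableDB; the paths are hypothetical and error handling is reduced to asserts:

    #include <cassert>
    #include "rocksdb/db.h"
    #include "rocksdb/utilities/backupable_db.h"

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;
      rocksdb::DB* db = nullptr;
      rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rocksdb_example", &db);
      assert(s.ok());

      // BackupableDB takes ownership of `db`.
      rocksdb::BackupableDB* backupable_db = new rocksdb::BackupableDB(
          db, rocksdb::BackupableDBOptions("/tmp/rocksdb_example_backup"));
      s = backupable_db->CreateNewBackup();
      assert(s.ok());
      delete backupable_db;  // also deletes the wrapped DB

      // Restore the latest backup into db_dir/wal_dir (same directory here).
      rocksdb::RestoreBackupableDB restore(
          rocksdb::Env::Default(),
          rocksdb::BackupableDBOptions("/tmp/rocksdb_example_backup"));
      s = restore.RestoreDBFromLatestBackup("/tmp/rocksdb_example",
                                            "/tmp/rocksdb_example");
      assert(s.ok());
      return 0;
    }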
+ // If you want to create new backup, you will first have to delete backups 4 + // and 5. + Status RestoreDBFromBackup(BackupID backup_id, const std::string& db_dir, + const std::string& wal_dir, + const RestoreOptions& restore_options = + RestoreOptions()); + + // restore from the latest backup + Status RestoreDBFromLatestBackup(const std::string& db_dir, + const std::string& wal_dir, + const RestoreOptions& restore_options = + RestoreOptions()); + // deletes old backups, keeping latest num_backups_to_keep alive + Status PurgeOldBackups(uint32_t num_backups_to_keep); + // deletes a specific backup + Status DeleteBackup(BackupID backup_id); + + private: + BackupEngine* backup_engine_; +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/include/rocksdb/utilities/db_ttl.h b/include/rocksdb/utilities/db_ttl.h new file mode 100644 index 000000000..4534e1ff7 --- /dev/null +++ b/include/rocksdb/utilities/db_ttl.h @@ -0,0 +1,68 @@ +// Copyright (c) 2013, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once +#ifndef ROCKSDB_LITE + +#include +#include + +#include "rocksdb/utilities/stackable_db.h" +#include "rocksdb/db.h" + +namespace rocksdb { + +// Database with TTL support. +// +// USE-CASES: +// This API should be used to open the db when key-values inserted are +// meant to be removed from the db in a non-strict 'ttl' amount of time +// Therefore, this guarantees that key-values inserted will remain in the +// db for >= ttl amount of time and the db will make efforts to remove the +// key-values as soon as possible after ttl seconds of their insertion. +// +// BEHAVIOUR: +// TTL is accepted in seconds +// (int32_t)Timestamp(creation) is suffixed to values in Put internally +// Expired TTL values deleted in compaction only:(Timestamp+ttl=5 +// read_only=true opens in the usual read-only mode. 
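A minimal sketch, with a hypothetical path, of the TTL behaviour described above: values written through DBWithTTL are timestamped and become eligible for removal during compaction once the TTL has passed.

    #include "rocksdb/utilities/db_ttl.h"

    rocksdb::DBWithTTL* db = nullptr;
    rocksdb::Options options;
    options.create_if_missing = true;
    // Entries older than roughly one hour become deletion candidates in compaction.
    rocksdb::Status s =
        rocksdb::DBWithTTL::Open(options, "/tmp/rocksdb_ttl_example", &db, 3600);
    if (s.ok()) {
      db->Put(rocksdb::WriteOptions(), "key", "value");  // timestamp is appended internally
      delete db;
    }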
Compactions will not be +// triggered(neither manual nor automatic), so no expired entries removed +// +// CONSTRAINTS: +// Not specifying/passing or non-positive TTL behaves like TTL = infinity +// +// !!!WARNING!!!: +// Calling DB::Open directly to re-open a db created by this API will get +// corrupt values(timestamp suffixed) and no ttl effect will be there +// during the second Open, so use this API consistently to open the db +// Be careful when passing ttl with a small positive value because the +// whole database may be deleted in a small amount of time + +class DBWithTTL : public StackableDB { + public: + virtual Status CreateColumnFamilyWithTtl( + const ColumnFamilyOptions& options, const std::string& column_family_name, + ColumnFamilyHandle** handle, int ttl) = 0; + + static Status Open(const Options& options, const std::string& dbname, + DBWithTTL** dbptr, int32_t ttl = 0, + bool read_only = false); + + static Status Open(const DBOptions& db_options, const std::string& dbname, + const std::vector& column_families, + std::vector* handles, + DBWithTTL** dbptr, std::vector ttls, + bool read_only = false); + + protected: + explicit DBWithTTL(DB* db) : StackableDB(db) {} +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/include/rocksdb/utilities/document_db.h b/include/rocksdb/utilities/document_db.h new file mode 100644 index 000000000..7fde5ec9f --- /dev/null +++ b/include/rocksdb/utilities/document_db.h @@ -0,0 +1,149 @@ +// Copyright (c) 2013, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once +#ifndef ROCKSDB_LITE + +#include +#include + +#include "rocksdb/utilities/stackable_db.h" +#include "rocksdb/utilities/json_document.h" +#include "rocksdb/db.h" + +namespace rocksdb { + +// IMPORTANT: DocumentDB is a work in progress. It is unstable and we might +// change the API without warning. Talk to RocksDB team before using this in +// production ;) + +// DocumentDB is a layer on top of RocksDB that provides a very simple JSON API. +// When creating a DB, you specify a list of indexes you want to keep on your +// data. You can insert a JSON document to the DB, which is automatically +// indexed. Every document added to the DB needs to have "_id" field which is +// automatically indexed and is an unique primary key. All other indexes are +// non-unique. + +// NOTE: field names in the JSON are NOT allowed to start with '$' or +// contain '.'. We don't currently enforce that rule, but will start behaving +// badly. + +// Cursor is what you get as a result of executing query. 
To get all +// results from a query, call Next() on a Cursor while Valid() returns true +class Cursor { + public: + Cursor() = default; + virtual ~Cursor() {} + + virtual bool Valid() const = 0; + virtual void Next() = 0; + // Lifecycle of the returned JSONDocument is until the next Next() call + virtual const JSONDocument& document() const = 0; + virtual Status status() const = 0; + + private: + // No copying allowed + Cursor(const Cursor&); + void operator=(const Cursor&); +}; + +struct DocumentDBOptions { + int background_threads = 4; + uint64_t memtable_size = 128 * 1024 * 1024; // 128 MB + uint64_t cache_size = 1 * 1024 * 1024 * 1024; // 1 GB +}; + +// TODO(icanadi) Add `JSONDocument* info` parameter to all calls that can be +// used by the caller to get more information about the call execution (number +// of dropped records, number of updated records, etc.) +class DocumentDB : public StackableDB { + public: + struct IndexDescriptor { + // Currently, you can only define an index on a single field. To specify an + // index on a field X, set index description to JSON "{X: 1}" + // Currently the value needs to be 1, which means ascending. + // In the future, we plan to also support indexes on multiple keys, where + // you could mix ascending sorting (1) with descending sorting indexes (-1) + JSONDocument* description; + std::string name; + }; + + // Open DocumentDB with specified indexes. The list of indexes has to be + // complete, i.e. include all indexes present in the DB, except the primary + // key index. + // Otherwise, Open() will return an error + static Status Open(const DocumentDBOptions& options, const std::string& name, + const std::vector& indexes, + DocumentDB** db, bool read_only = false); + + explicit DocumentDB(DB* db) : StackableDB(db) {} + + // Create a new index. It will stop all writes for the duration of the call. + // All current documents in the DB are scanned and corresponding index entries + // are created + virtual Status CreateIndex(const WriteOptions& write_options, + const IndexDescriptor& index) = 0; + + // Drop an index. Client is responsible to make sure that index is not being + // used by currently executing queries + virtual Status DropIndex(const std::string& name) = 0; + + // Insert a document to the DB. The document needs to have a primary key "_id" + // which can either be a string or an integer. Otherwise the write will fail + // with InvalidArgument. + virtual Status Insert(const WriteOptions& options, + const JSONDocument& document) = 0; + + // Deletes all documents matching a filter atomically + virtual Status Remove(const ReadOptions& read_options, + const WriteOptions& write_options, + const JSONDocument& query) = 0; + + // Does this sequence of operations: + // 1. Find all documents matching a filter + // 2. For all documents, atomically: + // 2.1. apply the update operators + // 2.2. update the secondary indexes + // + // Currently only $set update operator is supported. + // Syntax is: {$set: {key1: value1, key2: value2, etc...}} + // This operator will change a document's key1 field to value1, key2 to + // value2, etc. New values will be set even if a document didn't have an entry + // for the specified key. + // + // You can not change a primary key of a document. 
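A sketch of the Insert and $set Update calls described above; the path and JSON literals are illustrative, and it is an assumption rather than a guarantee of this header that ParseJSON accepts this exact syntax and that Open creates a missing DB:

    #include <cassert>
    #include <memory>
    #include "rocksdb/utilities/document_db.h"

    rocksdb::DocumentDB* db = nullptr;
    rocksdb::DocumentDBOptions db_options;
    // No secondary indexes in this sketch; "_id" is always indexed.
    rocksdb::Status s =
        rocksdb::DocumentDB::Open(db_options, "/tmp/rocksdb_docdb_example", {}, &db);
    assert(s.ok());

    std::unique_ptr<rocksdb::JSONDocument> doc(rocksdb::JSONDocument::ParseJSON(
        "{\"_id\": 1, \"name\": \"John\", \"enabled\": false}"));
    s = db->Insert(rocksdb::WriteOptions(), *doc);

    // $set update: flip `enabled` for the document whose _id equals 1.
    std::unique_ptr<rocksdb::JSONDocument> filter(
        rocksdb::JSONDocument::ParseJSON("{\"_id\": 1}"));
    std::unique_ptr<rocksdb::JSONDocument> update(
        rocksdb::JSONDocument::ParseJSON("{\"$set\": {\"enabled\": true}}"));
    s = db->Update(rocksdb::ReadOptions(), rocksdb::WriteOptions(), *filter, *update);
    delete db;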
+ // + // Update example: Update({id: {$gt: 5}, $index: id}, {$set: {enabled: true}}) + virtual Status Update(const ReadOptions& read_options, + const WriteOptions& write_options, + const JSONDocument& filter, + const JSONDocument& updates) = 0; + + // query has to be an array in which every element is an operator. Currently + // only $filter operator is supported. Syntax of $filter operator is: + // {$filter: {key1: condition1, key2: condition2, etc.}} where conditions can + // be either: + // 1) a single value in which case the condition is equality condition, or + // 2) a defined operators, like {$gt: 4}, which will match all documents that + // have key greater than 4. + // + // Supported operators are: + // 1) $gt -- greater than + // 2) $gte -- greater than or equal + // 3) $lt -- less than + // 4) $lte -- less than or equal + // If you want the filter to use an index, you need to specify it like this: + // {$filter: {...(conditions)..., $index: index_name}} + // + // Example query: + // * [{$filter: {name: John, age: {$gte: 18}, $index: age}}] + // will return all Johns whose age is greater or equal to 18 and it will use + // index "age" to satisfy the query. + virtual Cursor* Query(const ReadOptions& read_options, + const JSONDocument& query) = 0; +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/include/rocksdb/utilities/geo_db.h b/include/rocksdb/utilities/geo_db.h new file mode 100644 index 000000000..41c0f1408 --- /dev/null +++ b/include/rocksdb/utilities/geo_db.h @@ -0,0 +1,105 @@ +// Copyright (c) 2013, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// + +#ifndef ROCKSDB_LITE +#pragma once +#include +#include + +#include "rocksdb/utilities/stackable_db.h" +#include "rocksdb/status.h" + +namespace rocksdb { + +// +// Configurable options needed for setting up a Geo database +// +struct GeoDBOptions { + // Backup info and error messages will be written to info_log + // if non-nullptr. + // Default: nullptr + Logger* info_log; + + explicit GeoDBOptions(Logger* _info_log = nullptr):info_log(_info_log) { } +}; + +// +// A position in the earth's geoid +// +class GeoPosition { + public: + double latitude; + double longitude; + + explicit GeoPosition(double la = 0, double lo = 0) : + latitude(la), longitude(lo) { + } +}; + +// +// Description of an object on the Geoid. It is located by a GPS location, +// and is identified by the id. The value associated with this object is +// an opaque string 'value'. Different objects identified by unique id's +// can have the same gps-location associated with them. +// +class GeoObject { + public: + GeoPosition position; + std::string id; + std::string value; + + GeoObject() {} + + GeoObject(const GeoPosition& pos, const std::string& i, + const std::string& val) : + position(pos), id(i), value(val) { + } +}; + +// +// Stack your DB with GeoDB to be able to get geo-spatial support +// +class GeoDB : public StackableDB { + public: + // GeoDBOptions have to be the same as the ones used in a previous + // incarnation of the DB + // + // GeoDB owns the pointer `DB* db` now. 
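A brief sketch of using the GeoDB interface whose methods are declared in this class. It assumes `geo` points at a concrete GeoDB implementation obtained elsewhere (this header only declares the abstract interface), and it assumes SearchRadial fills a std::vector of GeoObject:

    rocksdb::GeoObject obj(rocksdb::GeoPosition(37.7749, -122.4194), "sf-1", "payload");
    rocksdb::Status s = geo->Insert(obj);

    // Everything within 1000 meters of the same position.
    std::vector<rocksdb::GeoObject> nearby;
    s = geo->SearchRadial(rocksdb::GeoPosition(37.7749, -122.4194), 1000.0, &nearby);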
You should not delete it or + // use it after the invocation of GeoDB + // GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {} + GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {} + virtual ~GeoDB() {} + + // Insert a new object into the location database. The object is + // uniquely identified by the id. If an object with the same id already + // exists in the db, then the old one is overwritten by the new + // object being inserted here. + virtual Status Insert(const GeoObject& object) = 0; + + // Retrieve the value of the object located at the specified GPS + // location and is identified by the 'id'. + virtual Status GetByPosition(const GeoPosition& pos, + const Slice& id, std::string* value) = 0; + + // Retrieve the value of the object identified by the 'id'. This method + // could be potentially slower than GetByPosition + virtual Status GetById(const Slice& id, GeoObject* object) = 0; + + // Delete the specified object + virtual Status Remove(const Slice& id) = 0; + + // Returns a list of all items within a circular radius from the + // specified gps location. If 'number_of_values' is specified, + // then this call returns at most that many number of objects. + // The radius is specified in 'meters'. + virtual Status SearchRadial(const GeoPosition& pos, + double radius, + std::vector* values, + int number_of_values = INT_MAX) = 0; +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/include/rocksdb/utilities/json_document.h b/include/rocksdb/utilities/json_document.h new file mode 100644 index 000000000..ceb058cf9 --- /dev/null +++ b/include/rocksdb/utilities/json_document.h @@ -0,0 +1,174 @@ +// Copyright (c) 2013, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#pragma once +#ifndef ROCKSDB_LITE + +#include +#include +#include +#include + +#include "rocksdb/slice.h" + +// We use JSONDocument for DocumentDB API +// Implementation inspired by folly::dynamic and rapidjson + +namespace rocksdb { + +// NOTE: none of this is thread-safe +class JSONDocument { + public: + // return nullptr on parse failure + static JSONDocument* ParseJSON(const char* json); + + enum Type { + kNull, + kArray, + kBool, + kDouble, + kInt64, + kObject, + kString, + }; + + JSONDocument(); // null + /* implicit */ JSONDocument(bool b); + /* implicit */ JSONDocument(double d); + /* implicit */ JSONDocument(int64_t i); + /* implicit */ JSONDocument(const std::string& s); + /* implicit */ JSONDocument(const char* s); + // constructs JSONDocument of specific type with default value + explicit JSONDocument(Type type); + + // copy constructor + JSONDocument(const JSONDocument& json_document); + + ~JSONDocument(); + + Type type() const; + + // REQUIRES: IsObject() + bool Contains(const std::string& key) const; + // Returns nullptr if !Contains() + // don't delete the returned pointer + // REQUIRES: IsObject() + const JSONDocument* Get(const std::string& key) const; + // REQUIRES: IsObject() + JSONDocument& operator[](const std::string& key); + // REQUIRES: IsObject() + const JSONDocument& operator[](const std::string& key) const; + // returns `this`, so you can chain operations. 
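A small sketch of the chaining behaviour just described, plus the Serialize/Deserialize round trip declared further down in this header; the int64_t cast avoids ambiguity between the bool, double, and int64_t constructors:

    rocksdb::JSONDocument doc(rocksdb::JSONDocument::kObject);
    doc.Set("name", rocksdb::JSONDocument("John"))
        ->Set("age", rocksdb::JSONDocument(static_cast<int64_t>(18)));

    std::string serialized;
    doc.Serialize(&serialized);
    rocksdb::JSONDocument* parsed = rocksdb::JSONDocument::Deserialize(serialized);
    // parsed compares equal to doc; the caller owns the returned pointer.
    delete parsed;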
+ // Copies value + // REQUIRES: IsObject() + JSONDocument* Set(const std::string& key, const JSONDocument& value); + + // REQUIRES: IsArray() == true || IsObject() == true + size_t Count() const; + + // REQUIRES: IsArray() + const JSONDocument* GetFromArray(size_t i) const; + // REQUIRES: IsArray() + JSONDocument& operator[](size_t i); + // REQUIRES: IsArray() + const JSONDocument& operator[](size_t i) const; + // returns `this`, so you can chain operations. + // Copies the value + // REQUIRES: IsArray() && i < Count() + JSONDocument* SetInArray(size_t i, const JSONDocument& value); + // REQUIRES: IsArray() + JSONDocument* PushBack(const JSONDocument& value); + + bool IsNull() const; + bool IsArray() const; + bool IsBool() const; + bool IsDouble() const; + bool IsInt64() const; + bool IsObject() const; + bool IsString() const; + + // REQUIRES: IsBool() == true + bool GetBool() const; + // REQUIRES: IsDouble() == true + double GetDouble() const; + // REQUIRES: IsInt64() == true + int64_t GetInt64() const; + // REQUIRES: IsString() == true + const std::string& GetString() const; + + bool operator==(const JSONDocument& rhs) const; + + std::string DebugString() const; + + private: + class ItemsIteratorGenerator; + + public: + // REQUIRES: IsObject() + ItemsIteratorGenerator Items() const; + + // appends serialized object to dst + void Serialize(std::string* dst) const; + // returns nullptr if Slice doesn't represent valid serialized JSONDocument + static JSONDocument* Deserialize(const Slice& src); + + private: + void SerializeInternal(std::string* dst, bool type_prefix) const; + // returns false if Slice doesn't represent valid serialized JSONDocument. + // Otherwise, true + bool DeserializeInternal(Slice* input); + + typedef std::vector Array; + typedef std::unordered_map Object; + + // iteration on objects + class const_item_iterator { + public: + typedef Object::const_iterator It; + typedef Object::value_type value_type; + /* implicit */ const_item_iterator(It it) : it_(it) {} + It& operator++() { return ++it_; } + bool operator!=(const const_item_iterator& other) { + return it_ != other.it_; + } + value_type operator*() { return *it_; } + + private: + It it_; + }; + class ItemsIteratorGenerator { + public: + /* implicit */ ItemsIteratorGenerator(const Object& object) + : object_(object) {} + const_item_iterator begin() { return object_.begin(); } + const_item_iterator end() { return object_.end(); } + + private: + const Object& object_; + }; + + union Data { + Data() : n(nullptr) {} + ~Data() {} + + void* n; + Array a; + bool b; + double d; + int64_t i; + std::string s; + Object o; + } data_; + const Type type_; + + // Our serialization format's first byte specifies the encoding version. That + // way, we can easily change our format while providing backwards + // compatibility. This constant specifies the current version of the + // serialization format + static const char kSerializationFormatVersion; +}; + +} // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/include/rocksdb/utilities/stackable_db.h b/include/rocksdb/utilities/stackable_db.h new file mode 100644 index 000000000..5c8c7fe6e --- /dev/null +++ b/include/rocksdb/utilities/stackable_db.h @@ -0,0 +1,220 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#pragma once +#include "rocksdb/db.h" + +namespace rocksdb { + +// This class contains APIs to stack rocksdb wrappers.Eg. Stack TTL over base d +class StackableDB : public DB { + public: + // StackableDB is the owner of db now! + explicit StackableDB(DB* db) : db_(db) {} + + ~StackableDB() { + delete db_; + } + + virtual DB* GetBaseDB() { + return db_; + } + + virtual Status CreateColumnFamily(const ColumnFamilyOptions& options, + const std::string& column_family_name, + ColumnFamilyHandle** handle) { + return db_->CreateColumnFamily(options, column_family_name, handle); + } + + virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) { + return db_->DropColumnFamily(column_family); + } + + using DB::Put; + virtual Status Put(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& val) override { + return db_->Put(options, column_family, key, val); + } + + using DB::Get; + virtual Status Get(const ReadOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + std::string* value) override { + return db_->Get(options, column_family, key, value); + } + + using DB::MultiGet; + virtual std::vector MultiGet( + const ReadOptions& options, + const std::vector& column_family, + const std::vector& keys, + std::vector* values) override { + return db_->MultiGet(options, column_family, keys, values); + } + + using DB::KeyMayExist; + virtual bool KeyMayExist(const ReadOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + std::string* value, + bool* value_found = nullptr) override { + return db_->KeyMayExist(options, column_family, key, value, value_found); + } + + using DB::Delete; + virtual Status Delete(const WriteOptions& wopts, + ColumnFamilyHandle* column_family, + const Slice& key) override { + return db_->Delete(wopts, column_family, key); + } + + using DB::Merge; + virtual Status Merge(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) override { + return db_->Merge(options, column_family, key, value); + } + + + virtual Status Write(const WriteOptions& opts, WriteBatch* updates) + override { + return db_->Write(opts, updates); + } + + using DB::NewIterator; + virtual Iterator* NewIterator(const ReadOptions& opts, + ColumnFamilyHandle* column_family) override { + return db_->NewIterator(opts, column_family); + } + + virtual Status NewIterators( + const ReadOptions& options, + const std::vector& column_families, + std::vector* iterators) { + return db_->NewIterators(options, column_families, iterators); + } + + + virtual const Snapshot* GetSnapshot() override { + return db_->GetSnapshot(); + } + + virtual void ReleaseSnapshot(const Snapshot* snapshot) override { + return db_->ReleaseSnapshot(snapshot); + } + + using DB::GetProperty; + virtual bool GetProperty(ColumnFamilyHandle* column_family, + const Slice& property, std::string* value) override { + return db_->GetProperty(column_family, property, value); + } + + using DB::GetApproximateSizes; + virtual void GetApproximateSizes(ColumnFamilyHandle* column_family, + const Range* r, int n, + uint64_t* sizes) override { + return db_->GetApproximateSizes(column_family, r, n, sizes); + } + + using DB::CompactRange; + virtual Status CompactRange(ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end, + bool reduce_level = false, int target_level = -1, + uint32_t target_path_id = 0) override { + return db_->CompactRange(column_family, begin, end, reduce_level, + target_level, 
target_path_id); + } + + using DB::NumberLevels; + virtual int NumberLevels(ColumnFamilyHandle* column_family) override { + return db_->NumberLevels(column_family); + } + + using DB::MaxMemCompactionLevel; + virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family) + override { + return db_->MaxMemCompactionLevel(column_family); + } + + using DB::Level0StopWriteTrigger; + virtual int Level0StopWriteTrigger(ColumnFamilyHandle* column_family) + override { + return db_->Level0StopWriteTrigger(column_family); + } + + virtual const std::string& GetName() const override { + return db_->GetName(); + } + + virtual Env* GetEnv() const override { + return db_->GetEnv(); + } + + using DB::GetOptions; + virtual const Options& GetOptions(ColumnFamilyHandle* column_family) const + override { + return db_->GetOptions(column_family); + } + + using DB::Flush; + virtual Status Flush(const FlushOptions& fopts, + ColumnFamilyHandle* column_family) override { + return db_->Flush(fopts, column_family); + } + + virtual Status DisableFileDeletions() override { + return db_->DisableFileDeletions(); + } + + virtual Status EnableFileDeletions(bool force) override { + return db_->EnableFileDeletions(force); + } + + virtual void GetLiveFilesMetaData( + std::vector* metadata) override { + db_->GetLiveFilesMetaData(metadata); + } + + virtual Status GetLiveFiles(std::vector& vec, uint64_t* mfs, + bool flush_memtable = true) override { + return db_->GetLiveFiles(vec, mfs, flush_memtable); + } + + virtual SequenceNumber GetLatestSequenceNumber() const override { + return db_->GetLatestSequenceNumber(); + } + + virtual Status GetSortedWalFiles(VectorLogPtr& files) override { + return db_->GetSortedWalFiles(files); + } + + virtual Status DeleteFile(std::string name) override { + return db_->DeleteFile(name); + } + + virtual Status GetDbIdentity(std::string& identity) { + return db_->GetDbIdentity(identity); + } + + using DB::GetPropertiesOfAllTables; + virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family, + TablePropertiesCollection* props) { + return db_->GetPropertiesOfAllTables(column_family, props); + } + + virtual Status GetUpdatesSince( + SequenceNumber seq_number, unique_ptr* iter, + const TransactionLogIterator::ReadOptions& read_options) override { + return db_->GetUpdatesSince(seq_number, iter, read_options); + } + + virtual ColumnFamilyHandle* DefaultColumnFamily() const override { + return db_->DefaultColumnFamily(); + } + + protected: + DB* db_; +}; + +} // namespace rocksdb diff --git a/include/rocksdb/utilities/utility_db.h b/include/rocksdb/utilities/utility_db.h new file mode 100644 index 000000000..f4db66532 --- /dev/null +++ b/include/rocksdb/utilities/utility_db.h @@ -0,0 +1,30 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#ifndef ROCKSDB_LITE +#include +#include + +#include "rocksdb/utilities/stackable_db.h" +#include "rocksdb/utilities/db_ttl.h" +#include "rocksdb/db.h" + +namespace rocksdb { + +// Please don't use this class. It's deprecated +class UtilityDB { + public: + // This function is here only for backwards compatibility. 
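To make the deprecation concrete, a sketch of the old and new ways to open a TTL DB; both entry points are declared in this diff, and the path is hypothetical:

    rocksdb::Options options;
    options.create_if_missing = true;

    // Deprecated: goes through UtilityDB and yields a StackableDB*.
    rocksdb::StackableDB* sdb = nullptr;
    rocksdb::Status s =
        rocksdb::UtilityDB::OpenTtlDB(options, "/tmp/rocksdb_ttl_example", &sdb, 3600);

    // Preferred: DBWithTTL::Open from rocksdb/utilities/db_ttl.h.
    rocksdb::DBWithTTL* tdb = nullptr;
    s = rocksdb::DBWithTTL::Open(options, "/tmp/rocksdb_ttl_example", &tdb, 3600);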
Please use the + // functions defined in DBWithTTl (rocksdb/utilities/db_ttl.h) + // (deprecated) + __attribute__((deprecated)) static Status OpenTtlDB(const Options& options, + const std::string& name, + StackableDB** dbptr, + int32_t ttl = 0, + bool read_only = false); +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/include/utilities/backupable_db.h b/include/utilities/backupable_db.h index 617fe8aef..43d5a5cec 100644 --- a/include/utilities/backupable_db.h +++ b/include/utilities/backupable_db.h @@ -8,244 +8,5 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. #pragma once -#ifndef ROCKSDB_LITE - -#define __STDC_FORMAT_MACROS -#include -#include -#include -#include - -#include "utilities/stackable_db.h" -#include "rocksdb/env.h" -#include "rocksdb/status.h" - -namespace rocksdb { - -struct BackupableDBOptions { - // Where to keep the backup files. Has to be different than dbname_ - // Best to set this to dbname_ + "/backups" - // Required - std::string backup_dir; - - // Backup Env object. It will be used for backup file I/O. If it's - // nullptr, backups will be written out using DBs Env. If it's - // non-nullptr, backup's I/O will be performed using this object. - // If you want to have backups on HDFS, use HDFS Env here! - // Default: nullptr - Env* backup_env; - - // If share_table_files == true, backup will assume that table files with - // same name have the same contents. This enables incremental backups and - // avoids unnecessary data copies. - // If share_table_files == false, each backup will be on its own and will - // not share any data with other backups. - // default: true - bool share_table_files; - - // Backup info and error messages will be written to info_log - // if non-nullptr. - // Default: nullptr - Logger* info_log; - - // If sync == true, we can guarantee you'll get consistent backup even - // on a machine crash/reboot. Backup process is slower with sync enabled. - // If sync == false, we don't guarantee anything on machine reboot. However, - // chances are some of the backups are consistent. - // Default: true - bool sync; - - // If true, it will delete whatever backups there are already - // Default: false - bool destroy_old_data; - - // If false, we won't backup log files. This option can be useful for backing - // up in-memory databases where log file are persisted, but table files are in - // memory. - // Default: true - bool backup_log_files; - - // Max bytes that can be transferred in a second during backup. - // If 0, go as fast as you can - // Default: 0 - uint64_t backup_rate_limit; - - // Max bytes that can be transferred in a second during restore. - // If 0, go as fast as you can - // Default: 0 - uint64_t restore_rate_limit; - - // Only used if share_table_files is set to true. 
If true, will consider that - // backups can come from different databases, hence a sst is not uniquely - // identifed by its name, but by the triple (file name, crc32, file length) - // Default: false - // Note: this is an experimental option, and you'll need to set it manually - // *turn it on only if you know what you're doing* - bool share_files_with_checksum; - - void Dump(Logger* logger) const; - - explicit BackupableDBOptions(const std::string& _backup_dir, - Env* _backup_env = nullptr, - bool _share_table_files = true, - Logger* _info_log = nullptr, bool _sync = true, - bool _destroy_old_data = false, - bool _backup_log_files = true, - uint64_t _backup_rate_limit = 0, - uint64_t _restore_rate_limit = 0) - : backup_dir(_backup_dir), - backup_env(_backup_env), - share_table_files(_share_table_files), - info_log(_info_log), - sync(_sync), - destroy_old_data(_destroy_old_data), - backup_log_files(_backup_log_files), - backup_rate_limit(_backup_rate_limit), - restore_rate_limit(_restore_rate_limit), - share_files_with_checksum(false) { - assert(share_table_files || !share_files_with_checksum); - } -}; - -struct RestoreOptions { - // If true, restore won't overwrite the existing log files in wal_dir. It will - // also move all log files from archive directory to wal_dir. Use this option - // in combination with BackupableDBOptions::backup_log_files = false for - // persisting in-memory databases. - // Default: false - bool keep_log_files; - - explicit RestoreOptions(bool _keep_log_files = false) - : keep_log_files(_keep_log_files) {} -}; - -typedef uint32_t BackupID; - -struct BackupInfo { - BackupID backup_id; - int64_t timestamp; - uint64_t size; - - BackupInfo() {} - BackupInfo(BackupID _backup_id, int64_t _timestamp, uint64_t _size) - : backup_id(_backup_id), timestamp(_timestamp), size(_size) {} -}; - -class BackupEngineReadOnly { - public: - virtual ~BackupEngineReadOnly() {} - - static BackupEngineReadOnly* NewReadOnlyBackupEngine( - Env* db_env, const BackupableDBOptions& options); - - // You can GetBackupInfo safely, even with other BackupEngine performing - // backups on the same directory - virtual void GetBackupInfo(std::vector* backup_info) = 0; - - // Restoring DB from backup is NOT safe when there is another BackupEngine - // running that might call DeleteBackup() or PurgeOldBackups(). It is caller's - // responsibility to synchronize the operation, i.e. 
don't delete the backup - // when you're restoring from it - virtual Status RestoreDBFromBackup( - BackupID backup_id, const std::string& db_dir, const std::string& wal_dir, - const RestoreOptions& restore_options = RestoreOptions()) = 0; - virtual Status RestoreDBFromLatestBackup( - const std::string& db_dir, const std::string& wal_dir, - const RestoreOptions& restore_options = RestoreOptions()) = 0; -}; - -// Please see the documentation in BackupableDB and RestoreBackupableDB -class BackupEngine { - public: - virtual ~BackupEngine() {} - - static BackupEngine* NewBackupEngine(Env* db_env, - const BackupableDBOptions& options); - - virtual Status CreateNewBackup(DB* db, bool flush_before_backup = false) = 0; - virtual Status PurgeOldBackups(uint32_t num_backups_to_keep) = 0; - virtual Status DeleteBackup(BackupID backup_id) = 0; - virtual void StopBackup() = 0; - - virtual void GetBackupInfo(std::vector* backup_info) = 0; - virtual Status RestoreDBFromBackup( - BackupID backup_id, const std::string& db_dir, const std::string& wal_dir, - const RestoreOptions& restore_options = RestoreOptions()) = 0; - virtual Status RestoreDBFromLatestBackup( - const std::string& db_dir, const std::string& wal_dir, - const RestoreOptions& restore_options = RestoreOptions()) = 0; -}; - -// Stack your DB with BackupableDB to be able to backup the DB -class BackupableDB : public StackableDB { - public: - // BackupableDBOptions have to be the same as the ones used in a previous - // incarnation of the DB - // - // BackupableDB ownes the pointer `DB* db` now. You should not delete it or - // use it after the invocation of BackupableDB - BackupableDB(DB* db, const BackupableDBOptions& options); - virtual ~BackupableDB(); - - // Captures the state of the database in the latest backup - // NOT a thread safe call - Status CreateNewBackup(bool flush_before_backup = false); - // Returns info about backups in backup_info - void GetBackupInfo(std::vector* backup_info); - // deletes old backups, keeping latest num_backups_to_keep alive - Status PurgeOldBackups(uint32_t num_backups_to_keep); - // deletes a specific backup - Status DeleteBackup(BackupID backup_id); - // Call this from another thread if you want to stop the backup - // that is currently happening. It will return immediatelly, will - // not wait for the backup to stop. - // The backup will stop ASAP and the call to CreateNewBackup will - // return Status::Incomplete(). It will not clean up after itself, but - // the state will remain consistent. The state will be cleaned up - // next time you create BackupableDB or RestoreBackupableDB. - void StopBackup(); - - private: - BackupEngine* backup_engine_; -}; - -// Use this class to access information about backups and restore from them -class RestoreBackupableDB { - public: - RestoreBackupableDB(Env* db_env, const BackupableDBOptions& options); - ~RestoreBackupableDB(); - - // Returns info about backups in backup_info - void GetBackupInfo(std::vector* backup_info); - - // restore from backup with backup_id - // IMPORTANT -- if options_.share_table_files == true and you restore DB - // from some backup that is not the latest, and you start creating new - // backups from the new DB, they will probably fail - // - // Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3. - // If you add new data to the DB and try creating a new backup now, the - // database will diverge from backups 4 and 5 and the new backup will fail. 
- // If you want to create new backup, you will first have to delete backups 4 - // and 5. - Status RestoreDBFromBackup(BackupID backup_id, const std::string& db_dir, - const std::string& wal_dir, - const RestoreOptions& restore_options = - RestoreOptions()); - - // restore from the latest backup - Status RestoreDBFromLatestBackup(const std::string& db_dir, - const std::string& wal_dir, - const RestoreOptions& restore_options = - RestoreOptions()); - // deletes old backups, keeping latest num_backups_to_keep alive - Status PurgeOldBackups(uint32_t num_backups_to_keep); - // deletes a specific backup - Status DeleteBackup(BackupID backup_id); - - private: - BackupEngine* backup_engine_; -}; - -} // namespace rocksdb -#endif // ROCKSDB_LITE +#warning This file was moved to rocksdb/utilities/backupable_db.h +#include "rocksdb/utilities/backupable_db.h" diff --git a/include/utilities/db_ttl.h b/include/utilities/db_ttl.h index e99744d8f..c3d5c2bcf 100644 --- a/include/utilities/db_ttl.h +++ b/include/utilities/db_ttl.h @@ -4,65 +4,5 @@ // of patent rights can be found in the PATENTS file in the same directory. #pragma once -#ifndef ROCKSDB_LITE - -#include -#include - -#include "utilities/stackable_db.h" -#include "rocksdb/db.h" - -namespace rocksdb { - -// Database with TTL support. -// -// USE-CASES: -// This API should be used to open the db when key-values inserted are -// meant to be removed from the db in a non-strict 'ttl' amount of time -// Therefore, this guarantees that key-values inserted will remain in the -// db for >= ttl amount of time and the db will make efforts to remove the -// key-values as soon as possible after ttl seconds of their insertion. -// -// BEHAVIOUR: -// TTL is accepted in seconds -// (int32_t)Timestamp(creation) is suffixed to values in Put internally -// Expired TTL values deleted in compaction only:(Timestamp+ttl=5 -// read_only=true opens in the usual read-only mode. 
Compactions will not be -// triggered(neither manual nor automatic), so no expired entries removed -// -// CONSTRAINTS: -// Not specifying/passing or non-positive TTL behaves like TTL = infinity -// -// !!!WARNING!!!: -// Calling DB::Open directly to re-open a db created by this API will get -// corrupt values(timestamp suffixed) and no ttl effect will be there -// during the second Open, so use this API consistently to open the db -// Be careful when passing ttl with a small positive value because the -// whole database may be deleted in a small amount of time - -class DBWithTTL : public StackableDB { - public: - virtual Status CreateColumnFamilyWithTtl( - const ColumnFamilyOptions& options, const std::string& column_family_name, - ColumnFamilyHandle** handle, int ttl) = 0; - - static Status Open(const Options& options, const std::string& dbname, - DBWithTTL** dbptr, int32_t ttl = 0, - bool read_only = false); - - static Status Open(const DBOptions& db_options, const std::string& dbname, - const std::vector& column_families, - std::vector* handles, - DBWithTTL** dbptr, std::vector ttls, - bool read_only = false); - - protected: - explicit DBWithTTL(DB* db) : StackableDB(db) {} -}; - -} // namespace rocksdb -#endif // ROCKSDB_LITE +#warning This file was moved to rocksdb/utilities/db_ttl.h +#include "rocksdb/utilities/db_ttl.h" diff --git a/include/utilities/document_db.h b/include/utilities/document_db.h index 8e072ad29..1d1330bca 100644 --- a/include/utilities/document_db.h +++ b/include/utilities/document_db.h @@ -4,146 +4,5 @@ // of patent rights can be found in the PATENTS file in the same directory. #pragma once -#ifndef ROCKSDB_LITE - -#include -#include - -#include "utilities/stackable_db.h" -#include "utilities/json_document.h" -#include "rocksdb/db.h" - -namespace rocksdb { - -// IMPORTANT: DocumentDB is a work in progress. It is unstable and we might -// change the API without warning. Talk to RocksDB team before using this in -// production ;) - -// DocumentDB is a layer on top of RocksDB that provides a very simple JSON API. -// When creating a DB, you specify a list of indexes you want to keep on your -// data. You can insert a JSON document to the DB, which is automatically -// indexed. Every document added to the DB needs to have "_id" field which is -// automatically indexed and is an unique primary key. All other indexes are -// non-unique. - -// NOTE: field names in the JSON are NOT allowed to start with '$' or -// contain '.'. We don't currently enforce that rule, but will start behaving -// badly. - -// Cursor is what you get as a result of executing query. To get all -// results from a query, call Next() on a Cursor while Valid() returns true -class Cursor { - public: - Cursor() = default; - virtual ~Cursor() {} - - virtual bool Valid() const = 0; - virtual void Next() = 0; - // Lifecycle of the returned JSONDocument is until the next Next() call - virtual const JSONDocument& document() const = 0; - virtual Status status() const = 0; - - private: - // No copying allowed - Cursor(const Cursor&); - void operator=(const Cursor&); -}; - -struct DocumentDBOptions { - int background_threads = 4; - uint64_t memtable_size = 128 * 1024 * 1024; // 128 MB - uint64_t cache_size = 1 * 1024 * 1024 * 1024; // 1 GB -}; - -// TODO(icanadi) Add `JSONDocument* info` parameter to all calls that can be -// used by the caller to get more information about the call execution (number -// of dropped records, number of updated records, etc.) 
-class DocumentDB : public StackableDB { - public: - struct IndexDescriptor { - // Currently, you can only define an index on a single field. To specify an - // index on a field X, set index description to JSON "{X: 1}" - // Currently the value needs to be 1, which means ascending. - // In the future, we plan to also support indexes on multiple keys, where - // you could mix ascending sorting (1) with descending sorting indexes (-1) - JSONDocument* description; - std::string name; - }; - - // Open DocumentDB with specified indexes. The list of indexes has to be - // complete, i.e. include all indexes present in the DB, except the primary - // key index. - // Otherwise, Open() will return an error - static Status Open(const DocumentDBOptions& options, const std::string& name, - const std::vector& indexes, - DocumentDB** db, bool read_only = false); - - explicit DocumentDB(DB* db) : StackableDB(db) {} - - // Create a new index. It will stop all writes for the duration of the call. - // All current documents in the DB are scanned and corresponding index entries - // are created - virtual Status CreateIndex(const WriteOptions& write_options, - const IndexDescriptor& index) = 0; - - // Drop an index. Client is responsible to make sure that index is not being - // used by currently executing queries - virtual Status DropIndex(const std::string& name) = 0; - - // Insert a document to the DB. The document needs to have a primary key "_id" - // which can either be a string or an integer. Otherwise the write will fail - // with InvalidArgument. - virtual Status Insert(const WriteOptions& options, - const JSONDocument& document) = 0; - - // Deletes all documents matching a filter atomically - virtual Status Remove(const ReadOptions& read_options, - const WriteOptions& write_options, - const JSONDocument& query) = 0; - - // Does this sequence of operations: - // 1. Find all documents matching a filter - // 2. For all documents, atomically: - // 2.1. apply the update operators - // 2.2. update the secondary indexes - // - // Currently only $set update operator is supported. - // Syntax is: {$set: {key1: value1, key2: value2, etc...}} - // This operator will change a document's key1 field to value1, key2 to - // value2, etc. New values will be set even if a document didn't have an entry - // for the specified key. - // - // You can not change a primary key of a document. - // - // Update example: Update({id: {$gt: 5}, $index: id}, {$set: {enabled: true}}) - virtual Status Update(const ReadOptions& read_options, - const WriteOptions& write_options, - const JSONDocument& filter, - const JSONDocument& updates) = 0; - - // query has to be an array in which every element is an operator. Currently - // only $filter operator is supported. Syntax of $filter operator is: - // {$filter: {key1: condition1, key2: condition2, etc.}} where conditions can - // be either: - // 1) a single value in which case the condition is equality condition, or - // 2) a defined operators, like {$gt: 4}, which will match all documents that - // have key greater than 4. 
- // - // Supported operators are: - // 1) $gt -- greater than - // 2) $gte -- greater than or equal - // 3) $lt -- less than - // 4) $lte -- less than or equal - // If you want the filter to use an index, you need to specify it like this: - // {$filter: {...(conditions)..., $index: index_name}} - // - // Example query: - // * [{$filter: {name: John, age: {$gte: 18}, $index: age}}] - // will return all Johns whose age is greater or equal to 18 and it will use - // index "age" to satisfy the query. - virtual Cursor* Query(const ReadOptions& read_options, - const JSONDocument& query) = 0; -}; - -} // namespace rocksdb -#endif // ROCKSDB_LITE +#warning This file was moved to rocksdb/utilities/document_db.h +#include "rocksdb/utilities/document_db.h" diff --git a/include/utilities/geo_db.h b/include/utilities/geo_db.h index 87ff5e6a0..48957d407 100644 --- a/include/utilities/geo_db.h +++ b/include/utilities/geo_db.h @@ -2,104 +2,7 @@ // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -// -#ifndef ROCKSDB_LITE #pragma once -#include -#include - -#include "utilities/stackable_db.h" -#include "rocksdb/status.h" - -namespace rocksdb { - -// -// Configurable options needed for setting up a Geo database -// -struct GeoDBOptions { - // Backup info and error messages will be written to info_log - // if non-nullptr. - // Default: nullptr - Logger* info_log; - - explicit GeoDBOptions(Logger* _info_log = nullptr):info_log(_info_log) { } -}; - -// -// A position in the earth's geoid -// -class GeoPosition { - public: - double latitude; - double longitude; - - explicit GeoPosition(double la = 0, double lo = 0) : - latitude(la), longitude(lo) { - } -}; - -// -// Description of an object on the Geoid. It is located by a GPS location, -// and is identified by the id. The value associated with this object is -// an opaque string 'value'. Different objects identified by unique id's -// can have the same gps-location associated with them. -// -class GeoObject { - public: - GeoPosition position; - std::string id; - std::string value; - - GeoObject() {} - - GeoObject(const GeoPosition& pos, const std::string& i, - const std::string& val) : - position(pos), id(i), value(val) { - } -}; - -// -// Stack your DB with GeoDB to be able to get geo-spatial support -// -class GeoDB : public StackableDB { - public: - // GeoDBOptions have to be the same as the ones used in a previous - // incarnation of the DB - // - // GeoDB owns the pointer `DB* db` now. You should not delete it or - // use it after the invocation of GeoDB - // GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {} - GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {} - virtual ~GeoDB() {} - - // Insert a new object into the location database. The object is - // uniquely identified by the id. If an object with the same id already - // exists in the db, then the old one is overwritten by the new - // object being inserted here. - virtual Status Insert(const GeoObject& object) = 0; - - // Retrieve the value of the object located at the specified GPS - // location and is identified by the 'id'. - virtual Status GetByPosition(const GeoPosition& pos, - const Slice& id, std::string* value) = 0; - - // Retrieve the value of the object identified by the 'id'. 
This method - // could be potentially slower than GetByPosition - virtual Status GetById(const Slice& id, GeoObject* object) = 0; - - // Delete the specified object - virtual Status Remove(const Slice& id) = 0; - - // Returns a list of all items within a circular radius from the - // specified gps location. If 'number_of_values' is specified, - // then this call returns at most that many number of objects. - // The radius is specified in 'meters'. - virtual Status SearchRadial(const GeoPosition& pos, - double radius, - std::vector* values, - int number_of_values = INT_MAX) = 0; -}; - -} // namespace rocksdb -#endif // ROCKSDB_LITE +#warning This file was moved to rocksdb/utilities/geo_db.h +#include "rocksdb/utilities/geo_db.h" diff --git a/include/utilities/json_document.h b/include/utilities/json_document.h index ceb058cf9..f3f93969d 100644 --- a/include/utilities/json_document.h +++ b/include/utilities/json_document.h @@ -3,172 +3,5 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. #pragma once -#ifndef ROCKSDB_LITE - -#include -#include -#include -#include - -#include "rocksdb/slice.h" - -// We use JSONDocument for DocumentDB API -// Implementation inspired by folly::dynamic and rapidjson - -namespace rocksdb { - -// NOTE: none of this is thread-safe -class JSONDocument { - public: - // return nullptr on parse failure - static JSONDocument* ParseJSON(const char* json); - - enum Type { - kNull, - kArray, - kBool, - kDouble, - kInt64, - kObject, - kString, - }; - - JSONDocument(); // null - /* implicit */ JSONDocument(bool b); - /* implicit */ JSONDocument(double d); - /* implicit */ JSONDocument(int64_t i); - /* implicit */ JSONDocument(const std::string& s); - /* implicit */ JSONDocument(const char* s); - // constructs JSONDocument of specific type with default value - explicit JSONDocument(Type type); - - // copy constructor - JSONDocument(const JSONDocument& json_document); - - ~JSONDocument(); - - Type type() const; - - // REQUIRES: IsObject() - bool Contains(const std::string& key) const; - // Returns nullptr if !Contains() - // don't delete the returned pointer - // REQUIRES: IsObject() - const JSONDocument* Get(const std::string& key) const; - // REQUIRES: IsObject() - JSONDocument& operator[](const std::string& key); - // REQUIRES: IsObject() - const JSONDocument& operator[](const std::string& key) const; - // returns `this`, so you can chain operations. - // Copies value - // REQUIRES: IsObject() - JSONDocument* Set(const std::string& key, const JSONDocument& value); - - // REQUIRES: IsArray() == true || IsObject() == true - size_t Count() const; - - // REQUIRES: IsArray() - const JSONDocument* GetFromArray(size_t i) const; - // REQUIRES: IsArray() - JSONDocument& operator[](size_t i); - // REQUIRES: IsArray() - const JSONDocument& operator[](size_t i) const; - // returns `this`, so you can chain operations. 
diff --git a/include/utilities/json_document.h b/include/utilities/json_document.h
index ceb058cf9..f3f93969d 100644
--- a/include/utilities/json_document.h
+++ b/include/utilities/json_document.h
@@ -3,172 +3,5 @@
 // LICENSE file in the root directory of this source tree. An additional grant
 // of patent rights can be found in the PATENTS file in the same directory.
 #pragma once
-#ifndef ROCKSDB_LITE
-
-#include
-#include
-#include
-#include
-
-#include "rocksdb/slice.h"
-
-// We use JSONDocument for DocumentDB API
-// Implementation inspired by folly::dynamic and rapidjson
-
-namespace rocksdb {
-
-// NOTE: none of this is thread-safe
-class JSONDocument {
- public:
-  // return nullptr on parse failure
-  static JSONDocument* ParseJSON(const char* json);
-
-  enum Type {
-    kNull,
-    kArray,
-    kBool,
-    kDouble,
-    kInt64,
-    kObject,
-    kString,
-  };
-
-  JSONDocument();  // null
-  /* implicit */ JSONDocument(bool b);
-  /* implicit */ JSONDocument(double d);
-  /* implicit */ JSONDocument(int64_t i);
-  /* implicit */ JSONDocument(const std::string& s);
-  /* implicit */ JSONDocument(const char* s);
-  // constructs JSONDocument of specific type with default value
-  explicit JSONDocument(Type type);
-
-  // copy constructor
-  JSONDocument(const JSONDocument& json_document);
-
-  ~JSONDocument();
-
-  Type type() const;
-
-  // REQUIRES: IsObject()
-  bool Contains(const std::string& key) const;
-  // Returns nullptr if !Contains()
-  // don't delete the returned pointer
-  // REQUIRES: IsObject()
-  const JSONDocument* Get(const std::string& key) const;
-  // REQUIRES: IsObject()
-  JSONDocument& operator[](const std::string& key);
-  // REQUIRES: IsObject()
-  const JSONDocument& operator[](const std::string& key) const;
-  // returns `this`, so you can chain operations.
-  // Copies value
-  // REQUIRES: IsObject()
-  JSONDocument* Set(const std::string& key, const JSONDocument& value);
-
-  // REQUIRES: IsArray() == true || IsObject() == true
-  size_t Count() const;
-
-  // REQUIRES: IsArray()
-  const JSONDocument* GetFromArray(size_t i) const;
-  // REQUIRES: IsArray()
-  JSONDocument& operator[](size_t i);
-  // REQUIRES: IsArray()
-  const JSONDocument& operator[](size_t i) const;
-  // returns `this`, so you can chain operations.
-  // Copies the value
-  // REQUIRES: IsArray() && i < Count()
-  JSONDocument* SetInArray(size_t i, const JSONDocument& value);
-  // REQUIRES: IsArray()
-  JSONDocument* PushBack(const JSONDocument& value);
-
-  bool IsNull() const;
-  bool IsArray() const;
-  bool IsBool() const;
-  bool IsDouble() const;
-  bool IsInt64() const;
-  bool IsObject() const;
-  bool IsString() const;
-
-  // REQUIRES: IsBool() == true
-  bool GetBool() const;
-  // REQUIRES: IsDouble() == true
-  double GetDouble() const;
-  // REQUIRES: IsInt64() == true
-  int64_t GetInt64() const;
-  // REQUIRES: IsString() == true
-  const std::string& GetString() const;
-
-  bool operator==(const JSONDocument& rhs) const;
-
-  std::string DebugString() const;
-
- private:
-  class ItemsIteratorGenerator;
-
- public:
-  // REQUIRES: IsObject()
-  ItemsIteratorGenerator Items() const;
-
-  // appends serialized object to dst
-  void Serialize(std::string* dst) const;
-  // returns nullptr if Slice doesn't represent valid serialized JSONDocument
-  static JSONDocument* Deserialize(const Slice& src);
-
- private:
-  void SerializeInternal(std::string* dst, bool type_prefix) const;
-  // returns false if Slice doesn't represent valid serialized JSONDocument.
-  // Otherwise, true
-  bool DeserializeInternal(Slice* input);
-
-  typedef std::vector<JSONDocument> Array;
-  typedef std::unordered_map<std::string, JSONDocument> Object;
-
-  // iteration on objects
-  class const_item_iterator {
-   public:
-    typedef Object::const_iterator It;
-    typedef Object::value_type value_type;
-    /* implicit */ const_item_iterator(It it) : it_(it) {}
-    It& operator++() { return ++it_; }
-    bool operator!=(const const_item_iterator& other) {
-      return it_ != other.it_;
-    }
-    value_type operator*() { return *it_; }
-
-   private:
-    It it_;
-  };
-  class ItemsIteratorGenerator {
-   public:
-    /* implicit */ ItemsIteratorGenerator(const Object& object)
-        : object_(object) {}
-    const_item_iterator begin() { return object_.begin(); }
-    const_item_iterator end() { return object_.end(); }
-
-   private:
-    const Object& object_;
-  };
-
-  union Data {
-    Data() : n(nullptr) {}
-    ~Data() {}
-
-    void* n;
-    Array a;
-    bool b;
-    double d;
-    int64_t i;
-    std::string s;
-    Object o;
-  } data_;
-  const Type type_;
-
-  // Our serialization format's first byte specifies the encoding version. That
-  // way, we can easily change our format while providing backwards
-  // compatibility. This constant specifies the current version of the
-  // serialization format
-  static const char kSerializationFormatVersion;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
+#warning This file was moved to rocksdb/utilities/json_document.h
+#include "rocksdb/utilities/json_document.h"
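A hedged round-trip sketch of the JSONDocument API declared above, using the new header path. The JSON literal is illustrative, and it assumes integer literals parse as kInt64 in this implementation.

```cpp
#include <memory>
#include <string>

#include "rocksdb/utilities/json_document.h"  // new include path after this change

// Hypothetical helper: parse, read a couple of fields, serialize, parse back.
bool RoundTrip() {
  std::unique_ptr<rocksdb::JSONDocument> doc(
      rocksdb::JSONDocument::ParseJSON("{\"name\": \"John\", \"age\": 18}"));
  if (doc == nullptr || !doc->IsObject()) {
    return false;  // ParseJSON returns nullptr on parse failure
  }

  const std::string& name = (*doc)["name"].GetString();
  int64_t age = (*doc)["age"].GetInt64();  // assumes "age" is kInt64
  (void)name;
  (void)age;

  // Serialize to the binary format, then deserialize and compare.
  std::string serialized;
  doc->Serialize(&serialized);
  std::unique_ptr<rocksdb::JSONDocument> copy(
      rocksdb::JSONDocument::Deserialize(rocksdb::Slice(serialized)));
  return copy != nullptr && *copy == *doc;
}
```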
diff --git a/include/utilities/stackable_db.h b/include/utilities/stackable_db.h
index 5c8c7fe6e..435818d2b 100644
--- a/include/utilities/stackable_db.h
+++ b/include/utilities/stackable_db.h
@@ -3,218 +3,5 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 #pragma once
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-// This class contains APIs to stack rocksdb wrappers.Eg. Stack TTL over base db
-class StackableDB : public DB {
- public:
-  // StackableDB is the owner of db now!
-  explicit StackableDB(DB* db) : db_(db) {}
-
-  ~StackableDB() {
-    delete db_;
-  }
-
-  virtual DB* GetBaseDB() {
-    return db_;
-  }
-
-  virtual Status CreateColumnFamily(const ColumnFamilyOptions& options,
-                                    const std::string& column_family_name,
-                                    ColumnFamilyHandle** handle) {
-    return db_->CreateColumnFamily(options, column_family_name, handle);
-  }
-
-  virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) {
-    return db_->DropColumnFamily(column_family);
-  }
-
-  using DB::Put;
-  virtual Status Put(const WriteOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& val) override {
-    return db_->Put(options, column_family, key, val);
-  }
-
-  using DB::Get;
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     std::string* value) override {
-    return db_->Get(options, column_family, key, value);
-  }
-
-  using DB::MultiGet;
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override {
-    return db_->MultiGet(options, column_family, keys, values);
-  }
-
-  using DB::KeyMayExist;
-  virtual bool KeyMayExist(const ReadOptions& options,
-                           ColumnFamilyHandle* column_family, const Slice& key,
-                           std::string* value,
-                           bool* value_found = nullptr) override {
-    return db_->KeyMayExist(options, column_family, key, value, value_found);
-  }
-
-  using DB::Delete;
-  virtual Status Delete(const WriteOptions& wopts,
-                        ColumnFamilyHandle* column_family,
-                        const Slice& key) override {
-    return db_->Delete(wopts, column_family, key);
-  }
-
-  using DB::Merge;
-  virtual Status Merge(const WriteOptions& options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) override {
-    return db_->Merge(options, column_family, key, value);
-  }
-
-
-  virtual Status Write(const WriteOptions& opts, WriteBatch* updates)
-      override {
-    return db_->Write(opts, updates);
-  }
-
-  using DB::NewIterator;
-  virtual Iterator* NewIterator(const ReadOptions& opts,
-                                ColumnFamilyHandle* column_family) override {
-    return db_->NewIterator(opts, column_family);
-  }
-
-  virtual Status NewIterators(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_families,
-      std::vector<Iterator*>* iterators) {
-    return db_->NewIterators(options, column_families, iterators);
-  }
-
-
-  virtual const Snapshot* GetSnapshot() override {
-    return db_->GetSnapshot();
-  }
-
-  virtual void ReleaseSnapshot(const Snapshot* snapshot) override {
-    return db_->ReleaseSnapshot(snapshot);
-  }
-
-  using DB::GetProperty;
-  virtual bool GetProperty(ColumnFamilyHandle* column_family,
-                           const Slice& property, std::string* value) override {
-    return db_->GetProperty(column_family, property, value);
-  }
-
-  using DB::GetApproximateSizes;
-  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
-                                   const Range* r, int n,
-                                   uint64_t* sizes) override {
-    return db_->GetApproximateSizes(column_family, r, n, sizes);
-  }
-
-  using DB::CompactRange;
-  virtual Status CompactRange(ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end,
-                              bool reduce_level = false, int target_level = -1,
-                              uint32_t target_path_id = 0) override {
-    return db_->CompactRange(column_family, begin, end, reduce_level,
-                             target_level, target_path_id);
-  }
-
-  using DB::NumberLevels;
-  virtual int NumberLevels(ColumnFamilyHandle* column_family) override {
-    return db_->NumberLevels(column_family);
-  }
-
-  using DB::MaxMemCompactionLevel;
-  virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family)
-      override {
-    return db_->MaxMemCompactionLevel(column_family);
-  }
-
-  using DB::Level0StopWriteTrigger;
-  virtual int Level0StopWriteTrigger(ColumnFamilyHandle* column_family)
-      override {
-    return db_->Level0StopWriteTrigger(column_family);
-  }
-
-  virtual const std::string& GetName() const override {
-    return db_->GetName();
-  }
-
-  virtual Env* GetEnv() const override {
-    return db_->GetEnv();
-  }
-
-  using DB::GetOptions;
-  virtual const Options& GetOptions(ColumnFamilyHandle* column_family) const
-      override {
-    return db_->GetOptions(column_family);
-  }
-
-  using DB::Flush;
-  virtual Status Flush(const FlushOptions& fopts,
-                       ColumnFamilyHandle* column_family) override {
-    return db_->Flush(fopts, column_family);
-  }
-
-  virtual Status DisableFileDeletions() override {
-    return db_->DisableFileDeletions();
-  }
-
-  virtual Status EnableFileDeletions(bool force) override {
-    return db_->EnableFileDeletions(force);
-  }
-
-  virtual void GetLiveFilesMetaData(
-      std::vector<LiveFileMetaData>* metadata) override {
-    db_->GetLiveFilesMetaData(metadata);
-  }
-
-  virtual Status GetLiveFiles(std::vector<std::string>& vec, uint64_t* mfs,
-                              bool flush_memtable = true) override {
-    return db_->GetLiveFiles(vec, mfs, flush_memtable);
-  }
-
-  virtual SequenceNumber GetLatestSequenceNumber() const override {
-    return db_->GetLatestSequenceNumber();
-  }
-
-  virtual Status GetSortedWalFiles(VectorLogPtr& files) override {
-    return db_->GetSortedWalFiles(files);
-  }
-
-  virtual Status DeleteFile(std::string name) override {
-    return db_->DeleteFile(name);
-  }
-
-  virtual Status GetDbIdentity(std::string& identity) {
-    return db_->GetDbIdentity(identity);
-  }
-
-  using DB::GetPropertiesOfAllTables;
-  virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family,
-                                          TablePropertiesCollection* props) {
-    return db_->GetPropertiesOfAllTables(column_family, props);
-  }
-
-  virtual Status GetUpdatesSince(
-      SequenceNumber seq_number, unique_ptr<TransactionLogIterator>* iter,
-      const TransactionLogIterator::ReadOptions& read_options) override {
-    return db_->GetUpdatesSince(seq_number, iter, read_options);
-  }
-
-  virtual ColumnFamilyHandle* DefaultColumnFamily() const override {
-    return db_->DefaultColumnFamily();
-  }
-
- protected:
-  DB* db_;
-};
-
-}  // namespace rocksdb
+#warning This file was moved to rocksdb/utilities/stackable_db.h
+#include "rocksdb/utilities/stackable_db.h"
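A minimal sketch of the stacking pattern StackableDB enables, against the relocated header: a wrapper that counts Put() calls and forwards everything else to the wrapped DB. The class and counter names are illustrative, not part of RocksDB; note the wrapper takes ownership of the base DB, as documented above.

```cpp
#include <atomic>
#include <cstdint>

#include "rocksdb/utilities/stackable_db.h"  // new include path after this change

// Hypothetical wrapper: counts Put() calls, delegates everything else.
class CountingDB : public rocksdb::StackableDB {
 public:
  explicit CountingDB(rocksdb::DB* db) : rocksdb::StackableDB(db) {}

  using rocksdb::StackableDB::Put;
  virtual rocksdb::Status Put(const rocksdb::WriteOptions& options,
                              rocksdb::ColumnFamilyHandle* column_family,
                              const rocksdb::Slice& key,
                              const rocksdb::Slice& val) override {
    puts_.fetch_add(1, std::memory_order_relaxed);
    return rocksdb::StackableDB::Put(options, column_family, key, val);
  }

  uint64_t puts() const { return puts_.load(std::memory_order_relaxed); }

 private:
  std::atomic<uint64_t> puts_{0};
};
```

This is the same shape used by the TTL, backup, Geo, and Document wrappers elsewhere in this patch: derive from StackableDB, override only the calls you care about, and delegate the rest through the stored `db_`.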
diff --git a/include/utilities/utility_db.h b/include/utilities/utility_db.h
index f2b99cedf..4a8bbaec3 100644
--- a/include/utilities/utility_db.h
+++ b/include/utilities/utility_db.h
@@ -3,28 +3,5 @@
 #pragma once
-#ifndef ROCKSDB_LITE
-#include
-#include
-
-#include "utilities/stackable_db.h"
-#include "utilities/db_ttl.h"
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-// Please don't use this class. It's deprecated
-class UtilityDB {
- public:
-  // This function is here only for backwards compatibility. Please use the
-  // functions defined in DBWithTTl (utilities/db_ttl.h)
-  // (deprecated)
-  __attribute__((deprecated)) static Status OpenTtlDB(const Options& options,
-                                                      const std::string& name,
-                                                      StackableDB** dbptr,
-                                                      int32_t ttl = 0,
-                                                      bool read_only = false);
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
+#warning This file was moved to rocksdb/utilities/utility_db.h
+#include "rocksdb/utilities/utility_db.h"
diff --git a/tools/db_stress.cc b/tools/db_stress.cc
index aae0e0598..05dd3cc88 100644
--- a/tools/db_stress.cc
+++ b/tools/db_stress.cc
@@ -36,7 +36,7 @@ int main() {
 #include "db/version_set.h"
 #include "rocksdb/statistics.h"
 #include "rocksdb/cache.h"
-#include "utilities/db_ttl.h"
+#include "rocksdb/utilities/db_ttl.h"
 #include "rocksdb/env.h"
 #include "rocksdb/write_batch.h"
 #include "rocksdb/slice.h"
diff --git a/util/ldb_cmd.h b/util/ldb_cmd.h
index 50dcbf929..0553fe64a 100644
--- a/util/ldb_cmd.h
+++ b/util/ldb_cmd.h
@@ -17,10 +17,10 @@
 #include "rocksdb/ldb_tool.h"
 #include "rocksdb/options.h"
 #include "rocksdb/slice.h"
+#include "rocksdb/utilities/db_ttl.h"
 #include "util/logging.h"
 #include "util/ldb_cmd_execute_result.h"
 #include "util/string_util.h"
-#include "utilities/db_ttl.h"
 #include "utilities/ttl/db_ttl_impl.h"
 
 using std::string;
diff --git a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc
index 48fc2905a..436f4c2d6 100644
--- a/utilities/backupable/backupable_db.cc
+++ b/utilities/backupable/backupable_db.cc
@@ -9,7 +9,7 @@
 
 #ifndef ROCKSDB_LITE
 
-#include "utilities/backupable_db.h"
+#include "rocksdb/utilities/backupable_db.h"
 #include "db/filename.h"
 #include "util/coding.h"
 #include "util/crc32c.h"
diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc
index b68f1c65b..1d876cd50 100644
--- a/utilities/backupable/backupable_db_test.cc
+++ b/utilities/backupable/backupable_db_test.cc
@@ -14,8 +14,7 @@
 #include "port/port.h"
 #include "rocksdb/types.h"
 #include "rocksdb/transaction_log.h"
-#include "utilities/utility_db.h"
-#include "utilities/backupable_db.h"
+#include "rocksdb/utilities/backupable_db.h"
 #include "util/testharness.h"
 #include "util/random.h"
 #include "util/mutexlock.h"
diff --git a/utilities/document/document_db.cc b/utilities/document/document_db.cc
index 3612b838c..1e333f129 100644
--- a/utilities/document/document_db.cc
+++ b/utilities/document/document_db.cc
@@ -5,17 +5,17 @@
 
 #ifndef ROCKSDB_LITE
 
-#include "utilities/document_db.h"
+#include "rocksdb/utilities/document_db.h"
 
 #include "rocksdb/cache.h"
 #include "rocksdb/filter_policy.h"
 #include "rocksdb/comparator.h"
 #include "rocksdb/db.h"
 #include "rocksdb/slice.h"
+#include "rocksdb/utilities/json_document.h"
 #include "util/coding.h"
 #include "util/mutexlock.h"
 #include "port/port.h"
-#include "utilities/json_document.h"
 
 namespace rocksdb {
diff --git a/utilities/document/document_db_test.cc b/utilities/document/document_db_test.cc
index 25d5effc2..d4c632cce 100644
--- a/utilities/document/document_db_test.cc
+++ b/utilities/document/document_db_test.cc
@@ -5,8 +5,9 @@
 
 #include
 
-#include "utilities/json_document.h"
-#include "utilities/document_db.h"
+#include "rocksdb/utilities/json_document.h"
+#include "rocksdb/utilities/document_db.h"
+
 #include "util/testharness.h"
 #include "util/testutil.h"
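Since utility_db.h above marks UtilityDB::OpenTtlDB as deprecated, the supported path is DBWithTTL::Open from the relocated rocksdb/utilities/db_ttl.h, which several hunks in this patch switch to. A hedged sketch, with the database path and TTL value purely illustrative:

```cpp
#include <cassert>

#include "rocksdb/utilities/db_ttl.h"  // new include path after this change

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;

  rocksdb::DBWithTTL* db = nullptr;
  // Entries become eligible for removal during compaction roughly one hour
  // (3600 seconds) after their last write.
  rocksdb::Status s =
      rocksdb::DBWithTTL::Open(options, "/tmp/ttl_db", &db, 3600);
  assert(s.ok());

  s = db->Put(rocksdb::WriteOptions(), "key", "value");
  assert(s.ok());

  delete db;
  return 0;
}
```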
diff --git a/utilities/document/json_document.cc b/utilities/document/json_document.cc
index f3ccc3884..641f4ee09 100644
--- a/utilities/document/json_document.cc
+++ b/utilities/document/json_document.cc
@@ -4,7 +4,7 @@
 // of patent rights can be found in the PATENTS file in the same directory.
 
 #ifndef ROCKSDB_LITE
-#include "utilities/json_document.h"
+#include "rocksdb/utilities/json_document.h"
 
 #define __STDC_FORMAT_MACROS
 #include
diff --git a/utilities/document/json_document_test.cc b/utilities/document/json_document_test.cc
index 886d1981a..8d1967ed9 100644
--- a/utilities/document/json_document_test.cc
+++ b/utilities/document/json_document_test.cc
@@ -5,9 +5,10 @@
 
 #include
 
+#include "rocksdb/utilities/json_document.h"
+
 #include "util/testutil.h"
 #include "util/testharness.h"
-#include "utilities/json_document.h"
 
 namespace rocksdb {
 namespace {
diff --git a/utilities/geodb/geodb_impl.h b/utilities/geodb/geodb_impl.h
index 4ee42ad29..c7e410458 100644
--- a/utilities/geodb/geodb_impl.h
+++ b/utilities/geodb/geodb_impl.h
@@ -14,8 +14,8 @@
 #include
 #include
 
-#include "utilities/geo_db.h"
-#include "utilities/stackable_db.h"
+#include "rocksdb/utilities/geo_db.h"
+#include "rocksdb/utilities/stackable_db.h"
 #include "rocksdb/env.h"
 #include "rocksdb/status.h"
diff --git a/utilities/merge_operators/string_append/stringappend_test.cc b/utilities/merge_operators/string_append/stringappend_test.cc
index a68186a3a..c8d741dd5 100644
--- a/utilities/merge_operators/string_append/stringappend_test.cc
+++ b/utilities/merge_operators/string_append/stringappend_test.cc
@@ -11,10 +11,10 @@
 #include "rocksdb/db.h"
 #include "rocksdb/merge_operator.h"
+#include "rocksdb/utilities/db_ttl.h"
 #include "utilities/merge_operators.h"
 #include "utilities/merge_operators/string_append/stringappend.h"
 #include "utilities/merge_operators/string_append/stringappend2.h"
-#include "utilities/db_ttl.h"
 #include "util/testharness.h"
 #include "util/random.h"
diff --git a/utilities/ttl/db_ttl_impl.cc b/utilities/ttl/db_ttl_impl.cc
index f7a697fa6..4d2d8406e 100644
--- a/utilities/ttl/db_ttl_impl.cc
+++ b/utilities/ttl/db_ttl_impl.cc
@@ -5,7 +5,7 @@
 
 #include "utilities/ttl/db_ttl_impl.h"
 
-#include "utilities/db_ttl.h"
+#include "rocksdb/utilities/db_ttl.h"
 #include "db/filename.h"
 #include "db/write_batch_internal.h"
 #include "util/coding.h"
diff --git a/utilities/ttl/db_ttl_impl.h b/utilities/ttl/db_ttl_impl.h
index a5c8fc8ca..84fb55568 100644
--- a/utilities/ttl/db_ttl_impl.h
+++ b/utilities/ttl/db_ttl_impl.h
@@ -13,8 +13,8 @@
 #include "rocksdb/env.h"
 #include "rocksdb/compaction_filter.h"
 #include "rocksdb/merge_operator.h"
-#include "utilities/utility_db.h"
-#include "utilities/db_ttl.h"
+#include "rocksdb/utilities/utility_db.h"
+#include "rocksdb/utilities/db_ttl.h"
 #include "db/db_impl.h"
 
 namespace rocksdb {
diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc
index 4791a2a77..e6d64e54e 100644
--- a/utilities/ttl/ttl_test.cc
+++ b/utilities/ttl/ttl_test.cc
@@ -4,7 +4,7 @@
 #include
 
 #include "rocksdb/compaction_filter.h"
-#include "utilities/db_ttl.h"
+#include "rocksdb/utilities/db_ttl.h"
 #include "util/testharness.h"
 #include "util/logging.h"
 #include
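A closing note on the compatibility shims this patch leaves behind: each old header under include/utilities/ now only emits a warning and forwards to the new location, so out-of-tree code keeps building while it migrates. A hypothetical consumer sees:

```cpp
// Before: still compiles, but every inclusion now triggers
// "#warning This file was moved to rocksdb/utilities/stackable_db.h".
#include "utilities/stackable_db.h"

// After: the include path to use going forward.
#include "rocksdb/utilities/stackable_db.h"
```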