Summary: All public headers need to be under the `include/rocksdb` directory. Otherwise, clients include our header files like this: #include <rocksdb/db.h> #include <utilities/backupable_db.h> // still our public header! Also, internally, we include: #include "utilities/backupable/backupable_db.h" // internal header #include "utilities/backupable_db.h" // public header, which is confusing. This way, when we install rocksdb as a system library, we can just copy the `include/rocksdb` directory to the system's header files. We can't really copy the `utilities` directory to the system's header files. Test Plan: compiles Reviewers: dhruba, ljin, yhchiang, sdong Reviewed By: sdong Subscribers: leveldb Differential Revision: https://reviews.facebook.net/D20409
main
parent
e6de02103a
commit
0ff183a0d9
@ -0,0 +1,252 @@ |
||||
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#pragma once |
||||
#ifndef ROCKSDB_LITE |
||||
|
||||
#define __STDC_FORMAT_MACROS |
||||
#include <inttypes.h> |
||||
#include <string> |
||||
#include <map> |
||||
#include <vector> |
||||
|
||||
#include "rocksdb/utilities/stackable_db.h" |
||||
|
||||
#include "rocksdb/env.h" |
||||
#include "rocksdb/status.h" |
||||
|
||||
namespace rocksdb { |
||||
|
||||
struct BackupableDBOptions { |
||||
// Where to keep the backup files. Has to be different than dbname_
|
||||
// Best to set this to dbname_ + "/backups"
|
||||
// Required
|
||||
std::string backup_dir; |
||||
|
||||
// Backup Env object. It will be used for backup file I/O. If it's
|
||||
// nullptr, backups will be written out using DBs Env. If it's
|
||||
// non-nullptr, backup's I/O will be performed using this object.
|
||||
// If you want to have backups on HDFS, use HDFS Env here!
|
||||
// Default: nullptr
|
||||
Env* backup_env; |
||||
|
||||
// If share_table_files == true, backup will assume that table files with
|
||||
// same name have the same contents. This enables incremental backups and
|
||||
// avoids unnecessary data copies.
|
||||
// If share_table_files == false, each backup will be on its own and will
|
||||
// not share any data with other backups.
|
||||
// default: true
|
||||
bool share_table_files; |
||||
|
||||
// Backup info and error messages will be written to info_log
|
||||
// if non-nullptr.
|
||||
// Default: nullptr
|
||||
Logger* info_log; |
||||
|
||||
// If sync == true, we can guarantee you'll get consistent backup even
|
||||
// on a machine crash/reboot. Backup process is slower with sync enabled.
|
||||
// If sync == false, we don't guarantee anything on machine reboot. However,
|
||||
// chances are some of the backups are consistent.
|
||||
// Default: true
|
||||
bool sync; |
||||
|
||||
// If true, it will delete whatever backups there are already
|
||||
// Default: false
|
||||
bool destroy_old_data; |
||||
|
||||
// If false, we won't backup log files. This option can be useful for backing
|
||||
// up in-memory databases where log file are persisted, but table files are in
|
||||
// memory.
|
||||
// Default: true
|
||||
bool backup_log_files; |
||||
|
||||
// Max bytes that can be transferred in a second during backup.
|
||||
// If 0, go as fast as you can
|
||||
// Default: 0
|
||||
uint64_t backup_rate_limit; |
||||
|
||||
// Max bytes that can be transferred in a second during restore.
|
||||
// If 0, go as fast as you can
|
||||
// Default: 0
|
||||
uint64_t restore_rate_limit; |
||||
|
||||
// Only used if share_table_files is set to true. If true, will consider that
|
||||
// backups can come from different databases, hence a sst is not uniquely
|
||||
// identifed by its name, but by the triple (file name, crc32, file length)
|
||||
// Default: false
|
||||
// Note: this is an experimental option, and you'll need to set it manually
|
||||
// *turn it on only if you know what you're doing*
|
||||
bool share_files_with_checksum; |
||||
|
||||
void Dump(Logger* logger) const; |
||||
|
||||
explicit BackupableDBOptions(const std::string& _backup_dir, |
||||
Env* _backup_env = nullptr, |
||||
bool _share_table_files = true, |
||||
Logger* _info_log = nullptr, bool _sync = true, |
||||
bool _destroy_old_data = false, |
||||
bool _backup_log_files = true, |
||||
uint64_t _backup_rate_limit = 0, |
||||
uint64_t _restore_rate_limit = 0) |
||||
: backup_dir(_backup_dir), |
||||
backup_env(_backup_env), |
||||
share_table_files(_share_table_files), |
||||
info_log(_info_log), |
||||
sync(_sync), |
||||
destroy_old_data(_destroy_old_data), |
||||
backup_log_files(_backup_log_files), |
||||
backup_rate_limit(_backup_rate_limit), |
||||
restore_rate_limit(_restore_rate_limit), |
||||
share_files_with_checksum(false) { |
||||
assert(share_table_files || !share_files_with_checksum); |
||||
} |
||||
}; |
||||
|
||||
// Options controlling how a backup is restored into a DB directory.
struct RestoreOptions {
  // When true, restore won't overwrite the existing log files in wal_dir and
  // will also move all log files from the archive directory back to wal_dir.
  // Combine with BackupableDBOptions::backup_log_files = false when
  // persisting in-memory databases.
  // Default: false
  bool keep_log_files;

  explicit RestoreOptions(bool _keep_log_files = false)
      : keep_log_files(_keep_log_files) {}
};
||||
|
||||
// Identifier of a single backup.
typedef uint32_t BackupID;

// Lightweight metadata describing one existing backup.
struct BackupInfo {
  BackupID backup_id;
  int64_t timestamp;
  uint64_t size;

  BackupInfo() {}
  BackupInfo(BackupID _backup_id, int64_t _timestamp, uint64_t _size)
      : backup_id(_backup_id), timestamp(_timestamp), size(_size) {}
};
||||
|
||||
// Read-only view of an existing backup directory: can list backups and
// restore from them, but never modifies the backup directory itself.
class BackupEngineReadOnly {
 public:
  virtual ~BackupEngineReadOnly() {}

  // Factory for a read-only engine over `options.backup_dir`.
  // NOTE(review): the raw-pointer return suggests the caller owns the result
  // -- confirm against the implementation.
  static BackupEngineReadOnly* NewReadOnlyBackupEngine(
      Env* db_env, const BackupableDBOptions& options);

  // You can GetBackupInfo safely, even with other BackupEngine performing
  // backups on the same directory
  virtual void GetBackupInfo(std::vector<BackupInfo>* backup_info) = 0;

  // Restoring DB from backup is NOT safe when there is another BackupEngine
  // running that might call DeleteBackup() or PurgeOldBackups(). It is
  // caller's responsibility to synchronize the operation, i.e. don't delete
  // the backup when you're restoring from it.
  virtual Status RestoreDBFromBackup(
      BackupID backup_id, const std::string& db_dir, const std::string& wal_dir,
      const RestoreOptions& restore_options = RestoreOptions()) = 0;
  // Same as RestoreDBFromBackup, but uses the most recent backup.
  virtual Status RestoreDBFromLatestBackup(
      const std::string& db_dir, const std::string& wal_dir,
      const RestoreOptions& restore_options = RestoreOptions()) = 0;
};
||||
|
||||
// Please see the documentation in BackupableDB and RestoreBackupableDB
|
||||
// Read-write backup engine: create, delete, and restore backups.
// Please see the documentation in BackupableDB and RestoreBackupableDB
class BackupEngine {
 public:
  virtual ~BackupEngine() {}

  // Factory for an engine over `options.backup_dir`.
  // NOTE(review): the raw-pointer return suggests the caller owns the result
  // -- confirm against the implementation.
  static BackupEngine* NewBackupEngine(Env* db_env,
                                       const BackupableDBOptions& options);

  // Captures the current state of `db` in a new backup; see
  // BackupableDB::CreateNewBackup for details.
  virtual Status CreateNewBackup(DB* db, bool flush_before_backup = false) = 0;
  // Deletes old backups, keeping the latest num_backups_to_keep alive.
  virtual Status PurgeOldBackups(uint32_t num_backups_to_keep) = 0;
  // Deletes one specific backup.
  virtual Status DeleteBackup(BackupID backup_id) = 0;
  // Requests that an in-progress backup stop; see BackupableDB::StopBackup.
  virtual void StopBackup() = 0;

  // Fills backup_info with metadata about the existing backups.
  virtual void GetBackupInfo(std::vector<BackupInfo>* backup_info) = 0;
  virtual Status RestoreDBFromBackup(
      BackupID backup_id, const std::string& db_dir, const std::string& wal_dir,
      const RestoreOptions& restore_options = RestoreOptions()) = 0;
  virtual Status RestoreDBFromLatestBackup(
      const std::string& db_dir, const std::string& wal_dir,
      const RestoreOptions& restore_options = RestoreOptions()) = 0;
};
||||
|
||||
// Stack your DB with BackupableDB to be able to backup the DB
|
||||
// Stack your DB with BackupableDB to be able to backup the DB
class BackupableDB : public StackableDB {
 public:
  // BackupableDBOptions have to be the same as the ones used in a previous
  // incarnation of the DB
  //
  // BackupableDB owns the pointer `DB* db` now. You should not delete it or
  // use it after the invocation of BackupableDB
  BackupableDB(DB* db, const BackupableDBOptions& options);
  virtual ~BackupableDB();

  // Captures the state of the database in the latest backup
  // NOT a thread safe call
  Status CreateNewBackup(bool flush_before_backup = false);
  // Returns info about backups in backup_info
  void GetBackupInfo(std::vector<BackupInfo>* backup_info);
  // deletes old backups, keeping latest num_backups_to_keep alive
  Status PurgeOldBackups(uint32_t num_backups_to_keep);
  // deletes a specific backup
  Status DeleteBackup(BackupID backup_id);
  // Call this from another thread if you want to stop the backup
  // that is currently happening. It will return immediately, will
  // not wait for the backup to stop.
  // The backup will stop ASAP and the call to CreateNewBackup will
  // return Status::Incomplete(). It will not clean up after itself, but
  // the state will remain consistent. The state will be cleaned up
  // next time you create BackupableDB or RestoreBackupableDB.
  void StopBackup();

 private:
  // Engine that does the actual backup work; operations above delegate to it.
  BackupEngine* backup_engine_;
};
||||
|
||||
// Use this class to access information about backups and restore from them
|
||||
// Use this class to access information about backups and restore from them
class RestoreBackupableDB {
 public:
  // Options must match those used when the backups were created.
  RestoreBackupableDB(Env* db_env, const BackupableDBOptions& options);
  ~RestoreBackupableDB();

  // Returns info about backups in backup_info
  void GetBackupInfo(std::vector<BackupInfo>* backup_info);

  // restore from backup with backup_id
  // IMPORTANT -- if options_.share_table_files == true and you restore DB
  // from some backup that is not the latest, and you start creating new
  // backups from the new DB, they will probably fail
  //
  // Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3.
  // If you add new data to the DB and try creating a new backup now, the
  // database will diverge from backups 4 and 5 and the new backup will fail.
  // If you want to create new backup, you will first have to delete backups 4
  // and 5.
  Status RestoreDBFromBackup(BackupID backup_id, const std::string& db_dir,
                             const std::string& wal_dir,
                             const RestoreOptions& restore_options =
                                 RestoreOptions());

  // restore from the latest backup
  Status RestoreDBFromLatestBackup(const std::string& db_dir,
                                   const std::string& wal_dir,
                                   const RestoreOptions& restore_options =
                                       RestoreOptions());
  // deletes old backups, keeping latest num_backups_to_keep alive
  Status PurgeOldBackups(uint32_t num_backups_to_keep);
  // deletes a specific backup
  Status DeleteBackup(BackupID backup_id);

 private:
  // Engine that does the actual restore/delete work.
  BackupEngine* backup_engine_;
};
||||
|
||||
} // namespace rocksdb
|
||||
#endif // ROCKSDB_LITE
|
@ -0,0 +1,68 @@ |
||||
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
#pragma once |
||||
#ifndef ROCKSDB_LITE |
||||
|
||||
#include <string> |
||||
#include <vector> |
||||
|
||||
#include "rocksdb/utilities/stackable_db.h" |
||||
#include "rocksdb/db.h" |
||||
|
||||
namespace rocksdb { |
||||
|
||||
// Database with TTL support.
|
||||
//
|
||||
// USE-CASES:
|
||||
// This API should be used to open the db when key-values inserted are
|
||||
// meant to be removed from the db in a non-strict 'ttl' amount of time
|
||||
// Therefore, this guarantees that key-values inserted will remain in the
|
||||
// db for >= ttl amount of time and the db will make efforts to remove the
|
||||
// key-values as soon as possible after ttl seconds of their insertion.
|
||||
//
|
||||
// BEHAVIOUR:
|
||||
// TTL is accepted in seconds
|
||||
// (int32_t)Timestamp(creation) is suffixed to values in Put internally
|
||||
// Expired TTL values deleted in compaction only:(Timestamp+ttl<time_now)
|
||||
// Get/Iterator may return expired entries(compaction not run on them yet)
|
||||
// Different TTL may be used during different Opens
|
||||
// Example: Open1 at t=0 with ttl=4 and insert k1,k2, close at t=2
|
||||
// Open2 at t=3 with ttl=5. Now k1,k2 should be deleted at t>=5
|
||||
// read_only=true opens in the usual read-only mode. Compactions will not be
|
||||
// triggered(neither manual nor automatic), so no expired entries removed
|
||||
//
|
||||
// CONSTRAINTS:
|
||||
// Not specifying/passing or non-positive TTL behaves like TTL = infinity
|
||||
//
|
||||
// !!!WARNING!!!:
|
||||
// Calling DB::Open directly to re-open a db created by this API will get
|
||||
// corrupt values(timestamp suffixed) and no ttl effect will be there
|
||||
// during the second Open, so use this API consistently to open the db
|
||||
// Be careful when passing ttl with a small positive value because the
|
||||
// whole database may be deleted in a small amount of time
|
||||
|
||||
// Database with TTL support; see the usage notes above. Obtain instances via
// the static Open() factories below -- re-opening a TTL database with plain
// DB::Open yields corrupt (timestamp-suffixed) values.
class DBWithTTL : public StackableDB {
 public:
  // Creates a column family whose entries expire after `ttl` seconds.
  virtual Status CreateColumnFamilyWithTtl(
      const ColumnFamilyOptions& options, const std::string& column_family_name,
      ColumnFamilyHandle** handle, int ttl) = 0;

  // Opens a TTL database with the default column family. A non-positive ttl
  // behaves like TTL = infinity; read_only opens without compactions, so
  // expired entries are not removed (see notes above).
  static Status Open(const Options& options, const std::string& dbname,
                     DBWithTTL** dbptr, int32_t ttl = 0,
                     bool read_only = false);

  // Opens a TTL database with the given column families, with one ttl per
  // column family. NOTE(review): presumably `ttls` must parallel
  // `column_families` one-to-one -- confirm in the implementation.
  static Status Open(const DBOptions& db_options, const std::string& dbname,
                     const std::vector<ColumnFamilyDescriptor>& column_families,
                     std::vector<ColumnFamilyHandle*>* handles,
                     DBWithTTL** dbptr, std::vector<int32_t> ttls,
                     bool read_only = false);

 protected:
  // Wraps the base DB; StackableDB takes ownership of `db`.
  explicit DBWithTTL(DB* db) : StackableDB(db) {}
};
||||
|
||||
} // namespace rocksdb
|
||||
#endif // ROCKSDB_LITE
|
@ -0,0 +1,149 @@ |
||||
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
#pragma once |
||||
#ifndef ROCKSDB_LITE |
||||
|
||||
#include <string> |
||||
#include <vector> |
||||
|
||||
#include "rocksdb/utilities/stackable_db.h" |
||||
#include "rocksdb/utilities/json_document.h" |
||||
#include "rocksdb/db.h" |
||||
|
||||
namespace rocksdb { |
||||
|
||||
// IMPORTANT: DocumentDB is a work in progress. It is unstable and we might
|
||||
// change the API without warning. Talk to RocksDB team before using this in
|
||||
// production ;)
|
||||
|
||||
// DocumentDB is a layer on top of RocksDB that provides a very simple JSON API.
|
||||
// When creating a DB, you specify a list of indexes you want to keep on your
|
||||
// data. You can insert a JSON document to the DB, which is automatically
|
||||
// indexed. Every document added to the DB needs to have "_id" field which is
|
||||
// automatically indexed and is an unique primary key. All other indexes are
|
||||
// non-unique.
|
||||
|
||||
// NOTE: field names in the JSON are NOT allowed to start with '$' or
|
||||
// contain '.'. We don't currently enforce that rule, but will start behaving
|
||||
// badly.
|
||||
|
||||
// Cursor is what you get as a result of executing query. To get all
|
||||
// results from a query, call Next() on a Cursor while Valid() returns true
|
||||
class Cursor { |
||||
public: |
||||
Cursor() = default; |
||||
virtual ~Cursor() {} |
||||
|
||||
virtual bool Valid() const = 0; |
||||
virtual void Next() = 0; |
||||
// Lifecycle of the returned JSONDocument is until the next Next() call
|
||||
virtual const JSONDocument& document() const = 0; |
||||
virtual Status status() const = 0; |
||||
|
||||
private: |
||||
// No copying allowed
|
||||
Cursor(const Cursor&); |
||||
void operator=(const Cursor&); |
||||
}; |
||||
|
||||
// Tuning knobs for a DocumentDB instance.
struct DocumentDBOptions {
  // Number of background worker threads.
  int background_threads = 4;
  // Memtable budget, in bytes.
  uint64_t memtable_size = 128 * 1024 * 1024;  // 128 MB
  // Cache budget, in bytes.
  uint64_t cache_size = 1 * 1024 * 1024 * 1024;  // 1 GB
};
||||
|
||||
// TODO(icanadi) Add `JSONDocument* info` parameter to all calls that can be
|
||||
// used by the caller to get more information about the call execution (number
|
||||
// of dropped records, number of updated records, etc.)
|
||||
class DocumentDB : public StackableDB {
 public:
  // Describes one secondary index.
  struct IndexDescriptor {
    // Currently, you can only define an index on a single field. To specify an
    // index on a field X, set index description to JSON "{X: 1}"
    // Currently the value needs to be 1, which means ascending.
    // In the future, we plan to also support indexes on multiple keys, where
    // you could mix ascending sorting (1) with descending sorting indexes (-1)
    JSONDocument* description;
    std::string name;
  };

  // Open DocumentDB with specified indexes. The list of indexes has to be
  // complete, i.e. include all indexes present in the DB, except the primary
  // key index.
  // Otherwise, Open() will return an error
  static Status Open(const DocumentDBOptions& options, const std::string& name,
                     const std::vector<IndexDescriptor>& indexes,
                     DocumentDB** db, bool read_only = false);

  // Wraps the base DB; StackableDB takes ownership of `db`.
  explicit DocumentDB(DB* db) : StackableDB(db) {}

  // Create a new index. It will stop all writes for the duration of the call.
  // All current documents in the DB are scanned and corresponding index entries
  // are created
  virtual Status CreateIndex(const WriteOptions& write_options,
                             const IndexDescriptor& index) = 0;

  // Drop an index. Client is responsible to make sure that index is not being
  // used by currently executing queries
  virtual Status DropIndex(const std::string& name) = 0;

  // Insert a document to the DB. The document needs to have a primary key "_id"
  // which can either be a string or an integer. Otherwise the write will fail
  // with InvalidArgument.
  virtual Status Insert(const WriteOptions& options,
                        const JSONDocument& document) = 0;

  // Deletes all documents matching a filter atomically
  virtual Status Remove(const ReadOptions& read_options,
                        const WriteOptions& write_options,
                        const JSONDocument& query) = 0;

  // Does this sequence of operations:
  // 1. Find all documents matching a filter
  // 2. For all documents, atomically:
  // 2.1. apply the update operators
  // 2.2. update the secondary indexes
  //
  // Currently only $set update operator is supported.
  // Syntax is: {$set: {key1: value1, key2: value2, etc...}}
  // This operator will change a document's key1 field to value1, key2 to
  // value2, etc. New values will be set even if a document didn't have an entry
  // for the specified key.
  //
  // You can not change a primary key of a document.
  //
  // Update example: Update({id: {$gt: 5}, $index: id}, {$set: {enabled: true}})
  virtual Status Update(const ReadOptions& read_options,
                        const WriteOptions& write_options,
                        const JSONDocument& filter,
                        const JSONDocument& updates) = 0;

  // query has to be an array in which every element is an operator. Currently
  // only $filter operator is supported. Syntax of $filter operator is:
  // {$filter: {key1: condition1, key2: condition2, etc.}} where conditions can
  // be either:
  // 1) a single value in which case the condition is equality condition, or
  // 2) a defined operators, like {$gt: 4}, which will match all documents that
  // have key greater than 4.
  //
  // Supported operators are:
  // 1) $gt -- greater than
  // 2) $gte -- greater than or equal
  // 3) $lt -- less than
  // 4) $lte -- less than or equal
  // If you want the filter to use an index, you need to specify it like this:
  // {$filter: {...(conditions)..., $index: index_name}}
  //
  // Example query:
  // * [{$filter: {name: John, age: {$gte: 18}, $index: age}}]
  // will return all Johns whose age is greater or equal to 18 and it will use
  // index "age" to satisfy the query.
  //
  // NOTE(review): the raw Cursor* return suggests the caller owns and must
  // delete the cursor -- confirm against the implementation.
  virtual Cursor* Query(const ReadOptions& read_options,
                        const JSONDocument& query) = 0;
};
||||
|
||||
} // namespace rocksdb
|
||||
#endif // ROCKSDB_LITE
|
@ -0,0 +1,105 @@ |
||||
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
//
|
||||
|
||||
#ifndef ROCKSDB_LITE |
||||
#pragma once |
||||
#include <climits>

#include <string>
#include <vector>

#include "rocksdb/utilities/stackable_db.h"
#include "rocksdb/status.h"
||||
|
||||
namespace rocksdb { |
||||
|
||||
//
|
||||
// Configurable options needed for setting up a Geo database
|
||||
//
|
||||
struct GeoDBOptions { |
||||
// Backup info and error messages will be written to info_log
|
||||
// if non-nullptr.
|
||||
// Default: nullptr
|
||||
Logger* info_log; |
||||
|
||||
explicit GeoDBOptions(Logger* _info_log = nullptr):info_log(_info_log) { } |
||||
}; |
||||
|
||||
//
|
||||
// A position in the earth's geoid
|
||||
//
|
||||
//
// A position on the earth's geoid, expressed as latitude and longitude.
//
class GeoPosition {
 public:
  double latitude;
  double longitude;

  explicit GeoPosition(double la = 0, double lo = 0)
      : latitude(la), longitude(lo) {}
};
||||
|
||||
//
|
||||
// Description of an object on the Geoid. It is located by a GPS location,
|
||||
// and is identified by the id. The value associated with this object is
|
||||
// an opaque string 'value'. Different objects identified by unique id's
|
||||
// can have the same gps-location associated with them.
|
||||
//
|
||||
class GeoObject { |
||||
public: |
||||
GeoPosition position; |
||||
std::string id; |
||||
std::string value; |
||||
|
||||
GeoObject() {} |
||||
|
||||
GeoObject(const GeoPosition& pos, const std::string& i, |
||||
const std::string& val) : |
||||
position(pos), id(i), value(val) { |
||||
} |
||||
}; |
||||
|
||||
//
|
||||
// Stack your DB with GeoDB to be able to get geo-spatial support
|
||||
//
|
||||
//
// Stack your DB with GeoDB to be able to get geo-spatial support
//
class GeoDB : public StackableDB {
 public:
  // GeoDBOptions have to be the same as the ones used in a previous
  // incarnation of the DB
  //
  // GeoDB owns the pointer `DB* db` now. You should not delete it or
  // use it after the invocation of GeoDB
  GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {}
  virtual ~GeoDB() {}

  // Insert a new object into the location database. The object is
  // uniquely identified by the id. If an object with the same id already
  // exists in the db, then the old one is overwritten by the new
  // object being inserted here.
  virtual Status Insert(const GeoObject& object) = 0;

  // Retrieve the value of the object located at the specified GPS
  // location and is identified by the 'id'.
  virtual Status GetByPosition(const GeoPosition& pos,
                               const Slice& id, std::string* value) = 0;

  // Retrieve the value of the object identified by the 'id'. This method
  // could be potentially slower than GetByPosition
  virtual Status GetById(const Slice& id, GeoObject* object) = 0;

  // Delete the specified object
  virtual Status Remove(const Slice& id) = 0;

  // Returns a list of all items within a circular radius from the
  // specified gps location. If 'number_of_values' is specified,
  // then this call returns at most that many number of objects.
  // The radius is specified in 'meters'.
  virtual Status SearchRadial(const GeoPosition& pos,
                              double radius,
                              std::vector<GeoObject>* values,
                              int number_of_values = INT_MAX) = 0;
};
||||
|
||||
} // namespace rocksdb
|
||||
#endif // ROCKSDB_LITE
|
@ -0,0 +1,174 @@ |
||||
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
#pragma once |
||||
#ifndef ROCKSDB_LITE |
||||
|
||||
#include <string> |
||||
#include <map> |
||||
#include <unordered_map> |
||||
#include <vector> |
||||
|
||||
#include "rocksdb/slice.h" |
||||
|
||||
// We use JSONDocument for DocumentDB API
|
||||
// Implementation inspired by folly::dynamic and rapidjson
|
||||
|
||||
namespace rocksdb { |
||||
|
||||
// NOTE: none of this is thread-safe
|
||||
// In-memory representation of a single JSON value (null, bool, double,
// int64, string, array, or object). Not thread-safe.
class JSONDocument {
 public:
  // return nullptr on parse failure
  static JSONDocument* ParseJSON(const char* json);

  // Dynamic type of the stored value.
  enum Type {
    kNull,
    kArray,
    kBool,
    kDouble,
    kInt64,
    kObject,
    kString,
  };

  JSONDocument();  // null
  /* implicit */ JSONDocument(bool b);
  /* implicit */ JSONDocument(double d);
  /* implicit */ JSONDocument(int64_t i);
  /* implicit */ JSONDocument(const std::string& s);
  /* implicit */ JSONDocument(const char* s);
  // constructs JSONDocument of specific type with default value
  explicit JSONDocument(Type type);

  // copy constructor (deep copy -- the elements are owned pointers)
  JSONDocument(const JSONDocument& json_document);

  ~JSONDocument();

  Type type() const;

  // REQUIRES: IsObject()
  bool Contains(const std::string& key) const;
  // Returns nullptr if !Contains()
  // don't delete the returned pointer
  // REQUIRES: IsObject()
  const JSONDocument* Get(const std::string& key) const;
  // REQUIRES: IsObject()
  JSONDocument& operator[](const std::string& key);
  // REQUIRES: IsObject()
  const JSONDocument& operator[](const std::string& key) const;
  // returns `this`, so you can chain operations.
  // Copies value
  // REQUIRES: IsObject()
  JSONDocument* Set(const std::string& key, const JSONDocument& value);

  // REQUIRES: IsArray() == true || IsObject() == true
  size_t Count() const;

  // REQUIRES: IsArray()
  const JSONDocument* GetFromArray(size_t i) const;
  // REQUIRES: IsArray()
  JSONDocument& operator[](size_t i);
  // REQUIRES: IsArray()
  const JSONDocument& operator[](size_t i) const;
  // returns `this`, so you can chain operations.
  // Copies the value
  // REQUIRES: IsArray() && i < Count()
  JSONDocument* SetInArray(size_t i, const JSONDocument& value);
  // REQUIRES: IsArray()
  JSONDocument* PushBack(const JSONDocument& value);

  bool IsNull() const;
  bool IsArray() const;
  bool IsBool() const;
  bool IsDouble() const;
  bool IsInt64() const;
  bool IsObject() const;
  bool IsString() const;

  // REQUIRES: IsBool() == true
  bool GetBool() const;
  // REQUIRES: IsDouble() == true
  double GetDouble() const;
  // REQUIRES: IsInt64() == true
  int64_t GetInt64() const;
  // REQUIRES: IsString() == true
  const std::string& GetString() const;

  bool operator==(const JSONDocument& rhs) const;

  std::string DebugString() const;

 private:
  // Forward declaration; defined below in the private section but returned
  // from the public Items() accessor.
  class ItemsIteratorGenerator;

 public:
  // REQUIRES: IsObject()
  ItemsIteratorGenerator Items() const;

  // appends serialized object to dst
  void Serialize(std::string* dst) const;
  // returns nullptr if Slice doesn't represent valid serialized JSONDocument
  static JSONDocument* Deserialize(const Slice& src);

 private:
  void SerializeInternal(std::string* dst, bool type_prefix) const;
  // returns false if Slice doesn't represent valid serialized JSONDocument.
  // Otherwise, true
  bool DeserializeInternal(Slice* input);

  typedef std::vector<JSONDocument*> Array;
  typedef std::unordered_map<std::string, JSONDocument*> Object;

  // iteration on objects
  class const_item_iterator {
   public:
    typedef Object::const_iterator It;
    typedef Object::value_type value_type;
    /* implicit */ const_item_iterator(It it) : it_(it) {}
    It& operator++() { return ++it_; }
    bool operator!=(const const_item_iterator& other) {
      return it_ != other.it_;
    }
    value_type operator*() { return *it_; }

   private:
    It it_;
  };
  // Range adapter over an Object so callers can use range-for on Items().
  // Holds a reference -- must not outlive the JSONDocument it came from.
  class ItemsIteratorGenerator {
   public:
    /* implicit */ ItemsIteratorGenerator(const Object& object)
        : object_(object) {}
    const_item_iterator begin() { return object_.begin(); }
    const_item_iterator end() { return object_.end(); }

   private:
    const Object& object_;
  };

  // Unrestricted union (C++11) holding the value for whichever Type is
  // active. The no-op destructor is deliberate: the owning JSONDocument is
  // responsible for constructing/destroying the active member, presumably
  // dispatching on type_ -- see the .cc for the lifetime management.
  union Data {
    Data() : n(nullptr) {}
    ~Data() {}

    void* n;
    Array a;
    bool b;
    double d;
    int64_t i;
    std::string s;
    Object o;
  } data_;
  // Discriminator for data_; fixed at construction.
  const Type type_;

  // Our serialization format's first byte specifies the encoding version. That
  // way, we can easily change our format while providing backwards
  // compatibility. This constant specifies the current version of the
  // serialization format
  static const char kSerializationFormatVersion;
};
||||
|
||||
} // namespace rocksdb
|
||||
|
||||
#endif // ROCKSDB_LITE
|
@ -0,0 +1,220 @@ |
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#pragma once |
||||
#include "rocksdb/db.h" |
||||
|
||||
namespace rocksdb { |
||||
|
||||
// This class contains APIs to stack rocksdb wrappers, e.g. stack TTL over the base DB.
|
||||
class StackableDB : public DB { |
||||
public: |
||||
// StackableDB is the owner of db now!
|
||||
explicit StackableDB(DB* db) : db_(db) {} |
||||
|
||||
~StackableDB() { |
||||
delete db_; |
||||
} |
||||
|
||||
virtual DB* GetBaseDB() { |
||||
return db_; |
||||
} |
||||
|
||||
virtual Status CreateColumnFamily(const ColumnFamilyOptions& options, |
||||
const std::string& column_family_name, |
||||
ColumnFamilyHandle** handle) { |
||||
return db_->CreateColumnFamily(options, column_family_name, handle); |
||||
} |
||||
|
||||
virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) { |
||||
return db_->DropColumnFamily(column_family); |
||||
} |
||||
|
||||
using DB::Put; |
||||
virtual Status Put(const WriteOptions& options, |
||||
ColumnFamilyHandle* column_family, const Slice& key, |
||||
const Slice& val) override { |
||||
return db_->Put(options, column_family, key, val); |
||||
} |
||||
|
||||
using DB::Get; |
||||
virtual Status Get(const ReadOptions& options, |
||||
ColumnFamilyHandle* column_family, const Slice& key, |
||||
std::string* value) override { |
||||
return db_->Get(options, column_family, key, value); |
||||
} |
||||
|
||||
using DB::MultiGet; |
||||
virtual std::vector<Status> MultiGet( |
||||
const ReadOptions& options, |
||||
const std::vector<ColumnFamilyHandle*>& column_family, |
||||
const std::vector<Slice>& keys, |
||||
std::vector<std::string>* values) override { |
||||
return db_->MultiGet(options, column_family, keys, values); |
||||
} |
||||
|
||||
using DB::KeyMayExist; |
||||
virtual bool KeyMayExist(const ReadOptions& options, |
||||
ColumnFamilyHandle* column_family, const Slice& key, |
||||
std::string* value, |
||||
bool* value_found = nullptr) override { |
||||
return db_->KeyMayExist(options, column_family, key, value, value_found); |
||||
} |
||||
|
||||
using DB::Delete; |
||||
virtual Status Delete(const WriteOptions& wopts, |
||||
ColumnFamilyHandle* column_family, |
||||
const Slice& key) override { |
||||
return db_->Delete(wopts, column_family, key); |
||||
} |
||||
|
||||
using DB::Merge; |
||||
virtual Status Merge(const WriteOptions& options, |
||||
ColumnFamilyHandle* column_family, const Slice& key, |
||||
const Slice& value) override { |
||||
return db_->Merge(options, column_family, key, value); |
||||
} |
||||
|
||||
|
||||
virtual Status Write(const WriteOptions& opts, WriteBatch* updates) |
||||
override { |
||||
return db_->Write(opts, updates); |
||||
} |
||||
|
||||
using DB::NewIterator; |
||||
virtual Iterator* NewIterator(const ReadOptions& opts, |
||||
ColumnFamilyHandle* column_family) override { |
||||
return db_->NewIterator(opts, column_family); |
||||
} |
||||
|
||||
virtual Status NewIterators( |
||||
const ReadOptions& options, |
||||
const std::vector<ColumnFamilyHandle*>& column_families, |
||||
std::vector<Iterator*>* iterators) { |
||||
return db_->NewIterators(options, column_families, iterators); |
||||
} |
||||
|
||||
|
||||
virtual const Snapshot* GetSnapshot() override { |
||||
return db_->GetSnapshot(); |
||||
} |
||||
|
||||
virtual void ReleaseSnapshot(const Snapshot* snapshot) override { |
||||
return db_->ReleaseSnapshot(snapshot); |
||||
} |
||||
|
||||
using DB::GetProperty; |
||||
virtual bool GetProperty(ColumnFamilyHandle* column_family, |
||||
const Slice& property, std::string* value) override { |
||||
return db_->GetProperty(column_family, property, value); |
||||
} |
||||
|
||||
using DB::GetApproximateSizes; |
||||
virtual void GetApproximateSizes(ColumnFamilyHandle* column_family, |
||||
const Range* r, int n, |
||||
uint64_t* sizes) override { |
||||
return db_->GetApproximateSizes(column_family, r, n, sizes); |
||||
} |
||||
|
||||
using DB::CompactRange; |
||||
virtual Status CompactRange(ColumnFamilyHandle* column_family, |
||||
const Slice* begin, const Slice* end, |
||||
bool reduce_level = false, int target_level = -1, |
||||
uint32_t target_path_id = 0) override { |
||||
return db_->CompactRange(column_family, begin, end, reduce_level, |
||||
target_level, target_path_id); |
||||
} |
||||
|
||||
using DB::NumberLevels; |
||||
virtual int NumberLevels(ColumnFamilyHandle* column_family) override { |
||||
return db_->NumberLevels(column_family); |
||||
} |
||||
|
||||
using DB::MaxMemCompactionLevel; |
||||
virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family) |
||||
override { |
||||
return db_->MaxMemCompactionLevel(column_family); |
||||
} |
||||
|
||||
using DB::Level0StopWriteTrigger; |
||||
virtual int Level0StopWriteTrigger(ColumnFamilyHandle* column_family) |
||||
override { |
||||
return db_->Level0StopWriteTrigger(column_family); |
||||
} |
||||
|
||||
virtual const std::string& GetName() const override { |
||||
return db_->GetName(); |
||||
} |
||||
|
||||
virtual Env* GetEnv() const override { |
||||
return db_->GetEnv(); |
||||
} |
||||
|
||||
using DB::GetOptions; |
||||
virtual const Options& GetOptions(ColumnFamilyHandle* column_family) const |
||||
override { |
||||
return db_->GetOptions(column_family); |
||||
} |
||||
|
||||
using DB::Flush; |
||||
virtual Status Flush(const FlushOptions& fopts, |
||||
ColumnFamilyHandle* column_family) override { |
||||
return db_->Flush(fopts, column_family); |
||||
} |
||||
|
||||
virtual Status DisableFileDeletions() override { |
||||
return db_->DisableFileDeletions(); |
||||
} |
||||
|
||||
virtual Status EnableFileDeletions(bool force) override { |
||||
return db_->EnableFileDeletions(force); |
||||
} |
||||
|
||||
virtual void GetLiveFilesMetaData( |
||||
std::vector<LiveFileMetaData>* metadata) override { |
||||
db_->GetLiveFilesMetaData(metadata); |
||||
} |
||||
|
||||
virtual Status GetLiveFiles(std::vector<std::string>& vec, uint64_t* mfs, |
||||
bool flush_memtable = true) override { |
||||
return db_->GetLiveFiles(vec, mfs, flush_memtable); |
||||
} |
||||
|
||||
virtual SequenceNumber GetLatestSequenceNumber() const override { |
||||
return db_->GetLatestSequenceNumber(); |
||||
} |
||||
|
||||
virtual Status GetSortedWalFiles(VectorLogPtr& files) override { |
||||
return db_->GetSortedWalFiles(files); |
||||
} |
||||
|
||||
virtual Status DeleteFile(std::string name) override { |
||||
return db_->DeleteFile(name); |
||||
} |
||||
|
||||
virtual Status GetDbIdentity(std::string& identity) { |
||||
return db_->GetDbIdentity(identity); |
||||
} |
||||
|
||||
using DB::GetPropertiesOfAllTables; |
||||
virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family, |
||||
TablePropertiesCollection* props) { |
||||
return db_->GetPropertiesOfAllTables(column_family, props); |
||||
} |
||||
|
||||
virtual Status GetUpdatesSince( |
||||
SequenceNumber seq_number, unique_ptr<TransactionLogIterator>* iter, |
||||
const TransactionLogIterator::ReadOptions& read_options) override { |
||||
return db_->GetUpdatesSince(seq_number, iter, read_options); |
||||
} |
||||
|
||||
virtual ColumnFamilyHandle* DefaultColumnFamily() const override { |
||||
return db_->DefaultColumnFamily(); |
||||
} |
||||
|
||||
protected: |
||||
DB* db_; |
||||
}; |
||||
|
||||
} // namespace rocksdb
|
@ -0,0 +1,30 @@ |
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#pragma once |
||||
#ifndef ROCKSDB_LITE |
||||
#include <vector> |
||||
#include <string> |
||||
|
||||
#include "rocksdb/utilities/stackable_db.h" |
||||
#include "rocksdb/utilities/db_ttl.h" |
||||
#include "rocksdb/db.h" |
||||
|
||||
namespace rocksdb { |
||||
|
||||
// Please don't use this class. It's deprecated
|
||||
class UtilityDB { |
||||
public: |
||||
// This function is here only for backwards compatibility. Please use the
|
||||
// functions defined in DBWithTTl (rocksdb/utilities/db_ttl.h)
|
||||
// (deprecated)
|
||||
__attribute__((deprecated)) static Status OpenTtlDB(const Options& options, |
||||
const std::string& name, |
||||
StackableDB** dbptr, |
||||
int32_t ttl = 0, |
||||
bool read_only = false); |
||||
}; |
||||
|
||||
} // namespace rocksdb
|
||||
#endif // ROCKSDB_LITE
|
Loading…
Reference in new issue