// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <thread>

#include "db/db_impl/db_impl.h"
#include "db/db_test_util.h"
#include "port/port.h"
#include "rocksdb/db.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/utilities/optimistic_transaction_db.h"
#include "rocksdb/utilities/transaction.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "test_util/transaction_test_util.h"
#include "util/crc32c.h"
#include "util/random.h"

namespace ROCKSDB_NAMESPACE {
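
// Test fixture parameterized on OccValidationPolicy. Each test gets a fresh
// OptimisticTransactionDB: the constructor destroys any previous DB at
// `dbname` and opens a new one; the destructor closes and destroys it.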
class OptimisticTransactionTest
    : public testing::Test,
      public testing::WithParamInterface<OccValidationPolicy> {
 public:
  std::unique_ptr<OptimisticTransactionDB> txn_db;
  std::string dbname;
  Options options;
  OptimisticTransactionDBOptions occ_opts;

  OptimisticTransactionTest() {
    options.create_if_missing = true;
    options.max_write_buffer_number = 2;
    options.max_write_buffer_size_to_maintain = 2 * Arena::kInlineSize;
    options.merge_operator.reset(new TestPutOperator());
    occ_opts.validate_policy = GetParam();
    dbname = test::PerThreadDBPath("optimistic_transaction_testdb");

    EXPECT_OK(DestroyDB(dbname, options));
    Open();
  }

  ~OptimisticTransactionTest() override {
    EXPECT_OK(txn_db->Close());
    txn_db.reset();
    EXPECT_OK(DestroyDB(dbname, options));
  }

  void Reopen() {
    txn_db.reset();
    Open();
  }

  static void OpenImpl(const Options& options,
                       const OptimisticTransactionDBOptions& occ_opts,
                       const std::string& dbname,
                       std::unique_ptr<OptimisticTransactionDB>* txn_db) {
    ColumnFamilyOptions cf_options(options);
    std::vector<ColumnFamilyDescriptor> column_families;
    std::vector<ColumnFamilyHandle*> handles;
    column_families.push_back(
        ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
    OptimisticTransactionDB* raw_txn_db = nullptr;
    Status s = OptimisticTransactionDB::Open(
        options, occ_opts, dbname, column_families, &handles, &raw_txn_db);
    ASSERT_OK(s);
    ASSERT_NE(raw_txn_db, nullptr);
    txn_db->reset(raw_txn_db);
    ASSERT_EQ(handles.size(), 1);
    delete handles[0];
  }

 private:
  void Open() { OpenImpl(options, occ_opts, dbname, &txn_db); }
};
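
// No conflicting external writes: GetForUpdate, Put, and Commit all succeed,
// and the committed value is visible through the base DB.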
TEST_P(OptimisticTransactionTest, SuccessTest) {
  WriteOptions write_options;
  ReadOptions read_options;
  std::string value;

  ASSERT_OK(txn_db->Put(write_options, Slice("foo"), Slice("bar")));
  ASSERT_OK(txn_db->Put(write_options, Slice("foo2"), Slice("bar")));

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  ASSERT_OK(txn->GetForUpdate(read_options, "foo", &value));
  ASSERT_EQ(value, "bar");

  ASSERT_OK(txn->Put(Slice("foo"), Slice("bar2")));

  ASSERT_OK(txn->GetForUpdate(read_options, "foo", &value));
  ASSERT_EQ(value, "bar2");

  ASSERT_OK(txn->Commit());

  ASSERT_OK(txn_db->Get(read_options, "foo", &value));
  ASSERT_EQ(value, "bar2");

  delete txn;
}
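
// A Put applied outside the transaction to a key the transaction has already
// written makes Commit() fail with Status::Busy and discards the txn writes.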
TEST_P(OptimisticTransactionTest, WriteConflictTest) {
  WriteOptions write_options;
  ReadOptions read_options;
  std::string value;

  ASSERT_OK(txn_db->Put(write_options, "foo", "bar"));
  ASSERT_OK(txn_db->Put(write_options, "foo2", "bar"));

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  ASSERT_OK(txn->Put("foo", "bar2"));

  // This Put outside of a transaction will conflict with the previous write
  ASSERT_OK(txn_db->Put(write_options, "foo", "barz"));

  ASSERT_OK(txn_db->Get(read_options, "foo", &value));
  ASSERT_EQ(value, "barz");
  ASSERT_EQ(1, txn->GetNumKeys());

  Status s = txn->Commit();
  ASSERT_TRUE(s.IsBusy());  // Txn should not commit

  // Verify that transaction did not write anything
  ASSERT_OK(txn_db->Get(read_options, "foo", &value));
  ASSERT_EQ(value, "barz");
  ASSERT_OK(txn_db->Get(read_options, "foo2", &value));
  ASSERT_EQ(value, "bar");

  delete txn;
}
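
// Same conflict, but with set_snapshot = true: the external Put lands after
// the snapshot taken at BeginTransaction, so the transaction's later Put to
// the same key cannot commit.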
TEST_P(OptimisticTransactionTest, WriteConflictTest2) {
  WriteOptions write_options;
  ReadOptions read_options;
  OptimisticTransactionOptions txn_options;
  std::string value;

  ASSERT_OK(txn_db->Put(write_options, "foo", "bar"));
  ASSERT_OK(txn_db->Put(write_options, "foo2", "bar"));

  txn_options.set_snapshot = true;
  Transaction* txn = txn_db->BeginTransaction(write_options, txn_options);
  ASSERT_NE(txn, nullptr);

  // This Put outside of a transaction will conflict with a later write
  ASSERT_OK(txn_db->Put(write_options, "foo", "barz"));

  ASSERT_OK(txn->Put(
      "foo", "bar2"));  // Conflicts with write done after snapshot taken

  ASSERT_OK(txn_db->Get(read_options, "foo", &value));
  ASSERT_EQ(value, "barz");

  Status s = txn->Commit();
  ASSERT_TRUE(s.IsBusy());  // Txn should not commit

  // Verify that transaction did not write anything
  ASSERT_OK(txn_db->Get(read_options, "foo", &value));
  ASSERT_EQ(value, "barz");
  ASSERT_OK(txn_db->Get(read_options, "foo2", &value));
  ASSERT_EQ(value, "bar");

  delete txn;
}
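
// Merge vs. Merge conflict: the transaction merges a key it read with
// GetForUpdate, then an external Merge to the same key causes Commit() to
// return Busy.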
TEST_P(OptimisticTransactionTest, WriteConflictTest3) {
  ASSERT_OK(txn_db->Put(WriteOptions(), "foo", "bar"));

  Transaction* txn = txn_db->BeginTransaction(WriteOptions());
  ASSERT_NE(txn, nullptr);

  std::string value;
  ASSERT_OK(txn->GetForUpdate(ReadOptions(), "foo", &value));
  ASSERT_EQ(value, "bar");
  ASSERT_OK(txn->Merge("foo", "bar3"));

  // Merge outside of a transaction should conflict with the previous merge
  ASSERT_OK(txn_db->Merge(WriteOptions(), "foo", "bar2"));
  ASSERT_OK(txn_db->Get(ReadOptions(), "foo", &value));
  ASSERT_EQ(value, "bar2");

  ASSERT_EQ(1, txn->GetNumKeys());

  Status s = txn->Commit();
  EXPECT_TRUE(s.IsBusy());  // Txn should not commit

  // Verify that transaction did not write anything
  ASSERT_OK(txn_db->Get(ReadOptions(), "foo", &value));
  ASSERT_EQ(value, "bar2");

  delete txn;
}
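
// Merge vs. DeleteRange conflict: an external DeleteRange covering the key
// merged inside the transaction also fails the commit with Busy.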
TEST_P(OptimisticTransactionTest, WriteConflict4) {
  ASSERT_OK(txn_db->Put(WriteOptions(), "foo", "bar"));

  Transaction* txn = txn_db->BeginTransaction(WriteOptions());
  ASSERT_NE(txn, nullptr);

  std::string value;
  ASSERT_OK(txn->GetForUpdate(ReadOptions(), "foo", &value));
  ASSERT_EQ(value, "bar");
  ASSERT_OK(txn->Merge("foo", "bar3"));

  // Range delete outside of a transaction should conflict with the previous
  // merge inside txn
  auto* dbimpl = static_cast_with_check<DBImpl>(txn_db->GetRootDB());
  ColumnFamilyHandle* default_cf = dbimpl->DefaultColumnFamily();
  ASSERT_OK(dbimpl->DeleteRange(WriteOptions(), default_cf, "foo", "foo1"));
  Status s = txn_db->Get(ReadOptions(), "foo", &value);
  ASSERT_TRUE(s.IsNotFound());

  ASSERT_EQ(1, txn->GetNumKeys());

  s = txn->Commit();
  EXPECT_TRUE(s.IsBusy());  // Txn should not commit

  // Verify that transaction did not write anything
  s = txn_db->Get(ReadOptions(), "foo", &value);
  ASSERT_TRUE(s.IsNotFound());

  delete txn;
}
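
// Read conflict: the key was only read (GetForUpdate) under the transaction's
// snapshot, yet an external Put to it still causes Commit() to return Busy.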
TEST_P(OptimisticTransactionTest, ReadConflictTest) {
  WriteOptions write_options;
  ReadOptions read_options, snapshot_read_options;
  OptimisticTransactionOptions txn_options;
  std::string value;

  ASSERT_OK(txn_db->Put(write_options, "foo", "bar"));
  ASSERT_OK(txn_db->Put(write_options, "foo2", "bar"));

  txn_options.set_snapshot = true;
  Transaction* txn = txn_db->BeginTransaction(write_options, txn_options);
  ASSERT_NE(txn, nullptr);

  txn->SetSnapshot();
  snapshot_read_options.snapshot = txn->GetSnapshot();

  ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value));
  ASSERT_EQ(value, "bar");

  // This Put outside of a transaction will conflict with the previous read
  ASSERT_OK(txn_db->Put(write_options, "foo", "barz"));

  ASSERT_OK(txn_db->Get(read_options, "foo", &value));
  ASSERT_EQ(value, "barz");

  Status s = txn->Commit();
  ASSERT_TRUE(s.IsBusy());  // Txn should not commit

  // Verify that transaction did not write anything
  ASSERT_OK(txn->GetForUpdate(read_options, "foo", &value));
  ASSERT_EQ(value, "barz");
  ASSERT_OK(txn->GetForUpdate(read_options, "foo2", &value));
  ASSERT_EQ(value, "bar");

  delete txn;
}

TEST_P(OptimisticTransactionTest, TxnOnlyTest) {
  // Test to make sure transactions work when there are no other writes in an
  // empty db.

  WriteOptions write_options;
  ReadOptions read_options;
  std::string value;

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  ASSERT_OK(txn->Put("x", "y"));

  ASSERT_OK(txn->Commit());

  delete txn;
}
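
// A memtable flush between the transaction's operations and Commit() must not
// cause a spurious failure: the flushed memtable stays in the MemtableList
// history, so validation still succeeds.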
TEST_P(OptimisticTransactionTest, FlushTest) {
  WriteOptions write_options;
  ReadOptions read_options, snapshot_read_options;
  std::string value;

  ASSERT_OK(txn_db->Put(write_options, Slice("foo"), Slice("bar")));
  ASSERT_OK(txn_db->Put(write_options, Slice("foo2"), Slice("bar")));

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  snapshot_read_options.snapshot = txn->GetSnapshot();

  ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value));
  ASSERT_EQ(value, "bar");

  ASSERT_OK(txn->Put(Slice("foo"), Slice("bar2")));

  ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value));
  ASSERT_EQ(value, "bar2");

  // Put a random key so we have a memtable to flush
  ASSERT_OK(txn_db->Put(write_options, "dummy", "dummy"));

  // force a memtable flush
  FlushOptions flush_ops;
  ASSERT_OK(txn_db->Flush(flush_ops));

  // txn should commit since the flushed table is still in MemtableList History
  ASSERT_OK(txn->Commit());

  ASSERT_OK(txn_db->Get(read_options, "foo", &value));
  ASSERT_EQ(value, "bar2");

  delete txn;
}
|
|
|
|
|
Allow TryAgain in db_stress with optimistic txn, and refactoring (#11653)
Summary:
In rare cases, an optimistic transaction commit returns TryAgain. This change tolerates that intentional behavior in db_stress, up to a small limit in a row. This way, we don't miss a possible regression with excessive TryAgain, and retrying (after rolling back the transaction) has a well-renewed chance of success because the writes will be associated with fresh sequence numbers.
Also, some of the APIs were not clear about Transaction semantics, so I have clarified:
* (Best I can tell....) Destroying a Transaction is safe without calling Rollback() (or at least it should be). I don't know why it's a common pattern in our test code and examples to roll back before unconditional destruction. Stress test updated not to call Rollback unnecessarily (to test safe destruction).
* Simply calling Commit() again when it returns TryAgain has no chance of success, because the transaction is bound to the DB state at the time its operations were performed before Commit. Similar logic applies to Busy AFAIK. Commit() API comments updated, and the unit test in optimistic_transaction_test expanded.
Also, because I can't stop myself, I refactored a good portion of the transaction handling code in db_stress.
* Avoid existing and new copy-paste for most transaction interactions with a new ExecuteTransaction (higher-order) function.
* Use unique_ptr (nicely complements removing unnecessary Rollbacks)
* Abstract out a pattern for safely calling std::terminate() and use it in more places. (The TryAgain errors we saw did not have stack traces because of "terminate called recursively".)
Intended follow-up: resurrect use of `FLAGS_rollback_one_in` but also include non-trivial cases
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11653
Test Plan:
this is the test :)
Also, temporarily bypassed the new retry logic and boosted the chance of hitting TryAgain. Quickly reproduced the TryAgain error. Then re-enabled the new retry logic, and was not able to hit the error after running for tens of minutes, even with the boosted chances.
Reviewed By: cbi42
Differential Revision: D47882995
Pulled By: pdillinger
fbshipit-source-id: 21eadb1525423340dbf28d17cf166b9583311a0d
1 year ago
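The FlushTest2 test below exercises this directly. As a standalone illustration of the recommended pattern, here is a rough sketch of rolling back and redoing the work on TryAgain; `CommitWithRetry` and `populate` are hypothetical names, not APIs from this file.
```cpp
// On TryAgain, calling Commit() again cannot succeed because the transaction
// is still bound to the old DB state. Roll back (which rebinds it to the
// current state), redo the reads/writes, then commit again.
Status CommitWithRetry(OptimisticTransactionDB* db, const WriteOptions& wo,
                       const std::function<void(Transaction*)>& populate,
                       int max_retries) {
  std::unique_ptr<Transaction> txn(db->BeginTransaction(wo));
  populate(txn.get());
  Status s = txn->Commit();
  for (int i = 0; i < max_retries && s.IsTryAgain(); ++i) {
    Status rs = txn->Rollback();
    if (!rs.ok()) {
      return rs;
    }
    populate(txn.get());  // writes now get fresh sequence numbers
    s = txn->Commit();
  }
  return s;
}
```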

namespace {
void FlushTest2PopulateTxn(Transaction* txn) {
  ReadOptions snapshot_read_options;

  std::string value;

  snapshot_read_options.snapshot = txn->GetSnapshot();

  ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value));
  ASSERT_EQ(value, "bar");

  ASSERT_OK(txn->Put(Slice("foo"), Slice("bar2")));

  ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value));
  ASSERT_EQ(value, "bar2");

}
} // namespace

TEST_P(OptimisticTransactionTest, FlushTest2) {
  WriteOptions write_options;
  ReadOptions read_options;
  std::string value;

  ASSERT_OK(txn_db->Put(write_options, Slice("foo"), Slice("bar")));
  ASSERT_OK(txn_db->Put(write_options, Slice("foo2"), Slice("bar")));

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  FlushTest2PopulateTxn(txn);

  // Put a random key so we have a MemTable to flush
  ASSERT_OK(txn_db->Put(write_options, "dummy", "dummy"));

  // force a memtable flush
  FlushOptions flush_ops;
  ASSERT_OK(txn_db->Flush(flush_ops));

  // Put a random key so we have a MemTable to flush
  ASSERT_OK(txn_db->Put(write_options, "dummy", "dummy2"));

  // force a memtable flush
  ASSERT_OK(txn_db->Flush(flush_ops));

  ASSERT_OK(txn_db->Put(write_options, "dummy", "dummy3"));

  // force a memtable flush
  // Since our test db has max_write_buffer_number=2, this flush will cause
  // the first memtable to get purged from the MemtableList history.
  ASSERT_OK(txn_db->Flush(flush_ops));

  Status s = txn->Commit();
  // txn should not commit since MemTableList History is not large enough
  ASSERT_TRUE(s.IsTryAgain());

  // simply trying Commit again doesn't help
  s = txn->Commit();
  ASSERT_TRUE(s.IsTryAgain());

  ASSERT_OK(txn_db->Get(read_options, "foo", &value));
  ASSERT_EQ(value, "bar");

  // But rolling back and redoing does
  ASSERT_OK(txn->Rollback());

  FlushTest2PopulateTxn(txn);

  ASSERT_OK(txn->Commit());

  ASSERT_OK(txn_db->Get(read_options, "foo", &value));
  ASSERT_EQ(value, "bar2");

  delete txn;
}

// Trigger the condition where some old memtables are skipped when doing
// TransactionUtil::CheckKey(), and make sure the result is still correct.
TEST_P(OptimisticTransactionTest, CheckKeySkipOldMemtable) {
  const int kAttemptHistoryMemtable = 0;
  const int kAttemptImmMemTable = 1;
  for (int attempt = kAttemptHistoryMemtable; attempt <= kAttemptImmMemTable;
       attempt++) {
    Reopen();

    WriteOptions write_options;
    ReadOptions read_options;
    ReadOptions snapshot_read_options;
    ReadOptions snapshot_read_options2;

    std::string value;

    ASSERT_OK(txn_db->Put(write_options, Slice("foo"), Slice("bar")));
    ASSERT_OK(txn_db->Put(write_options, Slice("foo2"), Slice("bar")));

    Transaction* txn = txn_db->BeginTransaction(write_options);
    ASSERT_TRUE(txn != nullptr);

    Transaction* txn2 = txn_db->BeginTransaction(write_options);
    ASSERT_TRUE(txn2 != nullptr);

    snapshot_read_options.snapshot = txn->GetSnapshot();
    ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value));
    ASSERT_EQ(value, "bar");
    ASSERT_OK(txn->Put(Slice("foo"), Slice("bar2")));

    snapshot_read_options2.snapshot = txn2->GetSnapshot();
    ASSERT_OK(txn2->GetForUpdate(snapshot_read_options2, "foo2", &value));
    ASSERT_EQ(value, "bar");
    ASSERT_OK(txn2->Put(Slice("foo2"), Slice("bar2")));

    // txn updates "foo" and txn2 updates "foo2", and now a write is
    // issued for "foo", which conflicts with txn but not txn2
    ASSERT_OK(txn_db->Put(write_options, "foo", "bar"));

    if (attempt == kAttemptImmMemTable) {
      // For the second attempt, hold the flush from the beginning. The
      // memtable will be switched to immutable after calling
      // TEST_SwitchMemtable() while CheckKey() is called.
      ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
          {{"OptimisticTransactionTest.CheckKeySkipOldMemtable",
            "FlushJob::Start"}});
      ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
    }

    // force a memtable flush. The memtable should still be kept
    FlushOptions flush_ops;
    if (attempt == kAttemptHistoryMemtable) {
      ASSERT_OK(txn_db->Flush(flush_ops));
    } else {
      ASSERT_EQ(attempt, kAttemptImmMemTable);
      DBImpl* db_impl = static_cast<DBImpl*>(txn_db->GetRootDB());
      ASSERT_OK(db_impl->TEST_SwitchMemtable());
    }
    uint64_t num_imm_mems;
    ASSERT_TRUE(txn_db->GetIntProperty(DB::Properties::kNumImmutableMemTable,
                                       &num_imm_mems));
    if (attempt == kAttemptHistoryMemtable) {
      ASSERT_EQ(0, num_imm_mems);
    } else {
      ASSERT_EQ(attempt, kAttemptImmMemTable);
      ASSERT_EQ(1, num_imm_mems);
    }

    // Put something in active memtable
    ASSERT_OK(txn_db->Put(write_options, Slice("foo3"), Slice("bar")));

    // Create txn3 after flushing; when this transaction is committed,
    // only the active memtable needs to be checked
    Transaction* txn3 = txn_db->BeginTransaction(write_options);
    ASSERT_TRUE(txn3 != nullptr);

    // Commit both txn and txn2. txn will conflict but txn2 will
    // pass. In both cases, both memtables are queried.
    SetPerfLevel(PerfLevel::kEnableCount);

    get_perf_context()->Reset();
    Status s = txn->Commit();
    // We should have checked two memtables
    ASSERT_EQ(2, get_perf_context()->get_from_memtable_count);
    // txn should fail because of conflict, even if the memtable
    // has flushed, because it is still preserved in history.
    ASSERT_TRUE(s.IsBusy());

    get_perf_context()->Reset();
    s = txn2->Commit();
    // We should have checked two memtables
    ASSERT_EQ(2, get_perf_context()->get_from_memtable_count);
    ASSERT_TRUE(s.ok());

    ASSERT_OK(txn3->Put(Slice("foo2"), Slice("bar2")));
    get_perf_context()->Reset();
    s = txn3->Commit();
    // txn3 is created after the active memtable is created, so that is the
    // only memtable to check.
    ASSERT_EQ(1, get_perf_context()->get_from_memtable_count);
    ASSERT_TRUE(s.ok());

    TEST_SYNC_POINT("OptimisticTransactionTest.CheckKeySkipOldMemtable");
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();

    SetPerfLevel(PerfLevel::kDisable);

    delete txn;
    delete txn2;
    delete txn3;
  }
}

TEST_P(OptimisticTransactionTest, NoSnapshotTest) {
  WriteOptions write_options;
  ReadOptions read_options;

  std::string value;

  ASSERT_OK(txn_db->Put(write_options, "AAA", "bar"));

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  // Modify key after transaction start
  ASSERT_OK(txn_db->Put(write_options, "AAA", "bar1"));

  // Read and write without a snapshot
  ASSERT_OK(txn->GetForUpdate(read_options, "AAA", &value));
  ASSERT_EQ(value, "bar1");
  ASSERT_OK(txn->Put("AAA", "bar2"));

  // Should commit since read/write was done after data changed
  ASSERT_OK(txn->Commit());

  ASSERT_OK(txn->GetForUpdate(read_options, "AAA", &value));
  ASSERT_EQ(value, "bar2");

  delete txn;
}

TEST_P(OptimisticTransactionTest, MultipleSnapshotTest) {
  WriteOptions write_options;
  ReadOptions read_options, snapshot_read_options;

  std::string value;

  ASSERT_OK(txn_db->Put(write_options, "AAA", "bar"));
  ASSERT_OK(txn_db->Put(write_options, "BBB", "bar"));
  ASSERT_OK(txn_db->Put(write_options, "CCC", "bar"));

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  ASSERT_OK(txn_db->Put(write_options, "AAA", "bar1"));

  // Read and write without a snapshot
  ASSERT_OK(txn->GetForUpdate(read_options, "AAA", &value));
  ASSERT_EQ(value, "bar1");
  ASSERT_OK(txn->Put("AAA", "bar2"));

  // Modify BBB before snapshot is taken
  ASSERT_OK(txn_db->Put(write_options, "BBB", "bar1"));

  txn->SetSnapshot();
  snapshot_read_options.snapshot = txn->GetSnapshot();

  // Read and write with snapshot
  ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "BBB", &value));
  ASSERT_EQ(value, "bar1");
  ASSERT_OK(txn->Put("BBB", "bar2"));

  ASSERT_OK(txn_db->Put(write_options, "CCC", "bar1"));

  // Set a new snapshot
  txn->SetSnapshot();
  snapshot_read_options.snapshot = txn->GetSnapshot();

  // Read and write with snapshot
  ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "CCC", &value));
  ASSERT_EQ(value, "bar1");
  ASSERT_OK(txn->Put("CCC", "bar2"));

  ASSERT_OK(txn->GetForUpdate(read_options, "AAA", &value));
  ASSERT_EQ(value, "bar2");
  ASSERT_OK(txn->GetForUpdate(read_options, "BBB", &value));
  ASSERT_EQ(value, "bar2");
  ASSERT_OK(txn->GetForUpdate(read_options, "CCC", &value));
  ASSERT_EQ(value, "bar2");

  ASSERT_OK(txn_db->Get(read_options, "AAA", &value));
  ASSERT_EQ(value, "bar1");
  ASSERT_OK(txn_db->Get(read_options, "BBB", &value));
  ASSERT_EQ(value, "bar1");
  ASSERT_OK(txn_db->Get(read_options, "CCC", &value));
  ASSERT_EQ(value, "bar1");

  ASSERT_OK(txn->Commit());

  ASSERT_OK(txn_db->Get(read_options, "AAA", &value));
  ASSERT_EQ(value, "bar2");
  ASSERT_OK(txn_db->Get(read_options, "BBB", &value));
  ASSERT_EQ(value, "bar2");
  ASSERT_OK(txn_db->Get(read_options, "CCC", &value));
  ASSERT_EQ(value, "bar2");

  // verify that we track multiple writes to the same key at different snapshots
  delete txn;
  txn = txn_db->BeginTransaction(write_options);

  // Potentially conflicting writes
  ASSERT_OK(txn_db->Put(write_options, "ZZZ", "zzz"));
  ASSERT_OK(txn_db->Put(write_options, "XXX", "xxx"));

  txn->SetSnapshot();

  OptimisticTransactionOptions txn_options;
  txn_options.set_snapshot = true;

  Transaction* txn2 = txn_db->BeginTransaction(write_options, txn_options);
  txn2->SetSnapshot();

  // This should not conflict in txn since the snapshot is later than the
  // previous write (spoiler alert: it will later conflict with txn2).
  ASSERT_OK(txn->Put("ZZZ", "zzzz"));
  ASSERT_OK(txn->Commit());

  delete txn;

  // This will conflict since the snapshot is earlier than another write to ZZZ
  ASSERT_OK(txn2->Put("ZZZ", "xxxxx"));

  Status s = txn2->Commit();
  ASSERT_TRUE(s.IsBusy());

  delete txn2;
}

TEST_P(OptimisticTransactionTest, ColumnFamiliesTest) {
  WriteOptions write_options;
  ReadOptions read_options, snapshot_read_options;
  OptimisticTransactionOptions txn_options;

  std::string value;

  ColumnFamilyHandle *cfa, *cfb;
  ColumnFamilyOptions cf_options;

  // Create 2 new column families
  ASSERT_OK(txn_db->CreateColumnFamily(cf_options, "CFA", &cfa));
  ASSERT_OK(txn_db->CreateColumnFamily(cf_options, "CFB", &cfb));

  delete cfa;
  delete cfb;
  txn_db.reset();

  OptimisticTransactionDBOptions my_occ_opts = occ_opts;
  const size_t bucket_count = 500;
  my_occ_opts.shared_lock_buckets = MakeSharedOccLockBuckets(bucket_count);

  // open DB with three column families
  std::vector<ColumnFamilyDescriptor> column_families;
  // have to open default column family
  column_families.push_back(
      ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions()));
  // open the new column families
  column_families.push_back(
      ColumnFamilyDescriptor("CFA", ColumnFamilyOptions()));
  column_families.push_back(
      ColumnFamilyDescriptor("CFB", ColumnFamilyOptions()));
  std::vector<ColumnFamilyHandle*> handles;
  OptimisticTransactionDB* raw_txn_db = nullptr;
  ASSERT_OK(OptimisticTransactionDB::Open(
      options, my_occ_opts, dbname, column_families, &handles, &raw_txn_db));
  ASSERT_NE(raw_txn_db, nullptr);
  txn_db.reset(raw_txn_db);

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  txn->SetSnapshot();
  snapshot_read_options.snapshot = txn->GetSnapshot();

  txn_options.set_snapshot = true;
|
|
|
Transaction* txn2 = txn_db->BeginTransaction(write_options, txn_options);
|
|
|
|
ASSERT_TRUE(txn2);
|
|
|
|
|
|
|
|
// Write some data to the db
|
|
|
|
WriteBatch batch;
|
|
|
|
ASSERT_OK(batch.Put("foo", "foo"));
|
|
|
|
ASSERT_OK(batch.Put(handles[1], "AAA", "bar"));
|
|
|
|
ASSERT_OK(batch.Put(handles[1], "AAAZZZ", "bar"));
|
|
|
|
ASSERT_OK(txn_db->Write(write_options, &batch));
|
|
|
|
ASSERT_OK(txn_db->Delete(write_options, handles[1], "AAAZZZ"));
|
|
|
|
|
|
|
|
// These keys do no conflict with existing writes since they're in
|
|
|
|
// different column families
|
|
|
|
ASSERT_OK(txn->Delete("AAA"));
|
|
|
|
Status s =
|
|
|
|
txn->GetForUpdate(snapshot_read_options, handles[1], "foo", &value);
|
|
|
|
ASSERT_TRUE(s.IsNotFound());
|
|
|
|
Slice key_slice("AAAZZZ");
|
|
|
|
Slice value_slices[2] = {Slice("bar"), Slice("bar")};
|
|
|
|
ASSERT_OK(txn->Put(handles[2], SliceParts(&key_slice, 1),
|
|
|
|
SliceParts(value_slices, 2)));
|
|
|
|
|
|
|
|
ASSERT_EQ(3, txn->GetNumKeys());

  // Txn should commit
  ASSERT_OK(txn->Commit());
  s = txn_db->Get(read_options, "AAA", &value);
  ASSERT_TRUE(s.IsNotFound());
  s = txn_db->Get(read_options, handles[2], "AAAZZZ", &value);
  ASSERT_EQ(value, "barbar");

  Slice key_slices[3] = {Slice("AAA"), Slice("ZZ"), Slice("Z")};
  Slice value_slice("barbarbar");
  // This write will cause a conflict with the earlier batch write
  ASSERT_OK(txn2->Put(handles[1], SliceParts(key_slices, 3),
                      SliceParts(&value_slice, 1)));

  ASSERT_OK(txn2->Delete(handles[2], "XXX"));
  ASSERT_OK(txn2->Delete(handles[1], "XXX"));
  s = txn2->GetForUpdate(snapshot_read_options, handles[1], "AAA", &value);
  ASSERT_TRUE(s.IsNotFound());

  // Verify txn2 did not commit
  s = txn2->Commit();
  ASSERT_TRUE(s.IsBusy());
  s = txn_db->Get(read_options, handles[1], "AAAZZZ", &value);
  ASSERT_TRUE(s.IsNotFound());
  ASSERT_EQ(value, "barbar");

  delete txn;
  delete txn2;

  // ** MultiGet **
  txn = txn_db->BeginTransaction(write_options, txn_options);
  snapshot_read_options.snapshot = txn->GetSnapshot();

  txn2 = txn_db->BeginTransaction(write_options, txn_options);
  ASSERT_NE(txn, nullptr);

  std::vector<ColumnFamilyHandle*> multiget_cfh = {handles[1], handles[2],
                                                   handles[0], handles[2]};
  std::vector<Slice> multiget_keys = {"AAA", "AAAZZZ", "foo", "foo"};
  std::vector<std::string> values(4);

  std::vector<Status> results = txn->MultiGetForUpdate(
      snapshot_read_options, multiget_cfh, multiget_keys, &values);
  ASSERT_OK(results[0]);
  ASSERT_OK(results[1]);
  ASSERT_OK(results[2]);
  ASSERT_TRUE(results[3].IsNotFound());
  ASSERT_EQ(values[0], "bar");
  ASSERT_EQ(values[1], "barbar");
  ASSERT_EQ(values[2], "foo");

  ASSERT_OK(txn->Delete(handles[2], "ZZZ"));
  ASSERT_OK(txn->Put(handles[2], "ZZZ", "YYY"));
  ASSERT_OK(txn->Put(handles[2], "ZZZ", "YYYY"));
  ASSERT_OK(txn->Delete(handles[2], "ZZZ"));
  ASSERT_OK(txn->Put(handles[2], "AAAZZZ", "barbarbar"));

  ASSERT_EQ(5, txn->GetNumKeys());
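  // MultiGetForUpdate tracked four distinct (CF, key) pairs; the writes above
  // only add "ZZZ" in handles[2] as a new tracked key, hence the count of 5.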

  // Txn should commit
  ASSERT_OK(txn->Commit());
  s = txn_db->Get(read_options, handles[2], "ZZZ", &value);
  ASSERT_TRUE(s.IsNotFound());

  // Put a key which will conflict with the next txn using the previous
  // snapshot
  ASSERT_OK(txn_db->Put(write_options, handles[2], "foo", "000"));

  results = txn2->MultiGetForUpdate(snapshot_read_options, multiget_cfh,
                                    multiget_keys, &values);
  ASSERT_OK(results[0]);
  ASSERT_OK(results[1]);
  ASSERT_OK(results[2]);
  ASSERT_TRUE(results[3].IsNotFound());
  ASSERT_EQ(values[0], "bar");
  ASSERT_EQ(values[1], "barbar");
  ASSERT_EQ(values[2], "foo");

  // Verify txn2 did not commit
  s = txn2->Commit();
  ASSERT_TRUE(s.IsBusy());

  delete txn;
  delete txn2;

  // ** Test independence and/or sharing of lock buckets across CFs and DBs **
  if (my_occ_opts.validate_policy == OccValidationPolicy::kValidateParallel) {
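    // The sync point callback below records every lock bucket mutex pointer
    // touched during parallel validation: a rolling hash of the pointer
    // sequence plus the min/max addresses seen, so bucket access patterns can
    // be compared across column families and DBs.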
    struct SeenStat {
      uint64_t rolling_hash = 0;
      uintptr_t min = 0;
      uintptr_t max = 0;
    };
    SeenStat cur_seen;
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
        "OptimisticTransaction::CommitWithParallelValidate::lock_bucket_ptr",
        [&](void* arg) {
          // Hash the pointer
          cur_seen.rolling_hash = Hash64(reinterpret_cast<char*>(&arg),
                                         sizeof(arg), cur_seen.rolling_hash);
          uintptr_t val = reinterpret_cast<uintptr_t>(arg);
          if (cur_seen.min == 0 || val < cur_seen.min) {
            cur_seen.min = val;
          }
          if (cur_seen.max == 0 || val > cur_seen.max) {
            cur_seen.max = val;
          }
        });
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

    // Another db sharing lock buckets
    auto shared_dbname =
        test::PerThreadDBPath("optimistic_transaction_testdb_shared");
    std::unique_ptr<OptimisticTransactionDB> shared_txn_db = nullptr;
    OpenImpl(options, my_occ_opts, shared_dbname, &shared_txn_db);

    // Another db not sharing lock buckets
    auto nonshared_dbname =
        test::PerThreadDBPath("optimistic_transaction_testdb_nonshared");
    std::unique_ptr<OptimisticTransactionDB> nonshared_txn_db = nullptr;
    my_occ_opts.occ_lock_buckets = bucket_count;
    my_occ_opts.shared_lock_buckets = nullptr;
    OpenImpl(options, my_occ_opts, nonshared_dbname, &nonshared_txn_db);

    // Plenty of keys to avoid randomly hitting the same hash sequence
    std::array<std::string, 30> keys;
    for (size_t i = 0; i < keys.size(); ++i) {
      keys[i] = std::to_string(i);
    }

    // Get a baseline pattern of bucket accesses
    cur_seen = {};
    txn = txn_db->BeginTransaction(write_options, txn_options);
    for (const auto& key : keys) {
      ASSERT_OK(txn->Put(handles[0], key, "blah"));
    }
    ASSERT_OK(txn->Commit());
    // Sufficiently large hash coverage of the space
    const uintptr_t min_span_bytes = sizeof(port::Mutex) * bucket_count / 2;
    ASSERT_GT(cur_seen.max - cur_seen.min, min_span_bytes);
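    // The touched mutex addresses should span at least half of the bucket
    // array, i.e. the commit really hashed keys across many distinct buckets.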
    // Save
    SeenStat base_seen = cur_seen;

    // Verify it is repeatable
    cur_seen = {};
    txn = txn_db->BeginTransaction(write_options, txn_options, txn);
    for (const auto& key : keys) {
      ASSERT_OK(txn->Put(handles[0], key, "moo"));
    }
    ASSERT_OK(txn->Commit());
    ASSERT_EQ(cur_seen.rolling_hash, base_seen.rolling_hash);
    ASSERT_EQ(cur_seen.min, base_seen.min);
    ASSERT_EQ(cur_seen.max, base_seen.max);

    // Try another CF
    cur_seen = {};
    txn = txn_db->BeginTransaction(write_options, txn_options, txn);
    for (const auto& key : keys) {
      ASSERT_OK(txn->Put(handles[1], key, "blah"));
    }
    ASSERT_OK(txn->Commit());
    // Different access pattern (different hash seed)
    ASSERT_NE(cur_seen.rolling_hash, base_seen.rolling_hash);
    // Same pointer space
    ASSERT_LT(cur_seen.min, base_seen.max);
    ASSERT_GT(cur_seen.max, base_seen.min);
    // Sufficiently large hash coverage of the space
    ASSERT_GT(cur_seen.max - cur_seen.min, min_span_bytes);
    // Save
    SeenStat cf1_seen = cur_seen;

    // And another CF
    cur_seen = {};
    txn = txn_db->BeginTransaction(write_options, txn_options, txn);
    for (const auto& key : keys) {
      ASSERT_OK(txn->Put(handles[2], key, "blah"));
    }
    ASSERT_OK(txn->Commit());
    // Different access pattern (different hash seed)
    ASSERT_NE(cur_seen.rolling_hash, base_seen.rolling_hash);
    ASSERT_NE(cur_seen.rolling_hash, cf1_seen.rolling_hash);
    // Same pointer space
    ASSERT_LT(cur_seen.min, base_seen.max);
    ASSERT_GT(cur_seen.max, base_seen.min);
    // Sufficiently large hash coverage of the space
    ASSERT_GT(cur_seen.max - cur_seen.min, min_span_bytes);

    // And DB with shared lock buckets
    cur_seen = {};
    delete txn;
    txn = shared_txn_db->BeginTransaction(write_options, txn_options);
    for (const auto& key : keys) {
      ASSERT_OK(txn->Put(key, "blah"));
    }
    ASSERT_OK(txn->Commit());
    // Different access pattern (different hash seed)
    ASSERT_NE(cur_seen.rolling_hash, base_seen.rolling_hash);
    ASSERT_NE(cur_seen.rolling_hash, cf1_seen.rolling_hash);
    // Same pointer space
    ASSERT_LT(cur_seen.min, base_seen.max);
    ASSERT_GT(cur_seen.max, base_seen.min);
    // Sufficiently large hash coverage of the space
    ASSERT_GT(cur_seen.max - cur_seen.min, min_span_bytes);

    // And DB with distinct lock buckets
    cur_seen = {};
    delete txn;
    txn = nonshared_txn_db->BeginTransaction(write_options, txn_options);
    for (const auto& key : keys) {
      ASSERT_OK(txn->Put(key, "blah"));
    }
    ASSERT_OK(txn->Commit());
    // Different access pattern (different hash seed)
    ASSERT_NE(cur_seen.rolling_hash, base_seen.rolling_hash);
    ASSERT_NE(cur_seen.rolling_hash, cf1_seen.rolling_hash);
    // Different pointer space
    ASSERT_TRUE(cur_seen.min > base_seen.max || cur_seen.max < base_seen.min);
    // Sufficiently large hash coverage of the space
    ASSERT_GT(cur_seen.max - cur_seen.min, min_span_bytes);

    delete txn;
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  }

  // ** Test dropping column family before committing, or even creating txn **
  txn = txn_db->BeginTransaction(write_options, txn_options);
  ASSERT_OK(txn->Delete(handles[1], "AAA"));

  s = txn_db->DropColumnFamily(handles[1]);
  ASSERT_OK(s);
  s = txn_db->DropColumnFamily(handles[2]);
  ASSERT_OK(s);

  ASSERT_NOK(txn->Commit());

  txn2 = txn_db->BeginTransaction(write_options, txn_options);
  ASSERT_OK(txn2->Delete(handles[2], "AAA"));
  ASSERT_NOK(txn2->Commit());

  delete txn;
  delete txn2;

  for (auto handle : handles) {
    delete handle;
  }
}

TEST_P(OptimisticTransactionTest, EmptyTest) {
  WriteOptions write_options;
  ReadOptions read_options;
  std::string value;

  ASSERT_OK(txn_db->Put(write_options, "aaa", "aaa"));

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn->Commit());
  delete txn;

  txn = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn->Rollback());
  delete txn;

  txn = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn->GetForUpdate(read_options, "aaa", &value));
  ASSERT_EQ(value, "aaa");

  ASSERT_OK(txn->Commit());
  delete txn;

  txn = txn_db->BeginTransaction(write_options);
  txn->SetSnapshot();
  ASSERT_OK(txn->GetForUpdate(read_options, "aaa", &value));
  ASSERT_EQ(value, "aaa");
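
  // An external write to "aaa" after the GetForUpdate makes this transaction's
  // validation fail, so Commit() below returns Busy.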
ASSERT_OK(txn_db->Put(write_options, "aaa", "xxx"));
|
|
|
|
Status s = txn->Commit();
|
|
|
|
ASSERT_TRUE(s.IsBusy());
|
|
|
|
delete txn;
|
|
|
|
}
|
|
|
|
|
|
|
|

TEST_P(OptimisticTransactionTest, PredicateManyPreceders) {
  WriteOptions write_options;
  ReadOptions read_options1, read_options2;
  OptimisticTransactionOptions txn_options;
  std::string value;

  txn_options.set_snapshot = true;
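
  // txn1 reads "1", "2" and "3" for update under its snapshot; txn2 then
  // writes "2" and commits, so txn1's later commit must fail.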
  Transaction* txn1 = txn_db->BeginTransaction(write_options, txn_options);
  read_options1.snapshot = txn1->GetSnapshot();

  Transaction* txn2 = txn_db->BeginTransaction(write_options);
  txn2->SetSnapshot();
  read_options2.snapshot = txn2->GetSnapshot();

  std::vector<Slice> multiget_keys = {"1", "2", "3"};
  std::vector<std::string> multiget_values;

  std::vector<Status> results =
      txn1->MultiGetForUpdate(read_options1, multiget_keys, &multiget_values);
  ASSERT_TRUE(results[0].IsNotFound());
  ASSERT_TRUE(results[1].IsNotFound());
  ASSERT_TRUE(results[2].IsNotFound());

  ASSERT_OK(txn2->Put("2", "x"));

  ASSERT_OK(txn2->Commit());

  multiget_values.clear();
  results =
      txn1->MultiGetForUpdate(read_options1, multiget_keys, &multiget_values);
  ASSERT_TRUE(results[0].IsNotFound());
  ASSERT_TRUE(results[1].IsNotFound());
  ASSERT_TRUE(results[2].IsNotFound());

  // txn1 should not commit since txn2 wrote a key txn1 has read
  Status s = txn1->Commit();
  ASSERT_TRUE(s.IsBusy());

  delete txn1;
  delete txn2;

  txn1 = txn_db->BeginTransaction(write_options, txn_options);
  read_options1.snapshot = txn1->GetSnapshot();

  txn2 = txn_db->BeginTransaction(write_options, txn_options);
  read_options2.snapshot = txn2->GetSnapshot();

  ASSERT_OK(txn1->Put("4", "x"));

  ASSERT_OK(txn2->Delete("4"));

  // txn1 can commit since txn2's delete hasn't happened yet (it's just
  // batched)
  ASSERT_OK(txn1->Commit());

  s = txn2->GetForUpdate(read_options2, "4", &value);
  ASSERT_TRUE(s.IsNotFound());

  // txn2 cannot commit since txn1 changed "4"
  s = txn2->Commit();
  ASSERT_TRUE(s.IsBusy());

  delete txn1;
  delete txn2;
}

TEST_P(OptimisticTransactionTest, LostUpdate) {
  WriteOptions write_options;
  ReadOptions read_options, read_options1, read_options2;
  OptimisticTransactionOptions txn_options;
  std::string value;

  // Test 2 transactions writing to the same key in multiple orders and
  // with/without snapshots
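
  // No snapshots: txn2's write to "1" conflicts with txn1's committed write,
  // so txn2 fails to commit.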
  Transaction* txn1 = txn_db->BeginTransaction(write_options);
  Transaction* txn2 = txn_db->BeginTransaction(write_options);

  ASSERT_OK(txn1->Put("1", "1"));
  ASSERT_OK(txn2->Put("1", "2"));

  ASSERT_OK(txn1->Commit());

  Status s = txn2->Commit();
  ASSERT_TRUE(s.IsBusy());

  delete txn1;
  delete txn2;
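
  // Both txns take snapshots before writing; the second committer still fails
  // because its snapshot predates the other txn's write.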
  txn_options.set_snapshot = true;
  txn1 = txn_db->BeginTransaction(write_options, txn_options);
  read_options1.snapshot = txn1->GetSnapshot();

  txn2 = txn_db->BeginTransaction(write_options, txn_options);
  read_options2.snapshot = txn2->GetSnapshot();

  ASSERT_OK(txn1->Put("1", "3"));
  ASSERT_OK(txn2->Put("1", "4"));

  ASSERT_OK(txn1->Commit());

  s = txn2->Commit();
  ASSERT_TRUE(s.IsBusy());

  delete txn1;
  delete txn2;
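
  // txn1 commits before txn2 even writes; txn2 still fails because its
  // snapshot was taken before txn1's write to "1".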
  txn1 = txn_db->BeginTransaction(write_options, txn_options);
  read_options1.snapshot = txn1->GetSnapshot();

  txn2 = txn_db->BeginTransaction(write_options, txn_options);
  read_options2.snapshot = txn2->GetSnapshot();

  ASSERT_OK(txn1->Put("1", "5"));
  ASSERT_OK(txn1->Commit());

  ASSERT_OK(txn2->Put("1", "6"));
  s = txn2->Commit();
  ASSERT_TRUE(s.IsBusy());

  delete txn1;
  delete txn2;
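
  // Same as above, except txn2 refreshes its snapshot after txn1 commits, so
  // its write no longer conflicts and the commit succeeds.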
  txn1 = txn_db->BeginTransaction(write_options, txn_options);
  read_options1.snapshot = txn1->GetSnapshot();

  txn2 = txn_db->BeginTransaction(write_options, txn_options);
  read_options2.snapshot = txn2->GetSnapshot();

  ASSERT_OK(txn1->Put("1", "5"));
  ASSERT_OK(txn1->Commit());

  txn2->SetSnapshot();
  ASSERT_OK(txn2->Put("1", "6"));
  ASSERT_OK(txn2->Commit());

  delete txn1;
  delete txn2;
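
  // No snapshots: txn2's first write to "1" happens after txn1 has already
  // committed, so both commits succeed and txn2's value wins.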
  txn1 = txn_db->BeginTransaction(write_options);
  txn2 = txn_db->BeginTransaction(write_options);

  ASSERT_OK(txn1->Put("1", "7"));
  ASSERT_OK(txn1->Commit());

  ASSERT_OK(txn2->Put("1", "8"));
  ASSERT_OK(txn2->Commit());

  delete txn1;
  delete txn2;

  ASSERT_OK(txn_db->Get(read_options, "1", &value));
  ASSERT_EQ(value, "8");
}

TEST_P(OptimisticTransactionTest, UntrackedWrites) {
  WriteOptions write_options;
  ReadOptions read_options;
  std::string value;
  Status s;

  // Verify transaction rollback works for untracked keys.
  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn->PutUntracked("untracked", "0"));
  ASSERT_OK(txn->Rollback());
  s = txn_db->Get(read_options, "untracked", &value);
  ASSERT_TRUE(s.IsNotFound());

  delete txn;
  txn = txn_db->BeginTransaction(write_options);

  ASSERT_OK(txn->Put("tracked", "1"));
  ASSERT_OK(txn->PutUntracked("untracked", "1"));
  ASSERT_OK(txn->MergeUntracked("untracked", "2"));
  ASSERT_OK(txn->DeleteUntracked("untracked"));

  // Write to the untracked key outside of the transaction and verify
  // it doesn't prevent the transaction from committing.
  ASSERT_OK(txn_db->Put(write_options, "untracked", "x"));

  ASSERT_OK(txn->Commit());

  s = txn_db->Get(read_options, "untracked", &value);
  ASSERT_TRUE(s.IsNotFound());

  delete txn;
  txn = txn_db->BeginTransaction(write_options);

  ASSERT_OK(txn->Put("tracked", "10"));
  ASSERT_OK(txn->PutUntracked("untracked", "A"));

  // Write to tracked key outside of the transaction and verify that the
  // untracked keys are not written when the commit fails.
  ASSERT_OK(txn_db->Delete(write_options, "tracked"));

  s = txn->Commit();
  ASSERT_TRUE(s.IsBusy());

  s = txn_db->Get(read_options, "untracked", &value);
  ASSERT_TRUE(s.IsNotFound());

  delete txn;
}

TEST_P(OptimisticTransactionTest, IteratorTest) {
  WriteOptions write_options;
  ReadOptions read_options, snapshot_read_options;
  OptimisticTransactionOptions txn_options;
  std::string value;

  // Write some keys to the db
  ASSERT_OK(txn_db->Put(write_options, "A", "a"));
  ASSERT_OK(txn_db->Put(write_options, "G", "g"));
  ASSERT_OK(txn_db->Put(write_options, "F", "f"));
  ASSERT_OK(txn_db->Put(write_options, "C", "c"));
  ASSERT_OK(txn_db->Put(write_options, "D", "d"));

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  // Write some keys in a txn
  ASSERT_OK(txn->Put("B", "b"));
  ASSERT_OK(txn->Put("H", "h"));
  ASSERT_OK(txn->Delete("D"));
  ASSERT_OK(txn->Put("E", "e"));

  txn->SetSnapshot();
  const Snapshot* snapshot = txn->GetSnapshot();

  // Write some keys to the db after the snapshot
  ASSERT_OK(txn_db->Put(write_options, "BB", "xx"));
  ASSERT_OK(txn_db->Put(write_options, "C", "xx"));
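
  // The iterator below reads at the snapshot, so it still sees "c" for "C",
  // but the external overwrite of "C" will make the final commit conflict.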
|
|
|
|
|
|
|
read_options.snapshot = snapshot;
|
|
|
|
Iterator* iter = txn->GetIterator(read_options);
|
|
|
|
ASSERT_OK(iter->status());
|
|
|
|
iter->SeekToFirst();
|
|
|
|
|
|
|
|
// Read all keys via iter and lock them all
|
|
|
|
std::string results[] = {"a", "b", "c", "e", "f", "g", "h"};
|
|
|
|
for (int i = 0; i < 7; i++) {
|
|
|
|
ASSERT_OK(iter->status());
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ(results[i], iter->value().ToString());
|
|
|
|
|
|
|
|
ASSERT_OK(txn->GetForUpdate(read_options, iter->key(), nullptr));
|
Pessimistic Transactions
Summary:
Initial implementation of Pessimistic Transactions. This diff contains the api changes discussed in D38913. This diff is pretty large, so let me know if people would prefer to meet up to discuss it.
MyRocks folks: please take a look at the API in include/rocksdb/utilities/transaction[_db].h and let me know if you have any issues.
Also, you'll notice a couple of TODOs in the implementation of RollbackToSavePoint(). After chatting with Siying, I'm going to send out a separate diff for an alternate implementation of this feature that implements the rollback inside of WriteBatch/WriteBatchWithIndex. We can then decide which route is preferable.
Next, I'm planning on doing some perf testing and then integrating this diff into MongoRocks for further testing.
Test Plan: Unit tests, db_bench parallel testing.
Reviewers: igor, rven, sdong, yhchiang, yoshinorim
Reviewed By: sdong
Subscribers: hermanlee4, maykov, spetrunia, leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D40869
10 years ago
|
|
|
|
|
|
|
iter->Next();
|
|
|
|
}
|
|
|
|
ASSERT_FALSE(iter->Valid());
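
  // Note: in the optimistic model, GetForUpdate() takes no locks. It records
  // each key in the transaction's tracked read set so that Commit() can
  // validate it against writes that happened after the snapshot.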

  iter->Seek("G");
  ASSERT_OK(iter->status());
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("g", iter->value().ToString());

  iter->Prev();
  ASSERT_OK(iter->status());
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("f", iter->value().ToString());

  iter->Seek("D");
  ASSERT_OK(iter->status());
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("e", iter->value().ToString());

  iter->Seek("C");
  ASSERT_OK(iter->status());
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("c", iter->value().ToString());

  iter->Next();
  ASSERT_OK(iter->status());
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("e", iter->value().ToString());

  iter->Seek("");
  ASSERT_OK(iter->status());
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("a", iter->value().ToString());

  iter->Seek("X");
  ASSERT_OK(iter->status());
  ASSERT_FALSE(iter->Valid());

  iter->SeekToLast();
  ASSERT_OK(iter->status());
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("h", iter->value().ToString());

  // Key "C" was modified in the db after txn's snapshot, so commit-time
  // validation fails and the txn will not commit.
  Status s = txn->Commit();
  ASSERT_TRUE(s.IsBusy());

  delete iter;
  delete txn;
}
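
// On conflict, OptimisticTransactionDB reports Status::Busy() from Commit()
// and the caller is expected to retry the whole transaction. A minimal retry
// sketch (illustrative only, not exercised by these tests; `ReapplyWorkload`
// is a hypothetical application callback):
//
//   Status s;
//   do {
//     Transaction* retry_txn = txn_db->BeginTransaction(WriteOptions());
//     ReapplyWorkload(retry_txn);  // re-read inputs and re-issue writes
//     s = retry_txn->Commit();
//     delete retry_txn;
//   } while (s.IsBusy());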

TEST_P(OptimisticTransactionTest, DeleteRangeSupportTest) {
  // `OptimisticTransactionDB` does not allow range deletion in any API.
  ASSERT_TRUE(
      txn_db
          ->DeleteRange(WriteOptions(), txn_db->DefaultColumnFamily(), "a", "b")
          .IsNotSupported());
  WriteBatch wb;
  ASSERT_OK(wb.DeleteRange("a", "b"));
  ASSERT_NOK(txn_db->Write(WriteOptions(), &wb));
}
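
// (Aside, not asserted by the test: range deletions are rejected here likely
// because optimistic conflict checking is tracked per key, and a range
// tombstone has no single key that could be validated at commit time.)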

TEST_P(OptimisticTransactionTest, SavepointTest) {
  WriteOptions write_options;
  ReadOptions read_options, snapshot_read_options;
  OptimisticTransactionOptions txn_options;
  std::string value;

  Transaction* txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  Status s = txn->RollbackToSavePoint();
  ASSERT_TRUE(s.IsNotFound());
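
  // RollbackToSavePoint() returns Status::NotFound() when no savepoint is
  // outstanding. The savepoints set below behave as a stack: SetSavePoint()
  // pushes, RollbackToSavePoint() pops the most recent one.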

  txn->SetSavePoint();  // 1

  ASSERT_OK(txn->RollbackToSavePoint());  // Rollback to beginning of txn
  s = txn->RollbackToSavePoint();
  ASSERT_TRUE(s.IsNotFound());

  ASSERT_OK(txn->Put("B", "b"));

  ASSERT_OK(txn->Commit());

  ASSERT_OK(txn_db->Get(read_options, "B", &value));
  ASSERT_EQ("b", value);

  delete txn;
  txn = txn_db->BeginTransaction(write_options);
  ASSERT_NE(txn, nullptr);

  ASSERT_OK(txn->Put("A", "a"));
  ASSERT_OK(txn->Put("B", "bb"));
  ASSERT_OK(txn->Put("C", "c"));

  txn->SetSavePoint();  // 2

  ASSERT_OK(txn->Delete("B"));
  ASSERT_OK(txn->Put("C", "cc"));
  ASSERT_OK(txn->Put("D", "d"));

  ASSERT_OK(txn->RollbackToSavePoint());  // Rollback to 2
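
  // After rolling back to savepoint 2, the Delete("B"), Put("C", "cc") and
  // Put("D", "d") issued above are undone, while the writes made before the
  // savepoint remain visible to the transaction, as verified below.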

  ASSERT_OK(txn->Get(read_options, "A", &value));
  ASSERT_EQ("a", value);
  ASSERT_OK(txn->Get(read_options, "B", &value));
  ASSERT_EQ("bb", value);
  ASSERT_OK(txn->Get(read_options, "C", &value));
  ASSERT_EQ("c", value);
  s = txn->Get(read_options, "D", &value);
  ASSERT_TRUE(s.IsNotFound());

  ASSERT_OK(txn->Put("A", "a"));
  ASSERT_OK(txn->Put("E", "e"));

  // No savepoints are left, so RollbackToSavePoint() fails; a full Rollback()
  // then discards all of the transaction's writes.
  s = txn->RollbackToSavePoint();
  ASSERT_TRUE(s.IsNotFound());
  ASSERT_OK(txn->Rollback());

  s = txn->Get(read_options, "A", &value);
  ASSERT_TRUE(s.IsNotFound());
  ASSERT_OK(txn->Get(read_options, "B", &value));
  ASSERT_EQ("b", value);
  s = txn->Get(read_options, "D", &value);
  ASSERT_TRUE(s.IsNotFound());
  s = txn->Get(read_options, "D", &value);
  ASSERT_TRUE(s.IsNotFound());
  s = txn->Get(read_options, "E", &value);
  ASSERT_TRUE(s.IsNotFound());

  ASSERT_OK(txn->Put("A", "aa"));
  ASSERT_OK(txn->Put("F", "f"));

  txn->SetSavePoint();  // 3
  txn->SetSavePoint();  // 4

  ASSERT_OK(txn->Put("G", "g"));
  ASSERT_OK(txn->Delete("F"));
  ASSERT_OK(txn->Delete("B"));

  ASSERT_OK(txn->Get(read_options, "A", &value));
  ASSERT_EQ("aa", value);

  s = txn->Get(read_options, "F", &value);
  ASSERT_TRUE(s.IsNotFound());

  s = txn->Get(read_options, "B", &value);
  ASSERT_TRUE(s.IsNotFound());

  ASSERT_OK(txn->RollbackToSavePoint());  // Rollback to 3

  ASSERT_OK(txn->Get(read_options, "F", &value));
  ASSERT_EQ("f", value);

  s = txn->Get(read_options, "G", &value);
  ASSERT_TRUE(s.IsNotFound());

  ASSERT_OK(txn->Commit());

  ASSERT_OK(txn_db->Get(read_options, "F", &value));
  ASSERT_EQ("f", value);

  s = txn_db->Get(read_options, "G", &value);
  ASSERT_TRUE(s.IsNotFound());

  ASSERT_OK(txn_db->Get(read_options, "A", &value));
  ASSERT_EQ("aa", value);

  ASSERT_OK(txn_db->Get(read_options, "B", &value));
  ASSERT_EQ("b", value);

  s = txn_db->Get(read_options, "C", &value);
  ASSERT_TRUE(s.IsNotFound());

  s = txn_db->Get(read_options, "D", &value);
  ASSERT_TRUE(s.IsNotFound());

  s = txn_db->Get(read_options, "E", &value);
  ASSERT_TRUE(s.IsNotFound());

  delete txn;
}

TEST_P(OptimisticTransactionTest, UndoGetForUpdateTest) {
  WriteOptions write_options;
  ReadOptions read_options, snapshot_read_options;
  OptimisticTransactionOptions txn_options;
  std::string value;
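
  // UndoGetForUpdate() stops conflict checking a key only when the
  // GetForUpdate() being undone is the sole reason the key is tracked: a
  // prior Put()/GetForUpdate() on the same key, or a savepoint taken while it
  // was tracked, keeps the key in the read set, as the scenarios below verify.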

  ASSERT_OK(txn_db->Put(write_options, "A", ""));

  Transaction* txn1 = txn_db->BeginTransaction(write_options);
  ASSERT_TRUE(txn1);

  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));

  txn1->UndoGetForUpdate("A");

  Transaction* txn2 = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn2->Put("A", "x"));
  ASSERT_OK(txn2->Commit());
  delete txn2;

  // Verify that txn1 can commit since A isn't conflict checked
  ASSERT_OK(txn1->Commit());
  delete txn1;

  txn1 = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn1->Put("A", "a"));

  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));

  txn1->UndoGetForUpdate("A");

  txn2 = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn2->Put("A", "x"));
  ASSERT_OK(txn2->Commit());
  delete txn2;

  // Verify that txn1 cannot commit since A will still be conflict checked
  Status s = txn1->Commit();
  ASSERT_TRUE(s.IsBusy());
  delete txn1;

  txn1 = txn_db->BeginTransaction(write_options);

  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));
  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));

  txn1->UndoGetForUpdate("A");

  txn2 = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn2->Put("A", "x"));
  ASSERT_OK(txn2->Commit());
  delete txn2;

  // Verify that txn1 cannot commit since A will still be conflict checked
  s = txn1->Commit();
  ASSERT_TRUE(s.IsBusy());
  delete txn1;

  txn1 = txn_db->BeginTransaction(write_options);

  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));
  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));

  txn1->UndoGetForUpdate("A");
  txn1->UndoGetForUpdate("A");

  txn2 = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn2->Put("A", "x"));
  ASSERT_OK(txn2->Commit());
  delete txn2;

  // Verify that txn1 can commit since A isn't conflict checked
  ASSERT_OK(txn1->Commit());
  delete txn1;

  txn1 = txn_db->BeginTransaction(write_options);

  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));

  txn1->SetSavePoint();
  txn1->UndoGetForUpdate("A");

  txn2 = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn2->Put("A", "x"));
  ASSERT_OK(txn2->Commit());
  delete txn2;

  // Verify that txn1 cannot commit since A will still be conflict checked
  s = txn1->Commit();
  ASSERT_TRUE(s.IsBusy());
  delete txn1;

  txn1 = txn_db->BeginTransaction(write_options);

  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));

  txn1->SetSavePoint();
  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));
  txn1->UndoGetForUpdate("A");

  txn2 = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn2->Put("A", "x"));
  ASSERT_OK(txn2->Commit());
  delete txn2;

  // Verify that txn1 cannot commit since A will still be conflict checked
  s = txn1->Commit();
  ASSERT_TRUE(s.IsBusy());
  delete txn1;

  txn1 = txn_db->BeginTransaction(write_options);

  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));

  txn1->SetSavePoint();
  ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value));
  txn1->UndoGetForUpdate("A");

  ASSERT_OK(txn1->RollbackToSavePoint());
  txn1->UndoGetForUpdate("A");

  txn2 = txn_db->BeginTransaction(write_options);
  ASSERT_OK(txn2->Put("A", "x"));
  ASSERT_OK(txn2->Commit());
  delete txn2;

  // Verify that txn1 can commit since A isn't conflict checked
  ASSERT_OK(txn1->Commit());
  delete txn1;
}

namespace {

Status OptimisticTransactionStressTestInserter(OptimisticTransactionDB* db,
                                               const size_t num_transactions,
                                               const size_t num_sets,
                                               const size_t num_keys_per_set) {
  size_t seed = std::hash<std::thread::id>()(std::this_thread::get_id());
  Random64 _rand(seed);
  WriteOptions write_options;
  ReadOptions read_options;
  OptimisticTransactionOptions txn_options;
  txn_options.set_snapshot = true;
|
|
|
|
|
|
|
|
RandomTransactionInserter inserter(&_rand, write_options, read_options,
|
|
|
|
num_keys_per_set,
|
|
|
|
static_cast<uint16_t>(num_sets));
|
|
|
|
|
|
|
|
for (size_t t = 0; t < num_transactions; t++) {
|
|
|
|
bool success = inserter.OptimisticTransactionDBInsert(db, txn_options);
|
|
|
|
if (!success) {
|
|
|
|
// unexpected failure
|
|
|
|
return inserter.GetLastStatus();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
inserter.GetLastStatus().PermitUncheckedError();
|
|
|
|
|
|
|
|
// Make sure at least some of the transactions succeeded. It's ok if
|
|
|
|
// some failed due to write-conflicts.
|
|
|
|
if (inserter.GetFailureCount() > num_transactions / 2) {
|
|
|
|
return Status::TryAgain("Too many transactions failed! " +
|
|
|
|
std::to_string(inserter.GetFailureCount()) + " / " +
|
|
|
|
std::to_string(num_transactions));
|
|
|
|
}
|
|
|
|
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
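// Run several inserter threads concurrently against the same DB, then verify
// that the data across all key sets is still consistent.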
TEST_P(OptimisticTransactionTest, OptimisticTransactionStressTest) {
  const size_t num_threads = 4;
  const size_t num_transactions_per_thread = 10000;
  const size_t num_sets = 3;
  const size_t num_keys_per_set = 100;
  // Setting the key-space to be 100 keys should cause enough write-conflicts
  // to make this test interesting.

  std::vector<port::Thread> threads;

  std::function<void()> call_inserter = [&] {
    ASSERT_OK(OptimisticTransactionStressTestInserter(
        txn_db.get(), num_transactions_per_thread, num_sets, num_keys_per_set));
  };

  // Create N threads that use RandomTransactionInserter to write
  // many transactions.
  for (uint32_t i = 0; i < num_threads; i++) {
    threads.emplace_back(call_inserter);
  }

  // Wait for all threads to run
  for (auto& t : threads) {
    t.join();
  }

  // Verify that data is consistent
  Status s = RandomTransactionInserter::Verify(txn_db.get(), num_sets);
  ASSERT_OK(s);
}

TEST_P(OptimisticTransactionTest, SequenceNumberAfterRecoverTest) {
  WriteOptions write_options;
  OptimisticTransactionOptions transaction_options;

  Transaction* transaction(
      txn_db->BeginTransaction(write_options, transaction_options));
  Status s = transaction->Put("foo", "val");
  ASSERT_OK(s);
  s = transaction->Put("foo2", "val");
  ASSERT_OK(s);
  s = transaction->Put("foo3", "val");
  ASSERT_OK(s);
  s = transaction->Commit();
  ASSERT_OK(s);
  delete transaction;

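  // Reopen the DB; after recovery, new transactions should still be able to
  // commit successfully.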
  Reopen();
  transaction = txn_db->BeginTransaction(write_options, transaction_options);
  s = transaction->Put("bar", "val");
  ASSERT_OK(s);
  s = transaction->Put("bar2", "val");
  ASSERT_OK(s);
  s = transaction->Commit();
  ASSERT_OK(s);

  delete transaction;
}
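// Committing with a request to create a timestamped snapshot but without a
// commit timestamp set should be rejected as InvalidArgument.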
TEST_P(OptimisticTransactionTest, TimestampedSnapshotMissingCommitTs) {
  std::unique_ptr<Transaction> txn(txn_db->BeginTransaction(WriteOptions()));
  ASSERT_OK(txn->Put("a", "v"));
  Status s = txn->CommitAndTryCreateSnapshot();
  ASSERT_TRUE(s.IsInvalidArgument());
}

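// Even with a commit timestamp provided, creating a timestamped snapshot is
// not supported for optimistic transactions.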
TEST_P(OptimisticTransactionTest, TimestampedSnapshotSetCommitTs) {
  std::unique_ptr<Transaction> txn(txn_db->BeginTransaction(WriteOptions()));
  ASSERT_OK(txn->Put("a", "v"));
  std::shared_ptr<const Snapshot> snapshot;
  Status s = txn->CommitAndTryCreateSnapshot(nullptr, /*ts=*/100, &snapshot);
  ASSERT_TRUE(s.IsNotSupported());
}

INSTANTIATE_TEST_CASE_P(
    InstanceOccGroup, OptimisticTransactionTest,
    testing::Values(OccValidationPolicy::kValidateSerial,
                    OccValidationPolicy::kValidateParallel));

TEST(OccLockBucketsTest, CacheAligned) {
  // Typical x86_64 is 40 byte mutex, 64 byte cache line
  if (sizeof(port::Mutex) >= sizeof(CacheAlignedWrapper<port::Mutex>)) {
    ROCKSDB_GTEST_BYPASS("Test requires mutex smaller than cache line");
    return;
  }
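  // Cache-aligned buckets pad each mutex up to a cache line, so they should
  // report noticeably higher memory usage than the unaligned buckets.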
  auto buckets_unaligned = MakeSharedOccLockBuckets(100, false);
  auto buckets_aligned = MakeSharedOccLockBuckets(100, true);
  // Save at least one byte per bucket
  ASSERT_LE(buckets_unaligned->ApproximateMemoryUsage() + 100,
            buckets_aligned->ApproximateMemoryUsage());
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}