// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "utilities/transactions/optimistic_transaction.h"

#include <cstdint>
#include <string>

#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
#include "rocksdb/status.h"
#include "rocksdb/utilities/optimistic_transaction_db.h"
#include "util/cast_util.h"
#include "util/defer.h"
#include "util/string_util.h"
#include "utilities/transactions/lock/point/point_lock_tracker.h"
#include "utilities/transactions/optimistic_transaction_db_impl.h"
#include "utilities/transactions/transaction_util.h"
namespace ROCKSDB_NAMESPACE {

struct WriteOptions;

OptimisticTransaction::OptimisticTransaction(
    OptimisticTransactionDB* txn_db, const WriteOptions& write_options,
    const OptimisticTransactionOptions& txn_options)
    : TransactionBaseImpl(txn_db->GetBaseDB(), write_options,
                          PointLockTrackerFactory::Get()),
      txn_db_(txn_db) {
  Initialize(txn_options);
}

void OptimisticTransaction::Initialize(
    const OptimisticTransactionOptions& txn_options) {
  if (txn_options.set_snapshot) {
    SetSnapshot();
  }
}
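
// Reinitialize() allows OptimisticTransactionDB::BeginTransaction() to reuse
// an existing transaction object for a new transaction instead of allocating
// a fresh one.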
void OptimisticTransaction::Reinitialize(
    OptimisticTransactionDB* txn_db, const WriteOptions& write_options,
    const OptimisticTransactionOptions& txn_options) {
  TransactionBaseImpl::Reinitialize(txn_db->GetBaseDB(), write_options);
  Initialize(txn_options);
}

OptimisticTransaction::~OptimisticTransaction() {}

void OptimisticTransaction::Clear() { TransactionBaseImpl::Clear(); }

Status OptimisticTransaction::Prepare() {
  return Status::InvalidArgument(
      "Two phase commit not supported for optimistic transactions.");
}
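
// Commit dispatches on the validation policy configured for the
// OptimisticTransactionDB: kValidateSerial runs the conflict check inside the
// DB write path via a write callback, while kValidateParallel pre-validates
// under striped bucket mutexes before issuing the write.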
Status OptimisticTransaction::Commit() {
  auto txn_db_impl = static_cast_with_check<OptimisticTransactionDBImpl,
                                            OptimisticTransactionDB>(txn_db_);
  assert(txn_db_impl);
  switch (txn_db_impl->GetValidatePolicy()) {
    case OccValidationPolicy::kValidateParallel:
      return CommitWithParallelValidate();
    case OccValidationPolicy::kValidateSerial:
      return CommitWithSerialValidate();
    default:
      assert(0);
  }
  // Unreachable, but returning a status silences compiler complaints about a
  // missing return value.
  return Status::OK();
}
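
// Serial validation: the conflict check runs as a callback on the write
// thread inside DBImpl::WriteWithCallback(), so the check and the write are
// atomic with respect to other writers.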
Status OptimisticTransaction::CommitWithSerialValidate() {
  // Set up callback which will call CheckTransactionForConflicts() to
  // check whether this transaction is safe to be committed.
  OptimisticTransactionCallback callback(this);

  DBImpl* db_impl = static_cast_with_check<DBImpl>(db_->GetRootDB());

  Status s = db_impl->WriteWithCallback(
      write_options_, GetWriteBatch()->GetWriteBatch(), &callback);

  if (s.ok()) {
    Clear();
  }

  return s;
}
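
// Parallel validation: collect the shared lock-bucket mutexes covering every
// tracked key, lock them in ascending address order (std::set ordering) to
// avoid deadlock, run the conflict check, and only then perform the write.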
Status OptimisticTransaction::CommitWithParallelValidate() {
  auto txn_db_impl = static_cast_with_check<OptimisticTransactionDBImpl,
                                            OptimisticTransactionDB>(txn_db_);
  assert(txn_db_impl);
  DBImpl* db_impl = static_cast_with_check<DBImpl>(db_->GetRootDB());
  assert(db_impl);
  std::set<port::Mutex*> lk_ptrs;
  std::unique_ptr<LockTracker::ColumnFamilyIterator> cf_it(
      tracked_locks_->GetColumnFamilyIterator());
  assert(cf_it != nullptr);
  while (cf_it->HasNext()) {
    ColumnFamilyId cf = cf_it->Next();

    // To avoid the same key(s) contending across CFs or DBs, seed the
    // hash independently.
    uint64_t seed = reinterpret_cast<uintptr_t>(db_impl) +
                    uint64_t{0xb83c07fbc6ced699} /*random prime*/ * cf;
    std::unique_ptr<LockTracker::KeyIterator> key_it(
        tracked_locks_->GetKeyIterator(cf));
    assert(key_it != nullptr);
    while (key_it->HasNext()) {
      auto lock_bucket_ptr = &txn_db_impl->GetLockBucket(key_it->Next(), seed);
      TEST_SYNC_POINT_CALLBACK(
          "OptimisticTransaction::CommitWithParallelValidate::lock_bucket_ptr",
          lock_bucket_ptr);
      lk_ptrs.insert(lock_bucket_ptr);
    }
  }
  // NOTE: in a single txn, all bucket-locks are taken in ascending order.
  // In this way, txns from different threads all obey this rule so that
  // deadlock can be avoided.
  for (auto v : lk_ptrs) {
    // WART: if an exception is thrown during a Lock(), previously locked
    // mutexes will not be Unlock()ed. But a vector of MutexLock is likely
    // inefficient.
    v->Lock();
  }
  Defer unlocks([&]() {
    for (auto v : lk_ptrs) {
      v->Unlock();
    }
  });
  Status s = TransactionUtil::CheckKeysForConflicts(db_impl, *tracked_locks_,
                                                    true /* cache_only */);
  if (!s.ok()) {
    return s;
  }

  s = db_impl->Write(write_options_, GetWriteBatch()->GetWriteBatch());
  if (s.ok()) {
    Clear();
  }

  return s;
}
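
// An optimistic transaction has not written anything to the DB and holds no
// locks, so rolling back only needs to discard the buffered write batch and
// the tracked keys.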
Status OptimisticTransaction::Rollback() {
  Clear();
  return Status::OK();
}
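
// No lock is actually acquired here; the key is only tracked, together with
// the sequence number to validate against (the snapshot's, if one is set,
// otherwise the DB's latest sequence number).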
// Record this key so that we can check it for conflicts at commit time.
//
// 'exclusive' is unused for OptimisticTransaction.
Status OptimisticTransaction::TryLock(ColumnFamilyHandle* column_family,
                                      const Slice& key, bool read_only,
                                      bool exclusive, const bool do_validate,
                                      const bool assume_tracked) {
  assert(!assume_tracked);  // not supported
  (void)assume_tracked;
  if (!do_validate) {
    return Status::OK();
  }
  uint32_t cfh_id = GetColumnFamilyID(column_family);

  SetSnapshotIfNeeded();

  SequenceNumber seq;
  if (snapshot_) {
    seq = snapshot_->GetSequenceNumber();
  } else {
    seq = db_->GetLatestSequenceNumber();
  }

  std::string key_str = key.ToString();

  TrackKey(cfh_id, key_str, seq, read_only, exclusive);

  // Always return OK. Conflict checking will happen at commit time.
  return Status::OK();
}

// Returns OK if it is safe to commit this transaction. Returns Status::Busy
// if there are read or write conflicts that would prevent us from committing
// OR if we cannot determine whether there would be any such conflicts.
//
// Should only be called on the writer thread in order to avoid any race
// conditions
// in detecting write conflicts.
Status OptimisticTransaction::CheckTransactionForConflicts(DB* db) {
  auto db_impl = static_cast_with_check<DBImpl>(db);

  // Since we are on the write thread and do not want to block other writers,
  // we will do a cache-only conflict check. This can result in TryAgain
  // getting returned if there is not sufficient memtable history to check
  // for conflicts.
  return TransactionUtil::CheckKeysForConflicts(db_impl, *tracked_locks_,
                                                true /* cache_only */);
}
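
// Optimistic transactions cannot be named: names exist to identify prepared
// two-phase-commit transactions during recovery, and optimistic transactions
// do not support two-phase commit.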
Status OptimisticTransaction::SetName(const TransactionName& /* unused */) {
  return Status::InvalidArgument("Optimistic transactions cannot be named.");
}

}  // namespace ROCKSDB_NAMESPACE