// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// WriteBatch holds a collection of updates to apply atomically to a DB.
//
// The updates are applied in the order in which they are added
// to the WriteBatch. For example, the value of "key" will be "v3"
// after the following batch is written:
//
//    batch.Put("key", "v1");
//    batch.Delete("key");
//    batch.Put("key", "v2");
//    batch.Put("key", "v3");
//
// Multiple threads can invoke const methods on a WriteBatch without
// external synchronization, but if any of the threads may call a
// non-const method, all threads accessing the same WriteBatch must use
// external synchronization.
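//
// A minimal end-to-end sketch (illustrative, not part of this header; it
// assumes an open rocksdb::DB* named "db"):
//
//    rocksdb::WriteBatch batch;
//    batch.Put("key1", "value1");
//    batch.Delete("key2");
//    rocksdb::Status s = db->Write(rocksdb::WriteOptions(), &batch);
//    // Either both updates are applied, or neither is.
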
#ifndef STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
#define STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_

#include <atomic>
#include <stack>
#include <string>
#include <stdint.h>

#include "rocksdb/status.h"
#include "rocksdb/write_batch_base.h"

namespace rocksdb {

class Slice;
class ColumnFamilyHandle;
struct SavePoints;
struct SliceParts;

class WriteBatch : public WriteBatchBase {
 public:
  explicit WriteBatch(size_t reserved_bytes = 0);
  ~WriteBatch();

  using WriteBatchBase::Put;
  // Store the mapping "key->value" in the database.
  void Put(ColumnFamilyHandle* column_family, const Slice& key,
           const Slice& value) override;
  void Put(const Slice& key, const Slice& value) override {
    Put(nullptr, key, value);
  }

  // Variant of Put() that gathers output like writev(2). The key and value
  // that will be written to the database are concatenations of arrays of
  // slices.
  void Put(ColumnFamilyHandle* column_family, const SliceParts& key,
           const SliceParts& value) override;
  void Put(const SliceParts& key, const SliceParts& value) override {
    Put(nullptr, key, value);
  }
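
  // A sketch of the SliceParts variant (illustrative; the names and values
  // are hypothetical). The key written is the concatenation "user:1234":
  //
  //    rocksdb::Slice key_parts[2] = {"user:", "1234"};
  //    rocksdb::SliceParts key(key_parts, 2);
  //    rocksdb::Slice value_parts[1] = {"payload"};
  //    rocksdb::SliceParts value(value_parts, 1);
  //    batch.Put(key, value);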

  using WriteBatchBase::Delete;
  // If the database contains a mapping for "key", erase it. Else do nothing.
  void Delete(ColumnFamilyHandle* column_family, const Slice& key) override;
  void Delete(const Slice& key) override { Delete(nullptr, key); }

  // Variant that takes SliceParts.
  void Delete(ColumnFamilyHandle* column_family,
              const SliceParts& key) override;
  void Delete(const SliceParts& key) override { Delete(nullptr, key); }

  using WriteBatchBase::SingleDelete;
  // WriteBatch implementation of DB::SingleDelete(). See db.h.
  void SingleDelete(ColumnFamilyHandle* column_family,
                    const Slice& key) override;
  void SingleDelete(const Slice& key) override { SingleDelete(nullptr, key); }

  // Variant that takes SliceParts.
  void SingleDelete(ColumnFamilyHandle* column_family,
                    const SliceParts& key) override;
  void SingleDelete(const SliceParts& key) override {
    SingleDelete(nullptr, key);
  }
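
  // A usage sketch (illustrative): SingleDelete is only safe for keys that
  // are put once and never overwritten or merged.
  //
  //    batch.Put("one-shot-key", "value");   // written exactly once
  //    batch.SingleDelete("one-shot-key");   // entry and tombstone can be
  //                                          // dropped together in compaction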

  using WriteBatchBase::Merge;
  // Merge "value" with the existing value of "key" in the database.
  // "key->merge(existing, value)"
  void Merge(ColumnFamilyHandle* column_family, const Slice& key,
             const Slice& value) override;
  void Merge(const Slice& key, const Slice& value) override {
    Merge(nullptr, key, value);
  }

  // Variant that takes SliceParts.
  void Merge(ColumnFamilyHandle* column_family, const SliceParts& key,
             const SliceParts& value) override;
  void Merge(const SliceParts& key, const SliceParts& value) override {
    Merge(nullptr, key, value);
  }
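
  // A usage sketch (illustrative): Merge requires a merge_operator to be set
  // in the options of the target column family, e.g. an operator that
  // interprets values as integers to add:
  //
  //    batch.Merge("counter", "1");  // resolved to counter + 1 by the
  //    batch.Merge("counter", "2");  // merge_operator on read or compaction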

  using WriteBatchBase::PutLogData;
  // Append a blob of arbitrary size to the records in this batch. The blob
  // will be stored in the transaction log but not in any other file. In
  // particular, it will not be persisted to the SST files. When iterating
  // over this WriteBatch, WriteBatch::Handler::LogData will be called with
  // the contents of the blob as it is encountered. Blobs, puts, deletes, and
  // merges will be encountered in the same order in which they were inserted.
  // The blob will NOT consume sequence number(s) and will NOT increase the
  // count of the batch.
  //
  // Example application: add timestamps to the transaction log for use in
  // replication.
  void PutLogData(const Slice& blob) override;
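
  // A usage sketch (illustrative; the blob format is hypothetical): tag a
  // batch with replication metadata that reaches the WAL but never the SSTs.
  //
  //    batch.PutLogData("timestamp:1446161234");
  //    batch.Put("key", "value");
  //    // A Handler::LogData override will see the blob when iterating.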

  using WriteBatchBase::Clear;
  // Clear all updates buffered in this batch.
  void Clear() override;

  // Records the state of the batch for future calls to RollbackToSavePoint().
  // May be called multiple times to set multiple save points.
  void SetSavePoint() override;

  // Removes all entries in this batch (Put, Merge, Delete, PutLogData) since
  // the most recent call to SetSavePoint() and removes the most recent save
  // point. If there is no previous call to SetSavePoint(), Status::NotFound()
  // will be returned. Otherwise, returns Status::OK().
  Status RollbackToSavePoint() override;
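
  // A usage sketch (illustrative):
  //
  //    batch.Put("a", "1");
  //    batch.SetSavePoint();
  //    batch.Put("b", "2");
  //    batch.RollbackToSavePoint();  // drops the Put of "b"; "a" remains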

  // Support for iterating over the contents of a batch.
  class Handler {
   public:
    virtual ~Handler();
    // The default implementation will just call Put without the column
    // family for backwards compatibility. If the column family is not
    // default, the function is a no-op.
    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
                         const Slice& value) {
      if (column_family_id == 0) {
        // Put() historically doesn't return status. We didn't want to be
        // backwards incompatible so we didn't change the return status
        // (this is a public API). We do an ordinary Put and return
        // Status::OK().
        Put(key, value);
        return Status::OK();
      }
      return Status::InvalidArgument(
          "non-default column family and PutCF not implemented");
    }
    virtual void Put(const Slice& /*key*/, const Slice& /*value*/) {}

    virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) {
      if (column_family_id == 0) {
        Delete(key);
        return Status::OK();
      }
      return Status::InvalidArgument(
          "non-default column family and DeleteCF not implemented");
    }
    virtual void Delete(const Slice& /*key*/) {}

    virtual Status SingleDeleteCF(uint32_t column_family_id,
                                  const Slice& key) {
      if (column_family_id == 0) {
        SingleDelete(key);
        return Status::OK();
      }
      return Status::InvalidArgument(
          "non-default column family and SingleDeleteCF not implemented");
    }
    virtual void SingleDelete(const Slice& /*key*/) {}

    // Merge and LogData are not pure virtual. Otherwise, we would break
    // existing clients of Handler on a source code level. The default
    // implementation of Merge does nothing.
    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
                           const Slice& value) {
      if (column_family_id == 0) {
        Merge(key, value);
        return Status::OK();
      }
      return Status::InvalidArgument(
          "non-default column family and MergeCF not implemented");
    }
    virtual void Merge(const Slice& /*key*/, const Slice& /*value*/) {}

    // The default implementation of LogData does nothing.
    virtual void LogData(const Slice& blob);

    // Continue is called by WriteBatch::Iterate. If it returns false,
    // iteration is halted. Otherwise, it continues iterating. The default
    // implementation always returns true.
    virtual bool Continue();
  };
  Status Iterate(Handler* handler) const;
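
  // A minimal Handler sketch (illustrative; the class name is hypothetical).
  // It counts Put records in the default column family:
  //
  //    class PutCounter : public rocksdb::WriteBatch::Handler {
  //     public:
  //      void Put(const rocksdb::Slice& key,
  //               const rocksdb::Slice& value) override {
  //        ++puts;
  //      }
  //      int puts = 0;
  //    };
  //
  //    PutCounter counter;
  //    batch.Iterate(&counter);  // invokes counter.Put once per default-CF Put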

  // Retrieve the serialized version of this batch.
  const std::string& Data() const { return rep_; }

  // Retrieve the data size of the batch.
  size_t GetDataSize() const { return rep_.size(); }

  // Returns the number of updates in the batch.
  int Count() const;

  // Returns true if PutCF will be called during Iterate.
  bool HasPut() const;

  // Returns true if DeleteCF will be called during Iterate.
  bool HasDelete() const;

  // Returns true if SingleDeleteCF will be called during Iterate.
  bool HasSingleDelete() const;

  // Returns true if MergeCF will be called during Iterate.
  bool HasMerge() const;

  using WriteBatchBase::GetWriteBatch;
  WriteBatch* GetWriteBatch() override { return this; }

  // Constructor with a serialized string object.
  explicit WriteBatch(const std::string& rep);

  WriteBatch(const WriteBatch& src);
  WriteBatch(WriteBatch&& src);
  WriteBatch& operator=(const WriteBatch& src);
  WriteBatch& operator=(WriteBatch&& src);

 private:
  friend class WriteBatchInternal;
  SavePoints* save_points_;

  // For HasXYZ. Mutable to allow lazy computation of results.
  mutable std::atomic<uint32_t> content_flags_;

  // Performs deferred computation of content_flags if necessary.
  uint32_t ComputeContentFlags() const;

 protected:
  std::string rep_;  // See comment in write_batch.cc for the format of rep_

  // Intentionally copyable.
};

}  // namespace rocksdb

#endif  // STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_