Run clang-format on utilities/ (except utilities/transactions/) (#10853)

Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/10853

Test Plan: `make check`

Reviewed By: siying

Differential Revision: D40651315

Pulled By: ltamasi

fbshipit-source-id: 8b270ff4777a06464be86e376c2a680427866a46
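The exact invocation used to produce this formatting pass is not recorded in the commit message. As a rough illustration only, such a pass could be driven by a small helper along the following lines, assuming a clang-format binary on PATH that picks up the repository's .clang-format and that the script is run from the repository root; the directory names mirror the commit title, everything else is hypothetical.

#!/usr/bin/env python3
# Hypothetical sketch only -- not the tooling used for this commit.
# Reformat C++ sources under utilities/ in place, skipping
# utilities/transactions/ as in the commit title.
import subprocess
from pathlib import Path

ROOT = Path("utilities")
SKIP = ROOT / "transactions"
EXTS = {".cc", ".h"}

files = [p for p in ROOT.rglob("*")
         if p.suffix in EXTS and SKIP not in p.parents]
for path in files:
    # clang-format -i rewrites the file using the nearest .clang-format config
    subprocess.run(["clang-format", "-i", str(path)], check=True)
print(f"reformatted {len(files)} files")

The Test Plan above (`make check`) then confirms that the formatting-only change still builds and passes the test suite.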
Branch: main
Author: Levi Tamasi (2 years ago), committed by Facebook GitHub Bot
Parent: 966cd42c7d
Commit: 4d9cb433fa
56 changed files (number of changed lines in parentheses):

  1. utilities/backup/backup_engine.cc (26)
  2. utilities/backup/backup_engine_test.cc (20)
  3. utilities/blob_db/blob_db.h (7)
  4. utilities/blob_db/blob_db_impl.cc (12)
  5. utilities/blob_db/blob_db_impl.h (3)
  6. utilities/blob_db/blob_db_test.cc (3)
  7. utilities/blob_db/blob_dump_tool.cc (4)
  8. utilities/blob_db/blob_file.cc (20)
  9. utilities/cache_dump_load_impl.cc (3)
  10. utilities/cassandra/cassandra_compaction_filter.cc (4)
  11. utilities/cassandra/cassandra_format_test.cc (85)
  12. utilities/cassandra/cassandra_functional_test.cc (87)
  13. utilities/cassandra/cassandra_row_merge_test.cc (49)
  14. utilities/cassandra/cassandra_serialize_test.cc (54)
  15. utilities/cassandra/format.cc (119)
  16. utilities/cassandra/format.h (15)
  17. utilities/cassandra/merge_operator.cc (5)
  18. utilities/cassandra/serialize.h (13)
  19. utilities/cassandra/test_utils.cc (9)
  20. utilities/cassandra/test_utils.h (7)
  21. utilities/checkpoint/checkpoint_impl.h (4)
  22. utilities/checkpoint/checkpoint_test.cc (15)
  23. utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc (3)
  24. utilities/convenience/info_log_finder.cc (1)
  25. utilities/env_mirror_test.cc (6)
  26. utilities/env_timed_test.cc (3)
  27. utilities/fault_injection_env.h (7)
  28. utilities/fault_injection_fs.cc (10)
  29. utilities/fault_injection_fs.h (12)
  30. utilities/leveldb_options/leveldb_options.cc (1)
  31. utilities/merge_operators.h (7)
  32. utilities/merge_operators/bytesxor.cc (14)
  33. utilities/merge_operators/bytesxor.h (7)
  34. utilities/merge_operators/put.cc (3)
  35. utilities/merge_operators/sortlist.cc (2)
  36. utilities/merge_operators/string_append/stringappend.cc (4)
  37. utilities/merge_operators/string_append/stringappend.h (6)
  38. utilities/merge_operators/string_append/stringappend2.h (4)
  39. utilities/merge_operators/string_append/stringappend_test.cc (43)
  40. utilities/options/options_util.cc (4)
  41. utilities/persistent_cache/block_cache_tier.cc (8)
  42. utilities/persistent_cache/block_cache_tier.h (3)
  43. utilities/persistent_cache/block_cache_tier_file.cc (3)
  44. utilities/persistent_cache/block_cache_tier_file.h (9)
  45. utilities/persistent_cache/block_cache_tier_file_buffer.h (2)
  46. utilities/persistent_cache/block_cache_tier_metadata.h (1)
  47. utilities/persistent_cache/hash_table.h (1)
  48. utilities/persistent_cache/hash_table_test.cc (4)
  49. utilities/persistent_cache/persistent_cache_test.cc (19)
  50. utilities/table_properties_collectors/compact_on_deletion_collector.h (4)
  51. utilities/table_properties_collectors/compact_on_deletion_collector_test.cc (29)
  52. utilities/ttl/db_ttl_impl.cc (3)
  53. utilities/ttl/db_ttl_impl.h (6)
  54. utilities/ttl/ttl_test.cc (41)
  55. utilities/write_batch_with_index/write_batch_with_index.cc (4)
  56. utilities/write_batch_with_index/write_batch_with_index_test.cc (26)

utilities/backup/backup_engine.cc

@@ -88,9 +88,7 @@ const std::string kSharedChecksumDirSlash = kSharedChecksumDirName + "/";
 void BackupStatistics::IncrementNumberSuccessBackup() {
   number_success_backup++;
 }
-void BackupStatistics::IncrementNumberFailBackup() {
-  number_fail_backup++;
-}
+void BackupStatistics::IncrementNumberFailBackup() { number_fail_backup++; }
 uint32_t BackupStatistics::GetNumberSuccessBackup() const {
   return number_success_backup;
@@ -399,12 +397,8 @@ class BackupEngineImpl {
       timestamp_ = /* something clearly fabricated */ 1;
     }
   }
-  int64_t GetTimestamp() const {
-    return timestamp_;
-  }
-  uint64_t GetSize() const {
-    return size_;
-  }
+  int64_t GetTimestamp() const { return timestamp_; }
+  uint64_t GetSize() const { return size_; }
   uint32_t GetNumberFiles() const {
     return static_cast<uint32_t>(files_.size());
   }
@@ -510,8 +504,7 @@ class BackupEngineImpl {
     assert(relative_path.size() == 0 || relative_path[0] != '/');
     return options_.backup_dir + "/" + relative_path;
   }
-  inline std::string GetPrivateFileRel(BackupID backup_id,
-                                       bool tmp = false,
+  inline std::string GetPrivateFileRel(BackupID backup_id, bool tmp = false,
                                        const std::string& file = "") const {
     assert(file.size() == 0 || file[0] != '/');
     return kPrivateDirSlash + std::to_string(backup_id) + (tmp ? ".tmp" : "") +
@@ -832,8 +825,8 @@ class BackupEngineImpl {
   std::map<BackupID, std::unique_ptr<BackupMeta>> backups_;
   std::map<BackupID, std::pair<IOStatus, std::unique_ptr<BackupMeta>>>
       corrupt_backups_;
-  std::unordered_map<std::string,
-                     std::shared_ptr<FileInfo>> backuped_file_infos_;
+  std::unordered_map<std::string, std::shared_ptr<FileInfo>>
+      backuped_file_infos_;
   std::atomic<bool> stop_backup_;
   // options data
@@ -1044,8 +1037,8 @@ IOStatus BackupEngineImpl::Initialize() {
     options_.max_valid_backups_to_open = std::numeric_limits<int32_t>::max();
     ROCKS_LOG_WARN(
         options_.info_log,
-        "`max_valid_backups_to_open` is not set to the default value. Ignoring "
-        "its value since BackupEngine is not read-only.");
+        "`max_valid_backups_to_open` is not set to the default value. "
+        "Ignoring its value since BackupEngine is not read-only.");
   }
   // gather the list of directories that we need to create
@@ -1147,8 +1140,7 @@ IOStatus BackupEngineImpl::Initialize() {
   // load the backups if any, until valid_backups_to_open of the latest
   // non-corrupted backups have been successfully opened.
   int valid_backups_to_open = options_.max_valid_backups_to_open;
-  for (auto backup_iter = backups_.rbegin();
-       backup_iter != backups_.rend();
+  for (auto backup_iter = backups_.rbegin(); backup_iter != backups_.rend();
        ++backup_iter) {
     assert(latest_backup_id_ == 0 || latest_backup_id_ > backup_iter->first);
     if (latest_backup_id_ == 0) {

utilities/backup/backup_engine_test.cc

@@ -63,8 +63,11 @@ class DummyDB : public StackableDB {
  public:
   /* implicit */
   DummyDB(const Options& options, const std::string& dbname)
-      : StackableDB(nullptr), options_(options), dbname_(dbname),
-        deletions_enabled_(true), sequence_number_(0) {}
+      : StackableDB(nullptr),
+        options_(options),
+        dbname_(dbname),
+        deletions_enabled_(true),
+        sequence_number_(0) {}
   SequenceNumber GetLatestSequenceNumber() const override {
     return ++sequence_number_;
@@ -608,8 +611,8 @@ class BackupEngineTest : public testing::Test {
     kShareWithChecksum,
   };
-  const std::vector<ShareOption> kAllShareOptions = {
-      kNoShare, kShareNoChecksum, kShareWithChecksum};
+  const std::vector<ShareOption> kAllShareOptions = {kNoShare, kShareNoChecksum,
+                                                     kShareWithChecksum};
   BackupEngineTest() {
     // set up files
@@ -3540,8 +3543,7 @@ TEST_F(BackupEngineTest, Concurrency) {
   std::array<std::thread, 4> restore_verify_threads;
   for (uint32_t i = 0; i < read_threads.size(); ++i) {
     uint32_t sleep_micros = rng() % 100000;
-    read_threads[i] =
-        std::thread([this, i, sleep_micros, &db_opts, &be_opts,
+    read_threads[i] = std::thread([this, i, sleep_micros, &db_opts, &be_opts,
                                    &restore_verify_threads, &limiter] {
       test_db_env_->SleepForMicroseconds(sleep_micros);
@@ -3613,8 +3615,8 @@ TEST_F(BackupEngineTest, Concurrency) {
           // (Ok now) Restore one of the backups, or "latest"
           if (latest) {
-            ASSERT_OK(my_be->RestoreDBFromLatestBackup(restore_db_dir,
-                                                       restore_db_dir));
+            ASSERT_OK(
+                my_be->RestoreDBFromLatestBackup(restore_db_dir, restore_db_dir));
           } else {
             ASSERT_OK(my_be->VerifyBackup(to_restore, true));
             ASSERT_OK(my_be->RestoreDBFromBackup(to_restore, restore_db_dir,
@@ -4196,7 +4198,7 @@ TEST_F(BackupEngineTest, FileTemperatures) {
     }
   }
-}  // anon namespace
+}  // namespace
 }  // namespace ROCKSDB_NAMESPACE

utilities/blob_db/blob_db.h

@@ -155,8 +155,7 @@ class BlobDB : public StackableDB {
   using ROCKSDB_NAMESPACE::StackableDB::MultiGet;
   virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<Slice>& keys,
+      const ReadOptions& options, const std::vector<Slice>& keys,
       std::vector<std::string>* values) override = 0;
   virtual std::vector<Status> MultiGet(
       const ReadOptions& options,
@@ -179,8 +178,8 @@ class BlobDB : public StackableDB {
                         PinnableSlice* /*values*/, Status* statuses,
                         const bool /*sorted_input*/ = false) override {
     for (size_t i = 0; i < num_keys; ++i) {
-      statuses[i] = Status::NotSupported(
-          "Blob DB doesn't support batched MultiGet");
+      statuses[i] =
+          Status::NotSupported("Blob DB doesn't support batched MultiGet");
     }
   }

utilities/blob_db/blob_db_impl.cc

@@ -6,6 +6,7 @@
 #ifndef ROCKSDB_LITE
 #include "utilities/blob_db/blob_db_impl.h"
 #include <algorithm>
 #include <cinttypes>
 #include <iomanip>
@@ -1023,9 +1024,8 @@ Status BlobDBImpl::Put(const WriteOptions& options, const Slice& key,
   return PutUntil(options, key, value, kNoExpiration);
 }
-Status BlobDBImpl::PutWithTTL(const WriteOptions& options,
-                              const Slice& key, const Slice& value,
-                              uint64_t ttl) {
+Status BlobDBImpl::PutWithTTL(const WriteOptions& options, const Slice& key,
+                              const Slice& value, uint64_t ttl) {
   uint64_t now = EpochNow();
   uint64_t expiration = kNoExpiration - now > ttl ? now + ttl : kNoExpiration;
   return PutUntil(options, key, value, expiration);
@@ -1385,9 +1385,9 @@ Status BlobDBImpl::AppendBlob(const std::shared_ptr<BlobFile>& bfile,
   return s;
 }
-std::vector<Status> BlobDBImpl::MultiGet(
-    const ReadOptions& read_options,
-    const std::vector<Slice>& keys, std::vector<std::string>* values) {
+std::vector<Status> BlobDBImpl::MultiGet(const ReadOptions& read_options,
+                                         const std::vector<Slice>& keys,
+                                         std::vector<std::string>* values) {
   StopWatch multiget_sw(clock_, statistics_, BLOB_DB_MULTIGET_MICROS);
   RecordTick(statistics_, BLOB_DB_NUM_MULTIGET);
   // Get a snapshot to avoid blob file get deleted between we

utilities/blob_db/blob_db_impl.h

@@ -124,8 +124,7 @@ class BlobDBImpl : public BlobDB {
   using BlobDB::MultiGet;
   virtual std::vector<Status> MultiGet(
-      const ReadOptions& read_options,
-      const std::vector<Slice>& keys,
+      const ReadOptions& read_options, const std::vector<Slice>& keys,
       std::vector<std::string>* values) override;
   using BlobDB::Write;

utilities/blob_db/blob_db_test.cc

@@ -58,8 +58,7 @@ class BlobDBTest : public testing::Test {
   };
   BlobDBTest()
-      : dbname_(test::PerThreadDBPath("blob_db_test")),
-        blob_db_(nullptr) {
+      : dbname_(test::PerThreadDBPath("blob_db_test")), blob_db_(nullptr) {
     mock_clock_ = std::make_shared<MockSystemClock>(SystemClock::Default());
     mock_env_.reset(new CompositeEnvWrapper(Env::Default(), mock_clock_));
     fault_injection_env_.reset(new FaultInjectionTestEnv(Env::Default()));

utilities/blob_db/blob_dump_tool.cc

@@ -226,7 +226,9 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob,
   DumpSlice(Slice(slice.data(), static_cast<size_t>(key_size)), show_key);
   if (show_blob != DisplayType::kNone) {
     fprintf(stdout, " blob : ");
-    DumpSlice(Slice(slice.data() + static_cast<size_t>(key_size), static_cast<size_t>(value_size)), show_blob);
+    DumpSlice(Slice(slice.data() + static_cast<size_t>(key_size),
+                    static_cast<size_t>(value_size)),
+              show_blob);
   }
   if (show_uncompressed_blob != DisplayType::kNone) {
     fprintf(stdout, " raw blob : ");

utilities/blob_db/blob_file.cc

@@ -7,9 +7,9 @@
 #include "utilities/blob_db/blob_file.h"
 #include <stdio.h>
-#include <cinttypes>
 #include <algorithm>
+#include <cinttypes>
 #include <memory>
 #include "db/column_family.h"
@@ -210,15 +210,13 @@ Status BlobFile::ReadMetadata(const std::shared_ptr<FileSystem>& fs,
     file_size_ = file_size;
   } else {
     ROCKS_LOG_ERROR(info_log_,
-                    "Failed to get size of blob file %" PRIu64
-                    ", status: %s",
+                    "Failed to get size of blob file %" PRIu64 ", status: %s",
                     file_number_, s.ToString().c_str());
     return s;
   }
   if (file_size < BlobLogHeader::kSize) {
-    ROCKS_LOG_ERROR(info_log_,
-                    "Incomplete blob file blob file %" PRIu64
-                    ", size: %" PRIu64,
+    ROCKS_LOG_ERROR(
+        info_log_, "Incomplete blob file blob file %" PRIu64 ", size: %" PRIu64,
                     file_number_, file_size);
     return Status::Corruption("Incomplete blob file header.");
   }
@@ -250,9 +248,8 @@ Status BlobFile::ReadMetadata(const std::shared_ptr<FileSystem>& fs,
                       Env::IO_TOTAL /* rate_limiter_priority */);
   }
   if (!s.ok()) {
-    ROCKS_LOG_ERROR(info_log_,
-                    "Failed to read header of blob file %" PRIu64
-                    ", status: %s",
+    ROCKS_LOG_ERROR(
+        info_log_, "Failed to read header of blob file %" PRIu64 ", status: %s",
                     file_number_, s.ToString().c_str());
     return s;
   }
@@ -294,9 +291,8 @@ Status BlobFile::ReadMetadata(const std::shared_ptr<FileSystem>& fs,
                       nullptr, Env::IO_TOTAL /* rate_limiter_priority */);
   }
   if (!s.ok()) {
-    ROCKS_LOG_ERROR(info_log_,
-                    "Failed to read footer of blob file %" PRIu64
-                    ", status: %s",
+    ROCKS_LOG_ERROR(
+        info_log_, "Failed to read footer of blob file %" PRIu64 ", status: %s",
                     file_number_, s.ToString().c_str());
     return s;
   }

utilities/cache_dump_load_impl.cc

@@ -7,8 +7,6 @@
 #include "table/block_based/block_based_table_reader.h"
 #ifndef ROCKSDB_LITE
-#include "utilities/cache_dump_load_impl.h"
 #include "cache/cache_entry_roles.h"
 #include "file/writable_file_writer.h"
 #include "port/lang.h"
@@ -17,6 +15,7 @@
 #include "rocksdb/utilities/ldb_cmd.h"
 #include "table/format.h"
 #include "util/crc32c.h"
+#include "utilities/cache_dump_load_impl.h"
 namespace ROCKSDB_NAMESPACE {

utilities/cassandra/cassandra_compaction_filter.cc

@@ -40,8 +40,8 @@ CompactionFilter::Decision CassandraCompactionFilter::FilterV2(
     const Slice& existing_value, std::string* new_value,
     std::string* /*skip_until*/) const {
   bool value_changed = false;
-  RowValue row_value = RowValue::Deserialize(
-      existing_value.data(), existing_value.size());
+  RowValue row_value =
+      RowValue::Deserialize(existing_value.data(), existing_value.size());
   RowValue compacted =
       options_.purge_ttl_on_expiration
           ? row_value.RemoveExpiredColumns(&value_changed)

utilities/cassandra/cassandra_format_test.cc

@@ -5,12 +5,12 @@
 #include <cstring>
 #include <memory>
 #include "test_util/testharness.h"
 #include "utilities/cassandra/format.h"
 #include "utilities/cassandra/serialize.h"
 #include "utilities/cassandra/test_utils.h"
 namespace ROCKSDB_NAMESPACE {
 namespace cassandra {
@@ -51,8 +51,8 @@ TEST(ColumnTest, Column) {
   c1->Serialize(&dest);
   EXPECT_EQ(dest.size(), 2 * c.Size());
-  EXPECT_TRUE(
-      std::memcmp(dest.c_str(), dest.c_str() + c.Size(), c.Size()) == 0);
+  EXPECT_TRUE(std::memcmp(dest.c_str(), dest.c_str() + c.Size(), c.Size()) ==
+              0);
   // Verify the ColumnBase::Deserialization.
   saved_dest = dest;
@@ -60,9 +60,8 @@ TEST(ColumnTest, Column) {
       ColumnBase::Deserialize(saved_dest.c_str(), c.Size());
   c2->Serialize(&dest);
   EXPECT_EQ(dest.size(), 3 * c.Size());
-  EXPECT_TRUE(
-      std::memcmp(dest.c_str() + c.Size(), dest.c_str() + c.Size() * 2, c.Size())
-      == 0);
+  EXPECT_TRUE(std::memcmp(dest.c_str() + c.Size(), dest.c_str() + c.Size() * 2,
+                          c.Size()) == 0);
 }
 TEST(ExpiringColumnTest, ExpiringColumn) {
@@ -71,8 +70,8 @@ TEST(ExpiringColumnTest, ExpiringColumn) {
   int8_t index = 3;
   int64_t timestamp = 1494022807044;
   int32_t ttl = 3600;
-  ExpiringColumn c = ExpiringColumn(mask, index, timestamp,
-                                    sizeof(data), data, ttl);
+  ExpiringColumn c =
+      ExpiringColumn(mask, index, timestamp, sizeof(data), data, ttl);
   EXPECT_EQ(c.Index(), index);
   EXPECT_EQ(c.Timestamp(), timestamp);
@@ -107,8 +106,8 @@ TEST(ExpiringColumnTest, ExpiringColumn) {
   c1->Serialize(&dest);
   EXPECT_EQ(dest.size(), 2 * c.Size());
-  EXPECT_TRUE(
-      std::memcmp(dest.c_str(), dest.c_str() + c.Size(), c.Size()) == 0);
+  EXPECT_TRUE(std::memcmp(dest.c_str(), dest.c_str() + c.Size(), c.Size()) ==
+              0);
   // Verify the ColumnBase::Deserialization.
   saved_dest = dest;
@@ -116,20 +115,21 @@ TEST(ExpiringColumnTest, ExpiringColumn) {
       ColumnBase::Deserialize(saved_dest.c_str(), c.Size());
   c2->Serialize(&dest);
   EXPECT_EQ(dest.size(), 3 * c.Size());
-  EXPECT_TRUE(
-      std::memcmp(dest.c_str() + c.Size(), dest.c_str() + c.Size() * 2, c.Size())
-      == 0);
+  EXPECT_TRUE(std::memcmp(dest.c_str() + c.Size(), dest.c_str() + c.Size() * 2,
+                          c.Size()) == 0);
 }
 TEST(TombstoneTest, TombstoneCollectable) {
   int32_t now = (int32_t)time(nullptr);
   int32_t gc_grace_seconds = 16440;
   int32_t time_delta_seconds = 10;
-  EXPECT_TRUE(Tombstone(ColumnTypeMask::DELETION_MASK, 0,
+  EXPECT_TRUE(
+      Tombstone(ColumnTypeMask::DELETION_MASK, 0,
                 now - gc_grace_seconds - time_delta_seconds,
                 ToMicroSeconds(now - gc_grace_seconds - time_delta_seconds))
           .Collectable(gc_grace_seconds));
-  EXPECT_FALSE(Tombstone(ColumnTypeMask::DELETION_MASK, 0,
+  EXPECT_FALSE(
+      Tombstone(ColumnTypeMask::DELETION_MASK, 0,
                 now - gc_grace_seconds + time_delta_seconds,
                 ToMicroSeconds(now - gc_grace_seconds + time_delta_seconds))
          .Collectable(gc_grace_seconds));
@@ -140,8 +140,8 @@ TEST(TombstoneTest, Tombstone) {
   int8_t index = 2;
   int32_t local_deletion_time = 1494022807;
   int64_t marked_for_delete_at = 1494022807044;
-  Tombstone c = Tombstone(mask, index, local_deletion_time,
-                          marked_for_delete_at);
+  Tombstone c =
+      Tombstone(mask, index, local_deletion_time, marked_for_delete_at);
   EXPECT_EQ(c.Index(), index);
   EXPECT_EQ(c.Timestamp(), marked_for_delete_at);
@@ -170,17 +170,16 @@ TEST(TombstoneTest, Tombstone) {
   c1->Serialize(&dest);
   EXPECT_EQ(dest.size(), 2 * c.Size());
-  EXPECT_TRUE(
-      std::memcmp(dest.c_str(), dest.c_str() + c.Size(), c.Size()) == 0);
+  EXPECT_TRUE(std::memcmp(dest.c_str(), dest.c_str() + c.Size(), c.Size()) ==
+              0);
   // Verify the ColumnBase::Deserialization.
   std::shared_ptr<ColumnBase> c2 =
       ColumnBase::Deserialize(dest.c_str(), c.Size());
   c2->Serialize(&dest);
   EXPECT_EQ(dest.size(), 3 * c.Size());
-  EXPECT_TRUE(
-      std::memcmp(dest.c_str() + c.Size(), dest.c_str() + c.Size() * 2, c.Size())
-      == 0);
+  EXPECT_TRUE(std::memcmp(dest.c_str() + c.Size(), dest.c_str() + c.Size() * 2,
+                          c.Size()) == 0);
 }
 class RowValueTest : public testing::Test {};
@@ -213,8 +212,8 @@ TEST(RowValueTest, RowTombstone) {
   r1.Serialize(&dest);
   EXPECT_EQ(dest.size(), 2 * r.Size());
-  EXPECT_TRUE(
-      std::memcmp(dest.c_str(), dest.c_str() + r.Size(), r.Size()) == 0);
+  EXPECT_TRUE(std::memcmp(dest.c_str(), dest.c_str() + r.Size(), r.Size()) ==
+              0);
 }
 TEST(RowValueTest, RowWithColumns) {
@@ -227,8 +226,8 @@ TEST(RowValueTest, RowWithColumns) {
   int64_t e_timestamp = 1494022807044;
   int32_t e_ttl = 3600;
   columns.push_back(std::shared_ptr<ExpiringColumn>(
-      new ExpiringColumn(ColumnTypeMask::EXPIRATION_MASK, e_index,
-                         e_timestamp, sizeof(e_data), e_data, e_ttl)));
+      new ExpiringColumn(ColumnTypeMask::EXPIRATION_MASK, e_index, e_timestamp,
+                         sizeof(e_data), e_data, e_ttl)));
   columns_data_size += columns[0]->Size();
   char c_data[4] = {'d', 'a', 't', 'a'};
@@ -242,8 +241,8 @@ TEST(RowValueTest, RowWithColumns) {
   int32_t t_local_deletion_time = 1494022801;
   int64_t t_marked_for_delete_at = 1494022807043;
   columns.push_back(std::shared_ptr<Tombstone>(
-      new Tombstone(ColumnTypeMask::DELETION_MASK,
-                    t_index, t_local_deletion_time, t_marked_for_delete_at)));
+      new Tombstone(ColumnTypeMask::DELETION_MASK, t_index,
+                    t_local_deletion_time, t_marked_for_delete_at)));
   columns_data_size += columns[2]->Size();
   RowValue r = RowValue(std::move(columns), last_modified_time);
@@ -311,19 +310,20 @@ TEST(RowValueTest, RowWithColumns) {
   r1.Serialize(&dest);
   EXPECT_EQ(dest.size(), 2 * r.Size());
-  EXPECT_TRUE(
-      std::memcmp(dest.c_str(), dest.c_str() + r.Size(), r.Size()) == 0);
+  EXPECT_TRUE(std::memcmp(dest.c_str(), dest.c_str() + r.Size(), r.Size()) ==
+              0);
 }
 TEST(RowValueTest, PurgeTtlShouldRemvoeAllColumnsExpired) {
   int64_t now = time(nullptr);
-  auto row_value = CreateTestRowValue({
-      CreateTestColumnSpec(kColumn, 0, ToMicroSeconds(now)),
-      CreateTestColumnSpec(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 10)), //expired
-      CreateTestColumnSpec(kExpiringColumn, 2, ToMicroSeconds(now)), // not expired
-      CreateTestColumnSpec(kTombstone, 3, ToMicroSeconds(now))
-  });
+  auto row_value = CreateTestRowValue(
+      {CreateTestColumnSpec(kColumn, 0, ToMicroSeconds(now)),
+       CreateTestColumnSpec(kExpiringColumn, 1,
+                            ToMicroSeconds(now - kTtl - 10)),  // expired
+       CreateTestColumnSpec(kExpiringColumn, 2,
+                            ToMicroSeconds(now)),  // not expired
+       CreateTestColumnSpec(kTombstone, 3, ToMicroSeconds(now))});
   bool changed = false;
   auto purged = row_value.RemoveExpiredColumns(&changed);
@@ -343,12 +343,13 @@ TEST(RowValueTest, PurgeTtlShouldRemvoeAllColumnsExpired) {
 TEST(RowValueTest, ExpireTtlShouldConvertExpiredColumnsToTombstones) {
   int64_t now = time(nullptr);
-  auto row_value = CreateTestRowValue({
-      CreateTestColumnSpec(kColumn, 0, ToMicroSeconds(now)),
-      CreateTestColumnSpec(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 10)), //expired
-      CreateTestColumnSpec(kExpiringColumn, 2, ToMicroSeconds(now)), // not expired
-      CreateTestColumnSpec(kTombstone, 3, ToMicroSeconds(now))
-  });
+  auto row_value = CreateTestRowValue(
+      {CreateTestColumnSpec(kColumn, 0, ToMicroSeconds(now)),
+       CreateTestColumnSpec(kExpiringColumn, 1,
+                            ToMicroSeconds(now - kTtl - 10)),  // expired
+       CreateTestColumnSpec(kExpiringColumn, 2,
+                            ToMicroSeconds(now)),  // not expired
+       CreateTestColumnSpec(kTombstone, 3, ToMicroSeconds(now))});
   bool changed = false;
   auto compacted = row_value.ConvertExpiredColumnsToTombstones(&changed);

utilities/cassandra/cassandra_functional_test.cc

@@ -18,7 +18,6 @@
 #include "utilities/cassandra/test_utils.h"
 #include "utilities/merge_operators.h"
 namespace ROCKSDB_NAMESPACE {
 namespace cassandra {
@@ -77,9 +76,8 @@ class CassandraStore {
     auto s = db_->Get(get_option_, key, &result);
     if (s.ok()) {
-      return std::make_tuple(true,
-                             RowValue::Deserialize(result.data(),
-                                                   result.size()));
+      return std::make_tuple(
+          true, RowValue::Deserialize(result.data(), result.size()));
     }
     if (!s.IsNotFound()) {
@@ -117,7 +115,6 @@ private:
   int32_t gc_grace_period_in_seconds_;
 };
 // The class for unit-testing
 class CassandraFunctionalTest : public testing::Test {
  public:
@@ -130,7 +127,8 @@ public:
     DB* db;
     Options options;
     options.create_if_missing = true;
-    options.merge_operator.reset(new CassandraValueMergeOperator(gc_grace_period_in_seconds_));
+    options.merge_operator.reset(
+        new CassandraValueMergeOperator(gc_grace_period_in_seconds_));
     auto* cf_factory = new TestCompactionFilterFactory(
         purge_ttl_on_expiration_, gc_grace_period_in_seconds_);
     options.compaction_filter_factory.reset(cf_factory);
@@ -148,18 +146,24 @@ TEST_F(CassandraFunctionalTest, SimpleMergeTest) {
   CassandraStore store(OpenDb());
   int64_t now = time(nullptr);
-  store.Append("k1", CreateTestRowValue({
+  store.Append(
+      "k1",
+      CreateTestRowValue({
          CreateTestColumnSpec(kTombstone, 0, ToMicroSeconds(now + 5)),
          CreateTestColumnSpec(kColumn, 1, ToMicroSeconds(now + 8)),
         CreateTestColumnSpec(kExpiringColumn, 2, ToMicroSeconds(now + 5)),
      }));
-  store.Append("k1",CreateTestRowValue({
+  store.Append(
+      "k1",
+      CreateTestRowValue({
         CreateTestColumnSpec(kColumn, 0, ToMicroSeconds(now + 2)),
         CreateTestColumnSpec(kExpiringColumn, 1, ToMicroSeconds(now + 5)),
        CreateTestColumnSpec(kTombstone, 2, ToMicroSeconds(now + 7)),
         CreateTestColumnSpec(kExpiringColumn, 7, ToMicroSeconds(now + 17)),
      }));
-  store.Append("k1", CreateTestRowValue({
+  store.Append(
+      "k1",
+      CreateTestRowValue({
         CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now + 6)),
         CreateTestColumnSpec(kTombstone, 1, ToMicroSeconds(now + 5)),
         CreateTestColumnSpec(kColumn, 2, ToMicroSeconds(now + 4)),
@@ -202,10 +206,12 @@ TEST_F(CassandraFunctionalTest,
   ASSERT_OK(store.Flush());
-  store.Append("k1",CreateTestRowValue({
-      CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), //expired
-      CreateTestColumnSpec(kColumn, 2, ToMicroSeconds(now))
-  }));
+  store.Append(
+      "k1",
+      CreateTestRowValue(
+          {CreateTestColumnSpec(kExpiringColumn, 0,
+                                ToMicroSeconds(now - kTtl - 10)),  // expired
+           CreateTestColumnSpec(kColumn, 2, ToMicroSeconds(now))}));
   ASSERT_OK(store.Flush());
   ASSERT_OK(store.Compact());
@@ -224,25 +230,29 @@ TEST_F(CassandraFunctionalTest,
             ToMicroSeconds(now));
 }
 TEST_F(CassandraFunctionalTest,
        CompactionShouldPurgeExpiredColumnsIfPurgeTtlIsOn) {
   purge_ttl_on_expiration_ = true;
   CassandraStore store(OpenDb());
   int64_t now = time(nullptr);
-  store.Append("k1", CreateTestRowValue({
-      CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 20)), //expired
-      CreateTestColumnSpec(kExpiringColumn, 1, ToMicroSeconds(now)), // not expired
-      CreateTestColumnSpec(kTombstone, 3, ToMicroSeconds(now))
-  }));
+  store.Append(
+      "k1",
+      CreateTestRowValue(
+          {CreateTestColumnSpec(kExpiringColumn, 0,
+                                ToMicroSeconds(now - kTtl - 20)),  // expired
+           CreateTestColumnSpec(kExpiringColumn, 1,
+                                ToMicroSeconds(now)),  // not expired
+           CreateTestColumnSpec(kTombstone, 3, ToMicroSeconds(now))}));
   ASSERT_OK(store.Flush());
-  store.Append("k1",CreateTestRowValue({
-      CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), //expired
-      CreateTestColumnSpec(kColumn, 2, ToMicroSeconds(now))
-  }));
+  store.Append(
+      "k1",
+      CreateTestRowValue(
+          {CreateTestColumnSpec(kExpiringColumn, 0,
+                                ToMicroSeconds(now - kTtl - 10)),  // expired
+           CreateTestColumnSpec(kColumn, 2, ToMicroSeconds(now))}));
   ASSERT_OK(store.Flush());
   ASSERT_OK(store.Compact());
@@ -266,14 +276,17 @@ TEST_F(CassandraFunctionalTest,
   int64_t now = time(nullptr);
   store.Append("k1", CreateTestRowValue({
-      CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 20)),
-      CreateTestColumnSpec(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 20)),
+                         CreateTestColumnSpec(kExpiringColumn, 0,
+                                              ToMicroSeconds(now - kTtl - 20)),
+                         CreateTestColumnSpec(kExpiringColumn, 1,
+                                              ToMicroSeconds(now - kTtl - 20)),
                      }));
   ASSERT_OK(store.Flush());
   store.Append("k1", CreateTestRowValue({
-      CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)),
+                         CreateTestColumnSpec(kExpiringColumn, 0,
+                                              ToMicroSeconds(now - kTtl - 10)),
                      }));
   ASSERT_OK(store.Flush());
@@ -287,14 +300,15 @@ TEST_F(CassandraFunctionalTest,
   CassandraStore store(OpenDb());
   int64_t now = time(nullptr);
-  store.Append("k1", CreateTestRowValue({
-      CreateTestColumnSpec(kTombstone, 0, ToMicroSeconds(now - gc_grace_period_in_seconds_ - 1)),
-      CreateTestColumnSpec(kColumn, 1, ToMicroSeconds(now))
-  }));
+  store.Append("k1",
+               CreateTestRowValue(
+                   {CreateTestColumnSpec(
+                        kTombstone, 0,
+                        ToMicroSeconds(now - gc_grace_period_in_seconds_ - 1)),
+                    CreateTestColumnSpec(kColumn, 1, ToMicroSeconds(now))}));
-  store.Append("k2", CreateTestRowValue({
-      CreateTestColumnSpec(kColumn, 0, ToMicroSeconds(now))
-  }));
+  store.Append("k2", CreateTestRowValue({CreateTestColumnSpec(
+                         kColumn, 0, ToMicroSeconds(now))}));
   ASSERT_OK(store.Flush());
@@ -317,8 +331,11 @@ TEST_F(CassandraFunctionalTest, CompactionShouldRemoveTombstoneFromPut) {
   CassandraStore store(OpenDb());
   int64_t now = time(nullptr);
-  store.Put("k1", CreateTestRowValue({
-      CreateTestColumnSpec(kTombstone, 0, ToMicroSeconds(now - gc_grace_period_in_seconds_ - 1)),
+  store.Put("k1",
+            CreateTestRowValue({
+                CreateTestColumnSpec(
+                    kTombstone, 0,
+                    ToMicroSeconds(now - gc_grace_period_in_seconds_ - 1)),
             }));
   ASSERT_OK(store.Flush());

utilities/cassandra/cassandra_row_merge_test.cc

@@ -4,6 +4,7 @@
 // (found in the LICENSE.Apache file in the root directory).
 #include <memory>
 #include "test_util/testharness.h"
 #include "utilities/cassandra/format.h"
 #include "utilities/cassandra/test_utils.h"
@@ -15,31 +16,25 @@ class RowValueMergeTest : public testing::Test {};
 TEST(RowValueMergeTest, Merge) {
   std::vector<RowValue> row_values;
-  row_values.push_back(
-      CreateTestRowValue({
+  row_values.push_back(CreateTestRowValue({
       CreateTestColumnSpec(kTombstone, 0, 5),
       CreateTestColumnSpec(kColumn, 1, 8),
       CreateTestColumnSpec(kExpiringColumn, 2, 5),
-      })
-  );
+  }));
-  row_values.push_back(
-      CreateTestRowValue({
+  row_values.push_back(CreateTestRowValue({
      CreateTestColumnSpec(kColumn, 0, 2),
      CreateTestColumnSpec(kExpiringColumn, 1, 5),
      CreateTestColumnSpec(kTombstone, 2, 7),
      CreateTestColumnSpec(kExpiringColumn, 7, 17),
-      })
-  );
+  }));
-  row_values.push_back(
-      CreateTestRowValue({
+  row_values.push_back(CreateTestRowValue({
      CreateTestColumnSpec(kExpiringColumn, 0, 6),
      CreateTestColumnSpec(kTombstone, 1, 5),
      CreateTestColumnSpec(kColumn, 2, 4),
      CreateTestColumnSpec(kTombstone, 11, 11),
-      })
-  );
+  }));
   RowValue merged = RowValue::Merge(std::move(row_values));
   EXPECT_FALSE(merged.IsTombstone());
@@ -55,33 +50,25 @@ TEST(RowValueMergeTest, MergeWithRowTombstone) {
   std::vector<RowValue> row_values;
   // A row tombstone.
-  row_values.push_back(
-      CreateRowTombstone(11)
-  );
+  row_values.push_back(CreateRowTombstone(11));
   // This row's timestamp is smaller than tombstone.
-  row_values.push_back(
-      CreateTestRowValue({
+  row_values.push_back(CreateTestRowValue({
      CreateTestColumnSpec(kColumn, 0, 5),
      CreateTestColumnSpec(kColumn, 1, 6),
-      })
-  );
+  }));
   // Some of the column's row is smaller, some is larger.
-  row_values.push_back(
-      CreateTestRowValue({
+  row_values.push_back(CreateTestRowValue({
      CreateTestColumnSpec(kColumn, 2, 10),
      CreateTestColumnSpec(kColumn, 3, 12),
-      })
-  );
+  }));
   // All of the column's rows are larger than tombstone.
-  row_values.push_back(
-      CreateTestRowValue({
+  row_values.push_back(CreateTestRowValue({
      CreateTestColumnSpec(kColumn, 4, 13),
      CreateTestColumnSpec(kColumn, 5, 14),
-      })
-  );
+  }));
   RowValue merged = RowValue::Merge(std::move(row_values));
   EXPECT_FALSE(merged.IsTombstone());
@@ -92,13 +79,9 @@ TEST(RowValueMergeTest, MergeWithRowTombstone) {
   // If the tombstone's timestamp is the latest, then it returns a
   // row tombstone.
-  row_values.push_back(
-      CreateRowTombstone(15)
-  );
+  row_values.push_back(CreateRowTombstone(15));
-  row_values.push_back(
-      CreateRowTombstone(17)
-  );
+  row_values.push_back(CreateRowTombstone(17));
   merged = RowValue::Merge(std::move(row_values));
   EXPECT_TRUE(merged.IsTombstone());

utilities/cassandra/cassandra_serialize_test.cc

@@ -6,45 +6,38 @@
 #include "test_util/testharness.h"
 #include "utilities/cassandra/serialize.h"
 namespace ROCKSDB_NAMESPACE {
 namespace cassandra {
 TEST(SerializeTest, SerializeI64) {
   std::string dest;
   Serialize<int64_t>(0, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'}),
+  EXPECT_EQ(std::string({'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+                         '\x00'}),
            dest);
   dest.clear();
   Serialize<int64_t>(1, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x01'}),
+  EXPECT_EQ(std::string({'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+                         '\x01'}),
            dest);
   dest.clear();
   Serialize<int64_t>(-1, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\xff', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff'}),
+  EXPECT_EQ(std::string({'\xff', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff',
+                         '\xff'}),
            dest);
   dest.clear();
   Serialize<int64_t>(9223372036854775807, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x7f', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff'}),
+  EXPECT_EQ(std::string({'\x7f', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff',
+                         '\xff'}),
            dest);
   dest.clear();
   Serialize<int64_t>(-9223372036854775807, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x80', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x01'}),
+  EXPECT_EQ(std::string({'\x80', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+                         '\x01'}),
            dest);
 }
@@ -74,39 +67,23 @@ TEST(SerializeTest, DeserializeI64) {
 TEST(SerializeTest, SerializeI32) {
   std::string dest;
   Serialize<int32_t>(0, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x00', '\x00', '\x00', '\x00'}),
-      dest);
+  EXPECT_EQ(std::string({'\x00', '\x00', '\x00', '\x00'}), dest);
   dest.clear();
   Serialize<int32_t>(1, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x00', '\x00', '\x00', '\x01'}),
-      dest);
+  EXPECT_EQ(std::string({'\x00', '\x00', '\x00', '\x01'}), dest);
   dest.clear();
   Serialize<int32_t>(-1, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\xff', '\xff', '\xff', '\xff'}),
-      dest);
+  EXPECT_EQ(std::string({'\xff', '\xff', '\xff', '\xff'}), dest);
   dest.clear();
   Serialize<int32_t>(2147483647, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x7f', '\xff', '\xff', '\xff'}),
-      dest);
+  EXPECT_EQ(std::string({'\x7f', '\xff', '\xff', '\xff'}), dest);
   dest.clear();
   Serialize<int32_t>(-2147483648LL, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x80', '\x00', '\x00', '\x00'}),
-      dest);
+  EXPECT_EQ(std::string({'\x80', '\x00', '\x00', '\x00'}), dest);
 }
 TEST(SerializeTest, DeserializeI32) {
@@ -141,7 +118,6 @@ TEST(SerializeTest, SerializeI8) {
   Serialize<int8_t>(1, &dest);
   EXPECT_EQ(std::string({'\x01'}), dest);
   dest.clear();
   Serialize<int8_t>(-1, &dest);
   EXPECT_EQ(std::string({'\xff'}), dest);

utilities/cassandra/format.cc

@@ -14,26 +14,18 @@
 namespace ROCKSDB_NAMESPACE {
 namespace cassandra {
 namespace {
-const int32_t kDefaultLocalDeletionTime =
-    std::numeric_limits<int32_t>::max();
-const int64_t kDefaultMarkedForDeleteAt =
-    std::numeric_limits<int64_t>::min();
-}
+const int32_t kDefaultLocalDeletionTime = std::numeric_limits<int32_t>::max();
+const int64_t kDefaultMarkedForDeleteAt = std::numeric_limits<int64_t>::min();
+}  // namespace
 ColumnBase::ColumnBase(int8_t mask, int8_t index)
     : mask_(mask), index_(index) {}
-std::size_t ColumnBase::Size() const {
-  return sizeof(mask_) + sizeof(index_);
-}
+std::size_t ColumnBase::Size() const { return sizeof(mask_) + sizeof(index_); }
-int8_t ColumnBase::Mask() const {
-  return mask_;
-}
+int8_t ColumnBase::Mask() const { return mask_; }
-int8_t ColumnBase::Index() const {
-  return index_;
-}
+int8_t ColumnBase::Index() const { return index_; }
 void ColumnBase::Serialize(std::string* dest) const {
   ROCKSDB_NAMESPACE::cassandra::Serialize<int8_t>(mask_, dest);
@@ -52,22 +44,18 @@ std::shared_ptr<ColumnBase> ColumnBase::Deserialize(const char* src,
   }
 }
-Column::Column(
-    int8_t mask,
-    int8_t index,
-    int64_t timestamp,
-    int32_t value_size,
-    const char* value
-) : ColumnBase(mask, index), timestamp_(timestamp),
-    value_size_(value_size), value_(value) {}
+Column::Column(int8_t mask, int8_t index, int64_t timestamp, int32_t value_size,
+               const char* value)
+    : ColumnBase(mask, index),
+      timestamp_(timestamp),
+      value_size_(value_size),
+      value_(value) {}
-int64_t Column::Timestamp() const {
-  return timestamp_;
-}
+int64_t Column::Timestamp() const { return timestamp_; }
 std::size_t Column::Size() const {
-  return ColumnBase::Size() + sizeof(timestamp_) + sizeof(value_size_)
-    + value_size_;
+  return ColumnBase::Size() + sizeof(timestamp_) + sizeof(value_size_) +
+         value_size_;
 }
 void Column::Serialize(std::string* dest) const {
@@ -89,19 +77,14 @@ std::shared_ptr<Column> Column::Deserialize(const char *src,
   int32_t value_size =
       ROCKSDB_NAMESPACE::cassandra::Deserialize<int32_t>(src, offset);
   offset += sizeof(value_size);
-  return std::make_shared<Column>(
-      mask, index, timestamp, value_size, src + offset);
+  return std::make_shared<Column>(mask, index, timestamp, value_size,
+                                  src + offset);
 }
-ExpiringColumn::ExpiringColumn(
-    int8_t mask,
-    int8_t index,
-    int64_t timestamp,
-    int32_t value_size,
-    const char* value,
-    int32_t ttl
-) : Column(mask, index, timestamp, value_size, value),
-    ttl_(ttl) {}
+ExpiringColumn::ExpiringColumn(int8_t mask, int8_t index, int64_t timestamp,
+                               int32_t value_size, const char* value,
+                               int32_t ttl)
+    : Column(mask, index, timestamp, value_size, value), ttl_(ttl) {}
 std::size_t ExpiringColumn::Size() const {
   return Column::Size() + sizeof(ttl_);
@@ -112,8 +95,10 @@ void ExpiringColumn::Serialize(std::string* dest) const {
   ROCKSDB_NAMESPACE::cassandra::Serialize<int32_t>(ttl_, dest);
 }
-std::chrono::time_point<std::chrono::system_clock> ExpiringColumn::TimePoint() const {
-  return std::chrono::time_point<std::chrono::system_clock>(std::chrono::microseconds(Timestamp()));
+std::chrono::time_point<std::chrono::system_clock> ExpiringColumn::TimePoint()
+    const {
+  return std::chrono::time_point<std::chrono::system_clock>(
+      std::chrono::microseconds(Timestamp()));
 }
 std::chrono::seconds ExpiringColumn::Ttl() const {
@@ -131,15 +116,12 @@ std::shared_ptr<Tombstone> ExpiringColumn::ToTombstone() const {
   int64_t marked_for_delete_at =
       std::chrono::duration_cast<std::chrono::microseconds>(expired_at).count();
   return std::make_shared<Tombstone>(
-      static_cast<int8_t>(ColumnTypeMask::DELETION_MASK),
-      Index(),
-      local_deletion_time,
-      marked_for_delete_at);
+      static_cast<int8_t>(ColumnTypeMask::DELETION_MASK), Index(),
+      local_deletion_time, marked_for_delete_at);
 }
 std::shared_ptr<ExpiringColumn> ExpiringColumn::Deserialize(
-    const char *src,
-    std::size_t offset) {
+    const char* src, std::size_t offset) {
   int8_t mask = ROCKSDB_NAMESPACE::cassandra::Deserialize<int8_t>(src, offset);
   offset += sizeof(mask);
   int8_t index = ROCKSDB_NAMESPACE::cassandra::Deserialize<int8_t>(src, offset);
@@ -153,25 +135,21 @@ std::shared_ptr<ExpiringColumn> ExpiringColumn::Deserialize(
   const char* value = src + offset;
   offset += value_size;
   int32_t ttl = ROCKSDB_NAMESPACE::cassandra::Deserialize<int32_t>(src, offset);
-  return std::make_shared<ExpiringColumn>(
-      mask, index, timestamp, value_size, value, ttl);
+  return std::make_shared<ExpiringColumn>(mask, index, timestamp, value_size,
+                                          value, ttl);
 }
-Tombstone::Tombstone(
-    int8_t mask,
-    int8_t index,
-    int32_t local_deletion_time,
-    int64_t marked_for_delete_at
-) : ColumnBase(mask, index), local_deletion_time_(local_deletion_time),
+Tombstone::Tombstone(int8_t mask, int8_t index, int32_t local_deletion_time,
+                     int64_t marked_for_delete_at)
+    : ColumnBase(mask, index),
+      local_deletion_time_(local_deletion_time),
       marked_for_delete_at_(marked_for_delete_at) {}
-int64_t Tombstone::Timestamp() const {
-  return marked_for_delete_at_;
-}
+int64_t Tombstone::Timestamp() const { return marked_for_delete_at_; }
 std::size_t Tombstone::Size() const {
-  return ColumnBase::Size() + sizeof(local_deletion_time_)
-    + sizeof(marked_for_delete_at_);
+  return ColumnBase::Size() + sizeof(local_deletion_time_) +
+         sizeof(marked_for_delete_at_);
 }
 void Tombstone::Serialize(std::string* dest) const {
@@ -198,24 +176,25 @@ std::shared_ptr<Tombstone> Tombstone::Deserialize(const char *src,
   offset += sizeof(int32_t);
   int64_t marked_for_delete_at =
      ROCKSDB_NAMESPACE::cassandra::Deserialize<int64_t>(src, offset);
-  return std::make_shared<Tombstone>(
-      mask, index, local_deletion_time, marked_for_delete_at);
+  return std::make_shared<Tombstone>(mask, index, local_deletion_time,
+                                     marked_for_delete_at);
 }
 RowValue::RowValue(int32_t local_deletion_time, int64_t marked_for_delete_at)
     : local_deletion_time_(local_deletion_time),
-      marked_for_delete_at_(marked_for_delete_at), columns_(),
+      marked_for_delete_at_(marked_for_delete_at),
+      columns_(),
       last_modified_time_(0) {}
-RowValue::RowValue(Columns columns,
-                   int64_t last_modified_time)
+RowValue::RowValue(Columns columns, int64_t last_modified_time)
     : local_deletion_time_(kDefaultLocalDeletionTime),
      marked_for_delete_at_(kDefaultMarkedForDeleteAt),
-      columns_(std::move(columns)), last_modified_time_(last_modified_time) {}
+      columns_(std::move(columns)),
+      last_modified_time_(last_modified_time) {}
 std::size_t RowValue::Size() const {
-  std::size_t size = sizeof(local_deletion_time_)
-    + sizeof(marked_for_delete_at_);
+  std::size_t size =
+      sizeof(local_deletion_time_) + sizeof(marked_for_delete_at_);
   for (const auto& column : columns_) {
     size += column->Size();
   }
@@ -298,9 +277,7 @@ RowValue RowValue::RemoveTombstones(int32_t gc_grace_period) const {
   return RowValue(std::move(new_columns), last_modified_time_);
 }
-bool RowValue::Empty() const {
-  return columns_.empty();
-}
+bool RowValue::Empty() const { return columns_.empty(); }
 RowValue RowValue::Deserialize(const char* src, std::size_t size) {
   std::size_t offset = 0;
@@ -386,5 +363,5 @@ RowValue RowValue::Merge(std::vector<RowValue>&& values) {
   return RowValue(std::move(columns), last_modified_time);
 }
-} // namepsace cassandrda
+}  // namespace cassandra
 }  // namespace ROCKSDB_NAMESPACE

utilities/cassandra/format.h

@@ -58,6 +58,7 @@
 #include <chrono>
 #include <memory>
 #include <vector>
 #include "rocksdb/merge_operator.h"
 #include "rocksdb/slice.h"
@@ -70,7 +71,6 @@ enum ColumnTypeMask {
   EXPIRATION_MASK = 0x02,
 };
 class ColumnBase {
  public:
   ColumnBase(int8_t mask, int8_t index);
@@ -91,8 +91,8 @@ private:
 class Column : public ColumnBase {
  public:
-  Column(int8_t mask, int8_t index, int64_t timestamp,
-         int32_t value_size, const char* value);
+  Column(int8_t mask, int8_t index, int64_t timestamp, int32_t value_size,
+         const char* value);
   virtual int64_t Timestamp() const override;
   virtual std::size_t Size() const override;
@@ -108,8 +108,8 @@ private:
 class Tombstone : public ColumnBase {
  public:
-  Tombstone(int8_t mask, int8_t index,
-            int32_t local_deletion_time, int64_t marked_for_delete_at);
+  Tombstone(int8_t mask, int8_t index, int32_t local_deletion_time,
+            int64_t marked_for_delete_at);
   virtual int64_t Timestamp() const override;
   virtual std::size_t Size() const override;
@@ -149,8 +149,7 @@ public:
   // Create a Row Tombstone.
   RowValue(int32_t local_deletion_time, int64_t marked_for_delete_at);
   // Create a Row containing columns.
-  RowValue(Columns columns,
-           int64_t last_modified_time);
+  RowValue(Columns columns, int64_t last_modified_time);
   RowValue(const RowValue& /*that*/) = delete;
   RowValue(RowValue&& /*that*/) noexcept = default;
   RowValue& operator=(const RowValue& /*that*/) = delete;
@@ -180,5 +179,5 @@ public:
   int64_t last_modified_time_;
 };
-} // namepsace cassandrda
+}  // namespace cassandra
 }  // namespace ROCKSDB_NAMESPACE

utilities/cassandra/merge_operator.cc

@@ -44,9 +44,8 @@ bool CassandraValueMergeOperator::FullMergeV2(
   merge_out->new_value.clear();
   std::vector<RowValue> row_values;
   if (merge_in.existing_value) {
-    row_values.push_back(
-        RowValue::Deserialize(merge_in.existing_value->data(),
-                              merge_in.existing_value->size()));
+    row_values.push_back(RowValue::Deserialize(
+        merge_in.existing_value->data(), merge_in.existing_value->size()));
   }
   for (auto& operand : merge_in.operand_list) {

@@ -20,7 +20,7 @@ namespace cassandra {
 namespace {
 const int64_t kCharMask = 0xFFLL;
 const int32_t kBitsPerByte = 8;
-}
+}  // namespace
 template <typename T>
 void Serialize(T val, std::string* dest);
@@ -37,8 +37,9 @@ inline void Serialize<int8_t>(int8_t t, std::string* dest) {
 template <>
 inline void Serialize<int32_t>(int32_t t, std::string* dest) {
   for (unsigned long i = 0; i < sizeof(int32_t); i++) {
-    dest->append(1, static_cast<char>(
-        (t >> (sizeof(int32_t) - 1 - i) * kBitsPerByte) & kCharMask));
+    dest->append(
+        1, static_cast<char>((t >> (sizeof(int32_t) - 1 - i) * kBitsPerByte) &
+                             kCharMask));
   }
 }
@@ -46,8 +47,8 @@ template<>
 inline void Serialize<int64_t>(int64_t t, std::string* dest) {
   for (unsigned long i = 0; i < sizeof(int64_t); i++) {
     dest->append(
-        1, static_cast<char>(
-            (t >> (sizeof(int64_t) - 1 - i) * kBitsPerByte) & kCharMask));
+        1, static_cast<char>((t >> (sizeof(int64_t) - 1 - i) * kBitsPerByte) &
+                             kCharMask));
   }
 }
@@ -76,5 +77,5 @@ inline int64_t Deserialize<int64_t>(const char* src, std::size_t offset) {
   return result;
 }
-} // namepsace cassandrda
+}  // namespace cassandra
 }  // namespace ROCKSDB_NAMESPACE
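
For reference, the helpers reformatted above implement big-endian (most-significant-byte-first) encoding. The following is an illustrative round-trip sketch, not part of this commit: it assumes the in-tree (non-public) header utilities/cassandra/serialize.h and the Serialize/Deserialize specializations shown in the hunk above.

// Hedged sketch: round-trip an int64_t through the Cassandra big-endian helpers.
#include <cassert>
#include <string>

#include "rocksdb/rocksdb_namespace.h"
#include "utilities/cassandra/serialize.h"  // in-tree header, not public API

int main() {
  using namespace ROCKSDB_NAMESPACE::cassandra;
  std::string buf;
  // Appends 8 bytes to buf, most significant byte first.
  Serialize<int64_t>(0x0102030405060708LL, &buf);
  assert(buf.size() == sizeof(int64_t));
  // Reads the same 8 bytes back starting at the given offset.
  int64_t round_tripped = Deserialize<int64_t>(buf.data(), /*offset=*/0);
  assert(round_tripped == 0x0102030405060708LL);
  return 0;
}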

@@ -14,8 +14,7 @@ const int8_t kColumn = 0;
 const int8_t kTombstone = 1;
 const int8_t kExpiringColumn = 2;
-std::shared_ptr<ColumnBase> CreateTestColumn(int8_t mask,
-                                             int8_t index,
+std::shared_ptr<ColumnBase> CreateTestColumn(int8_t mask, int8_t index,
                                              int64_t timestamp) {
   if ((mask & ColumnTypeMask::DELETION_MASK) != 0) {
     return std::shared_ptr<Tombstone>(
@@ -61,12 +60,10 @@ void VerifyRowValueColumns(
   EXPECT_EQ(expected_index, columns[index_of_vector]->Index());
 }
-int64_t ToMicroSeconds(int64_t seconds) {
-  return seconds * (int64_t)1000000;
-}
+int64_t ToMicroSeconds(int64_t seconds) { return seconds * (int64_t)1000000; }
 int32_t ToSeconds(int64_t microseconds) {
   return (int32_t)(microseconds / (int64_t)1000000);
 }
-}
+}  // namespace cassandra
 }  // namespace ROCKSDB_NAMESPACE

@@ -5,6 +5,7 @@
 #pragma once
 #include <memory>
 #include "test_util/testharness.h"
 #include "utilities/cassandra/format.h"
 #include "utilities/cassandra/serialize.h"
@@ -18,9 +19,7 @@ extern const int8_t kColumn;
 extern const int8_t kTombstone;
 extern const int8_t kExpiringColumn;
-std::shared_ptr<ColumnBase> CreateTestColumn(int8_t mask,
-                                             int8_t index,
+std::shared_ptr<ColumnBase> CreateTestColumn(int8_t mask, int8_t index,
                                              int64_t timestamp);
 std::tuple<int8_t, int8_t, int64_t> CreateTestColumnSpec(int8_t mask,
@@ -39,5 +38,5 @@ void VerifyRowValueColumns(
 int64_t ToMicroSeconds(int64_t seconds);
 int32_t ToSeconds(int64_t microseconds);
-}
+}  // namespace cassandra
 }  // namespace ROCKSDB_NAMESPACE

@@ -6,11 +6,11 @@
 #pragma once
 #ifndef ROCKSDB_LITE
-#include "rocksdb/utilities/checkpoint.h"
 #include <string>
 #include "file/filename.h"
 #include "rocksdb/db.h"
+#include "rocksdb/utilities/checkpoint.h"
 namespace ROCKSDB_NAMESPACE {

@@ -136,8 +136,7 @@ class CheckpointTest : public testing::Test {
     ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
   }
-  Status TryReopenWithColumnFamilies(
-      const std::vector<std::string>& cfs,
+  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                      const std::vector<Options>& options) {
     Close();
     EXPECT_EQ(cfs.size(), options.size());
@@ -156,9 +155,7 @@ class CheckpointTest : public testing::Test {
     return TryReopenWithColumnFamilies(cfs, v_opts);
   }
-  void Reopen(const Options& options) {
-    ASSERT_OK(TryReopen(options));
-  }
+  void Reopen(const Options& options) { ASSERT_OK(TryReopen(options)); }
   void CompactAll() {
     for (auto h : handles_) {
@@ -223,9 +220,7 @@ class CheckpointTest : public testing::Test {
     return db_->Put(wo, handles_[cf], k, v);
   }
-  Status Delete(const std::string& k) {
-    return db_->Delete(WriteOptions(), k);
-  }
+  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }
   Status Delete(int cf, const std::string& k) {
     return db_->Delete(WriteOptions(), handles_[cf], k);
@@ -515,8 +510,8 @@ TEST_F(CheckpointTest, CheckpointCF) {
   for (size_t i = 0; i < cfs.size(); ++i) {
     column_families.push_back(ColumnFamilyDescriptor(cfs[i], options));
   }
-  ASSERT_OK(DB::Open(options, snapshot_name_,
-                     column_families, &cphandles, &snapshotDB));
+  ASSERT_OK(DB::Open(options, snapshot_name_, column_families, &cphandles,
                     &snapshotDB));
   ASSERT_OK(snapshotDB->Get(roptions, cphandles[0], "Default", &result));
   ASSERT_EQ("Default1", result);
   ASSERT_OK(snapshotDB->Get(roptions, cphandles[1], "one", &result));

@@ -5,10 +5,11 @@
 #ifndef ROCKSDB_LITE
+#include "utilities/compaction_filters/remove_emptyvalue_compactionfilter.h"
 #include <string>
 #include "rocksdb/slice.h"
-#include "utilities/compaction_filters/remove_emptyvalue_compactionfilter.h"
 namespace ROCKSDB_NAMESPACE {

@@ -8,6 +8,7 @@
 // found in the LICENSE file.
 #include "rocksdb/utilities/info_log_finder.h"
 #include "file/filename.h"
 #include "rocksdb/env.h"

@@ -7,6 +7,7 @@
 #ifndef ROCKSDB_LITE
 #include "rocksdb/utilities/env_mirror.h"
 #include "env/mock_env.h"
 #include "test_util/testharness.h"
@@ -97,8 +98,9 @@ TEST_F(EnvMirrorTest, Basics) {
   ASSERT_TRUE(
       !env_->NewSequentialFile("/dir/non_existent", &seq_file, soptions_).ok());
   ASSERT_TRUE(!seq_file);
-  ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file,
-                                         soptions_).ok());
+  ASSERT_TRUE(
+      !env_->NewRandomAccessFile("/dir/non_existent", &rand_file, soptions_)
+           .ok());
   ASSERT_TRUE(!rand_file);
   // Check that deleting works.

@@ -11,8 +11,7 @@
 namespace ROCKSDB_NAMESPACE {
-class TimedEnvTest : public testing::Test {
-};
+class TimedEnvTest : public testing::Test {};
 TEST_F(TimedEnvTest, BasicTest) {
   SetPerfLevel(PerfLevel::kEnableTime);

@@ -85,8 +85,7 @@ class TestWritableFile : public WritableFile {
   virtual Status Flush() override;
   virtual Status Sync() override;
   virtual bool IsSyncThreadSafe() const override { return true; }
-  virtual Status PositionedAppend(const Slice& data,
-                                  uint64_t offset) override {
+  virtual Status PositionedAppend(const Slice& data, uint64_t offset) override {
     return target_->PositionedAppend(data, offset);
   }
   virtual Status PositionedAppend(
@@ -227,8 +226,8 @@ class FaultInjectionTestEnv : public EnvWrapper {
     MutexLock l(&mutex_);
     return filesystem_active_;
   }
-  void SetFilesystemActiveNoLock(bool active,
-      Status error = Status::Corruption("Not active")) {
+  void SetFilesystemActiveNoLock(
+      bool active, Status error = Status::Corruption("Not active")) {
     error.PermitUncheckedError();
     filesystem_active_ = active;
     if (!active) {

@@ -386,8 +386,8 @@ IOStatus TestFSRandomRWFile::Sync(const IOOptions& options,
   return target_->Sync(options, dbg);
 }
-TestFSRandomAccessFile::TestFSRandomAccessFile(const std::string& /*fname*/,
-                                               std::unique_ptr<FSRandomAccessFile>&& f,
-                                               FaultInjectionTestFS* fs)
+TestFSRandomAccessFile::TestFSRandomAccessFile(
+    const std::string& /*fname*/, std::unique_ptr<FSRandomAccessFile>&& f,
+    FaultInjectionTestFS* fs)
     : target_(std::move(f)), fs_(fs) {
   assert(target_ != nullptr);
@@ -912,8 +912,7 @@ IOStatus FaultInjectionTestFS::InjectThreadSpecificReadError(
   bool dummy_bool;
   bool& ret_fault_injected = fault_injected ? *fault_injected : dummy_bool;
   ret_fault_injected = false;
-  ErrorContext* ctx =
-      static_cast<ErrorContext*>(thread_local_error_->Get());
+  ErrorContext* ctx = static_cast<ErrorContext*>(thread_local_error_->Get());
   if (ctx == nullptr || !ctx->enable_error_injection || !ctx->one_in) {
     return IOStatus::OK();
   }
@@ -1019,8 +1018,7 @@ IOStatus FaultInjectionTestFS::InjectMetadataWriteError() {
 void FaultInjectionTestFS::PrintFaultBacktrace() {
 #if defined(OS_LINUX)
-  ErrorContext* ctx =
-      static_cast<ErrorContext*>(thread_local_error_->Get());
+  ErrorContext* ctx = static_cast<ErrorContext*>(thread_local_error_->Get());
   if (ctx == nullptr) {
     return;
   }

@@ -331,8 +331,7 @@ class FaultInjectionTestFS : public FileSystemWrapper {
     error.PermitUncheckedError();
     SetFilesystemActiveNoLock(active, error);
   }
-  void SetFilesystemDirectWritable(
-      bool writable) {
+  void SetFilesystemDirectWritable(bool writable) {
     MutexLock l(&mutex_);
     filesystem_writable_ = writable;
   }
@@ -466,8 +465,7 @@ class FaultInjectionTestFS : public FileSystemWrapper {
   // Get the count of how many times we injected since the previous call
   int GetAndResetErrorCount() {
-    ErrorContext* ctx =
-        static_cast<ErrorContext*>(thread_local_error_->Get());
+    ErrorContext* ctx = static_cast<ErrorContext*>(thread_local_error_->Get());
    int count = 0;
    if (ctx != nullptr) {
      count = ctx->count;
@@ -477,8 +475,7 @@ class FaultInjectionTestFS : public FileSystemWrapper {
   }
   void EnableErrorInjection() {
-    ErrorContext* ctx =
-        static_cast<ErrorContext*>(thread_local_error_->Get());
+    ErrorContext* ctx = static_cast<ErrorContext*>(thread_local_error_->Get());
     if (ctx) {
       ctx->enable_error_injection = true;
     }
@@ -499,8 +496,7 @@ class FaultInjectionTestFS : public FileSystemWrapper {
   }
   void DisableErrorInjection() {
-    ErrorContext* ctx =
-        static_cast<ErrorContext*>(thread_local_error_->Get());
+    ErrorContext* ctx = static_cast<ErrorContext*>(thread_local_error_->Get());
     if (ctx) {
       ctx->enable_error_injection = false;
     }

@@ -8,6 +8,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 #include "rocksdb/utilities/leveldb_options.h"
 #include "rocksdb/cache.h"
 #include "rocksdb/comparator.h"
 #include "rocksdb/env.h"

@@ -4,13 +4,13 @@
 // (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
-#include "rocksdb/merge_operator.h"
 #include <stdio.h>
 #include <memory>
 #include <string>
+#include "rocksdb/merge_operator.h"
 namespace ROCKSDB_NAMESPACE {
 class MergeOperators {
@@ -19,7 +19,8 @@ class MergeOperators {
   static std::shared_ptr<MergeOperator> CreateDeprecatedPutOperator();
   static std::shared_ptr<MergeOperator> CreateUInt64AddOperator();
   static std::shared_ptr<MergeOperator> CreateStringAppendOperator();
-  static std::shared_ptr<MergeOperator> CreateStringAppendOperator(char delim_char);
+  static std::shared_ptr<MergeOperator> CreateStringAppendOperator(
+      char delim_char);
   static std::shared_ptr<MergeOperator> CreateStringAppendOperator(
       const std::string& delim);
   static std::shared_ptr<MergeOperator> CreateStringAppendTESTOperator();

@@ -3,28 +3,26 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
+#include "utilities/merge_operators/bytesxor.h"
 #include <algorithm>
 #include <string>
-#include "utilities/merge_operators/bytesxor.h"
 namespace ROCKSDB_NAMESPACE {
 std::shared_ptr<MergeOperator> MergeOperators::CreateBytesXOROperator() {
   return std::make_shared<BytesXOROperator>();
 }
-bool BytesXOROperator::Merge(const Slice& /*key*/,
-                             const Slice* existing_value,
-                             const Slice& value,
-                             std::string* new_value,
+bool BytesXOROperator::Merge(const Slice& /*key*/, const Slice* existing_value,
+                             const Slice& value, std::string* new_value,
                              Logger* /*logger*/) const {
   XOR(existing_value, value, new_value);
   return true;
 }
-void BytesXOROperator::XOR(const Slice* existing_value,
-                           const Slice& value, std::string* new_value) const {
+void BytesXOROperator::XOR(const Slice* existing_value, const Slice& value,
                            std::string* new_value) const {
   if (!existing_value) {
     new_value->clear();
     new_value->assign(value.data(), value.size());

@@ -8,6 +8,7 @@
 #include <algorithm>
 #include <memory>
 #include <string>
 #include "rocksdb/env.h"
 #include "rocksdb/merge_operator.h"
 #include "rocksdb/slice.h"
@@ -22,10 +23,8 @@ class BytesXOROperator : public AssociativeMergeOperator {
  public:
   // XORs the two array of bytes one byte at a time and stores the result
   // in new_value. len is the number of xored bytes, and the length of new_value
-  virtual bool Merge(const Slice& key,
-                     const Slice* existing_value,
-                     const Slice& value,
-                     std::string* new_value,
+  virtual bool Merge(const Slice& key, const Slice* existing_value,
+                     const Slice& value, std::string* new_value,
                      Logger* logger) const override;
   static const char* kClassName() { return "BytesXOR"; }

@@ -4,8 +4,9 @@
 // (found in the LICENSE.Apache file in the root directory).
 #include <memory>
-#include "rocksdb/slice.h"
 #include "rocksdb/merge_operator.h"
+#include "rocksdb/slice.h"
 #include "utilities/merge_operators.h"
 namespace {  // anonymous namespace

@@ -3,11 +3,11 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 #include "utilities/merge_operators/sortlist.h"
 #include "rocksdb/merge_operator.h"
 #include "rocksdb/slice.h"
 #include "utilities/merge_operators.h"
 namespace ROCKSDB_NAMESPACE {
 bool SortList::FullMergeV2(const MergeOperationInput& merge_in,

@@ -61,12 +61,12 @@ bool StringAppendOperator::Merge(const Slice& /*key*/,
   return true;
 }
 std::shared_ptr<MergeOperator> MergeOperators::CreateStringAppendOperator() {
   return std::make_shared<StringAppendOperator>(',');
 }
-std::shared_ptr<MergeOperator> MergeOperators::CreateStringAppendOperator(char delim_char) {
+std::shared_ptr<MergeOperator> MergeOperators::CreateStringAppendOperator(
+    char delim_char) {
   return std::make_shared<StringAppendOperator>(delim_char);
 }
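
As context for the factory reformatted above, here is a hedged usage sketch, not taken from this commit: it wires MergeOperators::CreateStringAppendOperator(char) from the in-tree utilities/merge_operators.h (declared in the hunks earlier in this diff) into Options and appends list elements through DB::Merge. The database path and the ':' delimiter are arbitrary choices for illustration.

// Hedged sketch: delimiter-based list append via the StringAppend merge operator.
#include <cassert>
#include <iostream>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "utilities/merge_operators.h"  // in-tree helper declaring CreateStringAppendOperator

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.merge_operator =
      rocksdb::MergeOperators::CreateStringAppendOperator(':');

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/stringappend_example", &db);
  assert(s.ok());

  // Each Merge appends one element; the operator joins them with the delimiter.
  db->Merge(rocksdb::WriteOptions(), "mylist", "a");
  db->Merge(rocksdb::WriteOptions(), "mylist", "b");

  std::string value;
  db->Get(rocksdb::ReadOptions(), "mylist", &value);
  std::cout << value << std::endl;  // expected: "a:b"

  delete db;
  return 0;
}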

@@ -16,10 +16,8 @@ class StringAppendOperator : public AssociativeMergeOperator {
   explicit StringAppendOperator(char delim_char);
   explicit StringAppendOperator(const std::string& delim);
-  virtual bool Merge(const Slice& key,
-                     const Slice* existing_value,
-                     const Slice& value,
-                     std::string* new_value,
+  virtual bool Merge(const Slice& key, const Slice* existing_value,
+                     const Slice& value, std::string* new_value,
                      Logger* logger) const override;
   static const char* kClassName() { return "StringAppendOperator"; }

@@ -31,8 +31,8 @@ class StringAppendTESTOperator : public MergeOperator {
   virtual bool PartialMergeMulti(const Slice& key,
                                  const std::deque<Slice>& operand_list,
-                                 std::string* new_value, Logger* logger) const
-      override;
+                                 std::string* new_value,
+                                 Logger* logger) const override;
   static const char* kClassName() { return "StringAppendTESTOperator"; }
   static const char* kNickName() { return "stringappendtest"; }

@@ -27,7 +27,6 @@
 #include "utilities/merge_operators.h"
 #include "utilities/merge_operators/string_append/stringappend2.h"
 namespace ROCKSDB_NAMESPACE {
 // Path to the database on file system
@@ -73,13 +72,10 @@ std::shared_ptr<DB> OpenTtlDb(const std::string& delim) {
 /// Supports Append(list, string) and Get(list)
 class StringLists {
  public:
   // Constructor: specifies the rocksdb db
   /* implicit */
   StringLists(std::shared_ptr<DB> db)
-      : db_(db),
-        merge_option_(),
-        get_option_() {
+      : db_(db), merge_option_(), get_option_() {
     assert(db);
   }
@@ -118,15 +114,12 @@ class StringLists {
     return false;
   }
  private:
   std::shared_ptr<DB> db_;
   WriteOptions merge_option_;
   ReadOptions get_option_;
 };
 // The class for unit-testing
 class StringAppendOperatorTest : public testing::Test,
                                  public ::testing::WithParamInterface<bool> {
@@ -153,14 +146,13 @@ class StringAppendOperatorTest : public testing::Test,
   // Allows user to open databases with different configurations.
   // e.g.: Can open a DB or a TtlDB, etc.
-  static void SetOpenDbFunction(OpenFuncPtr func) {
-    OpenDb = func;
-  }
+  static void SetOpenDbFunction(OpenFuncPtr func) { OpenDb = func; }
  protected:
   static OpenFuncPtr OpenDb;
 };
-StringAppendOperatorTest::OpenFuncPtr StringAppendOperatorTest::OpenDb = nullptr;
+StringAppendOperatorTest::OpenFuncPtr StringAppendOperatorTest::OpenDb =
+    nullptr;
 // THE TEST CASES BEGIN HERE
@@ -206,7 +198,6 @@ TEST_P(StringAppendOperatorTest, IteratorTest) {
     }
   }
   // Should release the snapshot and be aware of the new stuff now
   it.reset(db_->NewIterator(ReadOptions()));
   first = true;
@@ -367,12 +358,13 @@ TEST_P(StringAppendOperatorTest, RandomMixGetAppend) {
   // Generate a list of random keys and values
   const int kWordCount = 15;
-  std::string words[] = {"sdasd", "triejf", "fnjsdfn", "dfjisdfsf", "342839",
-                         "dsuha", "mabuais", "sadajsid", "jf9834hf", "2d9j89",
-                         "dj9823jd", "a", "dk02ed2dh", "$(jd4h984$(*", "mabz"};
+  std::string words[] = {"sdasd", "triejf", "fnjsdfn", "dfjisdfsf",
+                         "342839", "dsuha", "mabuais", "sadajsid",
+                         "jf9834hf", "2d9j89", "dj9823jd", "a",
+                         "dk02ed2dh", "$(jd4h984$(*", "mabz"};
   const int kKeyCount = 6;
-  std::string keys[] = {"dhaiusdhu", "denidw", "daisda", "keykey", "muki",
-                        "shzassdianmd"};
+  std::string keys[] = {"dhaiusdhu", "denidw", "daisda",
+                        "keykey", "muki", "shzassdianmd"};
   // Will store a local copy of all data in order to verify correctness
   std::map<std::string, std::string> parallel_copy;
@@ -390,7 +382,6 @@ TEST_P(StringAppendOperatorTest, RandomMixGetAppend) {
       // Apply the query and any checks.
       if (query == APPEND_OP) {
         // Apply the rocksdb test-harness Append defined above
         slists.Append(key, word);  // apply the rocksdb append
@@ -407,7 +398,6 @@ TEST_P(StringAppendOperatorTest, RandomMixGetAppend) {
         slists.Get(key, &res);
         ASSERT_EQ(res, parallel_copy[key]);
       }
     }
   }
@@ -417,12 +407,13 @@ TEST_P(StringAppendOperatorTest, BIGRandomMixGetAppend) {
   // Generate a list of random keys and values
   const int kWordCount = 15;
-  std::string words[] = {"sdasd", "triejf", "fnjsdfn", "dfjisdfsf", "342839",
-                         "dsuha", "mabuais", "sadajsid", "jf9834hf", "2d9j89",
-                         "dj9823jd", "a", "dk02ed2dh", "$(jd4h984$(*", "mabz"};
+  std::string words[] = {"sdasd", "triejf", "fnjsdfn", "dfjisdfsf",
+                         "342839", "dsuha", "mabuais", "sadajsid",
+                         "jf9834hf", "2d9j89", "dj9823jd", "a",
+                         "dk02ed2dh", "$(jd4h984$(*", "mabz"};
   const int kKeyCount = 6;
-  std::string keys[] = {"dhaiusdhu", "denidw", "daisda", "keykey", "muki",
-                        "shzassdianmd"};
+  std::string keys[] = {"dhaiusdhu", "denidw", "daisda",
+                        "keykey", "muki", "shzassdianmd"};
   // Will store a local copy of all data in order to verify correctness
   std::map<std::string, std::string> parallel_copy;
@@ -440,7 +431,6 @@ TEST_P(StringAppendOperatorTest, BIGRandomMixGetAppend) {
      // Apply the query and any checks.
      if (query == APPEND_OP) {
        // Apply the rocksdb test-harness Append defined above
        slists.Append(key, word);  // apply the rocksdb append
@@ -457,7 +447,6 @@ TEST_P(StringAppendOperatorTest, BIGRandomMixGetAppend) {
        slists.Get(key, &res);
        ASSERT_EQ(res, parallel_copy[key]);
      }
    }
  }

@@ -57,8 +57,8 @@ Status LoadOptionsFromFile(const ConfigOptions& config_options,
   return Status::OK();
 }
-Status GetLatestOptionsFileName(const std::string& dbpath,
-                                Env* env, std::string* options_file_name) {
+Status GetLatestOptionsFileName(const std::string& dbpath, Env* env,
+                                std::string* options_file_name) {
   Status s;
   std::string latest_file_name;
   uint64_t latest_time_stamp = 0;

@@ -148,8 +148,7 @@ PersistentCache::StatsType BlockCacheTier::Stats() {
         stats_.bytes_read_.Average());
     Add(&stats, "persistentcache.blockcachetier.insert_dropped",
         stats_.insert_dropped_);
-    Add(&stats, "persistentcache.blockcachetier.cache_hits",
-        stats_.cache_hits_);
+    Add(&stats, "persistentcache.blockcachetier.cache_hits", stats_.cache_hits_);
     Add(&stats, "persistentcache.blockcachetier.cache_misses",
         stats_.cache_misses_);
     Add(&stats, "persistentcache.blockcachetier.cache_errors",
@@ -326,9 +325,8 @@ Status BlockCacheTier::NewCacheFile() {
   TEST_SYNC_POINT_CALLBACK("BlockCacheTier::NewCacheFile:DeleteDir",
                            (void*)(GetCachePath().c_str()));
-  std::unique_ptr<WriteableCacheFile> f(
-      new WriteableCacheFile(opt_.env, &buffer_allocator_, &writer_,
-                             GetCachePath(), writer_cache_id_,
-                             opt_.cache_file_size, opt_.log));
+  std::unique_ptr<WriteableCacheFile> f(new WriteableCacheFile(
+      opt_.env, &buffer_allocator_, &writer_, GetCachePath(), writer_cache_id_,
+      opt_.cache_file_size, opt_.log));
   bool status = f->Create(opt_.enable_direct_writes, opt_.enable_direct_reads);

@@ -45,7 +45,8 @@ class BlockCacheTier : public PersistentCacheTier {
       : opt_(opt),
         insert_ops_(static_cast<size_t>(opt_.max_write_pipeline_backlog_size)),
         buffer_allocator_(opt.write_buffer_size, opt.write_buffer_count()),
-        writer_(this, opt_.writer_qdepth, static_cast<size_t>(opt_.writer_dispatch_size)) {
+        writer_(this, opt_.writer_qdepth,
+                static_cast<size_t>(opt_.writer_dispatch_size)) {
     Info(opt_.log, "Initializing allocator. size=%d B count=%" ROCKSDB_PRIszt,
          opt_.write_buffer_size, opt_.write_buffer_count());
   }

@@ -68,8 +68,7 @@ Status BlockCacheFile::Delete(uint64_t* size) {
 // <-- 4 --><-- 4 --><-- 4 --><-- 4 --><-- key size --><-- v-size -->
 //
 struct CacheRecordHeader {
-  CacheRecordHeader()
-      : magic_(0), crc_(0), key_size_(0), val_size_(0) {}
+  CacheRecordHeader() : magic_(0), crc_(0), key_size_(0), val_size_(0) {}
   CacheRecordHeader(const uint32_t magic, const uint32_t key_size,
                     const uint32_t val_size)
       : magic_(magic), crc_(0), key_size_(key_size), val_size_(val_size) {}

@@ -12,19 +12,16 @@
 #include <vector>
 #include "file/random_access_file_reader.h"
+#include "port/port.h"
 #include "rocksdb/comparator.h"
 #include "rocksdb/env.h"
+#include "util/crc32c.h"
+#include "util/mutexlock.h"
 #include "utilities/persistent_cache/block_cache_tier_file_buffer.h"
 #include "utilities/persistent_cache/lrulist.h"
 #include "utilities/persistent_cache/persistent_cache_tier.h"
 #include "utilities/persistent_cache/persistent_cache_util.h"
-#include "port/port.h"
-#include "util/crc32c.h"
-#include "util/mutexlock.h"
 // The io code path of persistent cache uses pipelined architecture
 //
 // client -> In Queue <-- BlockCacheTier --> Out Queue <-- Writer <--> Kernel

@@ -8,8 +8,8 @@
 #include <memory>
 #include <string>
-#include "rocksdb/comparator.h"
 #include "memory/arena.h"
+#include "rocksdb/comparator.h"
 #include "util/mutexlock.h"
 namespace ROCKSDB_NAMESPACE {

@@ -11,7 +11,6 @@
 #include <unordered_map>
 #include "rocksdb/slice.h"
 #include "utilities/persistent_cache/block_cache_tier_file.h"
 #include "utilities/persistent_cache/hash_table.h"
 #include "utilities/persistent_cache/hash_table_evictable.h"

@@ -8,6 +8,7 @@
 #ifndef ROCKSDB_LITE
 #include <assert.h>
 #include <list>
 #include <vector>

@@ -3,7 +3,10 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 //
+#include "utilities/persistent_cache/hash_table.h"
 #include <stdlib.h>
 #include <iostream>
 #include <set>
 #include <string>
@@ -12,7 +15,6 @@
 #include "memory/arena.h"
 #include "test_util/testharness.h"
 #include "util/random.h"
-#include "utilities/persistent_cache/hash_table.h"
 #include "utilities/persistent_cache/hash_table_evictable.h"
 #ifndef ROCKSDB_LITE

@@ -84,7 +84,8 @@ std::unique_ptr<PersistentCacheTier> NewBlockCache(
     Env* env, const std::string& path,
     const uint64_t max_size = std::numeric_limits<uint64_t>::max(),
     const bool enable_direct_writes = false) {
-  const uint32_t max_file_size = static_cast<uint32_t>(12 * 1024 * 1024 * kStressFactor);
+  const uint32_t max_file_size =
+      static_cast<uint32_t>(12 * 1024 * 1024 * kStressFactor);
   auto log = std::make_shared<ConsoleLogger>();
   PersistentCacheConfig opt(env, path, max_size, log);
   opt.cache_file_size = max_file_size;
@@ -101,7 +102,8 @@ std::unique_ptr<PersistentTieredCache> NewTieredCache(
     Env* env, const std::string& path, const uint64_t max_volatile_cache_size,
     const uint64_t max_block_cache_size =
         std::numeric_limits<uint64_t>::max()) {
-  const uint32_t max_file_size = static_cast<uint32_t>(12 * 1024 * 1024 * kStressFactor);
+  const uint32_t max_file_size =
+      static_cast<uint32_t>(12 * 1024 * 1024 * kStressFactor);
   auto log = std::make_shared<ConsoleLogger>();
   auto opt = PersistentCacheConfig(env, path, max_block_cache_size, log);
   opt.cache_file_size = max_file_size;
@@ -171,7 +173,8 @@ TEST_F(PersistentCacheTierTest, DISABLED_VolatileCacheInsertWithEviction) {
   for (auto nthreads : {1, 5}) {
     for (auto max_keys : {1 * 1024 * 1024 * kStressFactor}) {
       cache_ = std::make_shared<VolatileCacheTier>(
-          /*compressed=*/true, /*size=*/static_cast<size_t>(1 * 1024 * 1024 * kStressFactor));
+          /*compressed=*/true,
+          /*size=*/static_cast<size_t>(1 * 1024 * 1024 * kStressFactor));
       RunInsertTestWithEviction(nthreads, static_cast<size_t>(max_keys));
     }
   }
@@ -197,7 +200,8 @@ TEST_F(PersistentCacheTierTest, DISABLED_BlockCacheInsert) {
 TEST_F(PersistentCacheTierTest, DISABLED_BlockCacheInsertWithEviction) {
   for (auto nthreads : {1, 5}) {
     for (auto max_keys : {1 * 1024 * 1024 * kStressFactor}) {
-      cache_ = NewBlockCache(Env::Default(), path_,
+      cache_ = NewBlockCache(
+          Env::Default(), path_,
           /*max_size=*/static_cast<size_t>(200 * 1024 * 1024 * kStressFactor));
       RunInsertTestWithEviction(nthreads, static_cast<size_t>(max_keys));
     }
@@ -210,7 +214,8 @@ TEST_F(PersistentCacheTierTest, DISABLED_TieredCacheInsert) {
   for (auto nthreads : {1, 5}) {
     for (auto max_keys :
          {10 * 1024 * kStressFactor, 1 * 1024 * 1024 * kStressFactor}) {
-      cache_ = NewTieredCache(Env::Default(), path_,
+      cache_ = NewTieredCache(
+          Env::Default(), path_,
           /*memory_size=*/static_cast<size_t>(1 * 1024 * 1024 * kStressFactor));
       RunInsertTest(nthreads, static_cast<size_t>(max_keys));
     }
@@ -226,7 +231,8 @@ TEST_F(PersistentCacheTierTest, DISABLED_TieredCacheInsertWithEviction) {
       cache_ = NewTieredCache(
          Env::Default(), path_,
          /*memory_size=*/static_cast<size_t>(1 * 1024 * 1024 * kStressFactor),
-          /*block_cache_size*/ static_cast<size_t>(200 * 1024 * 1024 * kStressFactor));
+          /*block_cache_size*/
+          static_cast<size_t>(200 * 1024 * 1024 * kStressFactor));
      RunInsertTestWithEviction(nthreads, static_cast<size_t>(max_keys));
    }
  }
@@ -291,7 +297,6 @@ PersistentCacheDBTest::PersistentCacheDBTest()
 void PersistentCacheDBTest::RunTest(
     const std::function<std::shared_ptr<PersistentCacheTier>(bool)>& new_pcache,
     const size_t max_keys = 100 * 1024, const size_t max_usecase = 5) {
   // number of insertion interations
   int num_iter = static_cast<int>(max_keys * kStressFactor);

@@ -41,9 +41,7 @@ class CompactOnDeletionCollector : public TablePropertiesCollector {
   }
   // EXPERIMENTAL Return whether the output file should be further compacted
-  virtual bool NeedCompact() const override {
-    return need_compaction_;
-  }
+  virtual bool NeedCompact() const override { return need_compaction_; }
   static const int kNumBuckets = 128;

@@ -80,10 +80,10 @@ TEST(CompactOnDeletionCollector, DeletionRatio) {
 }
 TEST(CompactOnDeletionCollector, SlidingWindow) {
-  const int kWindowSizes[] =
-      {1000, 10000, 10000, 127, 128, 129, 255, 256, 257, 2, 10000};
-  const int kDeletionTriggers[] =
-      {500, 9500, 4323, 47, 61, 128, 250, 250, 250, 2, 2};
+  const int kWindowSizes[] = {1000, 10000, 10000, 127, 128, 129,
+                              255, 256, 257, 2, 10000};
+  const int kDeletionTriggers[] = {500, 9500, 4323, 47, 61, 128,
+                                   250, 250, 250, 2, 2};
   TablePropertiesCollectorFactory::Context context;
   context.column_family_id =
       TablePropertiesCollectorFactory::Context::kUnknownColumnFamily;
@@ -134,13 +134,13 @@ TEST(CompactOnDeletionCollector, SlidingWindow) {
                 collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0));
       }
     }
-    if (collector->NeedCompact() !=
-        (deletions >= kNumDeletionTrigger) &&
+    if (collector->NeedCompact() != (deletions >= kNumDeletionTrigger) &&
         std::abs(deletions - kNumDeletionTrigger) > kBias) {
-      fprintf(stderr, "[Error] collector->NeedCompact() != (%d >= %d)"
+      fprintf(stderr,
+              "[Error] collector->NeedCompact() != (%d >= %d)"
              " with kWindowSize = %d and kNumDeletionTrigger = %d\n",
-              deletions, kNumDeletionTrigger,
-              kWindowSize, kNumDeletionTrigger);
+              deletions, kNumDeletionTrigger, kWindowSize,
+              kNumDeletionTrigger);
      ASSERT_TRUE(false);
    }
    ASSERT_OK(collector->Finish(nullptr));
@@ -182,11 +182,11 @@ TEST(CompactOnDeletionCollector, SlidingWindow) {
    }
    if (collector->NeedCompact() != (deletions >= kNumDeletionTrigger) &&
        std::abs(deletions - kNumDeletionTrigger) > kBias) {
-      fprintf(stderr, "[Error] collector->NeedCompact() %d != (%d >= %d)"
+      fprintf(stderr,
+              "[Error] collector->NeedCompact() %d != (%d >= %d)"
             " with kWindowSize = %d, kNumDeletionTrigger = %d\n",
-              collector->NeedCompact(),
-              deletions, kNumDeletionTrigger, kWindowSize,
-              kNumDeletionTrigger);
+              collector->NeedCompact(), deletions, kNumDeletionTrigger,
+              kWindowSize, kNumDeletionTrigger);
      ASSERT_TRUE(false);
    }
    ASSERT_OK(collector->Finish(nullptr));
@@ -218,7 +218,8 @@ TEST(CompactOnDeletionCollector, SlidingWindow) {
    }
    if (collector->NeedCompact() &&
        std::abs(kDeletionsPerSection - kNumDeletionTrigger) > kBias) {
-      fprintf(stderr, "[Error] collector->NeedCompact() != false"
+      fprintf(stderr,
+              "[Error] collector->NeedCompact() != false"
            " with kWindowSize = %d and kNumDeletionTrigger = %d\n",
            kWindowSize, kNumDeletionTrigger);
      ASSERT_TRUE(false);

@@ -601,8 +601,7 @@ void DBWithTTLImpl::SetTtl(ColumnFamilyHandle *h, int32_t ttl) {
   opts = GetOptions(h);
   filter = std::static_pointer_cast<TtlCompactionFilterFactory>(
       opts.compaction_filter_factory);
-  if (!filter)
-    return;
+  if (!filter) return;
   filter->SetTtl(ttl);
 }

@@ -111,7 +111,6 @@ class DBWithTTLImpl : public DBWithTTL {
 };
 class TtlIterator : public Iterator {
  public:
  explicit TtlIterator(Iterator* iter) : iter_(iter) { assert(iter_); }
@@ -189,9 +188,7 @@ class TtlCompactionFilterFactory : public CompactionFilterFactory {
   std::unique_ptr<CompactionFilter> CreateCompactionFilter(
       const CompactionFilter::Context& context) override;
-  void SetTtl(int32_t ttl) {
-    ttl_ = ttl;
-  }
+  void SetTtl(int32_t ttl) { ttl_ = ttl; }
   const char* Name() const override { return kClassName(); }
   static const char* kClassName() { return "TtlCompactionFilterFactory"; }
@@ -209,7 +206,6 @@ class TtlCompactionFilterFactory : public CompactionFilterFactory {
 };
 class TtlMergeOperator : public MergeOperator {
  public:
  explicit TtlMergeOperator(const std::shared_ptr<MergeOperator>& merge_op,
                            SystemClock* clock);

@@ -28,7 +28,7 @@ namespace {
 using KVMap = std::map<std::string, std::string>;
 enum BatchOperation { OP_PUT = 0, OP_DELETE = 1 };
-}
+}  // namespace
 class SpecialTimeEnv : public EnvWrapper {
  public:
@@ -213,13 +213,16 @@ class TtlTest : public testing::Test {
     for (auto& kv : kvmap_) {
       bool ret = db_ttl_->KeyMayExist(ropts, kv.first, &val, &value_found);
       if (ret == false || value_found == false) {
-        fprintf(stderr, "KeyMayExist could not find key=%s in the database but"
-                " should have\n", kv.first.c_str());
+        fprintf(stderr,
+                "KeyMayExist could not find key=%s in the database but"
+                " should have\n",
+                kv.first.c_str());
         FAIL();
       } else if (val.compare(kv.second) != 0) {
-        fprintf(stderr, " value for key=%s present in database is %s but"
-                " should be %s\n", kv.first.c_str(), val.c_str(),
-                kv.second.c_str());
+        fprintf(stderr,
+                " value for key=%s present in database is %s but"
+                " should be %s\n",
+                kv.first.c_str(), val.c_str(), kv.second.c_str());
         FAIL();
       }
     }
@@ -264,14 +267,16 @@ class TtlTest : public testing::Test {
         FAIL();
       } else if (s.ok()) {
         if (test_compaction_change && v.compare(kNewValue_) != 0) {
-          fprintf(stderr, " value for key=%s present in database is %s but "
-                  " should be %s\n", kv_it_->first.c_str(), v.c_str(),
-                  kNewValue_.c_str());
+          fprintf(stderr,
+                  " value for key=%s present in database is %s but "
+                  " should be %s\n",
+                  kv_it_->first.c_str(), v.c_str(), kNewValue_.c_str());
          FAIL();
        } else if (!test_compaction_change && v.compare(kv_it_->second) != 0) {
-          fprintf(stderr, " value for key=%s present in database is %s but "
-                  " should be %s\n", kv_it_->first.c_str(), v.c_str(),
-                  kv_it_->second.c_str());
+          fprintf(stderr,
+                  " value for key=%s present in database is %s but "
+                  " should be %s\n",
+                  kv_it_->first.c_str(), v.c_str(), kv_it_->second.c_str());
          FAIL();
        }
      }
@@ -329,9 +334,7 @@ class TtlTest : public testing::Test {
   class TestFilter : public CompactionFilter {
    public:
     TestFilter(const int64_t kSampleSize, const std::string& kNewValue)
-        : kSampleSize_(kSampleSize),
-          kNewValue_(kNewValue) {
-    }
+        : kSampleSize_(kSampleSize), kNewValue_(kNewValue) {}
     // Works on keys of the form "key<number>"
     // Drops key if number at the end of key is in [0, kSampleSize_/3),
@@ -380,9 +383,7 @@ class TtlTest : public testing::Test {
   class TestFilterFactory : public CompactionFilterFactory {
    public:
     TestFilterFactory(const int64_t kSampleSize, const std::string& kNewValue)
-        : kSampleSize_(kSampleSize),
-          kNewValue_(kNewValue) {
-    }
+        : kSampleSize_(kSampleSize), kNewValue_(kNewValue) {}
     std::unique_ptr<CompactionFilter> CreateCompactionFilter(
         const CompactionFilter::Context& /*context*/) override {
@@ -397,7 +398,6 @@ class TtlTest : public testing::Test {
     const std::string kNewValue_;
   };
   // Choose carefully so that Put, Gets & Compaction complete in 1 second buffer
   static const int64_t kSampleSize_ = 100;
   std::string dbname_;
@@ -466,7 +466,8 @@ TEST_F(TtlTest, PresentDuringTTL) {
   OpenTtl(2);                  // T=0:Open the db with ttl = 2
   PutValues(0, kSampleSize_);  // T=0:Insert Set1. Delete at t=2
-  SleepCompactCheck(1, 0, kSampleSize_, true);  // T=1:Set1 should still be there
+  SleepCompactCheck(1, 0, kSampleSize_,
+                    true);  // T=1:Set1 should still be there
   CloseTtl();
 }
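
To put the TTL test changes above in context, here is a hedged usage sketch of the public wrapper being tested; it assumes the rocksdb/utilities/db_ttl.h header and the DBWithTTL::Open entry point. The path is made up, and the 2-second TTL mirrors the test above but is otherwise arbitrary.

// Hedged sketch: open a TTL-wrapped DB; expired entries are dropped lazily
// during compaction, not at read time.
#include <cassert>
#include <string>

#include "rocksdb/utilities/db_ttl.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;

  rocksdb::DBWithTTL* db = nullptr;
  // Entries become eligible for deletion roughly 2 seconds after being written.
  rocksdb::Status s =
      rocksdb::DBWithTTL::Open(options, "/tmp/ttl_example", &db, /*ttl=*/2);
  assert(s.ok());

  s = db->Put(rocksdb::WriteOptions(), "key", "value");
  assert(s.ok());

  std::string value;
  s = db->Get(rocksdb::ReadOptions(), "key", &value);  // visible before expiry
  assert(s.ok() && value == "value");

  delete db;
  return 0;
}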

@@ -207,8 +207,8 @@ Status WriteBatchWithIndex::Rep::ReBuildIndex() {
     // set offset of current entry for call to AddNewEntry()
     last_entry_offset = input.data() - write_batch.Data().data();
-    s = ReadRecordFromWriteBatch(&input, &tag, &column_family_id, &key,
-                                 &value, &blob, &xid);
+    s = ReadRecordFromWriteBatch(&input, &tag, &column_family_id, &key, &value,
+                                 &blob, &xid);
     if (!s.ok()) {
       break;
     }

@@ -237,7 +237,7 @@ void AssertIterEqual(WBWIIteratorImpl* wbwii,
   }
   ASSERT_FALSE(wbwii->Valid());
 }
-}  // namespace anonymous
+}  // namespace
 class WBWIBaseTest : public testing::Test {
  public:
@@ -512,14 +512,10 @@ void TestValueAsSecondaryIndexHelper(std::vector<Entry> entries,
 TEST_F(WBWIKeepTest, TestValueAsSecondaryIndex) {
   Entry entries[] = {
-      {"aaa", "0005", kPutRecord},
-      {"b", "0002", kPutRecord},
-      {"cdd", "0002", kMergeRecord},
-      {"aab", "00001", kPutRecord},
-      {"cc", "00005", kPutRecord},
-      {"cdd", "0002", kPutRecord},
-      {"aab", "0003", kPutRecord},
-      {"cc", "00005", kDeleteRecord},
+      {"aaa", "0005", kPutRecord},   {"b", "0002", kPutRecord},
+      {"cdd", "0002", kMergeRecord}, {"aab", "00001", kPutRecord},
+      {"cc", "00005", kPutRecord},   {"cdd", "0002", kPutRecord},
+      {"aab", "0003", kPutRecord},   {"cc", "00005", kDeleteRecord},
   };
   std::vector<Entry> entries_list(entries, entries + 8);
@@ -531,14 +527,10 @@ TEST_F(WBWIKeepTest, TestValueAsSecondaryIndex) {
   batch_->Clear();
   Entry new_entries[] = {
-      {"aaa", "0005", kPutRecord},
-      {"e", "0002", kPutRecord},
-      {"add", "0002", kMergeRecord},
-      {"aab", "00001", kPutRecord},
-      {"zz", "00005", kPutRecord},
-      {"add", "0002", kPutRecord},
-      {"aab", "0003", kPutRecord},
-      {"zz", "00005", kDeleteRecord},
+      {"aaa", "0005", kPutRecord},   {"e", "0002", kPutRecord},
+      {"add", "0002", kMergeRecord}, {"aab", "00001", kPutRecord},
+      {"zz", "00005", kPutRecord},   {"add", "0002", kPutRecord},
+      {"aab", "0003", kPutRecord},   {"zz", "00005", kDeleteRecord},
   };
   entries_list = std::vector<Entry>(new_entries, new_entries + 8);
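
Since the last two hunks touch WriteBatchWithIndex, here is a hedged sketch of the public API whose index the reformatted ReBuildIndex code maintains; it assumes rocksdb/utilities/write_batch_with_index.h and the GetFromBatch overload that reads only from the batch. The keys mirror the test entries above.

// Hedged sketch: an indexed write batch can be queried before being committed.
#include <cassert>
#include <string>

#include "rocksdb/options.h"
#include "rocksdb/utilities/write_batch_with_index.h"

int main() {
  rocksdb::WriteBatchWithIndex batch;
  batch.Put("aaa", "0005");
  batch.Merge("cdd", "0002");
  batch.Delete("cc");

  rocksdb::DBOptions db_options;
  std::string value;
  // Reads only the uncommitted updates recorded in the batch itself.
  rocksdb::Status s = batch.GetFromBatch(db_options, "aaa", &value);
  assert(s.ok() && value == "0005");

  s = batch.GetFromBatch(db_options, "cc", &value);
  assert(s.IsNotFound());  // the Delete recorded in the batch hides the key
  return 0;
}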
