diff --git a/db/arena_wrapped_db_iter.cc b/db/arena_wrapped_db_iter.cc
index d3161c228..607403ccc 100644
--- a/db/arena_wrapped_db_iter.cc
+++ b/db/arena_wrapped_db_iter.cc
@@ -8,6 +8,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "db/arena_wrapped_db_iter.h"
+
 #include "memory/arena.h"
 #include "rocksdb/env.h"
 #include "rocksdb/iterator.h"
diff --git a/db/arena_wrapped_db_iter.h b/db/arena_wrapped_db_iter.h
index 3275e42df..f15be306d 100644
--- a/db/arena_wrapped_db_iter.h
+++ b/db/arena_wrapped_db_iter.h
@@ -9,7 +9,9 @@
 #pragma once
 #include <stdint.h>
+
 #include <string>
+
 #include "db/db_impl/db_impl.h"
 #include "db/db_iter.h"
 #include "db/range_del_aggregator.h"
diff --git a/db/builder.cc b/db/builder.cc
index d4bb395b1..9283ffd64 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -281,7 +281,8 @@ Status BuildTable(
     meta->fd.file_size = file_size;
     meta->marked_for_compaction = builder->NeedCompact();
     assert(meta->fd.GetFileSize() > 0);
-    tp = builder->GetTableProperties(); // refresh now that builder is finished
+    tp = builder
+             ->GetTableProperties();  // refresh now that builder is finished
     if (memtable_payload_bytes != nullptr &&
         memtable_garbage_bytes != nullptr) {
       const CompactionIterationStats& ci_stats = c_iter.iter_stats();
diff --git a/db/c.cc b/db/c.cc
index 3ce278061..a7e4360c6 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -125,28 +125,48 @@ using ROCKSDB_NAMESPACE::WriteBatch;
 using ROCKSDB_NAMESPACE::WriteBatchWithIndex;
 using ROCKSDB_NAMESPACE::WriteOptions;
 
-using std::vector;
 using std::unordered_set;
+using std::vector;
 
 extern "C" {
 
-struct rocksdb_t { DB* rep; };
-struct rocksdb_backup_engine_t { BackupEngine* rep; };
-struct rocksdb_backup_engine_info_t { std::vector<BackupInfo> rep; };
-struct rocksdb_restore_options_t { RestoreOptions rep; };
-struct rocksdb_iterator_t { Iterator* rep; };
-struct rocksdb_writebatch_t { WriteBatch rep; };
-struct rocksdb_writebatch_wi_t { WriteBatchWithIndex* rep; };
-struct rocksdb_snapshot_t { const Snapshot* rep; };
-struct rocksdb_flushoptions_t { FlushOptions rep; };
-struct rocksdb_fifo_compaction_options_t { CompactionOptionsFIFO rep; };
+struct rocksdb_t {
+  DB* rep;
+};
+struct rocksdb_backup_engine_t {
+  BackupEngine* rep;
+};
+struct rocksdb_backup_engine_info_t {
+  std::vector<BackupInfo> rep;
+};
+struct rocksdb_restore_options_t {
+  RestoreOptions rep;
+};
+struct rocksdb_iterator_t {
+  Iterator* rep;
+};
+struct rocksdb_writebatch_t {
+  WriteBatch rep;
+};
+struct rocksdb_writebatch_wi_t {
+  WriteBatchWithIndex* rep;
+};
+struct rocksdb_snapshot_t {
+  const Snapshot* rep;
+};
+struct rocksdb_flushoptions_t {
+  FlushOptions rep;
+};
+struct rocksdb_fifo_compaction_options_t {
+  CompactionOptionsFIFO rep;
+};
 struct rocksdb_readoptions_t {
-   ReadOptions rep;
-   // stack variables to set pointers to in ReadOptions
-   Slice upper_bound;
-   Slice lower_bound;
-   Slice timestamp;
-   Slice iter_start_ts;
+  ReadOptions rep;
+  // stack variables to set pointers to in ReadOptions
+  Slice upper_bound;
+  Slice lower_bound;
+  Slice timestamp;
+  Slice iter_start_ts;
 };
 struct rocksdb_writeoptions_t {
   WriteOptions rep;
@@ -164,12 +184,24 @@ struct rocksdb_block_based_table_options_t {
 struct rocksdb_cuckoo_table_options_t {
   CuckooTableOptions rep;
 };
-struct rocksdb_seqfile_t { SequentialFile* rep; };
-struct rocksdb_randomfile_t { RandomAccessFile* rep; };
-struct rocksdb_writablefile_t { WritableFile* rep; };
-struct rocksdb_wal_iterator_t { TransactionLogIterator* rep; };
-struct rocksdb_wal_readoptions_t { TransactionLogIterator::ReadOptions rep; };
-struct rocksdb_filelock_t { FileLock* rep; };
+struct rocksdb_seqfile_t {
+  SequentialFile* rep;
+};
+struct rocksdb_randomfile_t {
+  RandomAccessFile* rep;
+};
+struct rocksdb_writablefile_t {
+  WritableFile* rep;
+};
+struct rocksdb_wal_iterator_t {
+  TransactionLogIterator* rep;
+};
+struct rocksdb_wal_readoptions_t {
+  TransactionLogIterator::ReadOptions rep;
+};
+struct rocksdb_filelock_t {
+  FileLock* rep;
+};
 struct rocksdb_logger_t {
   std::shared_ptr<Logger> rep;
 };
@@ -182,8 +214,12 @@ struct rocksdb_memory_allocator_t {
 struct rocksdb_cache_t {
   std::shared_ptr<Cache> rep;
 };
-struct rocksdb_livefiles_t { std::vector<LiveFileMetaData> rep; };
-struct rocksdb_column_family_handle_t { ColumnFamilyHandle* rep; };
+struct rocksdb_livefiles_t {
+  std::vector<LiveFileMetaData> rep;
+};
+struct rocksdb_column_family_handle_t {
+  ColumnFamilyHandle* rep;
+};
 struct rocksdb_column_family_metadata_t {
   ColumnFamilyMetaData rep;
 };
@@ -193,13 +229,21 @@ struct rocksdb_level_metadata_t {
 struct rocksdb_sst_file_metadata_t {
   const SstFileMetaData* rep;
 };
-struct rocksdb_envoptions_t { EnvOptions rep; };
-struct rocksdb_ingestexternalfileoptions_t { IngestExternalFileOptions rep; };
-struct rocksdb_sstfilewriter_t { SstFileWriter* rep; };
+struct rocksdb_envoptions_t {
+  EnvOptions rep;
+};
+struct rocksdb_ingestexternalfileoptions_t {
+  IngestExternalFileOptions rep;
+};
+struct rocksdb_sstfilewriter_t {
+  SstFileWriter* rep;
+};
 struct rocksdb_ratelimiter_t {
   std::shared_ptr<RateLimiter> rep;
 };
-struct rocksdb_perfcontext_t { PerfContext* rep; };
+struct rocksdb_perfcontext_t {
+  PerfContext* rep;
+};
 struct rocksdb_pinnableslice_t {
   PinnableSlice rep;
 };
@@ -235,13 +279,10 @@ struct rocksdb_compactionfiltercontext_t {
 struct rocksdb_compactionfilter_t : public CompactionFilter {
   void* state_;
   void (*destructor_)(void*);
-  unsigned char (*filter_)(
-      void*,
-      int level,
-      const char* key, size_t key_length,
-      const char* existing_value, size_t value_length,
-      char** new_value, size_t *new_value_length,
-      unsigned char* value_changed);
+  unsigned char (*filter_)(void*, int level, const char* key, size_t key_length,
+                           const char* existing_value, size_t value_length,
+                           char** new_value, size_t* new_value_length,
+                           unsigned char* value_changed);
   const char* (*name_)(void*);
   unsigned char ignore_snapshots_;
 
@@ -252,12 +293,10 @@ struct rocksdb_compactionfilter_t : public CompactionFilter {
     char* c_new_value = nullptr;
     size_t new_value_length = 0;
     unsigned char c_value_changed = 0;
-    unsigned char result = (*filter_)(
-        state_,
-        level,
-        key.data(), key.size(),
-        existing_value.data(), existing_value.size(),
-        &c_new_value, &new_value_length, &c_value_changed);
+    unsigned char result =
+        (*filter_)(state_, level, key.data(), key.size(), existing_value.data(),
+                   existing_value.size(), &c_new_value, &new_value_length,
+                   &c_value_changed);
     if (c_value_changed) {
       new_value->assign(c_new_value, new_value_length);
       *value_changed = true;
@@ -350,20 +389,16 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
   void* state_;
   void (*destructor_)(void*);
   const char* (*name_)(void*);
-  char* (*full_merge_)(
-      void*,
-      const char* key, size_t key_length,
-      const char* existing_value, size_t existing_value_length,
-      const char* const* operands_list, const size_t* operands_list_length,
-      int num_operands,
-      unsigned char* success, size_t* new_value_length);
+  char* (*full_merge_)(void*, const char* key, size_t key_length,
+                       const char* existing_value, size_t existing_value_length,
+                       const char* const* operands_list,
+                       const size_t* operands_list_length, int num_operands,
+                       unsigned char* success, size_t* new_value_length);
   char* (*partial_merge_)(void*, const char* key, size_t key_length,
                           const char* const* operands_list,
                           const size_t* operands_list_length, int num_operands,
                           unsigned char* success, size_t* new_value_length);
-  void (*delete_value_)(
-      void*,
-      const char* value, size_t value_length);
+  void (*delete_value_)(void*, const char* value, size_t value_length);
 
   ~rocksdb_mergeoperator_t() override { (*destructor_)(state_); }
 
@@ -447,16 +482,10 @@ struct rocksdb_slicetransform_t : public SliceTransform {
   void* state_;
   void (*destructor_)(void*);
   const char* (*name_)(void*);
-  char* (*transform_)(
-      void*,
-      const char* key, size_t length,
-      size_t* dst_length);
-  unsigned char (*in_domain_)(
-      void*,
-      const char* key, size_t length);
-  unsigned char (*in_range_)(
-      void*,
-      const char* key, size_t length);
+  char* (*transform_)(void*, const char* key, size_t length,
+                      size_t* dst_length);
+  unsigned char (*in_domain_)(void*, const char* key, size_t length);
+  unsigned char (*in_range_)(void*, const char* key, size_t length);
 
   ~rocksdb_slicetransform_t() override { (*destructor_)(state_); }
 
@@ -502,10 +531,8 @@ static char* CopyString(const std::string& str) {
   return result;
 }
 
-rocksdb_t* rocksdb_open(
-    const rocksdb_options_t* options,
-    const char* name,
-    char** errptr) {
+rocksdb_t* rocksdb_open(const rocksdb_options_t* options, const char* name,
+                        char** errptr) {
   DB* db;
   if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
     return nullptr;
@@ -515,11 +542,8 @@ rocksdb_t* rocksdb_open(
   return result;
 }
 
-rocksdb_t* rocksdb_open_with_ttl(
-    const rocksdb_options_t* options,
-    const char* name,
-    int ttl,
-    char** errptr) {
+rocksdb_t* rocksdb_open_with_ttl(const rocksdb_options_t* options,
+                                 const char* name, int ttl, char** errptr) {
   ROCKSDB_NAMESPACE::DBWithTTL* db;
   if (SaveError(errptr, ROCKSDB_NAMESPACE::DBWithTTL::Open(
                             options->rep, std::string(name), &db, ttl))) {
@@ -587,15 +611,13 @@ rocksdb_backup_engine_t* rocksdb_backup_engine_open_opts(
 }
 
 void rocksdb_backup_engine_create_new_backup(rocksdb_backup_engine_t* be,
-                                             rocksdb_t* db,
-                                             char** errptr) {
+                                             rocksdb_t* db, char** errptr) {
   SaveError(errptr, be->rep->CreateNewBackup(db->rep));
 }
 
-void rocksdb_backup_engine_create_new_backup_flush(rocksdb_backup_engine_t* be,
-                                                   rocksdb_t* db,
-                                                   unsigned char flush_before_backup,
-                                                   char** errptr) {
+void rocksdb_backup_engine_create_new_backup_flush(
+    rocksdb_backup_engine_t* be, rocksdb_t* db,
+    unsigned char flush_before_backup, char** errptr) {
   SaveError(errptr, be->rep->CreateNewBackup(db->rep, flush_before_backup));
 }
 
@@ -618,9 +640,8 @@ void rocksdb_restore_options_set_keep_log_files(rocksdb_restore_options_t* opt,
   opt->rep.keep_log_files = v;
 }
 
-
 void rocksdb_backup_engine_verify_backup(rocksdb_backup_engine_t* be,
-    uint32_t backup_id, char** errptr) {
+                                         uint32_t backup_id, char** errptr) {
   SaveError(errptr, be->rep->VerifyBackup(static_cast<BackupID>(backup_id)));
 }
 
@@ -885,13 +906,14 @@ rocksdb_t* rocksdb_open_column_families(
   DB* db;
   std::vector<ColumnFamilyHandle*> handles;
-  if (SaveError(errptr, DB::Open(DBOptions(db_options->rep),
-                                 std::string(name), column_families, &handles, &db))) {
+  if (SaveError(errptr, DB::Open(DBOptions(db_options->rep), std::string(name),
+                                 column_families, &handles, &db))) {
     return nullptr;
   }
 
   for (size_t i = 0; i < handles.size(); i++) {
-    rocksdb_column_family_handle_t* c_handle = new rocksdb_column_family_handle_t;
+    rocksdb_column_family_handle_t* c_handle =
+        new rocksdb_column_family_handle_t;
     c_handle->rep = handles[i];
     column_family_handles[i] = c_handle;
   }
@@ -958,7 +980,8 @@ rocksdb_t* rocksdb_open_for_read_only_column_families(
   }
 
   for (size_t i = 0; i < handles.size(); i++) {
-    rocksdb_column_family_handle_t* c_handle = new rocksdb_column_family_handle_t;
+    rocksdb_column_family_handle_t* c_handle =
+        new rocksdb_column_family_handle_t;
     c_handle->rep = handles[i];
     column_family_handles[i] = c_handle;
   }
@@ -998,18 +1021,16 @@ rocksdb_t* rocksdb_open_as_secondary_column_families(
   return result;
 }
 
-char** rocksdb_list_column_families(
-    const rocksdb_options_t* options,
-    const char* name,
-    size_t* lencfs,
-    char** errptr) {
+char** rocksdb_list_column_families(const rocksdb_options_t* options,
+                                    const char* name, size_t* lencfs,
+                                    char** errptr) {
   std::vector<std::string> fams;
-  SaveError(errptr,
-            DB::ListColumnFamilies(DBOptions(options->rep),
-                                   std::string(name), &fams));
+  SaveError(errptr, DB::ListColumnFamilies(DBOptions(options->rep),
+                                           std::string(name), &fams));
   *lencfs = fams.size();
-  char** column_families = static_cast<char**>(malloc(sizeof(char*) * fams.size()));
+  char** column_families =
+      static_cast<char**>(malloc(sizeof(char*) * fams.size()));
   for (size_t i = 0; i < fams.size(); i++) {
     column_families[i] = strdup(fams[i].c_str());
   }
@@ -1024,14 +1045,12 @@ void rocksdb_list_column_families_destroy(char** list, size_t len) {
 }
 
 rocksdb_column_family_handle_t* rocksdb_create_column_family(
-    rocksdb_t* db,
-    const rocksdb_options_t* column_family_options,
-    const char* column_family_name,
-    char** errptr) {
+    rocksdb_t* db, const rocksdb_options_t* column_family_options,
+    const char* column_family_name, char** errptr) {
   rocksdb_column_family_handle_t* handle = new rocksdb_column_family_handle_t;
-  SaveError(errptr,
-            db->rep->CreateColumnFamily(ColumnFamilyOptions(column_family_options->rep),
-                                        std::string(column_family_name), &(handle->rep)));
+  SaveError(errptr, db->rep->CreateColumnFamily(
+                        ColumnFamilyOptions(column_family_options->rep),
+                        std::string(column_family_name), &(handle->rep)));
   return handle;
 }
 
@@ -1047,10 +1066,9 @@ rocksdb_column_family_handle_t* rocksdb_create_column_family_with_ttl(
   return handle;
 }
 
-void rocksdb_drop_column_family(
-    rocksdb_t* db,
-    rocksdb_column_family_handle_t* handle,
-    char** errptr) {
+void rocksdb_drop_column_family(rocksdb_t* db,
+                                rocksdb_column_family_handle_t* handle,
+                                char** errptr) {
   SaveError(errptr, db->rep->DropColumnFamily(handle->rep));
 }
 
@@ -1066,17 +1084,15 @@ char* rocksdb_column_family_handle_get_name(
   return CopyString(name);
 }
 
-void rocksdb_column_family_handle_destroy(rocksdb_column_family_handle_t* handle) {
+void rocksdb_column_family_handle_destroy(
+    rocksdb_column_family_handle_t* handle) {
   delete handle->rep;
   delete handle;
 }
 
-void rocksdb_put(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    const char* val, size_t vallen,
-    char** errptr) {
+void rocksdb_put(rocksdb_t* db, const rocksdb_writeoptions_t* options,
+                 const char* key, size_t keylen, const char* val, size_t vallen,
+                 char** errptr) {
   SaveError(errptr,
             db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
 }
@@ -1113,12 +1129,9 @@ void rocksdb_delete(rocksdb_t* db, const rocksdb_writeoptions_t* options,
   SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
 }
 
-void rocksdb_delete_cf(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    char** errptr) {
+void rocksdb_delete_cf(rocksdb_t* db, const rocksdb_writeoptions_t* options,
+                       rocksdb_column_family_handle_t* column_family,
+                       const char* key, size_t keylen, char** errptr) {
   SaveError(errptr, db->rep->Delete(options->rep, column_family->rep,
                                     Slice(key, keylen)));
 }
@@ -1215,25 +1228,18 @@ void rocksdb_merge_cf(rocksdb_t* db, const rocksdb_writeoptions_t* options,
                       rocksdb_column_family_handle_t* column_family,
                       const char* key, size_t keylen, const char* val,
                       size_t vallen, char** errptr) {
-  SaveError(errptr,
-            db->rep->Merge(options->rep, column_family->rep,
-                           Slice(key, keylen), Slice(val, vallen)));
+  SaveError(errptr, db->rep->Merge(options->rep, column_family->rep,
+                                   Slice(key, keylen), Slice(val, vallen)));
 }
 
-void rocksdb_write(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    rocksdb_writebatch_t* batch,
-    char** errptr) {
+void rocksdb_write(rocksdb_t* db, const rocksdb_writeoptions_t* options,
+                   rocksdb_writebatch_t* batch, char** errptr) {
   SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
 }
 
-char* rocksdb_get(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
+char* rocksdb_get(rocksdb_t* db, const rocksdb_readoptions_t* options,
+                  const char* key, size_t keylen, size_t* vallen,
+                  char** errptr) {
   char* result = nullptr;
   std::string tmp;
   Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
@@ -1249,17 +1255,14 @@ char* rocksdb_get(
   return result;
 }
 
-char* rocksdb_get_cf(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
+char* rocksdb_get_cf(rocksdb_t* db, const rocksdb_readoptions_t* options,
+                     rocksdb_column_family_handle_t* column_family,
+                     const char* key, size_t keylen, size_t* vallen,
+                     char** errptr) {
   char* result = nullptr;
   std::string tmp;
-  Status s = db->rep->Get(options->rep, column_family->rep,
-                          Slice(key, keylen), &tmp);
+  Status s =
+      db->rep->Get(options->rep, column_family->rep, Slice(key, keylen), &tmp);
   if (s.ok()) {
     *vallen = tmp.size();
     result = CopyString(tmp);
@@ -1539,21 +1542,19 @@ unsigned char rocksdb_key_may_exist_cf(
 }
 
 rocksdb_iterator_t* rocksdb_create_iterator(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options) {
+    rocksdb_t* db, const rocksdb_readoptions_t* options) {
   rocksdb_iterator_t* result = new rocksdb_iterator_t;
   result->rep = db->rep->NewIterator(options->rep);
   return result;
 }
 
 rocksdb_wal_iterator_t* rocksdb_get_updates_since(
-        rocksdb_t* db, uint64_t seq_number,
-        const rocksdb_wal_readoptions_t* options,
-        char** errptr) {
+    rocksdb_t* db, uint64_t seq_number,
+    const rocksdb_wal_readoptions_t* options, char** errptr) {
   std::unique_ptr<TransactionLogIterator> iter;
   TransactionLogIterator::ReadOptions ro;
-  if (options!=nullptr) {
-      ro = options->rep;
+  if (options != nullptr) {
+    ro = options->rep;
   }
   if (SaveError(errptr, db->rep->GetUpdatesSince(seq_number, &iter, ro))) {
     return nullptr;
@@ -1563,24 +1564,24 @@ rocksdb_wal_iterator_t* rocksdb_get_updates_since(
   return result;
 }
 
-void rocksdb_wal_iter_next(rocksdb_wal_iterator_t* iter) {
-    iter->rep->Next();
-}
+void rocksdb_wal_iter_next(rocksdb_wal_iterator_t* iter) { iter->rep->Next(); }
 
 unsigned char rocksdb_wal_iter_valid(const rocksdb_wal_iterator_t* iter) {
-    return iter->rep->Valid();
+  return iter->rep->Valid();
 }
 
-void rocksdb_wal_iter_status (const rocksdb_wal_iterator_t* iter, char** errptr) {
-    SaveError(errptr, iter->rep->status());
+void rocksdb_wal_iter_status(const rocksdb_wal_iterator_t* iter,
+                             char** errptr) {
+  SaveError(errptr, iter->rep->status());
 }
 
-void rocksdb_wal_iter_destroy (const rocksdb_wal_iterator_t* iter) {
+void rocksdb_wal_iter_destroy(const rocksdb_wal_iterator_t* iter) {
   delete iter->rep;
   delete iter;
 }
 
-rocksdb_writebatch_t* rocksdb_wal_iter_get_batch (const rocksdb_wal_iterator_t* iter, uint64_t* seq) {
+rocksdb_writebatch_t* rocksdb_wal_iter_get_batch(
+    const rocksdb_wal_iterator_t* iter, uint64_t* seq) {
   rocksdb_writebatch_t* result = rocksdb_writebatch_create();
   BatchResult wal_batch = iter->rep->GetBatch();
   result->rep = std::move(*wal_batch.writeBatchPtr);
@@ -1590,26 +1591,22 @@ rocksdb_writebatch_t* rocksdb_wal_iter_get_batch (const rocksdb_wal_iterator_t*
   return result;
 }
 
-uint64_t rocksdb_get_latest_sequence_number (rocksdb_t *db) {
-    return db->rep->GetLatestSequenceNumber();
+uint64_t rocksdb_get_latest_sequence_number(rocksdb_t* db) {
+  return db->rep->GetLatestSequenceNumber();
 }
 
 rocksdb_iterator_t* rocksdb_create_iterator_cf(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
+    rocksdb_t* db, const rocksdb_readoptions_t* options,
     rocksdb_column_family_handle_t* column_family) {
   rocksdb_iterator_t* result = new rocksdb_iterator_t;
   result->rep = db->rep->NewIterator(options->rep, column_family->rep);
   return result;
 }
 
-void rocksdb_create_iterators(
-    rocksdb_t *db,
-    rocksdb_readoptions_t* opts,
-    rocksdb_column_family_handle_t** column_families,
-    rocksdb_iterator_t** iterators,
-    size_t size,
-    char** errptr) {
+void rocksdb_create_iterators(rocksdb_t* db, rocksdb_readoptions_t* opts,
+                              rocksdb_column_family_handle_t** column_families,
+                              rocksdb_iterator_t** iterators, size_t size,
+                              char** errptr) {
   std::vector<ColumnFamilyHandle*> column_families_vec;
   for (size_t i = 0; i < size; i++) {
     column_families_vec.push_back(column_families[i]->rep);
@@ -1628,23 +1625,19 @@ void rocksdb_create_iterators(
   }
 }
 
-const rocksdb_snapshot_t* rocksdb_create_snapshot(
-    rocksdb_t* db) {
+const rocksdb_snapshot_t* rocksdb_create_snapshot(rocksdb_t* db) {
   rocksdb_snapshot_t* result = new rocksdb_snapshot_t;
   result->rep = db->rep->GetSnapshot();
   return result;
 }
 
-void rocksdb_release_snapshot(
-    rocksdb_t* db,
-    const rocksdb_snapshot_t* snapshot) {
+void rocksdb_release_snapshot(rocksdb_t* db,
+                              const rocksdb_snapshot_t* snapshot) {
   db->rep->ReleaseSnapshot(snapshot->rep);
   delete snapshot;
 }
 
-char* rocksdb_property_value(
-    rocksdb_t* db,
-    const char* propname) {
+char* rocksdb_property_value(rocksdb_t* db, const char* propname) {
   std::string tmp;
   if (db->rep->GetProperty(Slice(propname), &tmp)) {
     // We use strdup() since we expect human readable output.
@@ -1654,10 +1647,8 @@ char* rocksdb_property_value(
   }
 }
 
-int rocksdb_property_int(
-    rocksdb_t* db,
-    const char* propname,
-    uint64_t *out_val) {
+int rocksdb_property_int(rocksdb_t* db, const char* propname,
+                         uint64_t* out_val) {
   if (db->rep->GetIntProperty(Slice(propname), out_val)) {
     return 0;
   } else {
@@ -1665,11 +1656,9 @@ int rocksdb_property_int(
   }
 }
 
-int rocksdb_property_int_cf(
-    rocksdb_t* db,
-    rocksdb_column_family_handle_t* column_family,
-    const char* propname,
-    uint64_t *out_val) {
+int rocksdb_property_int_cf(rocksdb_t* db,
+                            rocksdb_column_family_handle_t* column_family,
+                            const char* propname, uint64_t* out_val) {
   if (db->rep->GetIntProperty(column_family->rep, Slice(propname), out_val)) {
     return 0;
   } else {
@@ -1677,10 +1666,9 @@ int rocksdb_property_int_cf(
   }
 }
 
-char* rocksdb_property_value_cf(
-    rocksdb_t* db,
-    rocksdb_column_family_handle_t* column_family,
-    const char* propname) {
+char* rocksdb_property_value_cf(rocksdb_t* db,
+                                rocksdb_column_family_handle_t* column_family,
+                                const char* propname) {
   std::string tmp;
   if (db->rep->GetProperty(column_family->rep, Slice(propname), &tmp)) {
     // We use strdup() since we expect human readable output.
@@ -1726,23 +1714,19 @@ void rocksdb_approximate_sizes_cf(
   delete[] ranges;
 }
 
-void rocksdb_delete_file(
-    rocksdb_t* db,
-    const char* name) {
+void rocksdb_delete_file(rocksdb_t* db, const char* name) {
   db->rep->DeleteFile(name);
 }
 
-const rocksdb_livefiles_t* rocksdb_livefiles(
-    rocksdb_t* db) {
+const rocksdb_livefiles_t* rocksdb_livefiles(rocksdb_t* db) {
  rocksdb_livefiles_t* result = new rocksdb_livefiles_t;
  db->rep->GetLiveFilesMetaData(&result->rep);
  return result;
 }
 
-void rocksdb_compact_range(
-    rocksdb_t* db,
-    const char* start_key, size_t start_key_len,
-    const char* limit_key, size_t limit_key_len) {
+void rocksdb_compact_range(rocksdb_t* db, const char* start_key,
+                           size_t start_key_len, const char* limit_key,
+                           size_t limit_key_len) {
   Slice a, b;
   db->rep->CompactRange(
       CompactRangeOptions(),
@@ -1751,11 +1735,10 @@ void rocksdb_compact_range(
       (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
 }
 
-void rocksdb_compact_range_cf(
-    rocksdb_t* db,
-    rocksdb_column_family_handle_t* column_family,
-    const char* start_key, size_t start_key_len,
-    const char* limit_key, size_t limit_key_len) {
+void rocksdb_compact_range_cf(rocksdb_t* db,
+                              rocksdb_column_family_handle_t* column_family,
+                              const char* start_key, size_t start_key_len,
+                              const char* limit_key, size_t limit_key_len) {
   Slice a, b;
   db->rep->CompactRange(
       CompactRangeOptions(), column_family->rep,
@@ -1811,18 +1794,14 @@ void rocksdb_compact_range_cf_opt(rocksdb_t* db,
       (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
 }
 
-void rocksdb_flush(
-    rocksdb_t* db,
-    const rocksdb_flushoptions_t* options,
-    char** errptr) {
+void rocksdb_flush(rocksdb_t* db, const rocksdb_flushoptions_t* options,
+                   char** errptr) {
   SaveError(errptr, db->rep->Flush(options->rep));
 }
 
-void rocksdb_flush_cf(
-    rocksdb_t* db,
-    const rocksdb_flushoptions_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    char** errptr) {
+void rocksdb_flush_cf(rocksdb_t* db, const rocksdb_flushoptions_t* options,
+                      rocksdb_column_family_handle_t* column_family,
+                      char** errptr) {
   SaveError(errptr, db->rep->Flush(options->rep, column_family->rep));
 }
 
@@ -1830,30 +1809,22 @@ void rocksdb_flush_wal(rocksdb_t* db, unsigned char sync, char** errptr) {
   SaveError(errptr, db->rep->FlushWAL(sync));
 }
 
-void rocksdb_disable_file_deletions(
-    rocksdb_t* db,
-    char** errptr) {
+void rocksdb_disable_file_deletions(rocksdb_t* db, char** errptr) {
   SaveError(errptr, db->rep->DisableFileDeletions());
 }
 
-void rocksdb_enable_file_deletions(
-    rocksdb_t* db,
-    unsigned char force,
-    char** errptr) {
+void rocksdb_enable_file_deletions(rocksdb_t* db, unsigned char force,
+                                   char** errptr) {
   SaveError(errptr, db->rep->EnableFileDeletions(force));
 }
 
-void rocksdb_destroy_db(
-    const rocksdb_options_t* options,
-    const char* name,
-    char** errptr) {
+void rocksdb_destroy_db(const rocksdb_options_t* options, const char* name,
+                        char** errptr) {
   SaveError(errptr, DestroyDB(name, options->rep));
 }
 
-void rocksdb_repair_db(
-    const rocksdb_options_t* options,
-    const char* name,
-    char** errptr) {
+void rocksdb_repair_db(const rocksdb_options_t* options, const char* name,
+                       char** errptr) {
   SaveError(errptr, RepairDB(name, options->rep));
 }
 
@@ -1883,13 +1854,9 @@ void rocksdb_iter_seek_for_prev(rocksdb_iterator_t* iter, const char* k,
   iter->rep->SeekForPrev(Slice(k, klen));
 }
 
-void rocksdb_iter_next(rocksdb_iterator_t* iter) {
-  iter->rep->Next();
-}
+void rocksdb_iter_next(rocksdb_iterator_t* iter) { iter->rep->Next(); }
 
-void rocksdb_iter_prev(rocksdb_iterator_t* iter) {
-  iter->rep->Prev();
-}
+void rocksdb_iter_prev(rocksdb_iterator_t* iter) { iter->rep->Prev(); }
 
 const char* rocksdb_iter_key(const rocksdb_iterator_t* iter, size_t* klen) {
   Slice s = iter->rep->key();
@@ -1991,20 +1958,18 @@ void rocksdb_writebatch_merge(rocksdb_writebatch_t* b, const char* key,
   b->rep.Merge(Slice(key, klen), Slice(val, vlen));
 }
 
-void rocksdb_writebatch_merge_cf(
-    rocksdb_writebatch_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
+void rocksdb_writebatch_merge_cf(rocksdb_writebatch_t* b,
+                                 rocksdb_column_family_handle_t* column_family,
+                                 const char* key, size_t klen, const char* val,
+                                 size_t vlen) {
   b->rep.Merge(column_family->rep, Slice(key, klen), Slice(val, vlen));
 }
 
-void rocksdb_writebatch_mergev(
-    rocksdb_writebatch_t* b,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
+void rocksdb_writebatch_mergev(rocksdb_writebatch_t* b, int num_keys,
+                               const char* const* keys_list,
+                               const size_t* keys_list_sizes, int num_values,
+                               const char* const* values_list,
+                               const size_t* values_list_sizes) {
   std::vector<Slice> key_slices(num_keys);
   for (int i = 0; i < num_keys; i++) {
     key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
@@ -2017,13 +1982,12 @@ void rocksdb_writebatch_mergev(
             SliceParts(value_slices.data(), num_values));
 }
 
-void rocksdb_writebatch_mergev_cf(
-    rocksdb_writebatch_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
+void rocksdb_writebatch_mergev_cf(rocksdb_writebatch_t* b,
+                                  rocksdb_column_family_handle_t* column_family,
+                                  int num_keys, const char* const* keys_list,
+                                  const size_t* keys_list_sizes, int num_values,
+                                  const char* const* values_list,
+                                  const size_t* values_list_sizes) {
   std::vector<Slice> key_slices(num_keys);
   for (int i = 0; i < num_keys; i++) {
     key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
@@ -2036,9 +2000,8 @@ void rocksdb_writebatch_mergev_cf(
             SliceParts(value_slices.data(), num_values));
 }
 
-void rocksdb_writebatch_delete(
-    rocksdb_writebatch_t* b,
-    const char* key, size_t klen) {
+void rocksdb_writebatch_delete(rocksdb_writebatch_t* b, const char* key,
+                               size_t klen) {
   b->rep.Delete(Slice(key, klen));
 }
 
@@ -2047,10 +2010,9 @@ void rocksdb_writebatch_singledelete(rocksdb_writebatch_t* b, const char* key,
   b->rep.SingleDelete(Slice(key, klen));
 }
 
-void rocksdb_writebatch_delete_cf(
-    rocksdb_writebatch_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen) {
+void rocksdb_writebatch_delete_cf(rocksdb_writebatch_t* b,
+                                  rocksdb_column_family_handle_t* column_family,
+                                  const char* key, size_t klen) {
   b->rep.Delete(column_family->rep, Slice(key, klen));
 }
 
@@ -2139,9 +2101,8 @@ void rocksdb_writebatch_delete_rangev_cf(
                      SliceParts(end_key_slices.data(), num_keys));
 }
 
-void rocksdb_writebatch_put_log_data(
-    rocksdb_writebatch_t* b,
-    const char* blob, size_t len) {
+void rocksdb_writebatch_put_log_data(rocksdb_writebatch_t* b, const char* blob,
+                                     size_t len) {
   b->rep.PutLogData(Slice(blob, len));
 }
 
@@ -2158,11 +2119,11 @@ class H : public WriteBatch::Handler {
   }
 };
 
-void rocksdb_writebatch_iterate(
-    rocksdb_writebatch_t* b,
-    void* state,
-    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
-    void (*deleted)(void*, const char* k, size_t klen)) {
+void rocksdb_writebatch_iterate(rocksdb_writebatch_t* b, void* state,
+                                void (*put)(void*, const char* k, size_t klen,
+                                            const char* v, size_t vlen),
+                                void (*deleted)(void*, const char* k,
+                                                size_t klen)) {
   H handler;
   handler.state_ = state;
   handler.put_ = put;
@@ -2188,9 +2149,11 @@ void rocksdb_writebatch_pop_save_point(rocksdb_writebatch_t* b, char** errptr) {
   SaveError(errptr, b->rep.PopSavePoint());
 }
 
-rocksdb_writebatch_wi_t* rocksdb_writebatch_wi_create(size_t reserved_bytes, unsigned char overwrite_key) {
+rocksdb_writebatch_wi_t* rocksdb_writebatch_wi_create(
+    size_t reserved_bytes, unsigned char overwrite_key) {
   rocksdb_writebatch_wi_t* b = new rocksdb_writebatch_wi_t;
-  b->rep = new WriteBatchWithIndex(BytewiseComparator(), reserved_bytes, overwrite_key);
+  b->rep = new WriteBatchWithIndex(BytewiseComparator(), reserved_bytes,
+                                   overwrite_key);
   return b;
 }
 
@@ -2209,27 +2172,23 @@ int rocksdb_writebatch_wi_count(rocksdb_writebatch_wi_t* b) {
   return b->rep->GetWriteBatch()->Count();
 }
 
-void rocksdb_writebatch_wi_put(
-    rocksdb_writebatch_wi_t* b,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
+void rocksdb_writebatch_wi_put(rocksdb_writebatch_wi_t* b, const char* key,
+                               size_t klen, const char* val, size_t vlen) {
   b->rep->Put(Slice(key, klen), Slice(val, vlen));
 }
 
-void rocksdb_writebatch_wi_put_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
+void rocksdb_writebatch_wi_put_cf(rocksdb_writebatch_wi_t* b,
+                                  rocksdb_column_family_handle_t* column_family,
+                                  const char* key, size_t klen, const char* val,
+                                  size_t vlen) {
   b->rep->Put(column_family->rep, Slice(key, klen), Slice(val, vlen));
 }
 
-void rocksdb_writebatch_wi_putv(
-    rocksdb_writebatch_wi_t* b,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
+void rocksdb_writebatch_wi_putv(rocksdb_writebatch_wi_t* b, int num_keys,
+                                const char* const* keys_list,
+                                const size_t* keys_list_sizes, int num_values,
+                                const char* const* values_list,
+                                const size_t* values_list_sizes) {
   std::vector<Slice> key_slices(num_keys);
   for (int i = 0; i < num_keys; i++) {
     key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
@@ -2239,14 +2198,12 @@ void rocksdb_writebatch_wi_putv(
     value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
   }
   b->rep->Put(SliceParts(key_slices.data(), num_keys),
-             SliceParts(value_slices.data(), num_values));
+              SliceParts(value_slices.data(), num_values));
 }
 
 void rocksdb_writebatch_wi_putv_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
+    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
+    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes,
     int num_values, const char* const* values_list,
     const size_t* values_list_sizes) {
   std::vector<Slice> key_slices(num_keys);
@@ -2258,30 +2215,25 @@ void rocksdb_writebatch_wi_putv_cf(
     value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
   }
   b->rep->Put(column_family->rep, SliceParts(key_slices.data(), num_keys),
-             SliceParts(value_slices.data(), num_values));
+              SliceParts(value_slices.data(), num_values));
 }
 
-void rocksdb_writebatch_wi_merge(
-    rocksdb_writebatch_wi_t* b,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
+void rocksdb_writebatch_wi_merge(rocksdb_writebatch_wi_t* b, const char* key,
+                                 size_t klen, const char* val, size_t vlen) {
   b->rep->Merge(Slice(key, klen), Slice(val, vlen));
 }
 
 void rocksdb_writebatch_wi_merge_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
+    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
+    const char* key, size_t klen, const char* val, size_t vlen) {
   b->rep->Merge(column_family->rep, Slice(key, klen), Slice(val, vlen));
 }
 
-void rocksdb_writebatch_wi_mergev(
-    rocksdb_writebatch_wi_t* b,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
+void rocksdb_writebatch_wi_mergev(rocksdb_writebatch_wi_t* b, int num_keys,
+                                  const char* const* keys_list,
+                                  const size_t* keys_list_sizes, int num_values,
+                                  const char* const* values_list,
+                                  const size_t* values_list_sizes) {
   std::vector<Slice> key_slices(num_keys);
   for (int i = 0; i < num_keys; i++) {
     key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
@@ -2291,14 +2243,12 @@ void rocksdb_writebatch_wi_mergev(
     value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
   }
   b->rep->Merge(SliceParts(key_slices.data(), num_keys),
-               SliceParts(value_slices.data(), num_values));
+                SliceParts(value_slices.data(), num_values));
 }
 
 void rocksdb_writebatch_wi_mergev_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
+    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
+    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes,
     int num_values, const char* const* values_list,
     const size_t* values_list_sizes) {
   std::vector<Slice> key_slices(num_keys);
@@ -2310,12 +2260,11 @@ void rocksdb_writebatch_wi_mergev_cf(
     value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
   }
   b->rep->Merge(column_family->rep, SliceParts(key_slices.data(), num_keys),
-               SliceParts(value_slices.data(), num_values));
+                SliceParts(value_slices.data(), num_values));
 }
 
-void rocksdb_writebatch_wi_delete(
-    rocksdb_writebatch_wi_t* b,
-    const char* key, size_t klen) {
+void rocksdb_writebatch_wi_delete(rocksdb_writebatch_wi_t* b, const char* key,
+                                  size_t klen) {
   b->rep->Delete(Slice(key, klen));
 }
 
@@ -2325,8 +2274,7 @@ void rocksdb_writebatch_wi_singledelete(rocksdb_writebatch_wi_t* b,
 }
 
 void rocksdb_writebatch_wi_delete_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
+    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
     const char* key, size_t klen) {
   b->rep->Delete(column_family->rep, Slice(key, klen));
 }
@@ -2337,10 +2285,9 @@ void rocksdb_writebatch_wi_singledelete_cf(
   b->rep->SingleDelete(column_family->rep, Slice(key, klen));
 }
 
-void rocksdb_writebatch_wi_deletev(
-    rocksdb_writebatch_wi_t* b,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes) {
+void rocksdb_writebatch_wi_deletev(rocksdb_writebatch_wi_t* b, int num_keys,
+                                   const char* const* keys_list,
+                                   const size_t* keys_list_sizes) {
   std::vector<Slice> key_slices(num_keys);
   for (int i = 0; i < num_keys; i++) {
     key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
@@ -2349,10 +2296,8 @@ void rocksdb_writebatch_wi_deletev(
 }
 
 void rocksdb_writebatch_wi_deletev_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes) {
+    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
+    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes) {
   std::vector<Slice> key_slices(num_keys);
   for (int i = 0; i < num_keys; i++) {
     key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
@@ -2361,11 +2306,12 @@ void rocksdb_writebatch_wi_deletev_cf(
 }
 
 void rocksdb_writebatch_wi_delete_range(rocksdb_writebatch_wi_t* b,
-                                     const char* start_key,
-                                     size_t start_key_len, const char* end_key,
-                                     size_t end_key_len) {
+                                        const char* start_key,
+                                        size_t start_key_len,
+                                        const char* end_key,
+                                        size_t end_key_len) {
   b->rep->DeleteRange(Slice(start_key, start_key_len),
-                     Slice(end_key, end_key_len));
+                      Slice(end_key, end_key_len));
 }
 
 void rocksdb_writebatch_wi_delete_range_cf(
@@ -2373,14 +2319,15 @@ void rocksdb_writebatch_wi_delete_range_cf(
     const char* start_key, size_t start_key_len, const char* end_key,
     size_t end_key_len) {
   b->rep->DeleteRange(column_family->rep, Slice(start_key, start_key_len),
-                     Slice(end_key, end_key_len));
+                      Slice(end_key, end_key_len));
 }
 
-void rocksdb_writebatch_wi_delete_rangev(rocksdb_writebatch_wi_t* b, int num_keys,
-                                      const char* const* start_keys_list,
-                                      const size_t* start_keys_list_sizes,
-                                      const char* const* end_keys_list,
-                                      const size_t* end_keys_list_sizes) {
+void rocksdb_writebatch_wi_delete_rangev(rocksdb_writebatch_wi_t* b,
+                                         int num_keys,
+                                         const char* const* start_keys_list,
+                                         const size_t* start_keys_list_sizes,
+                                         const char* const* end_keys_list,
+                                         const size_t* end_keys_list_sizes) {
   std::vector<Slice> start_key_slices(num_keys);
   std::vector<Slice> end_key_slices(num_keys);
   for (int i = 0; i < num_keys; i++) {
@@ -2388,7 +2335,7 @@ void rocksdb_writebatch_wi_delete_rangev(rocksdb_writebatch_wi_t* b, int num_key
     end_key_slices[i] = Slice(end_keys_list[i], end_keys_list_sizes[i]);
   }
   b->rep->DeleteRange(SliceParts(start_key_slices.data(), num_keys),
-                     SliceParts(end_key_slices.data(), num_keys));
+                      SliceParts(end_key_slices.data(), num_keys));
 }
 
 void rocksdb_writebatch_wi_delete_rangev_cf(
@@ -2403,19 +2350,17 @@ void rocksdb_writebatch_wi_delete_rangev_cf(
     end_key_slices[i] = Slice(end_keys_list[i], end_keys_list_sizes[i]);
   }
   b->rep->DeleteRange(column_family->rep,
-                     SliceParts(start_key_slices.data(), num_keys),
-                     SliceParts(end_key_slices.data(), num_keys));
+                      SliceParts(start_key_slices.data(), num_keys),
+                      SliceParts(end_key_slices.data(), num_keys));
 }
 
-void rocksdb_writebatch_wi_put_log_data(
-    rocksdb_writebatch_wi_t* b,
-    const char* blob, size_t len) {
+void rocksdb_writebatch_wi_put_log_data(rocksdb_writebatch_wi_t* b,
+                                        const char* blob, size_t len) {
   b->rep->PutLogData(Slice(blob, len));
 }
 
 void rocksdb_writebatch_wi_iterate(
-    rocksdb_writebatch_wi_t* b,
-    void* state,
+    rocksdb_writebatch_wi_t* b, void* state,
     void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
     void (*deleted)(void*, const char* k, size_t klen)) {
   H handler;
@@ -2425,7 +2370,8 @@ void rocksdb_writebatch_wi_iterate(
   b->rep->GetWriteBatch()->Iterate(&handler);
 }
 
-const char* rocksdb_writebatch_wi_data(rocksdb_writebatch_wi_t* b, size_t* size) {
+const char* rocksdb_writebatch_wi_data(rocksdb_writebatch_wi_t* b,
+                                       size_t* size) {
   WriteBatch* wb = b->rep->GetWriteBatch();
   *size = wb->GetDataSize();
   return wb->Data().c_str();
@@ -2436,13 +2382,12 @@ void rocksdb_writebatch_wi_set_save_point(rocksdb_writebatch_wi_t* b) {
 }
 
 void rocksdb_writebatch_wi_rollback_to_save_point(rocksdb_writebatch_wi_t* b,
-                                               char** errptr) {
+                                                  char** errptr) {
   SaveError(errptr, b->rep->RollbackToSavePoint());
 }
 
 rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_iterator_t* base_iterator) {
+    rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator) {
   rocksdb_iterator_t* result = new rocksdb_iterator_t;
   result->rep = wbwi->rep->NewIteratorWithBase(base_iterator->rep);
   delete base_iterator;
@@ -2459,12 +2404,10 @@ rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base_cf(
   return result;
 }
 
-char* rocksdb_writebatch_wi_get_from_batch(
-    rocksdb_writebatch_wi_t* wbwi,
-    const rocksdb_options_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
+char* rocksdb_writebatch_wi_get_from_batch(rocksdb_writebatch_wi_t* wbwi,
+                                           const rocksdb_options_t* options,
+                                           const char* key, size_t keylen,
+                                           size_t* vallen, char** errptr) {
   char* result = nullptr;
   std::string tmp;
   Status s = wbwi->rep->GetFromBatch(options->rep, Slice(key, keylen), &tmp);
@@ -2481,16 +2424,13 @@ char* rocksdb_writebatch_wi_get_from_batch(
 }
 
 char* rocksdb_writebatch_wi_get_from_batch_cf(
-    rocksdb_writebatch_wi_t* wbwi,
-    const rocksdb_options_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
+    rocksdb_writebatch_wi_t* wbwi, const rocksdb_options_t* options,
+    rocksdb_column_family_handle_t* column_family, const char* key,
+    size_t keylen, size_t* vallen, char** errptr) {
   char* result = nullptr;
   std::string tmp;
   Status s = wbwi->rep->GetFromBatch(column_family->rep, options->rep,
-                                    Slice(key, keylen), &tmp);
+                                     Slice(key, keylen), &tmp);
   if (s.ok()) {
     *vallen = tmp.size();
     result = CopyString(tmp);
@@ -2504,15 +2444,13 @@ char* rocksdb_writebatch_wi_get_from_batch_cf(
 }
 
 char* rocksdb_writebatch_wi_get_from_batch_and_db(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
+    rocksdb_writebatch_wi_t* wbwi, rocksdb_t* db,
+    const rocksdb_readoptions_t* options, const char* key, size_t keylen,
+    size_t* vallen, char** errptr) {
   char* result = nullptr;
   std::string tmp;
-  Status s = wbwi->rep->GetFromBatchAndDB(db->rep, options->rep, Slice(key, keylen), &tmp);
+  Status s = wbwi->rep->GetFromBatchAndDB(db->rep, options->rep,
+                                          Slice(key, keylen), &tmp);
   if (s.ok()) {
     *vallen = tmp.size();
     result = CopyString(tmp);
@@ -2526,17 +2464,14 @@ char* rocksdb_writebatch_wi_get_from_batch_and_db(
 }
 
 char* rocksdb_writebatch_wi_get_from_batch_and_db_cf(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_t* db,
+    rocksdb_writebatch_wi_t* wbwi, rocksdb_t* db,
     const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
+    rocksdb_column_family_handle_t* column_family, const char* key,
+    size_t keylen, size_t* vallen, char** errptr) {
   char* result = nullptr;
   std::string tmp;
-  Status s = wbwi->rep->GetFromBatchAndDB(db->rep, options->rep, column_family->rep,
-                                         Slice(key, keylen), &tmp);
+  Status s = wbwi->rep->GetFromBatchAndDB(
+      db->rep, options->rep, column_family->rep, Slice(key, keylen), &tmp);
   if (s.ok()) {
     *vallen = tmp.size();
     result = CopyString(tmp);
@@ -2549,11 +2484,9 @@ char* rocksdb_writebatch_wi_get_from_batch_and_db_cf(
   return result;
 }
 
-void rocksdb_write_writebatch_wi(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    rocksdb_writebatch_wi_t* wbwi,
-    char** errptr) {
+void rocksdb_write_writebatch_wi(rocksdb_t* db,
+                                 const rocksdb_writeoptions_t* options,
+                                 rocksdb_writebatch_wi_t* wbwi, char** errptr) {
   WriteBatch* wb = wbwi->rep->GetWriteBatch();
   SaveError(errptr, db->rep->Write(options->rep, wb));
 }
@@ -2608,8 +2541,7 @@ void rocksdb_load_latest_options_destroy(
   }
 }
 
-rocksdb_block_based_table_options_t*
-rocksdb_block_based_options_create() {
+rocksdb_block_based_table_options_t* rocksdb_block_based_options_create() {
   return new rocksdb_block_based_table_options_t;
 }
 
@@ -2639,22 +2571,26 @@ void rocksdb_block_based_options_set_block_restart_interval(
 }
 
 void rocksdb_block_based_options_set_index_block_restart_interval(
-    rocksdb_block_based_table_options_t* options, int index_block_restart_interval) {
+    rocksdb_block_based_table_options_t* options,
+    int index_block_restart_interval) {
   options->rep.index_block_restart_interval = index_block_restart_interval;
 }
 
 void rocksdb_block_based_options_set_metadata_block_size(
-    rocksdb_block_based_table_options_t* options, uint64_t metadata_block_size) {
+    rocksdb_block_based_table_options_t* options,
+    uint64_t metadata_block_size) {
   options->rep.metadata_block_size = metadata_block_size;
 }
 
 void rocksdb_block_based_options_set_partition_filters(
-    rocksdb_block_based_table_options_t* options, unsigned char partition_filters) {
+    rocksdb_block_based_table_options_t* options,
+    unsigned char partition_filters) {
   options->rep.partition_filters = partition_filters;
 }
 
 void rocksdb_block_based_options_set_use_delta_encoding(
-    rocksdb_block_based_table_options_t* options, unsigned char use_delta_encoding) {
+    rocksdb_block_based_table_options_t* options,
+    unsigned char use_delta_encoding) {
   options->rep.use_delta_encoding = use_delta_encoding;
 }
 
@@ -2704,7 +2640,7 @@ void rocksdb_block_based_options_set_index_type(
 void rocksdb_block_based_options_set_data_block_index_type(
     rocksdb_block_based_table_options_t* options, int v) {
   options->rep.data_block_index_type =
-          static_cast<BlockBasedTableOptions::DataBlockIndexType>(v);
+      static_cast<BlockBasedTableOptions::DataBlockIndexType>(v);
 }
 
 void rocksdb_block_based_options_set_data_block_hash_ratio(
@@ -2733,7 +2669,7 @@ void rocksdb_block_based_options_set_pin_top_level_index_and_filter(
 }
 
 void rocksdb_options_set_block_based_table_factory(
-    rocksdb_options_t *opt,
+    rocksdb_options_t* opt,
     rocksdb_block_based_table_options_t* table_options) {
   if (table_options) {
     opt->rep.table_factory.reset(
@@ -2741,13 +2677,11 @@ void rocksdb_options_set_block_based_table_factory(
   }
 }
 
-rocksdb_cuckoo_table_options_t*
-rocksdb_cuckoo_options_create() {
+rocksdb_cuckoo_table_options_t* rocksdb_cuckoo_options_create() {
   return new rocksdb_cuckoo_table_options_t;
 }
 
-void rocksdb_cuckoo_options_destroy(
-    rocksdb_cuckoo_table_options_t* options) {
+void rocksdb_cuckoo_options_destroy(rocksdb_cuckoo_table_options_t* options) {
   delete options;
 }
 
@@ -2777,51 +2711,44 @@ void rocksdb_cuckoo_options_set_use_module_hash(
 }
 
 void rocksdb_options_set_cuckoo_table_factory(
-    rocksdb_options_t *opt,
-    rocksdb_cuckoo_table_options_t* table_options) {
+    rocksdb_options_t* opt, rocksdb_cuckoo_table_options_t* table_options) {
   if (table_options) {
     opt->rep.table_factory.reset(
         ROCKSDB_NAMESPACE::NewCuckooTableFactory(table_options->rep));
   }
 }
 
-void rocksdb_set_options(
-    rocksdb_t* db, int count, const char* const keys[], const char* const values[], char** errptr) {
-    std::unordered_map<std::string, std::string> options_map;
-    for (int i=0; i<count; i++) options_map[keys[i]] = values[i];
-    SaveError(errptr, db->rep->SetOptions(options_map));
-    }
-
-void rocksdb_set_options_cf(
-    rocksdb_t* db, rocksdb_column_family_handle_t* handle, int count, const char* const keys[], const char* const values[], char** errptr) {
-    std::unordered_map<std::string, std::string> options_map;
-    for (int i=0; i<count; i++) options_map[keys[i]] = values[i];
-    SaveError(errptr, db->rep->SetOptions(handle->rep, options_map));
-    }
-
-rocksdb_options_t* rocksdb_options_create() {
-  return new rocksdb_options_t;
+void rocksdb_set_options(rocksdb_t* db, int count, const char* const keys[],
+                         const char* const values[], char** errptr) {
+  std::unordered_map<std::string, std::string> options_map;
+  for (int i = 0; i < count; i++) options_map[keys[i]] = values[i];
+  SaveError(errptr, db->rep->SetOptions(options_map));
 }
 
-void rocksdb_options_destroy(rocksdb_options_t* options) {
-  delete options;
+void rocksdb_set_options_cf(rocksdb_t* db,
+                            rocksdb_column_family_handle_t* handle, int count,
+                            const char* const keys[],
+                            const char* const values[], char** errptr) {
+  std::unordered_map<std::string, std::string> options_map;
+  for (int i = 0; i < count; i++) options_map[keys[i]] = values[i];
+  SaveError(errptr, db->rep->SetOptions(handle->rep, options_map));
 }
 
+rocksdb_options_t* rocksdb_options_create() { return new rocksdb_options_t; }
+
+void rocksdb_options_destroy(rocksdb_options_t* options) { delete options; }
+
 rocksdb_options_t* rocksdb_options_create_copy(rocksdb_options_t* options) {
   return new rocksdb_options_t(*options);
 }
 
-void rocksdb_options_increase_parallelism(
-    rocksdb_options_t* opt, int total_threads) {
+void rocksdb_options_increase_parallelism(rocksdb_options_t* opt,
+                                          int total_threads) {
   opt->rep.IncreaseParallelism(total_threads);
 }
 
-void rocksdb_options_optimize_for_point_lookup(
-    rocksdb_options_t* opt, uint64_t block_cache_size_mb) {
+void rocksdb_options_optimize_for_point_lookup(rocksdb_options_t* opt,
+                                               uint64_t block_cache_size_mb) {
   opt->rep.OptimizeForPointLookup(block_cache_size_mb);
 }
 
@@ -2835,8 +2762,8 @@ void rocksdb_options_optimize_universal_style_compaction(
   opt->rep.OptimizeUniversalStyleCompaction(memtable_memory_budget);
 }
 
-void rocksdb_options_set_allow_ingest_behind(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_allow_ingest_behind(rocksdb_options_t* opt,
+                                             unsigned char v) {
   opt->rep.allow_ingest_behind = v;
 }
 
@@ -2844,9 +2771,8 @@ unsigned char rocksdb_options_get_allow_ingest_behind(rocksdb_options_t* opt) {
   return opt->rep.allow_ingest_behind;
 }
 
-void rocksdb_options_set_compaction_filter(
-    rocksdb_options_t* opt,
-    rocksdb_compactionfilter_t* filter) {
+void rocksdb_options_set_compaction_filter(rocksdb_options_t* opt,
+                                           rocksdb_compactionfilter_t* filter) {
   opt->rep.compaction_filter = filter;
 }
 
@@ -2856,8 +2782,8 @@ void rocksdb_options_set_compaction_filter_factory(
       std::shared_ptr<CompactionFilterFactory>(factory);
 }
 
-void rocksdb_options_compaction_readahead_size(
-    rocksdb_options_t* opt, size_t s) {
+void rocksdb_options_compaction_readahead_size(rocksdb_options_t* opt,
+                                               size_t s) {
   opt->rep.compaction_readahead_size = s;
 }
 
@@ -2865,20 +2791,18 @@ size_t rocksdb_options_get_compaction_readahead_size(rocksdb_options_t* opt) {
   return opt->rep.compaction_readahead_size;
 }
 
-void rocksdb_options_set_comparator(
-    rocksdb_options_t* opt,
-    rocksdb_comparator_t* cmp) {
+void rocksdb_options_set_comparator(rocksdb_options_t* opt,
+                                    rocksdb_comparator_t* cmp) {
   opt->rep.comparator = cmp;
 }
 
 void rocksdb_options_set_merge_operator(
-    rocksdb_options_t* opt,
-    rocksdb_mergeoperator_t* merge_operator) {
+    rocksdb_options_t* opt, rocksdb_mergeoperator_t* merge_operator) {
   opt->rep.merge_operator = std::shared_ptr<MergeOperator>(merge_operator);
 }
 
-void rocksdb_options_set_create_if_missing(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_create_if_missing(rocksdb_options_t* opt,
+                                           unsigned char v) {
   opt->rep.create_if_missing = v;
 }
 
@@ -2886,8 +2810,8 @@ unsigned char rocksdb_options_get_create_if_missing(rocksdb_options_t* opt) {
   return opt->rep.create_if_missing;
 }
 
-void rocksdb_options_set_create_missing_column_families(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_create_missing_column_families(rocksdb_options_t* opt,
+                                                        unsigned char v) {
   opt->rep.create_missing_column_families = v;
 }
 
@@ -2896,8 +2820,8 @@ unsigned char rocksdb_options_get_create_missing_column_families(
   return opt->rep.create_missing_column_families;
 }
 
-void rocksdb_options_set_error_if_exists(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_error_if_exists(rocksdb_options_t* opt,
+                                         unsigned char v) {
   opt->rep.error_if_exists = v;
 }
 
@@ -2905,8 +2829,8 @@ unsigned char rocksdb_options_get_error_if_exists(rocksdb_options_t* opt) {
   return opt->rep.error_if_exists;
 }
 
-void rocksdb_options_set_paranoid_checks(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_paranoid_checks(rocksdb_options_t* opt,
+                                         unsigned char v) {
   opt->rep.paranoid_checks = v;
 }
 
@@ -2934,8 +2858,7 @@ void rocksdb_options_set_info_log(rocksdb_options_t* opt, rocksdb_logger_t* l) {
   }
 }
 
-void rocksdb_options_set_info_log_level(
-    rocksdb_options_t* opt, int v) {
+void rocksdb_options_set_info_log_level(rocksdb_options_t* opt, int v) {
   opt->rep.info_log_level = static_cast<InfoLogLevel>(v);
 }
 
@@ -2968,7 +2891,8 @@ int rocksdb_options_get_max_open_files(rocksdb_options_t* opt) {
   return opt->rep.max_open_files;
 }
 
-void rocksdb_options_set_max_file_opening_threads(rocksdb_options_t* opt, int n) {
+void rocksdb_options_set_max_file_opening_threads(rocksdb_options_t* opt,
+                                                  int n) {
   opt->rep.max_file_opening_threads = n;
 }
 
@@ -2976,7 +2900,8 @@ int rocksdb_options_get_max_file_opening_threads(rocksdb_options_t* opt) {
   return opt->rep.max_file_opening_threads;
 }
 
-void rocksdb_options_set_max_total_wal_size(rocksdb_options_t* opt, uint64_t n) {
+void rocksdb_options_set_max_total_wal_size(rocksdb_options_t* opt,
+                                            uint64_t n) {
   opt->rep.max_total_wal_size = n;
 }
 
@@ -2984,8 +2909,8 @@ uint64_t rocksdb_options_get_max_total_wal_size(rocksdb_options_t* opt) {
   return opt->rep.max_total_wal_size;
 }
 
-void rocksdb_options_set_target_file_size_base(
-    rocksdb_options_t* opt, uint64_t n) {
+void rocksdb_options_set_target_file_size_base(rocksdb_options_t* opt,
+                                               uint64_t n) {
   opt->rep.target_file_size_base = n;
 }
 
@@ -2993,8 +2918,8 @@ uint64_t rocksdb_options_get_target_file_size_base(rocksdb_options_t* opt) {
   return opt->rep.target_file_size_base;
 }
 
-void rocksdb_options_set_target_file_size_multiplier(
-    rocksdb_options_t* opt, int n) {
+void rocksdb_options_set_target_file_size_multiplier(rocksdb_options_t* opt,
+                                                     int n) {
  opt->rep.target_file_size_multiplier = n;
 }
 
@@ -3002,8 +2927,8 @@ int rocksdb_options_get_target_file_size_multiplier(rocksdb_options_t* opt) {
   return opt->rep.target_file_size_multiplier;
 }
 
-void rocksdb_options_set_max_bytes_for_level_base(
-    rocksdb_options_t* opt, uint64_t n) {
+void rocksdb_options_set_max_bytes_for_level_base(rocksdb_options_t* opt,
+                                                  uint64_t n) {
   opt->rep.max_bytes_for_level_base = n;
 }
 
@@ -3184,8 +3109,8 @@ int rocksdb_options_get_level0_file_num_compaction_trigger(
   return opt->rep.level0_file_num_compaction_trigger;
 }
 
-void rocksdb_options_set_level0_slowdown_writes_trigger(
-    rocksdb_options_t* opt, int n) {
+void rocksdb_options_set_level0_slowdown_writes_trigger(rocksdb_options_t* opt,
+                                                        int n) {
   opt->rep.level0_slowdown_writes_trigger = n;
 }
 
@@ -3193,8 +3118,8 @@ int rocksdb_options_get_level0_slowdown_writes_trigger(rocksdb_options_t* opt) {
   return opt->rep.level0_slowdown_writes_trigger;
 }
 
-void rocksdb_options_set_level0_stop_writes_trigger(
-    rocksdb_options_t* opt, int n) {
+void rocksdb_options_set_level0_stop_writes_trigger(rocksdb_options_t* opt,
+                                                    int n) {
   opt->rep.level0_stop_writes_trigger = n;
 }
 
@@ -3202,7 +3127,7 @@ int rocksdb_options_get_level0_stop_writes_trigger(rocksdb_options_t* opt) {
   return opt->rep.level0_stop_writes_trigger;
 }
 
-void rocksdb_options_set_wal_recovery_mode(rocksdb_options_t* opt,int mode) {
+void rocksdb_options_set_wal_recovery_mode(rocksdb_options_t* opt, int mode) {
   opt->rep.wal_recovery_mode = static_cast<WALRecoveryMode>(mode);
 }
 
@@ -3232,7 +3157,7 @@ void rocksdb_options_set_compression_per_level(rocksdb_options_t* opt,
   opt->rep.compression_per_level.resize(num_levels);
   for (size_t i = 0; i < num_levels; ++i) {
     opt->rep.compression_per_level[i] =
-      static_cast<CompressionType>(level_values[i]);
+        static_cast<CompressionType>(level_values[i]);
   }
 }
 
@@ -3331,8 +3256,7 @@ void rocksdb_options_set_prefix_extractor(
   opt->rep.prefix_extractor.reset(prefix_extractor);
 }
 
-void rocksdb_options_set_use_fsync(
-    rocksdb_options_t* opt, int use_fsync) {
+void rocksdb_options_set_use_fsync(rocksdb_options_t* opt, int use_fsync) {
   opt->rep.use_fsync = use_fsync;
 }
 
@@ -3340,13 +3264,12 @@ int rocksdb_options_get_use_fsync(rocksdb_options_t* opt) {
   return opt->rep.use_fsync;
 }
 
-void rocksdb_options_set_db_log_dir(
-    rocksdb_options_t* opt, const char* db_log_dir) {
+void rocksdb_options_set_db_log_dir(rocksdb_options_t* opt,
+                                    const char* db_log_dir) {
   opt->rep.db_log_dir = db_log_dir;
 }
 
-void rocksdb_options_set_wal_dir(
-    rocksdb_options_t* opt, const char* v) {
+void rocksdb_options_set_wal_dir(rocksdb_options_t* opt, const char* v) {
   opt->rep.wal_dir = v;
 }
 
@@ -3358,8 +3281,8 @@ uint64_t rocksdb_options_get_WAL_ttl_seconds(rocksdb_options_t* opt) {
   return opt->rep.WAL_ttl_seconds;
 }
 
-void rocksdb_options_set_WAL_size_limit_MB(
-    rocksdb_options_t* opt, uint64_t limit) {
+void rocksdb_options_set_WAL_size_limit_MB(rocksdb_options_t* opt,
+                                           uint64_t limit) {
   opt->rep.WAL_size_limit_MB = limit;
 }
 
@@ -3367,8 +3290,8 @@ uint64_t rocksdb_options_get_WAL_size_limit_MB(rocksdb_options_t* opt) {
   return opt->rep.WAL_size_limit_MB;
 }
 
-void rocksdb_options_set_manifest_preallocation_size(
-    rocksdb_options_t* opt, size_t v) {
+void rocksdb_options_set_manifest_preallocation_size(rocksdb_options_t* opt,
+                                                     size_t v) {
   opt->rep.manifest_preallocation_size = v;
 }
 
@@ -3395,8 +3318,8 @@ unsigned char rocksdb_options_get_use_direct_io_for_flush_and_compaction(
   return opt->rep.use_direct_io_for_flush_and_compaction;
 }
 
-void rocksdb_options_set_allow_mmap_reads(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_allow_mmap_reads(rocksdb_options_t* opt,
+                                          unsigned char v) {
  opt->rep.allow_mmap_reads = v;
 }
 
@@ -3404,8 +3327,8 @@ unsigned char rocksdb_options_get_allow_mmap_reads(rocksdb_options_t* opt) {
   return opt->rep.allow_mmap_reads;
 }
 
-void rocksdb_options_set_allow_mmap_writes(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_allow_mmap_writes(rocksdb_options_t* opt,
+                                           unsigned char v) {
   opt->rep.allow_mmap_writes = v;
 }
 
@@ -3413,8 +3336,8 @@ unsigned char rocksdb_options_get_allow_mmap_writes(rocksdb_options_t* opt) {
  return opt->rep.allow_mmap_writes;
 }
 
-void rocksdb_options_set_is_fd_close_on_exec(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_is_fd_close_on_exec(rocksdb_options_t* opt,
+                                             unsigned char v) {
   opt->rep.is_fd_close_on_exec = v;
 }
 
@@ -3422,8 +3345,8 @@ unsigned char rocksdb_options_get_is_fd_close_on_exec(rocksdb_options_t* opt) {
   return opt->rep.is_fd_close_on_exec;
 }
 
-void rocksdb_options_set_stats_dump_period_sec(
-    rocksdb_options_t* opt, unsigned int v) {
+void rocksdb_options_set_stats_dump_period_sec(rocksdb_options_t* opt,
+                                               unsigned int v) {
   opt->rep.stats_dump_period_sec = v;
 }
 
@@ -3441,8 +3364,8 @@ unsigned int rocksdb_options_get_stats_persist_period_sec(
   return opt->rep.stats_persist_period_sec;
 }
 
-void rocksdb_options_set_advise_random_on_open(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_advise_random_on_open(rocksdb_options_t* opt,
+                                               unsigned char v) {
   opt->rep.advise_random_on_open = v;
 }
 
@@ -3451,9 +3374,9 @@ unsigned char rocksdb_options_get_advise_random_on_open(
   return opt->rep.advise_random_on_open;
 }
 
-void rocksdb_options_set_access_hint_on_compaction_start(
-    rocksdb_options_t* opt, int v) {
-  switch(v) {
+void rocksdb_options_set_access_hint_on_compaction_start(rocksdb_options_t* opt,
+                                                         int v) {
+  switch (v) {
     case 0:
       opt->rep.access_hint_on_compaction_start =
           ROCKSDB_NAMESPACE::Options::NONE;
@@ -3480,8 +3403,8 @@ int rocksdb_options_get_access_hint_on_compaction_start(
   return opt->rep.access_hint_on_compaction_start;
 }
 
-void rocksdb_options_set_use_adaptive_mutex(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_use_adaptive_mutex(rocksdb_options_t* opt,
rocksdb_options_set_use_adaptive_mutex(rocksdb_options_t* opt, + unsigned char v) { opt->rep.use_adaptive_mutex = v; } @@ -3489,8 +3412,8 @@ unsigned char rocksdb_options_get_use_adaptive_mutex(rocksdb_options_t* opt) { return opt->rep.use_adaptive_mutex; } -void rocksdb_options_set_wal_bytes_per_sync( - rocksdb_options_t* opt, uint64_t v) { +void rocksdb_options_set_wal_bytes_per_sync(rocksdb_options_t* opt, + uint64_t v) { opt->rep.wal_bytes_per_sync = v; } @@ -3498,8 +3421,7 @@ uint64_t rocksdb_options_get_wal_bytes_per_sync(rocksdb_options_t* opt) { return opt->rep.wal_bytes_per_sync; } -void rocksdb_options_set_bytes_per_sync( - rocksdb_options_t* opt, uint64_t v) { +void rocksdb_options_set_bytes_per_sync(rocksdb_options_t* opt, uint64_t v) { opt->rep.bytes_per_sync = v; } @@ -3547,7 +3469,8 @@ uint64_t rocksdb_options_get_max_sequential_skip_in_iterations( return opt->rep.max_sequential_skip_in_iterations; } -void rocksdb_options_set_max_write_buffer_number(rocksdb_options_t* opt, int n) { +void rocksdb_options_set_max_write_buffer_number(rocksdb_options_t* opt, + int n) { opt->rep.max_write_buffer_number = n; } @@ -3555,7 +3478,8 @@ int rocksdb_options_get_max_write_buffer_number(rocksdb_options_t* opt) { return opt->rep.max_write_buffer_number; } -void rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t* opt, int n) { +void rocksdb_options_set_min_write_buffer_number_to_merge( + rocksdb_options_t* opt, int n) { opt->rep.min_write_buffer_number_to_merge = n; } @@ -3620,7 +3544,8 @@ int rocksdb_options_get_max_background_jobs(rocksdb_options_t* opt) { return opt->rep.max_background_jobs; } -void rocksdb_options_set_max_background_compactions(rocksdb_options_t* opt, int n) { +void rocksdb_options_set_max_background_compactions(rocksdb_options_t* opt, + int n) { opt->rep.max_background_compactions = n; } @@ -3654,7 +3579,8 @@ size_t rocksdb_options_get_max_log_file_size(rocksdb_options_t* opt) { return opt->rep.max_log_file_size; } -void rocksdb_options_set_log_file_time_to_roll(rocksdb_options_t* opt, size_t v) { +void rocksdb_options_set_log_file_time_to_roll(rocksdb_options_t* opt, + size_t v) { opt->rep.log_file_time_to_roll = v; } @@ -3679,7 +3605,8 @@ size_t rocksdb_options_get_recycle_log_file_num(rocksdb_options_t* opt) { return opt->rep.recycle_log_file_num; } -void rocksdb_options_set_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt, size_t v) { +void rocksdb_options_set_soft_pending_compaction_bytes_limit( + rocksdb_options_t* opt, size_t v) { opt->rep.soft_pending_compaction_bytes_limit = v; } @@ -3688,7 +3615,8 @@ size_t rocksdb_options_get_soft_pending_compaction_bytes_limit( return opt->rep.soft_pending_compaction_bytes_limit; } -void rocksdb_options_set_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt, size_t v) { +void rocksdb_options_set_hard_pending_compaction_bytes_limit( + rocksdb_options_t* opt, size_t v) { opt->rep.hard_pending_compaction_bytes_limit = v; } @@ -3697,8 +3625,8 @@ size_t rocksdb_options_get_hard_pending_compaction_bytes_limit( return opt->rep.hard_pending_compaction_bytes_limit; } -void rocksdb_options_set_max_manifest_file_size( - rocksdb_options_t* opt, size_t v) { +void rocksdb_options_set_max_manifest_file_size(rocksdb_options_t* opt, + size_t v) { opt->rep.max_manifest_file_size = v; } @@ -3706,8 +3634,8 @@ size_t rocksdb_options_get_max_manifest_file_size(rocksdb_options_t* opt) { return opt->rep.max_manifest_file_size; } -void rocksdb_options_set_table_cache_numshardbits( - rocksdb_options_t* opt, int 
v) { +void rocksdb_options_set_table_cache_numshardbits(rocksdb_options_t* opt, + int v) { opt->rep.table_cache_numshardbits = v; } @@ -3715,8 +3643,7 @@ int rocksdb_options_get_table_cache_numshardbits(rocksdb_options_t* opt) { return opt->rep.table_cache_numshardbits; } -void rocksdb_options_set_arena_block_size( - rocksdb_options_t* opt, size_t v) { +void rocksdb_options_set_arena_block_size(rocksdb_options_t* opt, size_t v) { opt->rep.arena_block_size = v; } @@ -3724,7 +3651,8 @@ size_t rocksdb_options_get_arena_block_size(rocksdb_options_t* opt) { return opt->rep.arena_block_size; } -void rocksdb_options_set_disable_auto_compactions(rocksdb_options_t* opt, int disable) { +void rocksdb_options_set_disable_auto_compactions(rocksdb_options_t* opt, + int disable) { opt->rep.disable_auto_compactions = disable; } @@ -3733,7 +3661,8 @@ unsigned char rocksdb_options_get_disable_auto_compactions( return opt->rep.disable_auto_compactions; } -void rocksdb_options_set_optimize_filters_for_hits(rocksdb_options_t* opt, int v) { +void rocksdb_options_set_optimize_filters_for_hits(rocksdb_options_t* opt, + int v) { opt->rep.optimize_filters_for_hits = v; } @@ -3756,7 +3685,7 @@ void rocksdb_options_prepare_for_bulk_load(rocksdb_options_t* opt) { opt->rep.PrepareForBulkLoad(); } -void rocksdb_options_set_memtable_vector_rep(rocksdb_options_t *opt) { +void rocksdb_options_set_memtable_vector_rep(rocksdb_options_t* opt) { opt->rep.memtable_factory.reset(new ROCKSDB_NAMESPACE::VectorRepFactory); } @@ -3779,24 +3708,27 @@ size_t rocksdb_options_get_memtable_huge_page_size(rocksdb_options_t* opt) { return opt->rep.memtable_huge_page_size; } -void rocksdb_options_set_hash_skip_list_rep( - rocksdb_options_t *opt, size_t bucket_count, - int32_t skiplist_height, int32_t skiplist_branching_factor) { +void rocksdb_options_set_hash_skip_list_rep(rocksdb_options_t* opt, + size_t bucket_count, + int32_t skiplist_height, + int32_t skiplist_branching_factor) { ROCKSDB_NAMESPACE::MemTableRepFactory* factory = ROCKSDB_NAMESPACE::NewHashSkipListRepFactory( bucket_count, skiplist_height, skiplist_branching_factor); opt->rep.memtable_factory.reset(factory); } -void rocksdb_options_set_hash_link_list_rep( - rocksdb_options_t *opt, size_t bucket_count) { +void rocksdb_options_set_hash_link_list_rep(rocksdb_options_t* opt, + size_t bucket_count) { opt->rep.memtable_factory.reset( ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(bucket_count)); } -void rocksdb_options_set_plain_table_factory( - rocksdb_options_t *opt, uint32_t user_key_len, int bloom_bits_per_key, - double hash_table_ratio, size_t index_sparseness) { +void rocksdb_options_set_plain_table_factory(rocksdb_options_t* opt, + uint32_t user_key_len, + int bloom_bits_per_key, + double hash_table_ratio, + size_t index_sparseness) { ROCKSDB_NAMESPACE::PlainTableOptions options; options.user_key_len = user_key_len; options.bloom_bits_per_key = bloom_bits_per_key; @@ -3808,8 +3740,8 @@ void rocksdb_options_set_plain_table_factory( opt->rep.table_factory.reset(factory); } -void rocksdb_options_set_max_successive_merges( - rocksdb_options_t* opt, size_t v) { +void rocksdb_options_set_max_successive_merges(rocksdb_options_t* opt, + size_t v) { opt->rep.max_successive_merges = v; } @@ -3817,8 +3749,7 @@ size_t rocksdb_options_get_max_successive_merges(rocksdb_options_t* opt) { return opt->rep.max_successive_merges; } -void rocksdb_options_set_bloom_locality( - rocksdb_options_t* opt, uint32_t v) { +void rocksdb_options_set_bloom_locality(rocksdb_options_t* opt, uint32_t v) { 
   opt->rep.bloom_locality = v;
 }

@@ -3826,8 +3757,8 @@ uint32_t rocksdb_options_get_bloom_locality(rocksdb_options_t* opt) {
   return opt->rep.bloom_locality;
 }

-void rocksdb_options_set_inplace_update_support(
-    rocksdb_options_t* opt, unsigned char v) {
+void rocksdb_options_set_inplace_update_support(rocksdb_options_t* opt,
+                                                unsigned char v) {
   opt->rep.inplace_update_support = v;
 }

@@ -3836,8 +3767,8 @@ unsigned char rocksdb_options_get_inplace_update_support(
   return opt->rep.inplace_update_support;
 }

-void rocksdb_options_set_inplace_update_num_locks(
-    rocksdb_options_t* opt, size_t v) {
+void rocksdb_options_set_inplace_update_num_locks(rocksdb_options_t* opt,
+                                                  size_t v) {
   opt->rep.inplace_update_num_locks = v;
 }

@@ -3845,8 +3776,7 @@ size_t rocksdb_options_get_inplace_update_num_locks(rocksdb_options_t* opt) {
   return opt->rep.inplace_update_num_locks;
 }

-void rocksdb_options_set_report_bg_io_stats(
-    rocksdb_options_t* opt, int v) {
+void rocksdb_options_set_report_bg_io_stats(rocksdb_options_t* opt, int v) {
   opt->rep.report_bg_io_stats = v;
 }

@@ -3854,7 +3784,7 @@ unsigned char rocksdb_options_get_report_bg_io_stats(rocksdb_options_t* opt) {
   return opt->rep.report_bg_io_stats;
 }

-void rocksdb_options_set_compaction_style(rocksdb_options_t *opt, int style) {
+void rocksdb_options_set_compaction_style(rocksdb_options_t* opt, int style) {
   opt->rep.compaction_style =
       static_cast<ROCKSDB_NAMESPACE::CompactionStyle>(style);
 }

@@ -3863,17 +3793,17 @@ int rocksdb_options_get_compaction_style(rocksdb_options_t* opt) {
   return opt->rep.compaction_style;
 }

-void rocksdb_options_set_universal_compaction_options(rocksdb_options_t *opt, rocksdb_universal_compaction_options_t *uco) {
+void rocksdb_options_set_universal_compaction_options(
+    rocksdb_options_t* opt, rocksdb_universal_compaction_options_t* uco) {
   opt->rep.compaction_options_universal = *(uco->rep);
 }

 void rocksdb_options_set_fifo_compaction_options(
-    rocksdb_options_t* opt,
-    rocksdb_fifo_compaction_options_t* fifo) {
+    rocksdb_options_t* opt, rocksdb_fifo_compaction_options_t* fifo) {
   opt->rep.compaction_options_fifo = fifo->rep;
 }

-char *rocksdb_options_statistics_get_string(rocksdb_options_t *opt) {
+char* rocksdb_options_statistics_get_string(rocksdb_options_t* opt) {
   ROCKSDB_NAMESPACE::Statistics* statistics = opt->rep.statistics.get();
   if (statistics) {
     return strdup(statistics->ToString().c_str());
@@ -3881,7 +3811,8 @@ char *rocksdb_options_statistics_get_string(rocksdb_options_t *opt) {
   return nullptr;
 }

-void rocksdb_options_set_ratelimiter(rocksdb_options_t *opt, rocksdb_ratelimiter_t *limiter) {
+void rocksdb_options_set_ratelimiter(rocksdb_options_t* opt,
+                                     rocksdb_ratelimiter_t* limiter) {
   if (limiter) {
     opt->rep.rate_limiter = limiter->rep;
   }
@@ -3913,23 +3844,22 @@ int rocksdb_options_get_wal_compression(rocksdb_options_t* opt) {
   return opt->rep.wal_compression;
 }

-rocksdb_ratelimiter_t* rocksdb_ratelimiter_create(
-    int64_t rate_bytes_per_sec,
-    int64_t refill_period_us,
-    int32_t fairness) {
+rocksdb_ratelimiter_t* rocksdb_ratelimiter_create(int64_t rate_bytes_per_sec,
+                                                  int64_t refill_period_us,
+                                                  int32_t fairness) {
   rocksdb_ratelimiter_t* rate_limiter = new rocksdb_ratelimiter_t;
   rate_limiter->rep.reset(
-      NewGenericRateLimiter(rate_bytes_per_sec,
-                            refill_period_us, fairness));
+      NewGenericRateLimiter(rate_bytes_per_sec, refill_period_us, fairness));
   return rate_limiter;
 }

-void rocksdb_ratelimiter_destroy(rocksdb_ratelimiter_t *limiter) {
+void rocksdb_ratelimiter_destroy(rocksdb_ratelimiter_t* limiter) {
   delete limiter;
 }

-void rocksdb_options_set_row_cache(rocksdb_options_t* opt, rocksdb_cache_t* cache) {
-  if(cache) {
+void rocksdb_options_set_row_cache(rocksdb_options_t* opt,
+                                   rocksdb_cache_t* cache) {
+  if (cache) {
     opt->rep.row_cache = cache->rep;
   }
 }
@@ -3958,12 +3888,12 @@ void rocksdb_perfcontext_reset(rocksdb_perfcontext_t* context) {
 }

 char* rocksdb_perfcontext_report(rocksdb_perfcontext_t* context,
-    unsigned char exclude_zero_counters) {
+                                 unsigned char exclude_zero_counters) {
   return strdup(context->rep->ToString(exclude_zero_counters).c_str());
 }

 uint64_t rocksdb_perfcontext_metric(rocksdb_perfcontext_t* context,
-    int metric) {
+                                    int metric) {
   PerfContext* rep = context->rep;
   switch (metric) {
     case rocksdb_user_key_comparison_count:
@@ -4143,15 +4073,12 @@ table_properties_collectors
 */
 rocksdb_compactionfilter_t* rocksdb_compactionfilter_create(
-    void* state,
-    void (*destructor)(void*),
-    unsigned char (*filter)(
-        void*,
-        int level,
-        const char* key, size_t key_length,
-        const char* existing_value, size_t value_length,
-        char** new_value, size_t *new_value_length,
-        unsigned char* value_changed),
+    void* state, void (*destructor)(void*),
+    unsigned char (*filter)(void*, int level, const char* key,
+                            size_t key_length, const char* existing_value,
+                            size_t value_length, char** new_value,
+                            size_t* new_value_length,
+                            unsigned char* value_changed),
     const char* (*name)(void*)) {
   rocksdb_compactionfilter_t* result = new rocksdb_compactionfilter_t;
   result->state_ = state;
@@ -4163,8 +4090,7 @@ rocksdb_compactionfilter_t* rocksdb_compactionfilter_create(
 }

 void rocksdb_compactionfilter_set_ignore_snapshots(
-    rocksdb_compactionfilter_t* filter,
-    unsigned char whether_ignore) {
+    rocksdb_compactionfilter_t* filter, unsigned char whether_ignore) {
   filter->ignore_snapshots_ = whether_ignore;
 }

@@ -4202,12 +4128,9 @@ void rocksdb_compactionfilterfactory_destroy(
 }

 rocksdb_comparator_t* rocksdb_comparator_create(
-    void* state,
-    void (*destructor)(void*),
-    int (*compare)(
-        void*,
-        const char* a, size_t alen,
-        const char* b, size_t blen),
+    void* state, void (*destructor)(void*),
+    int (*compare)(void*, const char* a, size_t alen, const char* b,
+                   size_t blen),
     const char* (*name)(void*)) {
   rocksdb_comparator_t* result = new rocksdb_comparator_t;
   result->state_ = state;
@@ -4360,13 +4283,10 @@ rocksdb_readoptions_t* rocksdb_readoptions_create() {
   return new rocksdb_readoptions_t;
 }

-void rocksdb_readoptions_destroy(rocksdb_readoptions_t* opt) {
-  delete opt;
-}
+void rocksdb_readoptions_destroy(rocksdb_readoptions_t* opt) { delete opt; }

-void rocksdb_readoptions_set_verify_checksums(
-    rocksdb_readoptions_t* opt,
-    unsigned char v) {
+void rocksdb_readoptions_set_verify_checksums(rocksdb_readoptions_t* opt,
+                                              unsigned char v) {
   opt->rep.verify_checksums = v;
 }

@@ -4375,8 +4295,8 @@ unsigned char rocksdb_readoptions_get_verify_checksums(
   return opt->rep.verify_checksums;
 }

-void rocksdb_readoptions_set_fill_cache(
-    rocksdb_readoptions_t* opt, unsigned char v) {
+void rocksdb_readoptions_set_fill_cache(rocksdb_readoptions_t* opt,
+                                        unsigned char v) {
   opt->rep.fill_cache = v;
 }

@@ -4384,15 +4304,14 @@ unsigned char rocksdb_readoptions_get_fill_cache(rocksdb_readoptions_t* opt) {
   return opt->rep.fill_cache;
 }

-void rocksdb_readoptions_set_snapshot(
-    rocksdb_readoptions_t* opt,
-    const rocksdb_snapshot_t* snap) {
+void rocksdb_readoptions_set_snapshot(rocksdb_readoptions_t* opt,
+                                      const rocksdb_snapshot_t* snap) {
   opt->rep.snapshot = (snap ? snap->rep : nullptr);
 }

-void rocksdb_readoptions_set_iterate_upper_bound(
-    rocksdb_readoptions_t* opt,
-    const char* key, size_t keylen) {
+void rocksdb_readoptions_set_iterate_upper_bound(rocksdb_readoptions_t* opt,
+                                                 const char* key,
+                                                 size_t keylen) {
   if (key == nullptr) {
     opt->upper_bound = Slice();
     opt->rep.iterate_upper_bound = nullptr;
@@ -4403,9 +4322,9 @@ void rocksdb_readoptions_set_iterate_upper_bound(
   }
 }

-void rocksdb_readoptions_set_iterate_lower_bound(
-    rocksdb_readoptions_t *opt,
-    const char* key, size_t keylen) {
+void rocksdb_readoptions_set_iterate_lower_bound(rocksdb_readoptions_t* opt,
+                                                 const char* key,
+                                                 size_t keylen) {
   if (key == nullptr) {
     opt->lower_bound = Slice();
     opt->rep.iterate_lower_bound = nullptr;
@@ -4415,8 +4334,7 @@ void rocksdb_readoptions_set_iterate_lower_bound(
   }
 }

-void rocksdb_readoptions_set_read_tier(
-    rocksdb_readoptions_t* opt, int v) {
+void rocksdb_readoptions_set_read_tier(rocksdb_readoptions_t* opt, int v) {
   opt->rep.read_tier = static_cast<ReadTier>(v);
 }

@@ -4424,8 +4342,8 @@ int rocksdb_readoptions_get_read_tier(rocksdb_readoptions_t* opt) {
   return static_cast<int>(opt->rep.read_tier);
 }

-void rocksdb_readoptions_set_tailing(
-    rocksdb_readoptions_t* opt, unsigned char v) {
+void rocksdb_readoptions_set_tailing(rocksdb_readoptions_t* opt,
+                                     unsigned char v) {
   opt->rep.tailing = v;
 }

@@ -4433,13 +4351,13 @@ unsigned char rocksdb_readoptions_get_tailing(rocksdb_readoptions_t* opt) {
   return opt->rep.tailing;
 }

-void rocksdb_readoptions_set_managed(
-    rocksdb_readoptions_t* opt, unsigned char v) {
+void rocksdb_readoptions_set_managed(rocksdb_readoptions_t* opt,
+                                     unsigned char v) {
   opt->rep.managed = v;
 }

-void rocksdb_readoptions_set_readahead_size(
-    rocksdb_readoptions_t* opt, size_t v) {
+void rocksdb_readoptions_set_readahead_size(rocksdb_readoptions_t* opt,
+                                            size_t v) {
   opt->rep.readahead_size = v;
 }

@@ -4447,8 +4365,8 @@ size_t rocksdb_readoptions_get_readahead_size(rocksdb_readoptions_t* opt) {
   return opt->rep.readahead_size;
 }

-void rocksdb_readoptions_set_prefix_same_as_start(
-    rocksdb_readoptions_t* opt, unsigned char v) {
+void rocksdb_readoptions_set_prefix_same_as_start(rocksdb_readoptions_t* opt,
+                                                  unsigned char v) {
   opt->rep.prefix_same_as_start = v;
 }

@@ -4477,8 +4395,7 @@ unsigned char rocksdb_readoptions_get_total_order_seek(
 }

 void rocksdb_readoptions_set_max_skippable_internal_keys(
-    rocksdb_readoptions_t* opt,
-    uint64_t v) {
+    rocksdb_readoptions_t* opt, uint64_t v) {
   opt->rep.max_skippable_internal_keys = v;
 }

@@ -4497,8 +4414,8 @@ unsigned char rocksdb_readoptions_get_background_purge_on_iterator_cleanup(
   return opt->rep.background_purge_on_iterator_cleanup;
 }

-void rocksdb_readoptions_set_ignore_range_deletions(
-    rocksdb_readoptions_t* opt, unsigned char v) {
+void rocksdb_readoptions_set_ignore_range_deletions(rocksdb_readoptions_t* opt,
+                                                    unsigned char v) {
   opt->rep.ignore_range_deletions = v;
 }

@@ -4552,12 +4469,10 @@ rocksdb_writeoptions_t* rocksdb_writeoptions_create() {
   return new rocksdb_writeoptions_t;
 }

-void rocksdb_writeoptions_destroy(rocksdb_writeoptions_t* opt) {
-  delete opt;
-}
+void rocksdb_writeoptions_destroy(rocksdb_writeoptions_t* opt) { delete opt; }

-void rocksdb_writeoptions_set_sync(
-    rocksdb_writeoptions_t* opt, unsigned char v) {
+void rocksdb_writeoptions_set_sync(rocksdb_writeoptions_t* opt,
+                                   unsigned char v) {
   opt->rep.sync = v;
 }

@@ -4624,7 +4539,8 @@ void rocksdb_compactoptions_destroy(rocksdb_compactoptions_t* opt) {
 }

 void rocksdb_compactoptions_set_bottommost_level_compaction(
     rocksdb_compactoptions_t* opt, unsigned char v) {
-  opt->rep.bottommost_level_compaction = static_cast<BottommostLevelCompaction>(v);
+  opt->rep.bottommost_level_compaction =
+      static_cast<BottommostLevelCompaction>(v);
 }

 unsigned char rocksdb_compactoptions_get_bottommost_level_compaction(
@@ -4676,12 +4592,10 @@ rocksdb_flushoptions_t* rocksdb_flushoptions_create() {
   return new rocksdb_flushoptions_t;
 }

-void rocksdb_flushoptions_destroy(rocksdb_flushoptions_t* opt) {
-  delete opt;
-}
+void rocksdb_flushoptions_destroy(rocksdb_flushoptions_t* opt) { delete opt; }

-void rocksdb_flushoptions_set_wait(
-    rocksdb_flushoptions_t* opt, unsigned char v) {
+void rocksdb_flushoptions_set_wait(rocksdb_flushoptions_t* opt,
+                                   unsigned char v) {
   opt->rep.wait = v;
 }

@@ -4746,9 +4660,7 @@ rocksdb_cache_t* rocksdb_cache_create_lru_opts(
   return c;
 }

-void rocksdb_cache_destroy(rocksdb_cache_t* cache) {
-  delete cache;
-}
+void rocksdb_cache_destroy(rocksdb_cache_t* cache) { delete cache; }

 void rocksdb_cache_disown_data(rocksdb_cache_t* cache) {
   cache->rep->DisownData();
@@ -4770,16 +4682,15 @@ size_t rocksdb_cache_get_pinned_usage(rocksdb_cache_t* cache) {
   return cache->rep->GetPinnedUsage();
 }

-rocksdb_dbpath_t* rocksdb_dbpath_create(const char* path, uint64_t target_size) {
+rocksdb_dbpath_t* rocksdb_dbpath_create(const char* path,
+                                        uint64_t target_size) {
   rocksdb_dbpath_t* result = new rocksdb_dbpath_t;
   result->rep.path = std::string(path);
   result->rep.target_size = target_size;
   return result;
 }

-void rocksdb_dbpath_destroy(rocksdb_dbpath_t* dbpath) {
-  delete dbpath;
-}
+void rocksdb_dbpath_destroy(rocksdb_dbpath_t* dbpath) { delete dbpath; }

 rocksdb_env_t* rocksdb_create_default_env() {
   rocksdb_env_t* result = new rocksdb_env_t;
@@ -4812,7 +4723,8 @@ int rocksdb_env_get_bottom_priority_background_threads(rocksdb_env_t* env) {
   return env->rep->GetBackgroundThreads(Env::BOTTOM);
 }

-void rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n) {
+void rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env,
+                                                      int n) {
   env->rep->SetBackgroundThreads(n, Env::HIGH);
 }

@@ -4837,7 +4749,8 @@ void rocksdb_env_lower_thread_pool_io_priority(rocksdb_env_t* env) {
   env->rep->LowerThreadPoolIOPriority();
 }

-void rocksdb_env_lower_high_priority_thread_pool_io_priority(rocksdb_env_t* env) {
+void rocksdb_env_lower_high_priority_thread_pool_io_priority(
+    rocksdb_env_t* env) {
   env->rep->LowerThreadPoolIOPriority(Env::HIGH);
 }

@@ -4845,7 +4758,8 @@ void rocksdb_env_lower_thread_pool_cpu_priority(rocksdb_env_t* env) {
   env->rep->LowerThreadPoolCPUPriority();
 }

-void rocksdb_env_lower_high_priority_thread_pool_cpu_priority(rocksdb_env_t* env) {
+void rocksdb_env_lower_high_priority_thread_pool_cpu_priority(
+    rocksdb_env_t* env) {
   env->rep->LowerThreadPoolCPUPriority(Env::HIGH);
 }

@@ -5017,18 +4931,11 @@ void rocksdb_try_catch_up_with_primary(rocksdb_t* db, char** errptr) {
 }

 rocksdb_slicetransform_t* rocksdb_slicetransform_create(
-    void* state,
-    void (*destructor)(void*),
-    char* (*transform)(
-        void*,
-        const char* key, size_t length,
-        size_t* dst_length),
-    unsigned char (*in_domain)(
-        void*,
-        const char* key, size_t length),
-    unsigned char (*in_range)(
-        void*,
-        const char* key, size_t length),
+    void* state, void (*destructor)(void*),
+    char* (*transform)(void*, const char* key, size_t length,
+                       size_t* dst_length),
+    unsigned char (*in_domain)(void*, const char* key, size_t length),
+    unsigned char (*in_range)(void*, const char* key, size_t length),
     const char* (*name)(void*)) {
   rocksdb_slicetransform_t* result = new rocksdb_slicetransform_t;
   result->state_ = state;
@@ -5040,9 +4947,7 @@ rocksdb_slicetransform_t* rocksdb_slicetransform_create(
   return result;
 }

-void rocksdb_slicetransform_destroy(rocksdb_slicetransform_t* st) {
-  delete st;
-}
+void rocksdb_slicetransform_destroy(rocksdb_slicetransform_t* st) { delete st; }

 struct SliceTransformWrapper : public rocksdb_slicetransform_t {
   const SliceTransform* rep_;
@@ -5052,14 +4957,13 @@ struct SliceTransformWrapper : public rocksdb_slicetransform_t {
   Slice Transform(const Slice& src) const override {
     return rep_->Transform(src);
   }
-  bool InDomain(const Slice& src) const override {
-    return rep_->InDomain(src);
-  }
+  bool InDomain(const Slice& src) const override { return rep_->InDomain(src); }
   bool InRange(const Slice& src) const override { return rep_->InRange(src); }
-  static void DoNothing(void*) { }
+  static void DoNothing(void*) {}
 };

-rocksdb_slicetransform_t* rocksdb_slicetransform_create_fixed_prefix(size_t prefixLen) {
+rocksdb_slicetransform_t* rocksdb_slicetransform_create_fixed_prefix(
+    size_t prefixLen) {
   SliceTransformWrapper* wrapper = new SliceTransformWrapper;
   wrapper->rep_ = ROCKSDB_NAMESPACE::NewFixedPrefixTransform(prefixLen);
   wrapper->state_ = nullptr;
@@ -5075,14 +4979,16 @@ rocksdb_slicetransform_t* rocksdb_slicetransform_create_noop() {
   return wrapper;
 }

-rocksdb_universal_compaction_options_t* rocksdb_universal_compaction_options_create() {
-  rocksdb_universal_compaction_options_t* result = new rocksdb_universal_compaction_options_t;
+rocksdb_universal_compaction_options_t*
+rocksdb_universal_compaction_options_create() {
+  rocksdb_universal_compaction_options_t* result =
+      new rocksdb_universal_compaction_options_t;
   result->rep = new ROCKSDB_NAMESPACE::CompactionOptionsUniversal;
   return result;
 }

 void rocksdb_universal_compaction_options_set_size_ratio(
-  rocksdb_universal_compaction_options_t* uco, int ratio) {
+    rocksdb_universal_compaction_options_t* uco, int ratio) {
   uco->rep->size_ratio = ratio;
 }

@@ -5092,7 +4998,7 @@ int rocksdb_universal_compaction_options_get_size_ratio(
 }

 void rocksdb_universal_compaction_options_set_min_merge_width(
-  rocksdb_universal_compaction_options_t* uco, int w) {
+    rocksdb_universal_compaction_options_t* uco, int w) {
   uco->rep->min_merge_width = w;
 }

@@ -5102,7 +5008,7 @@ int rocksdb_universal_compaction_options_get_min_merge_width(
 }

 void rocksdb_universal_compaction_options_set_max_merge_width(
-  rocksdb_universal_compaction_options_t* uco, int w) {
+    rocksdb_universal_compaction_options_t* uco, int w) {
   uco->rep->max_merge_width = w;
 }

@@ -5112,7 +5018,7 @@ int rocksdb_universal_compaction_options_get_max_merge_width(
 }

 void rocksdb_universal_compaction_options_set_max_size_amplification_percent(
-  rocksdb_universal_compaction_options_t* uco, int p) {
+    rocksdb_universal_compaction_options_t* uco, int p) {
   uco->rep->max_size_amplification_percent = p;
 }

@@ -5122,7 +5028,7 @@ int rocksdb_universal_compaction_options_get_max_size_amplification_percent(
 }

 void rocksdb_universal_compaction_options_set_compression_size_percent(
-  rocksdb_universal_compaction_options_t* uco, int p) {
+    rocksdb_universal_compaction_options_t* uco, int p) {
   uco->rep->compression_size_percent = p;
 }

@@ -5132,7 +5038,7 @@ int rocksdb_universal_compaction_options_get_compression_size_percent(
 }

 void rocksdb_universal_compaction_options_set_stop_style(
-  rocksdb_universal_compaction_options_t* uco, int style) {
+    rocksdb_universal_compaction_options_t* uco, int style) {
   uco->rep->stop_style =
       static_cast<ROCKSDB_NAMESPACE::CompactionStopStyle>(style);
 }
@@ -5143,14 +5049,15 @@ int rocksdb_universal_compaction_options_get_stop_style(
 }

 void rocksdb_universal_compaction_options_destroy(
-  rocksdb_universal_compaction_options_t* uco) {
+    rocksdb_universal_compaction_options_t* uco) {
   delete uco->rep;
   delete uco;
 }

 rocksdb_fifo_compaction_options_t* rocksdb_fifo_compaction_options_create() {
-  rocksdb_fifo_compaction_options_t* result = new rocksdb_fifo_compaction_options_t;
-  result->rep = CompactionOptionsFIFO();
+  rocksdb_fifo_compaction_options_t* result =
+      new rocksdb_fifo_compaction_options_t;
+  result->rep = CompactionOptionsFIFO();
   return result;
 }

@@ -5169,7 +5076,8 @@ void rocksdb_fifo_compaction_options_destroy(
   delete fifo_opts;
 }

-void rocksdb_options_set_min_level_to_compress(rocksdb_options_t* opt, int level) {
+void rocksdb_options_set_min_level_to_compress(rocksdb_options_t* opt,
+                                               int level) {
   if (level >= 0) {
     assert(level <= opt->rep.num_levels);
     opt->rep.compression_per_level.resize(opt->rep.num_levels);
@@ -5182,8 +5090,7 @@ void rocksdb_options_set_min_level_to_compress(rocksdb_options_t* opt, int level
   }
 }

-int rocksdb_livefiles_count(
-    const rocksdb_livefiles_t* lf) {
+int rocksdb_livefiles_count(const rocksdb_livefiles_t* lf) {
   return static_cast<int>(lf->rep.size());
 }

@@ -5192,54 +5099,39 @@ const char* rocksdb_livefiles_column_family_name(const rocksdb_livefiles_t* lf,
   return lf->rep[index].column_family_name.c_str();
 }

-const char* rocksdb_livefiles_name(
-    const rocksdb_livefiles_t* lf,
-    int index) {
+const char* rocksdb_livefiles_name(const rocksdb_livefiles_t* lf, int index) {
   return lf->rep[index].name.c_str();
 }

-int rocksdb_livefiles_level(
-    const rocksdb_livefiles_t* lf,
-    int index) {
+int rocksdb_livefiles_level(const rocksdb_livefiles_t* lf, int index) {
   return lf->rep[index].level;
 }

-size_t rocksdb_livefiles_size(
-    const rocksdb_livefiles_t* lf,
-    int index) {
+size_t rocksdb_livefiles_size(const rocksdb_livefiles_t* lf, int index) {
   return lf->rep[index].size;
 }

-const char* rocksdb_livefiles_smallestkey(
-    const rocksdb_livefiles_t* lf,
-    int index,
-    size_t* size) {
+const char* rocksdb_livefiles_smallestkey(const rocksdb_livefiles_t* lf,
+                                          int index, size_t* size) {
   *size = lf->rep[index].smallestkey.size();
   return lf->rep[index].smallestkey.data();
 }

-const char* rocksdb_livefiles_largestkey(
-    const rocksdb_livefiles_t* lf,
-    int index,
-    size_t* size) {
+const char* rocksdb_livefiles_largestkey(const rocksdb_livefiles_t* lf,
+                                         int index, size_t* size) {
   *size = lf->rep[index].largestkey.size();
   return lf->rep[index].largestkey.data();
 }

-uint64_t rocksdb_livefiles_entries(
-    const rocksdb_livefiles_t* lf,
-    int index) {
+uint64_t rocksdb_livefiles_entries(const rocksdb_livefiles_t* lf, int index) {
   return lf->rep[index].num_entries;
 }

-uint64_t rocksdb_livefiles_deletions(
-    const rocksdb_livefiles_t* lf,
-    int index) {
+uint64_t rocksdb_livefiles_deletions(const rocksdb_livefiles_t* lf, int index) {
   return lf->rep[index].num_deletions;
 }

-extern void rocksdb_livefiles_destroy(
-    const rocksdb_livefiles_t* lf) {
+extern void rocksdb_livefiles_destroy(const rocksdb_livefiles_t* lf) {
   delete lf;
 }

@@ -5394,7 +5286,8 @@ rocksdb_transactiondb_options_t* rocksdb_transactiondb_options_create() {
   return new rocksdb_transactiondb_options_t;
 }

-void rocksdb_transactiondb_options_destroy(rocksdb_transactiondb_options_t* opt){
+void rocksdb_transactiondb_options_destroy(
+    rocksdb_transactiondb_options_t* opt) {
   delete opt;
 }

@@ -5600,7 +5493,7 @@ rocksdb_transaction_t* rocksdb_transaction_begin(
     return result;
   }
   old_txn->rep = txn_db->rep->BeginTransaction(write_options->rep,
-                                              txn_options->rep, old_txn->rep);
+                                               txn_options->rep, old_txn->rep);
   return old_txn;
 }

@@ -5672,7 +5565,8 @@ void rocksdb_transaction_set_savepoint(rocksdb_transaction_t* txn) {
   txn->rep->SetSavePoint();
 }

-void rocksdb_transaction_rollback_to_savepoint(rocksdb_transaction_t* txn, char** errptr) {
+void rocksdb_transaction_rollback_to_savepoint(rocksdb_transaction_t* txn,
+                                               char** errptr) {
   SaveError(errptr, txn->rep->RollbackToSavePoint());
 }

@@ -5904,12 +5798,10 @@ void rocksdb_transaction_multi_get_cf(
 }

 // Read a key outside a transaction
-char* rocksdb_transactiondb_get(
-    rocksdb_transactiondb_t* txn_db,
-    const rocksdb_readoptions_t* options,
-    const char* key, size_t klen,
-    size_t* vlen,
-    char** errptr){
+char* rocksdb_transactiondb_get(rocksdb_transactiondb_t* txn_db,
+                                const rocksdb_readoptions_t* options,
+                                const char* key, size_t klen, size_t* vlen,
+                                char** errptr) {
   char* result = nullptr;
   std::string tmp;
   Status s = txn_db->rep->Get(options->rep, Slice(key, klen), &tmp);
@@ -6086,11 +5978,9 @@ void rocksdb_transactiondb_put_cf(rocksdb_transactiondb_t* txn_db,
 }

 // Write batch into transaction db
-void rocksdb_transactiondb_write(
-    rocksdb_transactiondb_t* db,
-    const rocksdb_writeoptions_t* options,
-    rocksdb_writebatch_t* batch,
-    char** errptr) {
+void rocksdb_transactiondb_write(rocksdb_transactiondb_t* db,
+                                 const rocksdb_writeoptions_t* options,
+                                 rocksdb_writebatch_t* batch, char** errptr) {
   SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
 }

@@ -6411,7 +6301,6 @@ struct rocksdb_memory_usage_t {
 // estimates amount of memory occupied by consumers (dbs and caches)
 rocksdb_memory_usage_t* rocksdb_approximate_memory_usage_create(
     rocksdb_memory_consumers_t* consumers, char** errptr) {
-
   vector<DB*> dbs;
   for (auto db : consumers->dbs) {
     dbs.push_back(db->rep);
   }
@@ -6433,7 +6322,8 @@ rocksdb_memory_usage_t* rocksdb_approximate_memory_usage_create(
   auto result = new rocksdb_memory_usage_t;
   result->mem_table_total = usage_by_type[MemoryUtil::kMemTableTotal];
   result->mem_table_unflushed = usage_by_type[MemoryUtil::kMemTableUnFlushed];
-  result->mem_table_readers_total = usage_by_type[MemoryUtil::kTableReadersTotal];
+  result->mem_table_readers_total =
+      usage_by_type[MemoryUtil::kTableReadersTotal];
   result->cache_total = usage_by_type[MemoryUtil::kCacheTotal];
   return result;
 }
diff --git a/db/c_test.c b/db/c_test.c
index 12d6fd143..249ab9023 100644
--- a/db/c_test.c
+++ b/db/c_test.c
@@ -49,32 +49,32 @@ static void StartPhase(const char* name) {
 }
 #ifdef _MSC_VER
 #pragma warning(push)
-#pragma warning (disable: 4996)  // getenv security warning
+#pragma warning(disable : 4996)  // getenv security warning
 #endif
 static const char* GetTempDir(void) {
-    const char* ret = getenv("TEST_TMPDIR");
-    if (ret == NULL || ret[0] == '\0')
+  const char* ret = getenv("TEST_TMPDIR");
+  if (ret == NULL || ret[0] == '\0')
 #ifdef OS_WIN
-      ret = getenv("TEMP");
+    ret = getenv("TEMP");
 #else
-      ret = "/tmp";
+    ret = "/tmp";
 #endif
-    return ret;
+  return ret;
 }
 #ifdef _MSC_VER
 #pragma warning(pop)
 #endif

-#define CheckNoError(err)                                               \
-  if ((err) != NULL) {                                                  \
+#define CheckNoError(err)                                                 \
+  if ((err) != NULL) {                                                    \
     fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, (err)); \
-    abort();                                                            \
+    abort();                                                              \
   }

-#define CheckCondition(cond)                                            \
-  if (!(cond)) {                                                        \
+#define CheckCondition(cond)                                               \
+  if (!(cond)) {                                                           \
    fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, #cond);  \
-    abort();                                                            \
+    abort();                                                               \
  }

 static void CheckEqual(const char* expected, const char* v, size_t n) {
@@ -98,21 +98,15 @@ static void Free(char** ptr) {
   }
 }

-static void CheckValue(
-    char* err,
-    const char* expected,
-    char** actual,
-    size_t actual_length) {
+static void CheckValue(char* err, const char* expected, char** actual,
+                       size_t actual_length) {
   CheckNoError(err);
   CheckEqual(expected, *actual, actual_length);
   Free(actual);
 }

-static void CheckGet(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    const char* key,
-    const char* expected) {
+static void CheckGet(rocksdb_t* db, const rocksdb_readoptions_t* options,
+                     const char* key, const char* expected) {
   char* err = NULL;
   size_t val_len;
   char* val;
@@ -122,12 +116,9 @@ static void CheckGet(
   Free(&val);
 }

-static void CheckGetCF(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* handle,
-    const char* key,
-    const char* expected) {
+static void CheckGetCF(rocksdb_t* db, const rocksdb_readoptions_t* options,
+                       rocksdb_column_family_handle_t* handle, const char* key,
+                       const char* expected) {
   char* err = NULL;
   size_t val_len;
   char* val;
@@ -174,8 +165,8 @@ static void CheckMultiGetValues(size_t num_keys, char** values,
   }
 }

-static void CheckIter(rocksdb_iterator_t* iter,
-                      const char* key, const char* val) {
+static void CheckIter(rocksdb_iterator_t* iter, const char* key,
+                      const char* val) {
   size_t len;
   const char* str;
   str = rocksdb_iter_key(iter, &len);
@@ -185,10 +176,9 @@ static void CheckIter(rocksdb_iterator_t* iter,
 }

 // Callback from rocksdb_writebatch_iterate()
-static void CheckPut(void* ptr,
-                     const char* k, size_t klen,
-                     const char* v, size_t vlen) {
-  int* state = (int*) ptr;
+static void CheckPut(void* ptr, const char* k, size_t klen, const char* v,
+                     size_t vlen) {
+  int* state = (int*)ptr;
   CheckCondition(*state < 2);
   switch (*state) {
     case 0:
@@ -205,7 +195,7 @@ static void CheckPut(void* ptr,

 // Callback from rocksdb_writebatch_iterate()
 static void CheckDel(void* ptr, const char* k, size_t klen) {
-  int* state = (int*) ptr;
+  int* state = (int*)ptr;
   CheckCondition(*state == 2);
   CheckEqual("bar", k, klen);
   (*state)++;
@@ -213,14 +203,16 @@ static void CheckDel(void* ptr, const char* k, size_t klen) {

 static void CmpDestroy(void* arg) { (void)arg; }

-static int CmpCompare(void* arg, const char* a, size_t alen,
-                      const char* b, size_t blen) {
+static int CmpCompare(void* arg, const char* a, size_t alen, const char* b,
+                      size_t blen) {
   (void)arg;
   size_t n = (alen < blen) ? alen : blen;
   int r = memcmp(a, b, n);
   if (r == 0) {
-    if (alen < blen) r = -1;
-    else if (alen > blen) r = +1;
+    if (alen < blen)
+      r = -1;
+    else if (alen > blen)
+      r = +1;
   }
   return r;
 }
@@ -405,11 +397,9 @@ static const char* MergeOperatorName(void* arg) {
   return "TestMergeOperator";
 }
 static char* MergeOperatorFullMerge(
-    void* arg,
-    const char* key, size_t key_length,
-    const char* existing_value, size_t existing_value_length,
-    const char* const* operands_list, const size_t* operands_list_length,
-    int num_operands,
+    void* arg, const char* key, size_t key_length, const char* existing_value,
+    size_t existing_value_length, const char* const* operands_list,
+    const size_t* operands_list_length, int num_operands,
     unsigned char* success, size_t* new_value_length) {
   (void)arg;
   (void)key;
@@ -425,12 +415,12 @@ static char* MergeOperatorFullMerge(
   memcpy(result, "fake", 4);
   return result;
 }
-static char* MergeOperatorPartialMerge(
-    void* arg,
-    const char* key, size_t key_length,
-    const char* const* operands_list, const size_t* operands_list_length,
-    int num_operands,
-    unsigned char* success, size_t* new_value_length) {
+static char* MergeOperatorPartialMerge(void* arg, const char* key,
+                                       size_t key_length,
+                                       const char* const* operands_list,
+                                       const size_t* operands_list_length,
+                                       int num_operands, unsigned char* success,
+                                       size_t* new_value_length) {
   (void)arg;
   (void)key;
   (void)key_length;
@@ -444,18 +434,16 @@ static char* MergeOperatorPartialMerge(
   return result;
 }

-static void CheckTxnGet(
-    rocksdb_transaction_t* txn,
-    const rocksdb_readoptions_t* options,
-    const char* key,
-    const char* expected) {
-  char* err = NULL;
-  size_t val_len;
-  char* val;
-  val = rocksdb_transaction_get(txn, options, key, strlen(key), &val_len, &err);
-  CheckNoError(err);
-  CheckEqual(expected, val, val_len);
-  Free(&val);
+static void CheckTxnGet(rocksdb_transaction_t* txn,
+                        const rocksdb_readoptions_t* options, const char* key,
+                        const char* expected) {
+  char* err = NULL;
+  size_t val_len;
+  char* val;
+  val = rocksdb_transaction_get(txn, options, key, strlen(key), &val_len, &err);
+  CheckNoError(err);
+  CheckEqual(expected, val, val_len);
+  Free(&val);
 }

 static void CheckTxnGetCF(rocksdb_transaction_t* txn,
@@ -502,11 +490,9 @@ static void CheckTxnPinGetCF(rocksdb_transaction_t* txn,
   rocksdb_pinnableslice_destroy(p);
 }

-static void CheckTxnDBGet(
-    rocksdb_transactiondb_t* txn_db,
-    const rocksdb_readoptions_t* options,
-    const char* key,
-    const char* expected) {
+static void CheckTxnDBGet(rocksdb_transactiondb_t* txn_db,
+                          const rocksdb_readoptions_t* options, const char* key,
+                          const char* expected) {
   char* err = NULL;
   size_t val_len;
   char* val;
@@ -632,7 +618,7 @@ int main(int argc, char** argv) {
   rocksdb_t* db;
   rocksdb_comparator_t* cmp;
   rocksdb_cache_t* cache;
-  rocksdb_dbpath_t *dbpath;
+  rocksdb_dbpath_t* dbpath;
   rocksdb_env_t* env;
   rocksdb_options_t* options;
   rocksdb_compactoptions_t* coptions;
@@ -649,30 +635,20 @@ int main(int argc, char** argv) {
   char* err = NULL;
   int run = -1;

-  snprintf(dbname, sizeof(dbname),
-           "%s/rocksdb_c_test-%d",
-           GetTempDir(),
-           ((int) geteuid()));
+  snprintf(dbname, sizeof(dbname), "%s/rocksdb_c_test-%d", GetTempDir(),
+           ((int)geteuid()));

-  snprintf(dbbackupname, sizeof(dbbackupname),
-           "%s/rocksdb_c_test-%d-backup",
-           GetTempDir(),
-           ((int) geteuid()));
+  snprintf(dbbackupname, sizeof(dbbackupname), "%s/rocksdb_c_test-%d-backup",
+           GetTempDir(), ((int)geteuid()));

   snprintf(dbcheckpointname, sizeof(dbcheckpointname),
-           "%s/rocksdb_c_test-%d-checkpoint",
-           GetTempDir(),
-           ((int) geteuid()));
+           "%s/rocksdb_c_test-%d-checkpoint", GetTempDir(), ((int)geteuid()));

-  snprintf(sstfilename, sizeof(sstfilename),
-           "%s/rocksdb_c_test-%d-sst",
-           GetTempDir(),
-           ((int)geteuid()));
+  snprintf(sstfilename, sizeof(sstfilename), "%s/rocksdb_c_test-%d-sst",
+           GetTempDir(), ((int)geteuid()));

-  snprintf(dbpathname, sizeof(dbpathname),
-           "%s/rocksdb_c_test-%d-dbpath",
-           GetTempDir(),
-           ((int) geteuid()));
+  snprintf(dbpathname, sizeof(dbpathname), "%s/rocksdb_c_test-%d-dbpath",
+           GetTempDir(), ((int)geteuid()));

   StartPhase("create_objects");
   cmp = rocksdb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName);
@@ -746,7 +722,8 @@ int main(int argc, char** argv) {
     rocksdb_destroy_db(options, dbbackupname, &err);
     CheckNoError(err);

-    rocksdb_backup_engine_t *be = rocksdb_backup_engine_open(options, dbbackupname, &err);
+    rocksdb_backup_engine_t* be =
+        rocksdb_backup_engine_open(options, dbbackupname, &err);
     CheckNoError(err);

     rocksdb_backup_engine_create_new_backup(be, db, &err);
@@ -759,7 +736,8 @@ int main(int argc, char** argv) {
     rocksdb_backup_engine_create_new_backup(be, db, &err);
     CheckNoError(err);

-    const rocksdb_backup_engine_info_t* bei = rocksdb_backup_engine_get_backup_info(be);
+    const rocksdb_backup_engine_info_t* bei =
+        rocksdb_backup_engine_get_backup_info(be);
     CheckCondition(rocksdb_backup_engine_info_count(bei) > 1);
     rocksdb_backup_engine_info_destroy(bei);

@@ -778,9 +756,11 @@ int main(int argc, char** argv) {
     rocksdb_destroy_db(options, dbname, &err);
     CheckNoError(err);

-    rocksdb_restore_options_t *restore_options = rocksdb_restore_options_create();
+    rocksdb_restore_options_t* restore_options =
+        rocksdb_restore_options_create();
     rocksdb_restore_options_set_keep_log_files(restore_options, 0);
-    rocksdb_backup_engine_restore_db_from_latest_backup(be, dbname, dbname, restore_options, &err);
+    rocksdb_backup_engine_restore_db_from_latest_backup(be, dbname, dbname,
+                                                        restore_options, &err);
     CheckNoError(err);
     rocksdb_restore_options_destroy(restore_options);

@@ -799,7 +779,8 @@ int main(int argc, char** argv) {
     rocksdb_destroy_db(options, dbcheckpointname, &err);
     CheckNoError(err);

-    rocksdb_checkpoint_t* checkpoint = rocksdb_checkpoint_object_create(db, &err);
+    rocksdb_checkpoint_t* checkpoint =
+        rocksdb_checkpoint_object_create(db, &err);
     CheckNoError(err);

     rocksdb_checkpoint_create(checkpoint, dbcheckpointname, 0, &err);
@@ -976,10 +957,10 @@ int main(int argc, char** argv) {
   StartPhase("writebatch_vectors");
   {
     rocksdb_writebatch_t* wb = rocksdb_writebatch_create();
-    const char* k_list[2] = { "z", "ap" };
-    const size_t k_sizes[2] = { 1, 2 };
-    const char* v_list[3] = { "x", "y", "z" };
-    const size_t v_sizes[3] = { 1, 1, 1 };
+    const char* k_list[2] = {"z", "ap"};
+    const size_t k_sizes[2] = {1, 2};
+    const char* v_list[3] = {"x", "y", "z"};
+    const size_t v_sizes[3] = {1, 1, 1};
     rocksdb_writebatch_putv(wb, 2, k_list, k_sizes, 3, v_list, v_sizes);
     rocksdb_write(db, woptions, wb, &err);
     CheckNoError(err);
@@ -1041,13 +1022,17 @@ int main(int argc, char** argv) {
     CheckCondition(count == 3);
     size_t size;
     char* value;
-    value = rocksdb_writebatch_wi_get_from_batch(wbi, options, "box", 3, &size, &err);
+    value = rocksdb_writebatch_wi_get_from_batch(wbi, options, "box", 3, &size,
+                                                 &err);
     CheckValue(err, "c", &value, size);
-    value = rocksdb_writebatch_wi_get_from_batch(wbi, options, "bar", 3, &size, &err);
+    value = rocksdb_writebatch_wi_get_from_batch(wbi, options, "bar", 3, &size,
+                                                 &err);
     CheckValue(err, NULL, &value, size);
-    value = rocksdb_writebatch_wi_get_from_batch_and_db(wbi, db, roptions, "foo", 3, &size, &err);
+    value = rocksdb_writebatch_wi_get_from_batch_and_db(wbi, db, roptions,
+                                                        "foo", 3, &size, &err);
     CheckValue(err, "hello", &value, size);
-    value = rocksdb_writebatch_wi_get_from_batch_and_db(wbi, db, roptions, "box", 3, &size, &err);
+    value = rocksdb_writebatch_wi_get_from_batch_and_db(wbi, db, roptions,
+                                                        "box", 3, &size, &err);
     CheckValue(err, "c", &value, size);
     rocksdb_write_writebatch_wi(db, woptions, wbi, &err);
     CheckNoError(err);
@@ -1064,10 +1049,10 @@ int main(int argc, char** argv) {
   StartPhase("writebatch_wi_vectors");
   {
     rocksdb_writebatch_wi_t* wb = rocksdb_writebatch_wi_create(0, 1);
-    const char* k_list[2] = { "z", "ap" };
-    const size_t k_sizes[2] = { 1, 2 };
-    const char* v_list[3] = { "x", "y", "z" };
-    const size_t v_sizes[3] = { 1, 1, 1 };
+    const char* k_list[2] = {"z", "ap"};
+    const size_t k_sizes[2] = {1, 2};
+    const char* v_list[3] = {"x", "y", "z"};
+    const size_t v_sizes[3] = {1, 1, 1};
     rocksdb_writebatch_wi_putv(wb, 2, k_list, k_sizes, 3, v_list, v_sizes);
     rocksdb_write_writebatch_wi(db, woptions, wb, &err);
     CheckNoError(err);
@@ -1156,13 +1141,14 @@ int main(int argc, char** argv) {

   StartPhase("multiget");
   {
-    const char* keys[3] = { "box", "foo", "notfound" };
-    const size_t keys_sizes[3] = { 3, 3, 8 };
+    const char* keys[3] = {"box", "foo", "notfound"};
+    const size_t keys_sizes[3] = {3, 3, 8};
     char* vals[3];
     size_t vals_sizes[3];
     char* errs[3];
     const char* expected[3] = {"c", "hello", NULL};
-    rocksdb_multi_get(db, roptions, 3, keys, keys_sizes, vals, vals_sizes, errs);
+    rocksdb_multi_get(db, roptions, 3, keys, keys_sizes, vals, vals_sizes,
+                      errs);
     CheckMultiGetValues(3, vals, vals_sizes, errs, expected);
   }

@@ -1180,10 +1166,10 @@ int main(int argc, char** argv) {
     char keybuf[100];
     char valbuf[100];
     uint64_t sizes[2];
-    const char* start[2] = { "a", "k00000000000000010000" };
-    size_t start_len[2] = { 1, 21 };
-    const char* limit[2] = { "k00000000000000010000", "z" };
-    size_t limit_len[2] = { 21, 1 };
+    const char* start[2] = {"a", "k00000000000000010000"};
+    size_t start_len[2] = {1, 21};
+    const char* limit[2] = {"k00000000000000010000", "z"};
+    size_t limit_len[2] = {21, 1};
     rocksdb_writeoptions_set_sync(woptions, 0);
     for (i = 0; i < n; i++) {
       snprintf(keybuf, sizeof(keybuf), "k%020d", i);
@@ -1393,8 +1379,8 @@ int main(int argc, char** argv) {
                                                           factory);
     db = CheckCompaction(db, options_with_filter_factory, roptions, woptions);

-    rocksdb_options_set_compaction_filter_factory(
-        options_with_filter_factory, NULL);
+    rocksdb_options_set_compaction_filter_factory(options_with_filter_factory,
+                                                  NULL);
     rocksdb_options_destroy(options_with_filter_factory);
   }

@@ -1449,7 +1435,8 @@ int main(int argc, char** argv) {
     rocksdb_close(db);

     size_t cflen;
-    char** column_fams = rocksdb_list_column_families(db_options, dbname, &cflen, &err);
+    char** column_fams =
+        rocksdb_list_column_families(db_options, dbname, &cflen, &err);
     CheckNoError(err);
     CheckEqual("default", column_fams[0], 7);
     CheckEqual("cf1", column_fams[1], 3);
@@ -1465,7 +1452,8 @@ int main(int argc, char** argv) {
     LoadAndCheckLatestOptions(dbname, env, false, cache, NULL, 2, cf_names,
                               NULL);

-    db = rocksdb_open_column_families(db_options, dbname, 2, cf_names, cf_opts, handles, &err);
+    db = rocksdb_open_column_families(db_options, dbname, 2, cf_names, cf_opts,
+                                      handles, &err);
     CheckNoError(err);

     rocksdb_put_cf(db, woptions, handles[1], "foo", 3, "hello", 5, &err);
@@ -1483,11 +1471,10 @@ int main(int argc, char** argv) {
                    &err);
     CheckNoError(err);

-    rocksdb_flushoptions_t *flush_options = rocksdb_flushoptions_create();
+    rocksdb_flushoptions_t* flush_options = rocksdb_flushoptions_create();
     rocksdb_flushoptions_set_wait(flush_options, 1);
     rocksdb_flush_cf(db, flush_options, handles[1], &err);
-    CheckNoError(err)
-    rocksdb_flushoptions_destroy(flush_options);
+    CheckNoError(err) rocksdb_flushoptions_destroy(flush_options);

     CheckGetCF(db, roptions, handles[1], "foo", "hello");
     CheckPinGetCF(db, roptions, handles[1], "foo", "hello");
@@ -1524,27 +1511,29 @@ int main(int argc, char** argv) {
     rocksdb_flush_wal(db, 1, &err);
     CheckNoError(err);

-    const char* keys[3] = { "box", "box", "barfooxx" };
-    const rocksdb_column_family_handle_t* get_handles[3] = { handles[0], handles[1], handles[1] };
-    const size_t keys_sizes[3] = { 3, 3, 8 };
+    const char* keys[3] = {"box", "box", "barfooxx"};
+    const rocksdb_column_family_handle_t* get_handles[3] = {
+        handles[0], handles[1], handles[1]};
+    const size_t keys_sizes[3] = {3, 3, 8};
     char* vals[3];
     size_t vals_sizes[3];
     char* errs[3];
-    rocksdb_multi_get_cf(db, roptions, get_handles, 3, keys, keys_sizes, vals, vals_sizes, errs);
+    rocksdb_multi_get_cf(db, roptions, get_handles, 3, keys, keys_sizes, vals,
+                         vals_sizes, errs);
     int i;
     for (i = 0; i < 3; i++) {
       CheckEqual(NULL, errs[i], 0);
       switch (i) {
-      case 0:
-        CheckEqual(NULL, vals[i], vals_sizes[i]); // wrong cf
-        break;
-      case 1:
-        CheckEqual("c", vals[i], vals_sizes[i]); // bingo
-        break;
-      case 2:
-        CheckEqual(NULL, vals[i], vals_sizes[i]); // normal not found
-        break;
+        case 0:
+          CheckEqual(NULL, vals[i], vals_sizes[i]);  // wrong cf
+          break;
+        case 1:
+          CheckEqual("c", vals[i], vals_sizes[i]);  // bingo
+          break;
+        case 2:
+          CheckEqual(NULL, vals[i], vals_sizes[i]);  // normal not found
+          break;
       }
       Free(&vals[i]);
     }
@@ -1592,7 +1581,8 @@ int main(int argc, char** argv) {
       }
     }

-    rocksdb_iterator_t* iter = rocksdb_create_iterator_cf(db, roptions, handles[1]);
+    rocksdb_iterator_t* iter =
+        rocksdb_create_iterator_cf(db, roptions, handles[1]);
     CheckCondition(!rocksdb_iter_valid(iter));
     rocksdb_iter_seek_to_first(iter);
     CheckCondition(rocksdb_iter_valid(iter));
@@ -1605,9 +1595,11 @@ int main(int argc, char** argv) {
     CheckNoError(err);
     rocksdb_iter_destroy(iter);

-    rocksdb_column_family_handle_t* iters_cf_handles[2] = { handles[0], handles[1] };
+    rocksdb_column_family_handle_t* iters_cf_handles[2] = {handles[0],
+                                                           handles[1]};
     rocksdb_iterator_t* iters_handles[2];
-    rocksdb_create_iterators(db, roptions, iters_cf_handles, iters_handles, 2, &err);
+    rocksdb_create_iterators(db, roptions, iters_cf_handles, iters_handles, 2,
+                             &err);
     CheckNoError(err);

     iter = iters_handles[0];
@@ -1652,7 +1644,8 @@ int main(int argc, char** argv) {
   {
     // Create new database
     rocksdb_options_set_allow_mmap_reads(options, 1);
-    rocksdb_options_set_prefix_extractor(options, rocksdb_slicetransform_create_fixed_prefix(3));
+    rocksdb_options_set_prefix_extractor(
+        options, rocksdb_slicetransform_create_fixed_prefix(3));
     rocksdb_options_set_hash_skip_list_rep(options, 5000, 4, 4);
     rocksdb_options_set_plain_table_factory(options, 4, 10, 0.75, 16);
     rocksdb_options_set_allow_concurrent_memtable_write(options, 0);
@@ -1747,8 +1740,9 @@ int main(int argc, char** argv) {
     // amount of memory used within memtables should grow
     CheckCondition(rocksdb_approximate_memory_usage_get_mem_table_total(mu2) >=
                    rocksdb_approximate_memory_usage_get_mem_table_total(mu1));
-    CheckCondition(rocksdb_approximate_memory_usage_get_mem_table_unflushed(mu2) >=
-                   rocksdb_approximate_memory_usage_get_mem_table_unflushed(mu1));
+    CheckCondition(
+        rocksdb_approximate_memory_usage_get_mem_table_unflushed(mu2) >=
+        rocksdb_approximate_memory_usage_get_mem_table_unflushed(mu1));

     rocksdb_memory_consumers_destroy(consumers);
     rocksdb_approximate_memory_usage_destroy(mu1);
@@ -2839,53 +2833,57 @@ int main(int argc, char** argv) {
     db = rocksdb_open(options, dbname, &err);
     CheckNoError(err);

-    rocksdb_put(db, woptions, "a", 1, "0", 1, &err); CheckNoError(err);
-    rocksdb_put(db, woptions, "foo", 3, "bar", 3, &err); CheckNoError(err);
-    rocksdb_put(db, woptions, "foo1", 4, "bar1", 4, &err); CheckNoError(err);
-    rocksdb_put(db, woptions, "g1", 2, "0", 1, &err); CheckNoError(err);
+    rocksdb_put(db, woptions, "a", 1, "0", 1, &err);
+    CheckNoError(err);
+    rocksdb_put(db, woptions, "foo", 3, "bar", 3, &err);
+    CheckNoError(err);
+    rocksdb_put(db, woptions, "foo1", 4, "bar1", 4, &err);
+    CheckNoError(err);
+    rocksdb_put(db, woptions, "g1", 2, "0", 1, &err);
+    CheckNoError(err);

     // testing basic case with no iterate_upper_bound and no prefix_extractor
     {
-      rocksdb_readoptions_set_iterate_upper_bound(roptions, NULL, 0);
-      rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions);
+      rocksdb_readoptions_set_iterate_upper_bound(roptions, NULL, 0);
+      rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions);

-      rocksdb_iter_seek(iter, "foo", 3);
-      CheckCondition(rocksdb_iter_valid(iter));
-      CheckIter(iter, "foo", "bar");
+      rocksdb_iter_seek(iter, "foo", 3);
+      CheckCondition(rocksdb_iter_valid(iter));
+      CheckIter(iter, "foo", "bar");

-      rocksdb_iter_next(iter);
-      CheckCondition(rocksdb_iter_valid(iter));
-      CheckIter(iter, "foo1", "bar1");
+      rocksdb_iter_next(iter);
+      CheckCondition(rocksdb_iter_valid(iter));
+      CheckIter(iter, "foo1", "bar1");

-      rocksdb_iter_next(iter);
-      CheckCondition(rocksdb_iter_valid(iter));
-      CheckIter(iter, "g1", "0");
+      rocksdb_iter_next(iter);
+      CheckCondition(rocksdb_iter_valid(iter));
+      CheckIter(iter, "g1", "0");

-      rocksdb_iter_destroy(iter);
+      rocksdb_iter_destroy(iter);
     }

     // testing iterate_upper_bound and forward iterator
     // to make sure it stops at bound
     {
-      // iterate_upper_bound points beyond the last expected entry
-      rocksdb_readoptions_set_iterate_upper_bound(roptions, "foo2", 4);
+      // iterate_upper_bound points beyond the last expected entry
+      rocksdb_readoptions_set_iterate_upper_bound(roptions, "foo2", 4);

-      rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions);
+      rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions);

-      rocksdb_iter_seek(iter, "foo", 3);
-      CheckCondition(rocksdb_iter_valid(iter));
-      CheckIter(iter, "foo", "bar");
+      rocksdb_iter_seek(iter, "foo", 3);
+      CheckCondition(rocksdb_iter_valid(iter));
+      CheckIter(iter, "foo", "bar");

-      rocksdb_iter_next(iter);
-      CheckCondition(rocksdb_iter_valid(iter));
-      CheckIter(iter, "foo1", "bar1");
+      rocksdb_iter_next(iter);
+      CheckCondition(rocksdb_iter_valid(iter));
+      CheckIter(iter, "foo1", "bar1");

-      rocksdb_iter_next(iter);
-      // should stop here...
-      CheckCondition(!rocksdb_iter_valid(iter));
+      rocksdb_iter_next(iter);
+      // should stop here...
+ CheckCondition(!rocksdb_iter_valid(iter)); - rocksdb_iter_destroy(iter); - rocksdb_readoptions_set_iterate_upper_bound(roptions, NULL, 0); + rocksdb_iter_destroy(iter); + rocksdb_readoptions_set_iterate_upper_bound(roptions, NULL, 0); } } @@ -3009,7 +3007,7 @@ int main(int argc, char** argv) { snapshot = rocksdb_transactiondb_create_snapshot(txn_db); rocksdb_readoptions_set_snapshot(roptions, snapshot); - rocksdb_transactiondb_put(txn_db, woptions, "foo", 3, "hey", 3, &err); + rocksdb_transactiondb_put(txn_db, woptions, "foo", 3, "hey", 3, &err); CheckNoError(err); CheckTxnDBGet(txn_db, roptions, "foo", "hello"); @@ -3021,7 +3019,8 @@ int main(int argc, char** argv) { // iterate rocksdb_transaction_put(txn, "bar", 3, "hi", 2, &err); - rocksdb_iterator_t* iter = rocksdb_transaction_create_iterator(txn, roptions); + rocksdb_iterator_t* iter = + rocksdb_transaction_create_iterator(txn, roptions); CheckCondition(!rocksdb_iter_valid(iter)); rocksdb_iter_seek_to_first(iter); CheckCondition(rocksdb_iter_valid(iter)); diff --git a/db/column_family.cc b/db/column_family.cc index 0ce72ee2f..6ab0bcc7c 100644 --- a/db/column_family.cc +++ b/db/column_family.cc @@ -192,8 +192,7 @@ Status CheckCFPathsSupported(const DBOptions& db_options, return Status::NotSupported( "More than one CF paths are only supported in " "universal and level compaction styles. "); - } else if (cf_options.cf_paths.empty() && - db_options.db_paths.size() > 1) { + } else if (cf_options.cf_paths.empty() && db_options.db_paths.size() > 1) { return Status::NotSupported( "More than one DB paths are only supported in " "universal and level compaction styles. "); @@ -205,7 +204,7 @@ Status CheckCFPathsSupported(const DBOptions& db_options, namespace { const uint64_t kDefaultTtl = 0xfffffffffffffffe; const uint64_t kDefaultPeriodicCompSecs = 0xfffffffffffffffe; -} // namespace +} // anonymous namespace ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options, const ColumnFamilyOptions& src) { @@ -353,7 +352,8 @@ ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options, // were not deleted yet, when we open the DB we will find these .trash files // and schedule them to be deleted (or delete immediately if SstFileManager // was not used) - auto sfm = static_cast(db_options.sst_file_manager.get()); + auto sfm = + static_cast(db_options.sst_file_manager.get()); for (size_t i = 0; i < result.cf_paths.size(); i++) { DeleteScheduler::CleanupDirectory(db_options.env, sfm, result.cf_paths[i].path) @@ -610,8 +610,8 @@ ColumnFamilyData::ColumnFamilyData( compaction_picker_.reset( new FIFOCompactionPicker(ioptions_, &internal_comparator_)); } else if (ioptions_.compaction_style == kCompactionStyleNone) { - compaction_picker_.reset(new NullCompactionPicker( - ioptions_, &internal_comparator_)); + compaction_picker_.reset( + new NullCompactionPicker(ioptions_, &internal_comparator_)); ROCKS_LOG_WARN(ioptions_.logger, "Column family %s does not use any background compaction. 
" "Compactions can only be done via CompactFiles\n", @@ -878,7 +878,7 @@ int GetL0ThresholdSpeedupCompaction(int level0_file_num_compaction_trigger, return static_cast(res); } } -} // namespace +} // anonymous namespace std::pair ColumnFamilyData::GetWriteStallConditionAndCause( @@ -919,7 +919,7 @@ ColumnFamilyData::GetWriteStallConditionAndCause( } WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions( - const MutableCFOptions& mutable_cf_options) { + const MutableCFOptions& mutable_cf_options) { auto write_stall_condition = WriteStallCondition::kNormal; if (current_ != nullptr) { auto* vstorage = current_->storage_info(); @@ -1012,7 +1012,8 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions( mutable_cf_options.hard_pending_compaction_bytes_limit > 0 && (compaction_needed_bytes - mutable_cf_options.soft_pending_compaction_bytes_limit) > - 3 * (mutable_cf_options.hard_pending_compaction_bytes_limit - + 3 * + (mutable_cf_options.hard_pending_compaction_bytes_limit - mutable_cf_options.soft_pending_compaction_bytes_limit) / 4; @@ -1305,8 +1306,8 @@ bool ColumnFamilyData::ReturnThreadLocalSuperVersion(SuperVersion* sv) { return false; } -void ColumnFamilyData::InstallSuperVersion( - SuperVersionContext* sv_context, InstrumentedMutex* db_mutex) { +void ColumnFamilyData::InstallSuperVersion(SuperVersionContext* sv_context, + InstrumentedMutex* db_mutex) { db_mutex->AssertHeld(); return InstallSuperVersion(sv_context, mutable_cf_options_); } @@ -1483,8 +1484,8 @@ Env::WriteLifeTimeHint ColumnFamilyData::CalculateSSTWriteHint(int level) { // than base_level. return Env::WLTH_MEDIUM; } - return static_cast(level - base_level + - static_cast(Env::WLTH_MEDIUM)); + return static_cast( + level - base_level + static_cast(Env::WLTH_MEDIUM)); } Status ColumnFamilyData::AddDirectories( @@ -1580,8 +1581,8 @@ ColumnFamilyData* ColumnFamilySet::GetColumnFamily(uint32_t id) const { } } -ColumnFamilyData* ColumnFamilySet::GetColumnFamily(const std::string& name) - const { +ColumnFamilyData* ColumnFamilySet::GetColumnFamily( + const std::string& name) const { auto cfd_iter = column_families_.find(name); if (cfd_iter != column_families_.end()) { auto cfd = GetColumnFamily(cfd_iter->second); diff --git a/db/column_family.h b/db/column_family.h index 91a825374..3e6d01d22 100644 --- a/db/column_family.h +++ b/db/column_family.h @@ -163,8 +163,8 @@ extern const double kIncSlowdownRatio; class ColumnFamilyHandleImpl : public ColumnFamilyHandle { public: // create while holding the mutex - ColumnFamilyHandleImpl( - ColumnFamilyData* cfd, DBImpl* db, InstrumentedMutex* mutex); + ColumnFamilyHandleImpl(ColumnFamilyData* cfd, DBImpl* db, + InstrumentedMutex* mutex); // destroy without mutex virtual ~ColumnFamilyHandleImpl(); virtual ColumnFamilyData* cfd() const { return cfd_; } @@ -189,7 +189,8 @@ class ColumnFamilyHandleImpl : public ColumnFamilyHandle { class ColumnFamilyHandleInternal : public ColumnFamilyHandleImpl { public: ColumnFamilyHandleInternal() - : ColumnFamilyHandleImpl(nullptr, nullptr, nullptr), internal_cfd_(nullptr) {} + : ColumnFamilyHandleImpl(nullptr, nullptr, nullptr), + internal_cfd_(nullptr) {} void SetCFD(ColumnFamilyData* _cfd) { internal_cfd_ = _cfd; } virtual ColumnFamilyData* cfd() const override { return internal_cfd_; } @@ -357,7 +358,7 @@ class ColumnFamilyData { Version* current() { return current_; } Version* dummy_versions() { return dummy_versions_; } void SetCurrent(Version* _current); - uint64_t GetNumLiveVersions() const; // REQUIRE: DB mutex 
+  uint64_t GetNumLiveVersions() const;  // REQUIRE: DB mutex held
   uint64_t GetTotalSstFilesSize() const;   // REQUIRE: DB mutex held
   uint64_t GetLiveSstFilesSize() const;    // REQUIRE: DB mutex held
   uint64_t GetTotalBlobFileSize() const;   // REQUIRE: DB mutex held
@@ -552,7 +553,7 @@ class ColumnFamilyData {
   Version* dummy_versions_;  // Head of circular doubly-linked list of versions.
   Version* current_;         // == dummy_versions->prev_
 
-  std::atomic<int> refs_;      // outstanding references to ColumnFamilyData
+  std::atomic<int> refs_;  // outstanding references to ColumnFamilyData
   std::atomic<bool> initialized_;
   std::atomic<bool> dropped_;  // true if client dropped it
@@ -656,8 +657,7 @@ class ColumnFamilySet {
   // ColumnFamilySet supports iteration
   class iterator {
    public:
-    explicit iterator(ColumnFamilyData* cfd)
-        : current_(cfd) {}
+    explicit iterator(ColumnFamilyData* cfd) : current_(cfd) {}
     // NOTE: minimum operators for for-loop iteration
     iterator& operator++() {
       current_ = current_->next_;
diff --git a/db/column_family_test.cc b/db/column_family_test.cc
index aa8f73f63..d33cbe50a 100644
--- a/db/column_family_test.cc
+++ b/db/column_family_test.cc
@@ -39,9 +39,7 @@ class EnvCounter : public SpecialEnv {
  public:
   explicit EnvCounter(Env* base)
       : SpecialEnv(base), num_new_writable_file_(0) {}
-  int GetNumberOfNewWritableFileCalls() {
-    return num_new_writable_file_;
-  }
+  int GetNumberOfNewWritableFileCalls() { return num_new_writable_file_; }
   Status NewWritableFile(const std::string& f,
                          std::unique_ptr<WritableFile>* r,
                          const EnvOptions& soptions) override {
     ++num_new_writable_file_;
@@ -187,7 +185,7 @@ class ColumnFamilyTestBase : public testing::Test {
   }
 
   Status OpenReadOnly(std::vector<std::string> cf,
-                       std::vector<ColumnFamilyOptions> options = {}) {
+                      std::vector<ColumnFamilyOptions> options = {}) {
     std::vector<ColumnFamilyDescriptor> column_families;
     names_.clear();
     for (size_t i = 0; i < cf.size(); ++i) {
@@ -201,20 +199,17 @@ class ColumnFamilyTestBase : public testing::Test {
 
 #ifndef ROCKSDB_LITE  // ReadOnlyDB is not supported
   void AssertOpenReadOnly(std::vector<std::string> cf,
-                    std::vector<ColumnFamilyOptions> options = {}) {
+                          std::vector<ColumnFamilyOptions> options = {}) {
     ASSERT_OK(OpenReadOnly(cf, options));
   }
 #endif  // !ROCKSDB_LITE
 
-
   void Open(std::vector<std::string> cf,
             std::vector<ColumnFamilyOptions> options = {}) {
     ASSERT_OK(TryOpen(cf, options));
   }
 
-  void Open() {
-    Open({"default"});
-  }
+  void Open() { Open({"default"}); }
 
   DBImpl* dbfull() { return static_cast_with_check<DBImpl>(db_); }
 
@@ -253,7 +248,7 @@ class ColumnFamilyTestBase : public testing::Test {
   }
 
   void Destroy(const std::vector<ColumnFamilyDescriptor>& column_families =
-                   std::vector<ColumnFamilyDescriptor>()) {
+                   std::vector<ColumnFamilyDescriptor>()) {
     Close();
     ASSERT_OK(DestroyDB(dbname_, Options(db_options_, column_family_options_),
                         column_families));
@@ -335,9 +330,7 @@ class ColumnFamilyTestBase : public testing::Test {
     ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf]));
   }
 
-  void WaitForCompaction() {
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-  }
+  void WaitForCompaction() { ASSERT_OK(dbfull()->TEST_WaitForCompact()); }
 
   uint64_t MaxTotalInMemoryState() {
     return dbfull()->TEST_MaxTotalInMemoryState();
@@ -354,9 +347,7 @@ class ColumnFamilyTestBase : public testing::Test {
   Status Merge(int cf, const std::string& key, const std::string& value) {
     return db_->Merge(WriteOptions(), handles_[cf], Slice(key), Slice(value));
   }
-  Status Flush(int cf) {
-    return db_->Flush(FlushOptions(), handles_[cf]);
-  }
+  Status Flush(int cf) { return db_->Flush(FlushOptions(), handles_[cf]); }
 
   std::string Get(int cf, const std::string& key) {
     ReadOptions options;
@@ -409,8 +400,8 @@ class ColumnFamilyTestBase : public testing::Test {
 #ifndef ROCKSDB_LITE
     ASSERT_EQ(value, FilesPerLevel(cf));
 #else
-    (void) value;
-    (void) cf;
+    (void)value;
+    (void)cf;
 #endif
   }
 
@@ -426,7 +417,7 @@ class ColumnFamilyTestBase : public testing::Test {
 #ifndef ROCKSDB_LITE
     ASSERT_EQ(expected_value, CountLiveFiles());
 #else
-    (void) expected_value;
+    (void)expected_value;
 #endif
   }
 
@@ -476,7 +467,7 @@ class ColumnFamilyTestBase : public testing::Test {
 #ifndef ROCKSDB_LITE  // GetSortedWalFiles is not supported
     ASSERT_EQ(value, CountLiveLogFiles());
 #else
-    (void) value;
+    (void)value;
 #endif  // !ROCKSDB_LITE
   }
 
@@ -521,14 +512,14 @@ class ColumnFamilyTestBase : public testing::Test {
     return static_cast<int>(files.size());
   }
 
-  void RecalculateWriteStallConditions(ColumnFamilyData* cfd,
-      const MutableCFOptions& mutable_cf_options) {
+  void RecalculateWriteStallConditions(
+      ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options) {
     // add lock to avoid race condition between
    // `RecalculateWriteStallConditions` which writes to CFStats and
     // background `DBImpl::DumpStats()` threads which read CFStats
     dbfull()->TEST_LockMutex();
     cfd->RecalculateWriteStallConditions(mutable_cf_options);
-    dbfull()-> TEST_UnlockMutex();
+    dbfull()->TEST_UnlockMutex();
   }
 
   std::vector<ColumnFamilyHandle*> handles_;
@@ -970,8 +961,7 @@ TEST_P(ColumnFamilyTest, FlushTest) {
   }
 
   for (int i = 0; i < 3; ++i) {
-    uint64_t max_total_in_memory_state =
-        MaxTotalInMemoryState();
+    uint64_t max_total_in_memory_state = MaxTotalInMemoryState();
     ASSERT_OK(Flush(i));
     AssertMaxTotalInMemoryState(max_total_in_memory_state);
   }
@@ -1209,7 +1199,7 @@ TEST_P(ColumnFamilyTest, DifferentWriteBufferSizes) {
   WaitForFlush(2);
   AssertNumberOfImmutableMemtables({0, 0, 0, 0});
   AssertCountLiveLogFiles(12);
-  PutRandomData(1, 2*200, 1000);
+  PutRandomData(1, 2 * 200, 1000);
   WaitForFlush(1);
   AssertNumberOfImmutableMemtables({0, 0, 0, 0});
   AssertCountLiveLogFiles(7);
@@ -2123,7 +2113,6 @@ TEST_P(ColumnFamilyTest, ReadOnlyDBTest) {
   ASSERT_EQ("bla", Get(1, "foo"));
   ASSERT_EQ("blablablabla", Get(2, "foo"));
 
-
   // test newiterators
   {
     std::vector<Iterator*> iterators;
@@ -2488,7 +2477,7 @@ void DropSingleColumnFamily(ColumnFamilyTest* cf_test, int cf_id,
   }
   test_stage = kChildThreadFinishDroppingColumnFamily;
 }
-}  // namespace
+}  // anonymous namespace
 
 TEST_P(ColumnFamilyTest, CreateAndDropRace) {
   const int kCfCount = 5;
diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc
index 499220d7f..ef38946f7 100644
--- a/db/compact_files_test.cc
+++ b/db/compact_files_test.cc
@@ -348,9 +348,7 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
       return true;
     }
 
-    void SetDB(DB* db) {
-      db_ = db;
-    }
+    void SetDB(DB* db) { db_ = db; }
 
     const char* Name() const override { return "FilterWithGet"; }
 
@@ -358,7 +356,6 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
     DB* db_;
   };
 
-
   std::shared_ptr<FilterWithGet> cf(new FilterWithGet());
 
   Options options;
@@ -385,7 +382,6 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
         db->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), {fname}, 0));
   }
 
-
   delete db;
 }
 
@@ -400,10 +396,9 @@ TEST_F(CompactFilesTest, SentinelCompressionType) {
   }
   // Check that passing `CompressionType::kDisableCompressionOption` to
   // `CompactFiles` causes it to use the column family compression options.
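// [Editor's note] Hedged sketch of the behavior the test above checks:
// passing kDisableCompressionOption in CompactionOptions::compression makes
// CompactFiles fall back to the column family's own compression settings.
// The helper name and its arguments are illustrative, not RocksDB source.
#include <cassert>
#include <string>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

void CompactWithCfCompression(rocksdb::DB* db,
                              const std::vector<std::string>& input_files,
                              int output_level) {
  rocksdb::CompactionOptions copts;
  // Sentinel value: defer to the column family's compression options.
  copts.compression = rocksdb::kDisableCompressionOption;
  rocksdb::Status s = db->CompactFiles(copts, input_files, output_level);
  assert(s.ok());
}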
-  for (auto compaction_style :
-      {CompactionStyle::kCompactionStyleLevel,
-       CompactionStyle::kCompactionStyleUniversal,
-       CompactionStyle::kCompactionStyleNone}) {
+  for (auto compaction_style : {CompactionStyle::kCompactionStyleLevel,
+                                CompactionStyle::kCompactionStyleUniversal,
+                                CompactionStyle::kCompactionStyleNone}) {
     ASSERT_OK(DestroyDB(db_name_, Options()));
     Options options;
     options.compaction_style = compaction_style;
diff --git a/db/comparator_db_test.cc b/db/comparator_db_test.cc
index 229ab9a5a..e5e3493b3 100644
--- a/db/comparator_db_test.cc
+++ b/db/comparator_db_test.cc
@@ -17,7 +17,6 @@
 #include "util/string_util.h"
 #include "utilities/merge_operators.h"
 
-
 namespace ROCKSDB_NAMESPACE {
 namespace {
 
@@ -249,7 +248,7 @@ class TwoStrComparator : public Comparator {
 
   void FindShortSuccessor(std::string* /*key*/) const override {}
 };
-}  // namespace
+}  // anonymous namespace
 
 class ComparatorDBTest
     : public testing::Test,
@@ -470,7 +469,7 @@ void VerifySuccessor(const Slice& s, const Slice& t) {
   ASSERT_FALSE(rbc->IsSameLengthImmediateSuccessor(t, s));
 }
 
-}  // namespace
+}  // anonymous namespace
 
 TEST_P(ComparatorDBTest, IsSameLengthImmediateSuccessor) {
   {
diff --git a/db/convenience.cc b/db/convenience.cc
index 81389112d..6344d356d 100644
--- a/db/convenience.cc
+++ b/db/convenience.cc
@@ -26,8 +26,7 @@ Status DeleteFilesInRange(DB* db, ColumnFamilyHandle* column_family,
 }
 
 Status DeleteFilesInRanges(DB* db, ColumnFamilyHandle* column_family,
-                           const RangePtr* ranges, size_t n,
-                           bool include_end) {
+                           const RangePtr* ranges, size_t n, bool include_end) {
   return (static_cast_with_check<DBImpl>(db->GetRootDB()))
       ->DeleteFilesInRanges(column_family, ranges, n, include_end);
 }
@@ -47,9 +46,8 @@ Status VerifySstFileChecksum(const Options& options,
   InternalKeyComparator internal_comparator(options.comparator);
   ImmutableOptions ioptions(options);
 
-  Status s = ioptions.fs->NewRandomAccessFile(file_path,
-                                              FileOptions(env_options),
-                                              &file, nullptr);
+  Status s = ioptions.fs->NewRandomAccessFile(
      file_path, FileOptions(env_options), &file, nullptr);
   if (s.ok()) {
     s = ioptions.fs->GetFileSize(file_path, IOOptions(), &file_size, nullptr);
   } else {
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index b03692979..8ccac6130 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -65,7 +65,7 @@ class ErrorEnv : public EnvWrapper {
     return target()->NewWritableFile(fname, result, soptions);
   }
 };
-}  // namespace
+}  // anonymous namespace
 class CorruptionTest : public testing::Test {
  public:
  std::shared_ptr<Env> env_guard_;
@@ -138,9 +138,7 @@ class CorruptionTest : public testing::Test {
     return DB::Open(opt, dbname_, &db_);
   }
 
-  void Reopen(Options* options = nullptr) {
-    ASSERT_OK(TryReopen(options));
-  }
+  void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }
 
   void RepairDB() {
     delete db_;
@@ -156,7 +154,7 @@ class CorruptionTest : public testing::Test {
         DBImpl* dbi = static_cast_with_check<DBImpl>(db_);
         ASSERT_OK(dbi->TEST_FlushMemTable());
       }
-      //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
+      // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
       Slice key = Key(i + start, &key_space);
       batch.Clear();
       ASSERT_OK(batch.Put(key, Value(i + start, &value_space)));
@@ -183,8 +181,7 @@ class CorruptionTest : public testing::Test {
       ASSERT_OK(iter->status());
       uint64_t key;
       Slice in(iter->key());
-      if (!ConsumeDecimalNumber(&in, &key) ||
-          !in.empty() ||
+      if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
           key < next_expected) {
         bad_keys++;
         continue;
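// [Editor's note] Small usage sketch for the VerifySstFileChecksum API whose
// implementation is reformatted in db/convenience.cc above; the wrapper
// function here is hypothetical, the RocksDB call itself is real.
#include <string>

#include "rocksdb/convenience.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"

// Verify the block checksums of a single SST file on disk.
rocksdb::Status CheckSst(const std::string& file_path) {
  rocksdb::Options options;  // comparator etc. must match the file's CF
  rocksdb::EnvOptions env_options;
  return rocksdb::VerifySstFileChecksum(options, env_options, file_path);
}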
@@ -200,10 +197,11 @@ class CorruptionTest : public testing::Test {
     iter->status().PermitUncheckedError();
     delete iter;
 
-    fprintf(stderr,
-      "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%llu\n",
-      min_expected, max_expected, correct, bad_keys, bad_values,
-      static_cast<unsigned long long>(missed));
+    fprintf(
+        stderr,
+        "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%llu\n",
+        min_expected, max_expected, correct, bad_keys, bad_values,
+        static_cast<unsigned long long>(missed));
     ASSERT_LE(min_expected, correct);
     ASSERT_GE(max_expected, correct);
   }
@@ -217,8 +215,7 @@ class CorruptionTest : public testing::Test {
     std::string fname;
     int picked_number = -1;
     for (size_t i = 0; i < filenames.size(); i++) {
-      if (ParseFileName(filenames[i], &number, &type) &&
-          type == filetype &&
+      if (ParseFileName(filenames[i], &number, &type) && type == filetype &&
          static_cast<int>(number) > picked_number) {  // Pick latest file
         fname = dbname_ + "/" + filenames[i];
         picked_number = static_cast<int>(number);
@@ -244,7 +241,6 @@ class CorruptionTest : public testing::Test {
     FAIL() << "no file found at level";
   }
 
-
   int Property(const std::string& name) {
     std::string property;
     int result;
diff --git a/db/cuckoo_table_db_test.cc b/db/cuckoo_table_db_test.cc
index 2484c402d..868b798ea 100644
--- a/db/cuckoo_table_db_test.cc
+++ b/db/cuckoo_table_db_test.cc
@@ -77,9 +77,7 @@ class CuckooTableDBTest : public testing::Test {
     return db_->Put(WriteOptions(), k, v);
   }
 
-  Status Delete(const std::string& k) {
-    return db_->Delete(WriteOptions(), k);
-  }
+  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }
 
   std::string Get(const std::string& k) {
     ReadOptions options;
@@ -313,23 +311,21 @@ TEST_F(CuckooTableDBTest, AdaptiveTable) {
   // Write some keys using plain table.
   std::shared_ptr<TableFactory> block_based_factory(
       NewBlockBasedTableFactory());
-  std::shared_ptr<TableFactory> plain_table_factory(
-      NewPlainTableFactory());
-  std::shared_ptr<TableFactory> cuckoo_table_factory(
-      NewCuckooTableFactory());
+  std::shared_ptr<TableFactory> plain_table_factory(NewPlainTableFactory());
+  std::shared_ptr<TableFactory> cuckoo_table_factory(NewCuckooTableFactory());
   options.create_if_missing = false;
-  options.table_factory.reset(NewAdaptiveTableFactory(
-      plain_table_factory, block_based_factory, plain_table_factory,
-      cuckoo_table_factory));
+  options.table_factory.reset(
+      NewAdaptiveTableFactory(plain_table_factory, block_based_factory,
+                              plain_table_factory, cuckoo_table_factory));
   Reopen(&options);
   ASSERT_OK(Put("key4", "v4"));
   ASSERT_OK(Put("key1", "v5"));
   ASSERT_OK(dbfull()->TEST_FlushMemTable());
 
   // Write some keys using block based table.
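// [Editor's note] Hedged sketch of the adaptive-table setup exercised above:
// the first factory writes new files, while reads dispatch to whichever
// factory matches each existing file's format. Function name is illustrative.
#include <memory>

#include "rocksdb/options.h"
#include "rocksdb/table.h"

void UseAdaptiveTable(rocksdb::Options* options) {
  std::shared_ptr<rocksdb::TableFactory> bbt(
      rocksdb::NewBlockBasedTableFactory());
  std::shared_ptr<rocksdb::TableFactory> plain(
      rocksdb::NewPlainTableFactory());
  std::shared_ptr<rocksdb::TableFactory> cuckoo(
      rocksdb::NewCuckooTableFactory());
  // Write block-based files; still able to read plain and cuckoo files.
  options->table_factory.reset(
      rocksdb::NewAdaptiveTableFactory(bbt, bbt, plain, cuckoo));
}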
-  options.table_factory.reset(NewAdaptiveTableFactory(
-      block_based_factory, block_based_factory, plain_table_factory,
-      cuckoo_table_factory));
+  options.table_factory.reset(
+      NewAdaptiveTableFactory(block_based_factory, block_based_factory,
+                              plain_table_factory, cuckoo_table_factory));
   Reopen(&options);
   ASSERT_OK(Put("key5", "v6"));
   ASSERT_OK(Put("key2", "v7"));
diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc
index 3515040dd..622ea2f6e 100644
--- a/db/db_basic_test.cc
+++ b/db/db_basic_test.cc
@@ -2805,7 +2805,7 @@ TEST_F(DBBasicTest, MultiGetIOBufferOverrun) {
   table_options.pin_l0_filter_and_index_blocks_in_cache = true;
   table_options.block_size = 16 * 1024;
   ASSERT_TRUE(table_options.block_size >
-            BlockBasedTable::kMultiGetReadStackBufSize);
+              BlockBasedTable::kMultiGetReadStackBufSize);
   options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   Reopen(options);
 
@@ -2914,7 +2914,7 @@ class TableFileListener : public EventListener {
   InstrumentedMutex mutex_;
   std::unordered_map<std::string, std::vector<std::string>> cf_to_paths_;
 };
-}  // namespace
+}  // anonymous namespace
 
 TEST_F(DBBasicTest, LastSstFileNotInManifest) {
   // If the last sst file is not tracked in MANIFEST,
diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc
index 38ecd7a81..3d75a927c 100644
--- a/db/db_block_cache_test.cc
+++ b/db/db_block_cache_test.cc
@@ -414,7 +414,7 @@ class ReadOnlyCacheWrapper : public CacheWrapper {
   }
 };
 
-}  // namespace
+}  // anonymous namespace
 
 TEST_F(DBBlockCacheTest, TestWithSameCompressed) {
   auto table_options = GetTableOptions();
@@ -1973,7 +1973,7 @@ struct CacheKeyDecoder {
                        DownwardInvolution(decoded_session_counter));
   }
 };
-}  // namespace
+}  // anonymous namespace
 
 TEST_F(CacheKeyTest, Encodings) {
   // This test primarily verifies this claim from cache_key.cc:
diff --git a/db/db_bloom_filter_test.cc b/db/db_bloom_filter_test.cc
index f757693fd..d68ab6115 100644
--- a/db/db_bloom_filter_test.cc
+++ b/db/db_bloom_filter_test.cc
@@ -43,7 +43,7 @@ const std::string kStandard128Ribbon =
     test::Standard128RibbonFilterPolicy::kClassName();
 const std::string kAutoBloom = BloomFilterPolicy::kClassName();
 const std::string kAutoRibbon = RibbonFilterPolicy::kClassName();
-}  // namespace
+}  // anonymous namespace
 
 // DB tests related to bloom filter.
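// [Editor's note] For orientation alongside the bloom-filter tests below: a
// minimal, assumed-typical configuration enabling a bloom filter on the
// block-based table. ~10 bits per key gives roughly a 1% false-positive rate.
// The helper name is illustrative.
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

void EnableBloom(rocksdb::Options* options) {
  rocksdb::BlockBasedTableOptions table_options;
  table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
  options->table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
}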
@@ -622,7 +622,7 @@ class AlwaysTrueFilterPolicy : public ReadOnlyBuiltinFilterPolicy {
   bool skip_;
 };
 
-}  // namespace
+}  // anonymous namespace
 
 TEST_P(DBBloomFilterTestWithParam, SkipFilterOnEssentiallyZeroBpk) {
   constexpr int maxKey = 10;
@@ -767,10 +767,10 @@ INSTANTIATE_TEST_CASE_P(
 
 INSTANTIATE_TEST_CASE_P(
     FormatLatest, DBBloomFilterTestWithParam,
-    ::testing::Values(
-        std::make_tuple(kAutoBloom, true, kLatestFormatVersion),
-        std::make_tuple(kAutoBloom, false, kLatestFormatVersion),
-        std::make_tuple(kAutoRibbon, false, kLatestFormatVersion)));
+    ::testing::Values(std::make_tuple(kAutoBloom, true, kLatestFormatVersion),
+                      std::make_tuple(kAutoBloom, false, kLatestFormatVersion),
+                      std::make_tuple(kAutoRibbon, false,
+                                      kLatestFormatVersion)));
 #endif  // !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
 
 TEST_F(DBBloomFilterTest, BloomFilterRate) {
@@ -840,7 +840,7 @@ std::vector<CompatibilityConfig> kCompatibilityConfigs = {
      BlockBasedTableOptions().format_version},
     {kCompatibilityRibbonPolicy, true, BlockBasedTableOptions().format_version},
 };
-}  // namespace
+}  // anonymous namespace
 
 TEST_F(DBBloomFilterTest, BloomFilterCompatibility) {
   Options options = CurrentOptions();
@@ -1678,7 +1678,7 @@ class TestingContextCustomFilterPolicy
  private:
  mutable std::string test_report_;
 };
-}  // namespace
+}  // anonymous namespace
 
 TEST_F(DBBloomFilterTest, ContextCustomFilterPolicy) {
   auto policy = std::make_shared<TestingContextCustomFilterPolicy>(15, 8, 5);
@@ -2186,16 +2186,14 @@ INSTANTIATE_TEST_CASE_P(DBBloomFilterTestVaryPrefixAndFormatVer,
                                           std::make_tuple(false, 2),
                                           std::make_tuple(false, 3),
                                           std::make_tuple(false, 4),
-                                          std::make_tuple(false, 5),
-                                          std::make_tuple(true, 2),
-                                          std::make_tuple(true, 3),
-                                          std::make_tuple(true, 4),
+                                          std::make_tuple(false, 5), std::make_tuple(true, 2),
+                                          std::make_tuple(true, 3), std::make_tuple(true, 4),
                                           std::make_tuple(true, 5)));
 
 #ifndef ROCKSDB_LITE
 namespace {
 static const std::string kPlainTable = "test_PlainTableBloom";
-}  // namespace
+}  // anonymous namespace
 
 class BloomStatsTestWithParam
     : public DBBloomFilterTest,
@@ -2408,7 +2406,7 @@ void PrefixScanInit(DBBloomFilterTest* dbtest) {
     dbtest->Flush();
   }
 }
-}  // namespace
+}  // anonymous namespace
 
 TEST_F(DBBloomFilterTest, PrefixScan) {
   while (ChangeFilterOptions()) {
@@ -3169,7 +3167,7 @@ std::pair<uint64_t, uint64_t> CheckedAndUseful(uint64_t checked,
                                                uint64_t useful) {
   return {checked, useful};
 }
-}  // namespace
+}  // anonymous namespace
 
 // This uses a prefix_extractor + comparator combination that violates
 // one of the old obsolete, unnecessary axioms of prefix extraction:
@@ -3377,7 +3375,7 @@ class NonIdempotentFixed4Transform : public SliceTransform {
   bool InDomain(const Slice& src) const override { return src.size() >= 5; }
 };
 
-}  // namespace
+}  // anonymous namespace
 
 // This uses a prefix_extractor + comparator combination that violates
 // two of the old obsolete, unnecessary axioms of prefix extraction:
diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc
index e67194660..73af26e39 100644
--- a/db/db_compaction_test.cc
+++ b/db/db_compaction_test.cc
@@ -255,9 +255,8 @@ Options DeletionTriggerOptions(Options options) {
   return options;
 }
 
-bool HaveOverlappingKeyRanges(
-    const Comparator* c,
-    const SstFileMetaData& a, const SstFileMetaData& b) {
+bool HaveOverlappingKeyRanges(const Comparator* c, const SstFileMetaData& a,
+                              const SstFileMetaData& b) {
   if (c->CompareWithoutTimestamp(a.smallestkey, b.smallestkey) >= 0) {
     if (c->CompareWithoutTimestamp(a.smallestkey, b.largestkey) <= 0) {
       // b.smallestkey <= a.smallestkey <= b.largestkey
@@ -282,18 +281,15 @@ bool HaveOverlappingKeyRanges(
 // Identifies all files between level "min_level" and "max_level"
 // which has overlapping key range with "input_file_meta".
 void GetOverlappingFileNumbersForLevelCompaction(
-    const ColumnFamilyMetaData& cf_meta,
-    const Comparator* comparator,
-    int min_level, int max_level,
-    const SstFileMetaData* input_file_meta,
+    const ColumnFamilyMetaData& cf_meta, const Comparator* comparator,
+    int min_level, int max_level, const SstFileMetaData* input_file_meta,
     std::set<std::string>* overlapping_file_names) {
   std::set<const SstFileMetaData*> overlapping_files;
   overlapping_files.insert(input_file_meta);
   for (int m = min_level; m <= max_level; ++m) {
     for (auto& file : cf_meta.levels[m].files) {
       for (auto* included_file : overlapping_files) {
-        if (HaveOverlappingKeyRanges(
-                comparator, *included_file, file)) {
+        if (HaveOverlappingKeyRanges(comparator, *included_file, file)) {
           overlapping_files.insert(&file);
           overlapping_file_names->insert(file.name);
           break;
@@ -316,12 +312,9 @@ void VerifyCompactionResult(
 #endif
 }
 
-const SstFileMetaData* PickFileRandomly(
-    const ColumnFamilyMetaData& cf_meta,
-    Random* rand,
-    int* level = nullptr) {
-  auto file_id = rand->Uniform(static_cast<int>(
-      cf_meta.file_count)) + 1;
+const SstFileMetaData* PickFileRandomly(const ColumnFamilyMetaData& cf_meta,
+                                        Random* rand, int* level = nullptr) {
+  auto file_id = rand->Uniform(static_cast<int>(cf_meta.file_count)) + 1;
   for (auto& level_meta : cf_meta.levels) {
     if (file_id <= level_meta.files.size()) {
       if (level != nullptr) {
@@ -747,7 +740,6 @@ TEST_F(DBCompactionTest, DisableStatsUpdateReopen) {
   }
 }
 
-
 TEST_P(DBCompactionTestWithParam, CompactionTrigger) {
   const int kNumKeysPerFile = 100;
 
@@ -890,7 +882,7 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) {
 
 TEST_P(DBCompactionTestWithParam, CompactionsGenerateMultipleFiles) {
   Options options = CurrentOptions();
-  options.write_buffer_size = 100000000;        // Large write buffer
+  options.write_buffer_size = 100000000;  // Large write buffer
   options.max_subcompactions = max_subcompactions_;
   CreateAndReopenWithCF({"pikachu"}, options);
 
@@ -1076,7 +1068,7 @@ TEST_F(DBCompactionTest, ZeroSeqIdCompaction) {
   compact_opt.compression = kNoCompression;
   compact_opt.output_file_size_limit = 4096;
   const size_t key_len =
-    static_cast<size_t>(compact_opt.output_file_size_limit) / 5;
+      static_cast<size_t>(compact_opt.output_file_size_limit) / 5;
 
   DestroyAndReopen(options);
 
@@ -1254,14 +1246,8 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
   DestroyAndReopen(options);
   // non overlapping ranges
   std::vector<std::pair<int32_t, int32_t>> ranges = {
-      {100, 199},
-      {300, 399},
-      {0, 99},
-      {200, 299},
-      {600, 699},
-      {400, 499},
-      {500, 550},
-      {551, 599},
+      {100, 199}, {300, 399}, {0, 99},    {200, 299},
+      {600, 699}, {400, 499}, {500, 550}, {551, 599},
   };
   int32_t value_size = 10 * 1024;  // 10 KB
 
@@ -1304,14 +1290,15 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
   DestroyAndReopen(options);
   // Same ranges as above but overlapping
   ranges = {
-      {100, 199},
-      {300, 399},
-      {0, 99},
-      {200, 299},
-      {600, 699},
-      {400, 499},
-      {500, 560},  // this range overlap with the next one
-      {551, 599},
+      {100, 199},
+      {300, 399},
+      {0, 99},
+      {200, 299},
+      {600, 699},
+      {400, 499},
+      {500, 560},  // this range overlap with the next
+                   // one
+      {551, 599},
   };
   for (size_t i = 0; i < ranges.size(); i++) {
     for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
@@ -1914,7 +1901,7 @@ TEST_F(DBCompactionTest, DeleteFilesInRanges) {
   ASSERT_EQ("0,0,10", FilesPerLevel(0));
 
   // file [0 => 100), [200 => 300), ... [800, 900)
-  for (auto i = 0; i < 10; i+=2) {
+  for (auto i = 0; i < 10; i += 2) {
     for (auto j = 0; j < 100; j++) {
       auto k = i * 100 + j;
       ASSERT_OK(Put(Key(k), values[k]));
@@ -2356,14 +2343,14 @@ TEST_P(DBCompactionTestWithParam, LevelCompactionCFPathUse) {
   cf_opt1.cf_paths.emplace_back(dbname_ + "cf1_2", 4 * 1024 * 1024);
   cf_opt1.cf_paths.emplace_back(dbname_ + "cf1_3", 1024 * 1024 * 1024);
   option_vector.emplace_back(DBOptions(options), cf_opt1);
-  CreateColumnFamilies({"one"},option_vector[1]);
+  CreateColumnFamilies({"one"}, option_vector[1]);
 
   // Configure CF2 specific paths.
   cf_opt2.cf_paths.emplace_back(dbname_ + "cf2", 500 * 1024);
   cf_opt2.cf_paths.emplace_back(dbname_ + "cf2_2", 4 * 1024 * 1024);
   cf_opt2.cf_paths.emplace_back(dbname_ + "cf2_3", 1024 * 1024 * 1024);
   option_vector.emplace_back(DBOptions(options), cf_opt2);
-  CreateColumnFamilies({"two"},option_vector[2]);
+  CreateColumnFamilies({"two"}, option_vector[2]);
 
   ReopenWithColumnFamilies({"default", "one", "two"}, option_vector);
 
@@ -2736,7 +2723,6 @@ TEST_P(DBCompactionTestWithParam, ManualCompaction) {
   }
 }
 
-
 TEST_P(DBCompactionTestWithParam, ManualLevelCompactionOutputPathId) {
   Options options = CurrentOptions();
   options.db_paths.emplace_back(dbname_ + "_2", 2 * 10485760);
@@ -2873,14 +2859,13 @@ TEST_P(DBCompactionTestWithParam, DISABLED_CompactFilesOnLevelCompaction) {
     auto file_meta = PickFileRandomly(cf_meta, &rnd, &level);
     compaction_input_file_names.push_back(file_meta->name);
     GetOverlappingFileNumbersForLevelCompaction(
-        cf_meta, options.comparator, level, output_level,
-        file_meta, &overlapping_file_names);
+        cf_meta, options.comparator, level, output_level, file_meta,
+        &overlapping_file_names);
   }
 
-  ASSERT_OK(dbfull()->CompactFiles(
-      CompactionOptions(), handles_[1],
-      compaction_input_file_names,
-      output_level));
+  ASSERT_OK(dbfull()->CompactFiles(CompactionOptions(), handles_[1],
+                                   compaction_input_file_names,
+                                   output_level));
 
   // Make sure all overlapping files do not exist after compaction
   dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
@@ -2903,8 +2888,7 @@ TEST_P(DBCompactionTestWithParam, PartialCompactionFailure) {
   options.write_buffer_size = kKeysPerBuffer * kKvSize;
   options.max_write_buffer_number = 2;
   options.target_file_size_base =
-      options.write_buffer_size *
-      (options.max_write_buffer_number - 1);
+      options.write_buffer_size * (options.max_write_buffer_number - 1);
   options.level0_file_num_compaction_trigger = kNumL1Files;
   options.max_bytes_for_level_base =
       options.level0_file_num_compaction_trigger *
@@ -2924,10 +2908,9 @@ TEST_P(DBCompactionTestWithParam, PartialCompactionFailure) {
 
   DestroyAndReopen(options);
 
-  const int kNumInsertedKeys =
-      options.level0_file_num_compaction_trigger *
-      (options.max_write_buffer_number - 1) *
-      kKeysPerBuffer;
+  const int kNumInsertedKeys = options.level0_file_num_compaction_trigger *
                               (options.max_write_buffer_number - 1) *
                               kKeysPerBuffer;
 
   Random rnd(301);
   std::vector<std::string> keys;
@@ -3625,9 +3608,8 @@ TEST_F(DBCompactionTest, CompactFilesPendingL0Bug) {
   ASSERT_EQ(kNumL0Files, cf_meta.levels[0].files.size());
   std::vector<std::string> input_filenames;
   input_filenames.push_back(cf_meta.levels[0].files.front().name);
-  ASSERT_OK(dbfull()
-                ->CompactFiles(CompactionOptions(), input_filenames,
-                               0 /* output_level */));
+  ASSERT_OK(dbfull()->CompactFiles(CompactionOptions(), input_filenames,
                                   0 /* output_level */));
   TEST_SYNC_POINT("DBCompactionTest::CompactFilesPendingL0Bug:ManualCompacted");
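// [Editor's note] Hedged sketch of the DeleteFilesInRange API exercised by
// the DeleteFilesInRanges test above: it drops whole SST files fully
// contained in [begin, end); keys in files straddling the boundary are left
// for regular compaction. The wrapper name is illustrative.
#include "rocksdb/convenience.h"
#include "rocksdb/db.h"

rocksdb::Status DropRange(rocksdb::DB* db, const rocksdb::Slice& begin,
                          const rocksdb::Slice& end) {
  return rocksdb::DeleteFilesInRange(db, db->DefaultColumnFamily(), &begin,
                                     &end);
}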
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
 }
 
@@ -4510,9 +4492,9 @@ TEST_F(DBCompactionTest, LevelPeriodicAndTtlCompaction) {
   const int kValueSize = 100;
 
   Options options = CurrentOptions();
-  options.ttl = 10 * 60 * 60;  // 10 hours
+  options.ttl = 10 * 60 * 60;                          // 10 hours
   options.periodic_compaction_seconds = 48 * 60 * 60;  // 2 days
-  options.max_open_files = -1;   // needed for both periodic and ttl compactions
+  options.max_open_files = -1;  // needed for both periodic and ttl compactions
   env_->SetMockSleep();
   options.env = env_;
 
@@ -4934,7 +4916,7 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) {
        {"DBImpl::FlushMemTable:StallWaitDone", "CompactionJob::Run():End"}});
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
 
-  //used for the delayable flushes
+  // used for the delayable flushes
   FlushOptions flush_opts;
   flush_opts.allow_write_stall = true;
   for (int i = 0; i < kNumL0FilesLimit - 1; ++i) {
@@ -4953,7 +4935,8 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) {
   ASSERT_OK(Put(std::to_string(0), rnd.RandomString(1024)));
   ASSERT_OK(dbfull()->Flush(flush_opts));
   ASSERT_OK(Put(std::to_string(0), rnd.RandomString(1024)));
-  TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PostFlush");
+  TEST_SYNC_POINT(
+      "DBCompactionTest::CompactRangeSkipFlushAfterDelay:PostFlush");
 
   manual_compaction_thread.join();
 
   // If CompactRange's flush was skipped, the final Put above will still be
@@ -5246,10 +5229,10 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
   }
 
   std::shared_ptr<ConcurrentTaskLimiter> unique_limiter(
-    NewConcurrentTaskLimiter("unique_limiter", -1));
+      NewConcurrentTaskLimiter("unique_limiter", -1));
 
-  const char* cf_names[] = {"default", "0", "1", "2", "3", "4", "5",
-    "6", "7", "8", "9", "a", "b", "c", "d", "e", "f" };
+  const char* cf_names[] = {"default", "0", "1", "2", "3", "4", "5", "6", "7",
+                            "8", "9", "a", "b", "c", "d", "e", "f"};
   const unsigned int cf_count = sizeof cf_names / sizeof cf_names[0];
 
   std::unordered_map<std::string, CompactionLimiter*> cf_to_limiter;
@@ -5261,10 +5244,10 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
   options.level0_file_num_compaction_trigger = 4;
   options.level0_slowdown_writes_trigger = 64;
   options.level0_stop_writes_trigger = 64;
-  options.max_background_jobs = kMaxBackgroundThreads; // Enough threads
+  options.max_background_jobs = kMaxBackgroundThreads;  // Enough threads
   options.memtable_factory.reset(
       test::NewSpecialSkipListFactory(kNumKeysPerFile));
-  options.max_write_buffer_number = 10; // Enough memtables
+  options.max_write_buffer_number = 10;  // Enough memtables
   DestroyAndReopen(options);
 
   std::vector<Options> option_vector;
@@ -5292,9 +5275,8 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
     CreateColumnFamilies({cf_names[cf]}, option_vector[cf]);
   }
 
-  ReopenWithColumnFamilies(std::vector<std::string>(cf_names,
-                                                    cf_names + cf_count),
-                           option_vector);
+  ReopenWithColumnFamilies(
+      std::vector<std::string>(cf_names, cf_names + cf_count), option_vector);
 
   port::Mutex mutex;
 
@@ -5356,7 +5338,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
   // Enough L0 files to trigger compaction
   for (unsigned int cf = 0; cf < cf_count; cf++) {
     ASSERT_EQ(NumTableFilesAtLevel(0, cf),
-             options.level0_file_num_compaction_trigger);
+              options.level0_file_num_compaction_trigger);
   }
 
   // Create more files for one column family, which triggers speed up
@@ -5399,7 +5381,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
 
   // flush one more file to cf 1
   for (int i = 0; i < kNumKeysPerFile; i++) {
-    ASSERT_OK(Put(cf_test, Key(keyIndex++), ""));
+    ASSERT_OK(Put(cf_test, Key(keyIndex++), ""));
   }
   // put extra key to trigger flush
   ASSERT_OK(Put(cf_test, "", ""));
@@ -5434,9 +5416,7 @@ TEST_P(DBCompactionDirectIOTest, DirectIO) {
   });
   if (options.use_direct_io_for_flush_and_compaction) {
     SyncPoint::GetInstance()->SetCallBack(
-        "SanitizeOptions:direct_io", [&](void* /*arg*/) {
-          readahead = true;
-        });
+        "SanitizeOptions:direct_io", [&](void* /*arg*/) { readahead = true; });
   }
   SyncPoint::GetInstance()->EnableProcessing();
   CreateAndReopenWithCF({"pikachu"}, options);
@@ -8404,8 +8384,8 @@ int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 #else
-  (void) argc;
-  (void) argv;
+  (void)argc;
+  (void)argv;
   return 0;
 #endif
 }
diff --git a/db/db_dynamic_level_test.cc b/db/db_dynamic_level_test.cc
index 13a160958..17fa67cb2 100644
--- a/db/db_dynamic_level_test.cc
+++ b/db/db_dynamic_level_test.cc
@@ -500,8 +500,8 @@ int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 #else
-  (void) argc;
-  (void) argv;
+  (void)argc;
+  (void)argv;
   return 0;
 #endif
 }
diff --git a/db/db_encryption_test.cc b/db/db_encryption_test.cc
index b7000dd7a..73e89d158 100644
--- a/db/db_encryption_test.cc
+++ b/db/db_encryption_test.cc
@@ -43,7 +43,7 @@ TEST_F(DBEncryptionTest, CheckEncrypted) {
   Env* target = GetTargetEnv();
   int hits = 0;
 
-  for (auto it = fileNames.begin() ; it != fileNames.end(); ++it) {
+  for (auto it = fileNames.begin(); it != fileNames.end(); ++it) {
     if (*it == "LOCK") {
       continue;
     }
@@ -64,24 +64,24 @@ TEST_F(DBEncryptionTest, CheckEncrypted) {
     ASSERT_OK(status);
 
     if (data.ToString().find("foo567") != std::string::npos) {
-      hits++;
-      //std::cout << "Hit in " << filePath << "\n";
+      hits++;
+      // std::cout << "Hit in " << filePath << "\n";
     }
     if (data.ToString().find("v1.fetdq") != std::string::npos) {
-      hits++;
-      //std::cout << "Hit in " << filePath << "\n";
+      hits++;
+      // std::cout << "Hit in " << filePath << "\n";
     }
     if (data.ToString().find("bar123") != std::string::npos) {
-      hits++;
-      //std::cout << "Hit in " << filePath << "\n";
+      hits++;
+      // std::cout << "Hit in " << filePath << "\n";
     }
    if (data.ToString().find("v2.dfgkjdfghsd") != std::string::npos) {
-      hits++;
-      //std::cout << "Hit in " << filePath << "\n";
+      hits++;
+      // std::cout << "Hit in " << filePath << "\n";
     }
     if (data.ToString().find("dfgk") != std::string::npos) {
-      hits++;
-      //std::cout << "Hit in " << filePath << "\n";
+      hits++;
+      // std::cout << "Hit in " << filePath << "\n";
     }
   }
   if (encrypted_env_) {
@@ -119,7 +119,7 @@ TEST_F(DBEncryptionTest, ReadEmptyFile) {
   ASSERT_TRUE(data.empty());
 }
 
-#endif // ROCKSDB_LITE
+#endif  // ROCKSDB_LITE
 
 }  // namespace ROCKSDB_NAMESPACE
 
diff --git a/db/db_filesnapshot.cc b/db/db_filesnapshot.cc
index 515abb728..aa9bd738a 100644
--- a/db/db_filesnapshot.cc
+++ b/db/db_filesnapshot.cc
@@ -65,8 +65,7 @@ Status DBImpl::FlushForGetLiveFiles() {
 }
 
 Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
-                            uint64_t* manifest_file_size,
-                            bool flush_memtable) {
+                            uint64_t* manifest_file_size, bool flush_memtable) {
   *manifest_file_size = 0;
 
   mutex_.Lock();
diff --git a/db/db_flush_test.cc b/db/db_flush_test.cc
index 724bf3246..3b3f7e183 100644
--- a/db/db_flush_test.cc
+++ b/db/db_flush_test.cc
@@ -57,7 +57,7 @@ TEST_F(DBFlushTest, FlushWhileWritingManifest) {
   Reopen(options);
   FlushOptions no_wait;
   no_wait.wait = false;
-  no_wait.allow_write_stall=true;
+  no_wait.allow_write_stall = true;
 
   SyncPoint::GetInstance()->LoadDependency(
       {{"VersionSet::LogAndApply:WriteManifest",
@@ -1822,8 +1822,8 @@ TEST_F(DBFlushTest, ManualFlushFailsInReadOnlyMode) {
   ASSERT_NOK(dbfull()->TEST_WaitForFlushMemTable());
 #ifndef ROCKSDB_LITE
   uint64_t num_bg_errors;
-  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBackgroundErrors,
-                                  &num_bg_errors));
+  ASSERT_TRUE(
+      db_->GetIntProperty(DB::Properties::kBackgroundErrors, &num_bg_errors));
   ASSERT_GT(num_bg_errors, 0);
 #endif  // ROCKSDB_LITE
 
diff --git a/db/db_info_dumper.cc b/db/db_info_dumper.cc
index df17a5c96..be8d5bee1 100644
--- a/db/db_info_dumper.cc
+++ b/db/db_info_dumper.cc
@@ -6,6 +6,7 @@
 #include "db/db_info_dumper.h"
 
 #include <stdio.h>
+
 #include <algorithm>
 #include <string>
 #include <vector>
diff --git a/db/db_iter_stress_test.cc b/db/db_iter_stress_test.cc
index ca1a1fd95..872f7e6bd 100644
--- a/db/db_iter_stress_test.cc
+++ b/db/db_iter_stress_test.cc
@@ -392,7 +392,7 @@ struct ReferenceIterator {
   }
 };
 
-}  // namespace
+}  // anonymous namespace
 
 // Use an internal iterator that sometimes returns errors and sometimes
 // adds/removes entries on the fly. Do random operations on a DBIter and
@@ -482,12 +482,11 @@ TEST_F(DBIteratorStressTest, StressTest) {
       std::cout << "entries:";
       for (size_t i = 0; i < data.entries.size(); ++i) {
         Entry& e = data.entries[i];
-        std::cout
-            << "\n  idx " << i << ": \"" << e.key << "\": \""
-            << e.value << "\" seq: " << e.sequence << " type: "
-            << (e.type == kTypeValue
-                    ? "val"
-                    : e.type == kTypeDeletion ? "del" : "merge");
+        std::cout << "\n  idx " << i << ": \"" << e.key << "\": \""
+                  << e.value << "\" seq: " << e.sequence << " type: "
+                  << (e.type == kTypeValue      ? "val"
+                      : e.type == kTypeDeletion ? "del"
+                                                : "merge");
       }
       std::cout << std::endl;
     }
diff --git a/db/db_iter_test.cc b/db/db_iter_test.cc
index 545d48a1f..65290bfad 100644
--- a/db/db_iter_test.cc
+++ b/db/db_iter_test.cc
@@ -3,12 +3,13 @@
 //  COPYING file in the root directory) and Apache 2.0 License
 //  (found in the LICENSE.Apache file in the root directory).
 
-#include <string>
-#include <vector>
+#include "db/db_iter.h"
+
 #include <algorithm>
+#include <string>
 #include <utility>
+#include <vector>
 
-#include "db/db_iter.h"
 #include "db/dbformat.h"
 #include "rocksdb/comparator.h"
 #include "rocksdb/options.h"
@@ -82,8 +83,8 @@ class TestIterator : public InternalIterator {
     std::sort(data_.begin(), data_.end(),
               [this](std::pair<std::string, std::string> a,
                      std::pair<std::string, std::string> b) {
-      return (cmp.Compare(a.first, b.first) < 0);
-    });
+                return (cmp.Compare(a.first, b.first) < 0);
+              });
   }
 
   // Removes the key from the set of keys over which this iterator iterates.
@@ -429,7 +430,8 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) {
 
     db_iter->SeekToLast();
     ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(static_cast<int>(get_perf_context()->internal_key_skipped_count), 1);
+    ASSERT_EQ(static_cast<int>(get_perf_context()->internal_key_skipped_count),
+              1);
     ASSERT_EQ(db_iter->key().ToString(), "b");
 
     SetPerfLevel(kDisable);
@@ -557,7 +559,8 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) {
 
     db_iter->SeekToLast();
     ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(static_cast<int>(get_perf_context()->internal_delete_skipped_count), 0);
+    ASSERT_EQ(
+        static_cast<int>(get_perf_context()->internal_delete_skipped_count), 0);
     ASSERT_EQ(db_iter->key().ToString(), "b");
 
     SetPerfLevel(kDisable);
@@ -3013,7 +3016,6 @@ TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace8) {
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
 }
 
-
 TEST_F(DBIteratorTest, SeekPrefixTombstones) {
   ReadOptions ro;
   Options options;
diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc
index 51c81585c..aaf1408b4 100644
--- a/db/db_iterator_test.cc
+++ b/db/db_iterator_test.cc
@@ -236,7 +236,7 @@ namespace {
 std::string MakeLongKey(size_t length, char c) {
   return std::string(length, c);
 }
-}  // namespace
+}  // anonymous namespace
 
 TEST_P(DBIteratorTest, IterLongKeys) {
   ASSERT_OK(Put(MakeLongKey(20, 0), "0"));
@@ -1037,7 +1037,8 @@ TEST_P(DBIteratorTest, DBIteratorBoundTest) {
 
     iter->Next();
     ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(static_cast<int>(get_perf_context()->internal_delete_skipped_count), 2);
+    ASSERT_EQ(
+        static_cast<int>(get_perf_context()->internal_delete_skipped_count), 2);
 
     // now testing with iterate_bound
     Slice prefix("c");
@@ -1060,7 +1061,8 @@ TEST_P(DBIteratorTest, DBIteratorBoundTest) {
     // even though the key is deleted
     // hence internal_delete_skipped_count should be 0
     ASSERT_TRUE(!iter->Valid());
-    ASSERT_EQ(static_cast<int>(get_perf_context()->internal_delete_skipped_count), 0);
+    ASSERT_EQ(
+        static_cast<int>(get_perf_context()->internal_delete_skipped_count), 0);
   }
 }
 
@@ -1536,7 +1538,7 @@ class DBIteratorTestForPinnedData : public DBIteratorTest {
   }
 
   delete iter;
-}
+  }
 };
 
 #if !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
@@ -2180,8 +2182,8 @@ TEST_P(DBIteratorTest, IteratorWithLocalStatistics) {
   ASSERT_EQ(TestGetTickerCount(options, NUMBER_DB_PREV), (uint64_t)total_prev);
   ASSERT_EQ(TestGetTickerCount(options, NUMBER_DB_PREV_FOUND),
             (uint64_t)total_prev_found);
-  ASSERT_EQ(TestGetTickerCount(options, ITER_BYTES_READ), (uint64_t)total_bytes);
-
+  ASSERT_EQ(TestGetTickerCount(options, ITER_BYTES_READ),
            (uint64_t)total_bytes);
 }
 
 TEST_P(DBIteratorTest, ReadAhead) {
@@ -2310,8 +2312,8 @@ TEST_P(DBIteratorTest, DBIteratorSkipRecentDuplicatesTest) {
   EXPECT_EQ(get_perf_context()->internal_merge_count, 0);
   EXPECT_GE(get_perf_context()->internal_recent_skipped_count, 2);
   EXPECT_GE(get_perf_context()->seek_on_memtable_count, 2);
-  EXPECT_EQ(1, options.statistics->getTickerCount(
-                   NUMBER_OF_RESEEKS_IN_ITERATION));
+  EXPECT_EQ(1,
+            options.statistics->getTickerCount(NUMBER_OF_RESEEKS_IN_ITERATION));
 }
 
 TEST_P(DBIteratorTest, Refresh) {
@@ -2592,7 +2594,7 @@ TEST_P(DBIteratorTest, SkipStatistics) {
   }
   ASSERT_EQ(count, 3);
   delete iter;
-  skip_count += 8; // 3 deletes + 3 original keys + 2 lower in sequence
+  skip_count += 8;  // 3 deletes + 3 original keys + 2 lower in sequence
   ASSERT_EQ(skip_count, TestGetTickerCount(options, NUMBER_ITER_SKIP));
 
   iter = NewIterator(ReadOptions());
@@ -2603,7 +2605,7 @@ TEST_P(DBIteratorTest, SkipStatistics) {
   }
   ASSERT_EQ(count, 3);
   delete iter;
-  skip_count += 8;  // Same as above, but in reverse order
+  skip_count += 8;  // Same as above, but in reverse order
   ASSERT_EQ(skip_count, TestGetTickerCount(options, NUMBER_ITER_SKIP));
 
   ASSERT_OK(Put("aa", "1"));
@@ -2621,18 +2623,18 @@ TEST_P(DBIteratorTest, SkipStatistics) {
 
   iter = NewIterator(ro);
   count = 0;
-  for(iter->Seek("aa"); iter->Valid(); iter->Next()) {
+  for (iter->Seek("aa"); iter->Valid(); iter->Next()) {
     ASSERT_OK(iter->status());
     count++;
   }
   ASSERT_EQ(count, 1);
   delete iter;
-  skip_count += 6; // 3 deletes + 3 original keys
+  skip_count += 6;  // 3 deletes + 3 original keys
   ASSERT_EQ(skip_count, TestGetTickerCount(options, NUMBER_ITER_SKIP));
 
   iter = NewIterator(ro);
   count = 0;
-  for(iter->SeekToLast(); iter->Valid(); iter->Prev()) {
+  for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
     ASSERT_OK(iter->status());
     count++;
   }
diff --git a/db/db_log_iter_test.cc b/db/db_log_iter_test.cc
index f0cf215e1..4e982858c 100644
--- a/db/db_log_iter_test.cc
+++ b/db/db_log_iter_test.cc
@@ -55,14 +55,13 @@ SequenceNumber ReadRecords(std::unique_ptr<TransactionLogIterator>& iter,
   return res.sequence;
 }
 
-void ExpectRecords(
-    const int expected_no_records,
-    std::unique_ptr<TransactionLogIterator>& iter) {
+void ExpectRecords(const int expected_no_records,
+                   std::unique_ptr<TransactionLogIterator>& iter) {
   int num_records;
   ReadRecords(iter, num_records);
   ASSERT_EQ(num_records, expected_no_records);
 }
-}  // namespace
+}  // anonymous namespace
 
 TEST_F(DBTestXactLogIterator, TransactionLogIterator) {
   do {
@@ -95,10 +94,9 @@ TEST_F(DBTestXactLogIterator, TransactionLogIterator) {
 TEST_F(DBTestXactLogIterator, TransactionLogIteratorRace) {
   static const int LOG_ITERATOR_RACE_TEST_COUNT = 2;
   static const char* sync_points[LOG_ITERATOR_RACE_TEST_COUNT][4] = {
-      {"WalManager::GetSortedWalFiles:1", "WalManager::PurgeObsoleteFiles:1",
+      {"WalManager::GetSortedWalFiles:1", "WalManager::PurgeObsoleteFiles:1",
        "WalManager::PurgeObsoleteFiles:2", "WalManager::GetSortedWalFiles:2"},
-      {"WalManager::GetSortedWalsOfType:1",
-       "WalManager::PurgeObsoleteFiles:1",
+      {"WalManager::GetSortedWalsOfType:1", "WalManager::PurgeObsoleteFiles:1",
        "WalManager::PurgeObsoleteFiles:2",
        "WalManager::GetSortedWalsOfType:2"}};
   for (int test = 0; test < LOG_ITERATOR_RACE_TEST_COUNT; ++test) {
@@ -300,8 +298,8 @@ int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 #else
-  (void) argc;
-  (void) argv;
+  (void)argc;
+  (void)argv;
   return 0;
 #endif
 }
diff --git a/db/db_logical_block_size_cache_test.cc b/db/db_logical_block_size_cache_test.cc
index ae5cded0e..13c16618e 100644
--- a/db/db_logical_block_size_cache_test.cc
+++ b/db/db_logical_block_size_cache_test.cc
@@ -224,8 +224,8 @@ TEST_F(DBLogicalBlockSizeCacheTest, CreateColumnFamilies) {
   // Now cf_path_0_ in cache_ has been properly decreased and cf_path_0_'s entry
   // is dropped from cache
   ASSERT_EQ(0, cache_->Size());
-  ASSERT_OK(DestroyDB(dbname_, options,
-      {{"cf1", cf_options}, {"cf2", cf_options}}));
+  ASSERT_OK(
+      DestroyDB(dbname_, options, {{"cf1", cf_options}, {"cf2", cf_options}}));
 }
 
 TEST_F(DBLogicalBlockSizeCacheTest, OpenWithColumnFamilies) {
@@ -313,8 +313,8 @@ TEST_F(DBLogicalBlockSizeCacheTest, OpenWithColumnFamilies) {
     delete db;
     ASSERT_EQ(0, cache_->Size());
   }
-  ASSERT_OK(DestroyDB(dbname_, options,
-      {{"cf1", cf_options}, {"cf2", cf_options}}));
+  ASSERT_OK(
+      DestroyDB(dbname_, options, {{"cf1", cf_options}, {"cf2", cf_options}}));
 }
 
 TEST_F(DBLogicalBlockSizeCacheTest, DestroyColumnFamilyHandle) {
diff --git a/db/db_merge_operand_test.cc b/db/db_merge_operand_test.cc
index 1ae5f3287..cbec37138 100644
--- a/db/db_merge_operand_test.cc
+++ b/db/db_merge_operand_test.cc
@@ -39,7 +39,7 @@ class LimitedStringAppendMergeOp : public StringAppendTESTOperator {
  private:
   size_t limit_ = 0;
 };
-}  // namespace
+}  // anonymous namespace
 
 class DBMergeOperandTest : public DBTestBase {
  public:
diff --git a/db/db_merge_operator_test.cc b/db/db_merge_operator_test.cc
index 8e551d08d..7c5505bd1 100644
--- a/db/db_merge_operator_test.cc
+++ b/db/db_merge_operator_test.cc
@@ -84,8 +84,7 @@ TEST_F(DBMergeOperatorTest, LimitMergeOperands) {
   Options options;
   options.create_if_missing = true;
   // Use only the latest two merge operands.
-  options.merge_operator =
-      std::make_shared<LimitedStringAppendMergeOp>(2, ',');
+  options.merge_operator = std::make_shared<LimitedStringAppendMergeOp>(2, ',');
   options.env = env_;
   Reopen(options);
   // All K1 values are in memtable.
@@ -203,7 +202,6 @@ TEST_F(DBMergeOperatorTest, MergeErrorOnIteration) {
   VerifyDBInternal({{"k1", "v1"}, {"k2", "corrupted"}, {"k2", "v2"}});
 }
 
-
 class MergeOperatorPinningTest : public DBMergeOperatorTest,
                                  public testing::WithParamInterface<bool> {
  public:
@@ -471,7 +469,7 @@ TEST_F(DBMergeOperatorTest, TailingIteratorMemtableUnrefedBySomeoneElse) {
       "DBIter::MergeValuesNewToOld:SteppedToNextOperand", [&](void*) {
         EXPECT_FALSE(stepped_to_next_operand);
         stepped_to_next_operand = true;
-        someone_else.reset();  // Unpin SuperVersion A
+        someone_else.reset();  // Unpin SuperVersion A
       });
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
 
diff --git a/db/db_options_test.cc b/db/db_options_test.cc
index 8ec184757..691081db9 100644
--- a/db/db_options_test.cc
+++ b/db/db_options_test.cc
@@ -402,7 +402,7 @@ TEST_F(DBOptionsTest, SetWalBytesPerSync) {
   // Do not flush. If we flush here, SwitchWAL will reuse old WAL file since its
   // empty and will not get the new wal_bytes_per_sync value.
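// [Editor's note] Compact sketch of the dynamic option change exercised in
// SetWalBytesPerSync below: SetDBOptions takes string key/value pairs, and
// 5242880 is simply 5 * 1024 * 1024 (5 MB). The wrapper is illustrative.
#include "rocksdb/db.h"

rocksdb::Status SetWalSync(rocksdb::DB* db) {
  return db->SetDBOptions({{"wal_bytes_per_sync", "5242880"}});
}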
   low_bytes_per_sync = counter;
-  //5242880 = 1024 * 1024 * 5
+  // 5242880 = 1024 * 1024 * 5
   ASSERT_OK(dbfull()->SetDBOptions({{"wal_bytes_per_sync", "5242880"}}));
   ASSERT_EQ(5242880, dbfull()->GetDBOptions().wal_bytes_per_sync);
   counter = 0;
@@ -604,7 +604,7 @@ TEST_F(DBOptionsTest, SetOptionsMayTriggerCompaction) {
 TEST_F(DBOptionsTest, SetBackgroundCompactionThreads) {
   Options options;
   options.create_if_missing = true;
-  options.max_background_compactions = 1;   // default value
+  options.max_background_compactions = 1;  // default value
   options.env = env_;
   Reopen(options);
   ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
@@ -627,7 +627,6 @@ TEST_F(DBOptionsTest, SetBackgroundFlushThreads) {
   ASSERT_EQ(3, dbfull()->TEST_BGFlushesAllowed());
 }
 
-
 TEST_F(DBOptionsTest, SetBackgroundJobs) {
   Options options;
   options.create_if_missing = true;
@@ -691,7 +690,8 @@ TEST_F(DBOptionsTest, SetDelayedWriteRateOption) {
   options.delayed_write_rate = 2 * 1024U * 1024U;
   options.env = env_;
   Reopen(options);
-  ASSERT_EQ(2 * 1024U * 1024U, dbfull()->TEST_write_controler().max_delayed_write_rate());
+  ASSERT_EQ(2 * 1024U * 1024U,
+            dbfull()->TEST_write_controler().max_delayed_write_rate());
 
   ASSERT_OK(dbfull()->SetDBOptions({{"delayed_write_rate", "20000"}}));
   ASSERT_EQ(20000, dbfull()->TEST_write_controler().max_delayed_write_rate());
diff --git a/db/db_properties_test.cc b/db/db_properties_test.cc
index 5d209593c..85cd5c04e 100644
--- a/db/db_properties_test.cc
+++ b/db/db_properties_test.cc
@@ -270,7 +270,8 @@ void GetExpectedTableProperties(
   const int kDeletionCount = kTableCount * kDeletionsPerTable;
   const int kMergeCount = kTableCount * kMergeOperandsPerTable;
   const int kRangeDeletionCount = kTableCount * kRangeDeletionsPerTable;
-  const int kKeyCount = kPutCount + kDeletionCount + kMergeCount + kRangeDeletionCount;
+  const int kKeyCount =
+      kPutCount + kDeletionCount + kMergeCount + kRangeDeletionCount;
   const int kAvgSuccessorSize = kKeySize / 5;
   const int kEncodingSavePerKey = kKeySize / 4;
   expected_tp->raw_key_size = kKeyCount * (kKeySize + 8);
@@ -281,7 +282,8 @@ void GetExpectedTableProperties(
   expected_tp->num_merge_operands = kMergeCount;
   expected_tp->num_range_deletions = kRangeDeletionCount;
   expected_tp->num_data_blocks =
-      kTableCount * (kKeysPerTable * (kKeySize - kEncodingSavePerKey + kValueSize)) /
+      kTableCount *
+          (kKeysPerTable * (kKeySize - kEncodingSavePerKey + kValueSize)) /
       kBlockSize;
   expected_tp->data_size =
       kTableCount * (kKeysPerTable * (kKeySize + 8 + kValueSize));
@@ -1120,7 +1122,8 @@ class CountingUserTblPropCollector : public TablePropertiesCollector {
     std::string encoded;
     PutVarint32(&encoded, count_);
     *properties = UserCollectedProperties{
-        {"CountingUserTblPropCollector", message_}, {"Count", encoded},
+        {"CountingUserTblPropCollector", message_},
+        {"Count", encoded},
     };
     return Status::OK();
   }
@@ -2122,7 +2125,7 @@ std::string PopMetaIndexKey(InternalIterator* meta_iter) {
   }
 }
 
-}  // namespace
+}  // anonymous namespace
 
 TEST_F(DBPropertiesTest, TableMetaIndexKeys) {
   // This is to detect unexpected churn in metaindex block keys. This is more
diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc
index 8c3884863..abd9162bd 100644
--- a/db/db_range_del_test.cc
+++ b/db/db_range_del_test.cc
@@ -238,7 +238,8 @@ TEST_F(DBRangeDelTest, SentinelsOmittedFromOutputFile) {
   const Snapshot* snapshot = db_->GetSnapshot();
 
   // gaps between ranges creates sentinels in our internal representation
-  std::vector<std::pair<std::string, std::string>> range_dels = {{"a", "b"}, {"c", "d"}, {"e", "f"}};
+  std::vector<std::pair<std::string, std::string>> range_dels = {
+      {"a", "b"}, {"c", "d"}, {"e", "f"}};
   for (const auto& range_del : range_dels) {
     ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
                                range_del.first, range_del.second));
@@ -567,8 +568,8 @@ TEST_F(DBRangeDelTest, PutDeleteRangeMergeFlush) {
   std::string val;
   PutFixed64(&val, 1);
   ASSERT_OK(db_->Put(WriteOptions(), "key", val));
-  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                             "key", "key_"));
+  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "key",
+                             "key_"));
   ASSERT_OK(db_->Merge(WriteOptions(), "key", val));
   ASSERT_OK(db_->Flush(FlushOptions()));
 
@@ -1332,7 +1333,7 @@ TEST_F(DBRangeDelTest, UntruncatedTombstoneDoesNotDeleteNewerKey) {
   const int kFileBytes = 1 << 20;
   const int kValueBytes = 1 << 10;
   const int kNumFiles = 4;
-  const int kMaxKey = kNumFiles* kFileBytes / kValueBytes;
+  const int kMaxKey = kNumFiles * kFileBytes / kValueBytes;
   const int kKeysOverwritten = 10;
 
   Options options = CurrentOptions();
@@ -1649,7 +1650,8 @@ TEST_F(DBRangeDelTest, RangeTombstoneWrittenToMinimalSsts) {
     const auto& table_props = name_and_table_props.second;
     // The range tombstone should only be output to the second L1 SST.
     if (name.size() >= l1_metadata[1].name.size() &&
-        name.substr(name.size() - l1_metadata[1].name.size()).compare(l1_metadata[1].name) == 0) {
+        name.substr(name.size() - l1_metadata[1].name.size())
+                .compare(l1_metadata[1].name) == 0) {
       ASSERT_EQ(1, table_props->num_range_deletions);
       ++num_range_deletions;
     } else {
diff --git a/db/db_secondary_test.cc b/db/db_secondary_test.cc
index 930ff468b..20d7534e0 100644
--- a/db/db_secondary_test.cc
+++ b/db/db_secondary_test.cc
@@ -499,7 +499,7 @@ class TraceFileEnv : public EnvWrapper {
  private:
  std::atomic<int> files_closed_{0};
 };
-}  // namespace
+}  // anonymous namespace
 
 TEST_F(DBSecondaryTest, SecondaryCloseFiles) {
   Options options;
diff --git a/db/db_statistics_test.cc b/db/db_statistics_test.cc
index 91ae972cb..4d4655361 100644
--- a/db/db_statistics_test.cc
+++ b/db/db_statistics_test.cc
@@ -70,9 +70,9 @@ TEST_F(DBStatisticsTest, CompressionStatsTest) {
   options.compression = kNoCompression;
   DestroyAndReopen(options);
   uint64_t currentCompressions =
-            options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED);
+      options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED);
   uint64_t currentDecompressions =
-            options.statistics->getTickerCount(NUMBER_BLOCK_DECOMPRESSED);
+      options.statistics->getTickerCount(NUMBER_BLOCK_DECOMPRESSED);
 
   // Check that compressions do not occur when turned off
   for (int i = 0; i < kNumKeysWritten; ++i) {
@@ -80,14 +80,16 @@ TEST_F(DBStatisticsTest, CompressionStatsTest) {
     ASSERT_OK(Put(Key(i), rnd.RandomString(128) + std::string(128, 'a')));
   }
   ASSERT_OK(Flush());
-  ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED)
-            - currentCompressions, 0);
+  ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED) -
+                currentCompressions,
+            0);
 
   for (int i = 0; i < kNumKeysWritten; ++i) {
     auto r = Get(Key(i));
   }
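// [Editor's note] Hedged sketch of the statistics plumbing the compression
// test above relies on: statistics must be attached to Options before open,
// and tickers are read back with getTickerCount. The helper is illustrative.
#include <cstdint>

#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

uint64_t CompressedBlocks(rocksdb::Options* options) {
  if (!options->statistics) {
    options->statistics = rocksdb::CreateDBStatistics();
  }
  // With kNoCompression this counter should stay flat across flushes.
  return options->statistics->getTickerCount(rocksdb::NUMBER_BLOCK_COMPRESSED);
}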
ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_DECOMPRESSED) - - currentDecompressions, 0); + ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_DECOMPRESSED) - + currentDecompressions, + 0); } TEST_F(DBStatisticsTest, MutexWaitStatsDisabledByDefault) { diff --git a/db/db_table_properties_test.cc b/db/db_table_properties_test.cc index eb8c9a603..981a514ad 100644 --- a/db/db_table_properties_test.cc +++ b/db/db_table_properties_test.cc @@ -52,7 +52,7 @@ void VerifyTableProperties(DB* db, uint64_t expected_entries_size) { VerifySstUniqueIds(props); } -} // namespace +} // anonymous namespace class DBTablePropertiesTest : public DBTestBase, public testing::WithParamInterface { @@ -240,7 +240,6 @@ TablePropertiesCollection DBTablePropertiesTest::TestGetPropertiesOfTablesInRange( std::vector ranges, std::size_t* num_properties, std::size_t* num_files) { - // Since we deref zero element in the vector it can not be empty // otherwise we pass an address to some random memory EXPECT_GT(ranges.size(), 0U); @@ -469,12 +468,12 @@ INSTANTIATE_TEST_CASE_P( class DeletionTriggeredCompactionTestListener : public EventListener { public: - void OnCompactionBegin(DB* , const CompactionJobInfo& ci) override { + void OnCompactionBegin(DB*, const CompactionJobInfo& ci) override { ASSERT_EQ(ci.compaction_reason, CompactionReason::kFilesMarkedForCompaction); } - void OnCompactionCompleted(DB* , const CompactionJobInfo& ci) override { + void OnCompactionCompleted(DB*, const CompactionJobInfo& ci) override { ASSERT_EQ(ci.compaction_reason, CompactionReason::kFilesMarkedForCompaction); } @@ -485,13 +484,13 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) { int kWindowSize = 100; int kNumDelsTrigger = 90; std::shared_ptr compact_on_del = - NewCompactOnDeletionCollectorFactory(kWindowSize, kNumDelsTrigger); + NewCompactOnDeletionCollectorFactory(kWindowSize, kNumDelsTrigger); Options opts = CurrentOptions(); opts.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics(); opts.table_properties_collector_factories.emplace_back(compact_on_del); - if(GetParam() == "kCompactionStyleUniversal") { + if (GetParam() == "kCompactionStyleUniversal") { opts.compaction_style = kCompactionStyleUniversal; } Reopen(opts); @@ -502,8 +501,8 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) { ASSERT_OK(Flush()); MoveFilesToLevel(1); - DeletionTriggeredCompactionTestListener *listener = - new DeletionTriggeredCompactionTestListener(); + DeletionTriggeredCompactionTestListener* listener = + new DeletionTriggeredCompactionTestListener(); opts.listeners.emplace_back(listener); Reopen(opts); @@ -524,10 +523,10 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) { // effect kWindowSize = 50; kNumDelsTrigger = 40; - static_cast - (compact_on_del.get())->SetWindowSize(kWindowSize); - static_cast - (compact_on_del.get())->SetDeletionTrigger(kNumDelsTrigger); + static_cast(compact_on_del.get()) + ->SetWindowSize(kWindowSize); + static_cast(compact_on_del.get()) + ->SetDeletionTrigger(kNumDelsTrigger); for (int i = 0; i < kNumKeys; ++i) { if (i >= kNumKeys - kWindowSize && i < kNumKeys - kWindowSize + kNumDelsTrigger) { @@ -543,10 +542,10 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) { // Change the window size to disable delete triggered compaction kWindowSize = 0; - static_cast - (compact_on_del.get())->SetWindowSize(kWindowSize); - static_cast - (compact_on_del.get())->SetDeletionTrigger(kNumDelsTrigger); + static_cast(compact_on_del.get()) + 
->SetWindowSize(kWindowSize); + static_cast(compact_on_del.get()) + ->SetDeletionTrigger(kNumDelsTrigger); for (int i = 0; i < kNumKeys; ++i) { if (i >= kNumKeys - kWindowSize && i < kNumKeys - kWindowSize + kNumDelsTrigger) { @@ -611,13 +610,9 @@ TEST_P(DBTablePropertiesTest, RatioBasedDeletionTriggeredCompactionMarking) { } } -INSTANTIATE_TEST_CASE_P( - DBTablePropertiesTest, - DBTablePropertiesTest, - ::testing::Values( - "kCompactionStyleLevel", - "kCompactionStyleUniversal" - )); +INSTANTIATE_TEST_CASE_P(DBTablePropertiesTest, DBTablePropertiesTest, + ::testing::Values("kCompactionStyleLevel", + "kCompactionStyleUniversal")); } // namespace ROCKSDB_NAMESPACE diff --git a/db/db_tailing_iter_test.cc b/db/db_tailing_iter_test.cc index 16aeee9eb..af3194ac4 100644 --- a/db/db_tailing_iter_test.cc +++ b/db/db_tailing_iter_test.cc @@ -399,7 +399,7 @@ TEST_P(DBTestTailingIterator, TailingIteratorSeekToSame) { // Write rows with keys 00000, 00002, 00004 etc. for (int i = 0; i < NROWS; ++i) { char buf[100]; - snprintf(buf, sizeof(buf), "%05d", 2*i); + snprintf(buf, sizeof(buf), "%05d", 2 * i); std::string key(buf); std::string value("value"); ASSERT_OK(db_->Put(WriteOptions(), key, value)); @@ -539,7 +539,6 @@ TEST_P(DBTestTailingIterator, SeekWithUpperBoundBug) { const Slice upper_bound("cc", 3); read_options.iterate_upper_bound = &upper_bound; - // 1st L0 file ASSERT_OK(db_->Put(WriteOptions(), "aa", "SEEN")); ASSERT_OK(Flush()); @@ -565,7 +564,6 @@ TEST_P(DBTestTailingIterator, SeekToFirstWithUpperBoundBug) { const Slice upper_bound("cc", 3); read_options.iterate_upper_bound = &upper_bound; - // 1st L0 file ASSERT_OK(db_->Put(WriteOptions(), "aa", "SEEN")); ASSERT_OK(Flush()); @@ -599,8 +597,8 @@ int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); #else - (void) argc; - (void) argv; + (void)argc; + (void)argv; return 0; #endif } diff --git a/db/db_test.cc b/db/db_test.cc index f8bd52768..9575248b4 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -1135,7 +1135,7 @@ class DelayFilterFactory : public CompactionFilterFactory { private: DBTestBase* db_test; }; -} // namespace +} // anonymous namespace #ifndef ROCKSDB_LITE @@ -1490,7 +1490,7 @@ bool MinLevelToCompress(CompressionType& type, Options& options, int wbits, } return true; } -} // namespace +} // anonymous namespace TEST_F(DBTest, MinLevelToCompress1) { Options options = CurrentOptions(); @@ -2843,7 +2843,7 @@ static void MTThreadBody(void* arg) { fprintf(stderr, "... 
stopping thread %d after %d ops\n", id, int(counter)); } -} // namespace +} // anonymous namespace class MultiThreadedDBTest : public DBTest, @@ -2929,7 +2929,7 @@ static void GCThreadBody(void* arg) { t->done = true; } -} // namespace +} // anonymous namespace TEST_F(DBTest, GroupCommitTest) { do { @@ -4645,7 +4645,7 @@ void VerifyOperationCount(Env* env, ThreadStatus::OperationType op_type, } ASSERT_EQ(op_count, expected_count); } -} // namespace +} // anonymous namespace TEST_F(DBTest, GetThreadStatus) { Options options; diff --git a/db/db_test2.cc b/db/db_test2.cc index 1afd2322a..dd4742e78 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -669,33 +669,33 @@ TEST_F(DBTest2, TestWriteBufferNoLimitWithCache) { } namespace { - void ValidateKeyExistence(DB* db, const std::vector& keys_must_exist, - const std::vector& keys_must_not_exist) { - // Ensure that expected keys exist - std::vector values; - if (keys_must_exist.size() > 0) { - std::vector status_list = +void ValidateKeyExistence(DB* db, const std::vector& keys_must_exist, + const std::vector& keys_must_not_exist) { + // Ensure that expected keys exist + std::vector values; + if (keys_must_exist.size() > 0) { + std::vector status_list = db->MultiGet(ReadOptions(), keys_must_exist, &values); - for (size_t i = 0; i < keys_must_exist.size(); i++) { - ASSERT_OK(status_list[i]); - } + for (size_t i = 0; i < keys_must_exist.size(); i++) { + ASSERT_OK(status_list[i]); } + } - // Ensure that given keys don't exist - if (keys_must_not_exist.size() > 0) { - std::vector status_list = + // Ensure that given keys don't exist + if (keys_must_not_exist.size() > 0) { + std::vector status_list = db->MultiGet(ReadOptions(), keys_must_not_exist, &values); - for (size_t i = 0; i < keys_must_not_exist.size(); i++) { - ASSERT_TRUE(status_list[i].IsNotFound()); - } + for (size_t i = 0; i < keys_must_not_exist.size(); i++) { + ASSERT_TRUE(status_list[i].IsNotFound()); } } +} -} // namespace +} // anonymous namespace TEST_F(DBTest2, WalFilterTest) { class TestWalFilter : public WalFilter { - private: + private: // Processing option that is requested to be applied at the given index WalFilter::WalProcessingOption wal_processing_option_; // Index at which to apply wal_processing_option_ @@ -705,12 +705,12 @@ TEST_F(DBTest2, WalFilterTest) { // Current record index, incremented with each record encountered. 
size_t current_record_index_; - public: + public: TestWalFilter(WalFilter::WalProcessingOption wal_processing_option, - size_t apply_option_for_record_index) - : wal_processing_option_(wal_processing_option), - apply_option_at_record_index_(apply_option_for_record_index), - current_record_index_(0) {} + size_t apply_option_for_record_index) + : wal_processing_option_(wal_processing_option), + apply_option_at_record_index_(apply_option_for_record_index), + current_record_index_(0) {} WalProcessingOption LogRecord(const WriteBatch& /*batch*/, WriteBatch* /*new_batch*/, @@ -719,8 +719,7 @@ TEST_F(DBTest2, WalFilterTest) { if (current_record_index_ == apply_option_at_record_index_) { option_to_return = wal_processing_option_; - } - else { + } else { option_to_return = WalProcessingOption::kContinueProcessing; } @@ -747,12 +746,12 @@ TEST_F(DBTest2, WalFilterTest) { // Test with all WAL processing options for (int option = 0; - option < static_cast( - WalFilter::WalProcessingOption::kWalProcessingOptionMax); - option++) { + option < static_cast( + WalFilter::WalProcessingOption::kWalProcessingOptionMax); + option++) { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); - CreateAndReopenWithCF({ "pikachu" }, options); + CreateAndReopenWithCF({"pikachu"}, options); // Write given keys in given batches for (size_t i = 0; i < batch_keys.size(); i++) { @@ -764,28 +763,27 @@ TEST_F(DBTest2, WalFilterTest) { } WalFilter::WalProcessingOption wal_processing_option = - static_cast(option); + static_cast(option); // Create a test filter that would apply wal_processing_option at the first // record size_t apply_option_for_record_index = 1; TestWalFilter test_wal_filter(wal_processing_option, - apply_option_for_record_index); + apply_option_for_record_index); // Reopen database with option to use WAL filter options = OptionsForLogIterTest(); options.wal_filter = &test_wal_filter; Status status = - TryReopenWithColumnFamilies({ "default", "pikachu" }, options); + TryReopenWithColumnFamilies({"default", "pikachu"}, options); if (wal_processing_option == - WalFilter::WalProcessingOption::kCorruptedRecord) { + WalFilter::WalProcessingOption::kCorruptedRecord) { ASSERT_NOK(status); // In case of corruption we can turn off paranoid_checks to reopen // databse options.paranoid_checks = false; - ReopenWithColumnFamilies({ "default", "pikachu" }, options); - } - else { + ReopenWithColumnFamilies({"default", "pikachu"}, options); + } else { ASSERT_OK(status); } @@ -794,56 +792,54 @@ TEST_F(DBTest2, WalFilterTest) { std::vector keys_must_exist; std::vector keys_must_not_exist; switch (wal_processing_option) { - case WalFilter::WalProcessingOption::kCorruptedRecord: - case WalFilter::WalProcessingOption::kContinueProcessing: { - fprintf(stderr, "Testing with complete WAL processing\n"); - // we expect all records to be processed - for (size_t i = 0; i < batch_keys.size(); i++) { - for (size_t j = 0; j < batch_keys[i].size(); j++) { - keys_must_exist.push_back(Slice(batch_keys[i][j])); - } - } - break; - } - case WalFilter::WalProcessingOption::kIgnoreCurrentRecord: { - fprintf(stderr, - "Testing with ignoring record %" ROCKSDB_PRIszt " only\n", - apply_option_for_record_index); - // We expect the record with apply_option_for_record_index to be not - // found. 
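Note: for readers following the reopen-with-filter flow above, a WAL filter is installed through Options::wal_filter and is consulted once per log record during the recovery that runs inside DB::Open. A minimal sketch, assuming the filter outlives the open call (names are illustrative):

#include "rocksdb/options.h"
#include "rocksdb/wal_filter.h"

class InspectingFilter : public rocksdb::WalFilter {
 public:
  WalProcessingOption LogRecord(const rocksdb::WriteBatch& /*batch*/,
                                rocksdb::WriteBatch* /*new_batch*/,
                                bool* /*batch_changed*/) const override {
    // Inspect, skip, or stop here; kContinueProcessing keeps the record.
    return WalProcessingOption::kContinueProcessing;
  }
  const char* Name() const override { return "InspectingFilter"; }
};

// Usage:
//   InspectingFilter filter;
//   options.wal_filter = &filter;  // raw pointer: caller keeps ownership
//   ... DB::Open(options, ...) then replays the WAL through the filter ...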
- for (size_t i = 0; i < batch_keys.size(); i++) { - for (size_t j = 0; j < batch_keys[i].size(); j++) { - if (i == apply_option_for_record_index) { - keys_must_not_exist.push_back(Slice(batch_keys[i][j])); - } - else { + case WalFilter::WalProcessingOption::kCorruptedRecord: + case WalFilter::WalProcessingOption::kContinueProcessing: { + fprintf(stderr, "Testing with complete WAL processing\n"); + // we expect all records to be processed + for (size_t i = 0; i < batch_keys.size(); i++) { + for (size_t j = 0; j < batch_keys[i].size(); j++) { keys_must_exist.push_back(Slice(batch_keys[i][j])); } } + break; } - break; - } - case WalFilter::WalProcessingOption::kStopReplay: { - fprintf(stderr, - "Testing with stopping replay from record %" ROCKSDB_PRIszt - "\n", - apply_option_for_record_index); - // We expect records beyond apply_option_for_record_index to be not - // found. - for (size_t i = 0; i < batch_keys.size(); i++) { - for (size_t j = 0; j < batch_keys[i].size(); j++) { - if (i >= apply_option_for_record_index) { - keys_must_not_exist.push_back(Slice(batch_keys[i][j])); + case WalFilter::WalProcessingOption::kIgnoreCurrentRecord: { + fprintf(stderr, + "Testing with ignoring record %" ROCKSDB_PRIszt " only\n", + apply_option_for_record_index); + // We expect the record with apply_option_for_record_index to be not + // found. + for (size_t i = 0; i < batch_keys.size(); i++) { + for (size_t j = 0; j < batch_keys[i].size(); j++) { + if (i == apply_option_for_record_index) { + keys_must_not_exist.push_back(Slice(batch_keys[i][j])); + } else { + keys_must_exist.push_back(Slice(batch_keys[i][j])); + } } - else { - keys_must_exist.push_back(Slice(batch_keys[i][j])); + } + break; + } + case WalFilter::WalProcessingOption::kStopReplay: { + fprintf(stderr, + "Testing with stopping replay from record %" ROCKSDB_PRIszt + "\n", + apply_option_for_record_index); + // We expect records beyond apply_option_for_record_index to be not + // found. 
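Note: the kStopReplay expectation above can be reproduced with a tiny filter that cuts recovery off after N records; since LogRecord() is const, a mutable counter avoids the const_cast seen elsewhere in this file. A sketch (class name and the counter scheme are illustrative):

#include <cstddef>
#include "rocksdb/wal_filter.h"

class StopAfterN : public rocksdb::WalFilter {
 public:
  explicit StopAfterN(size_t n) : n_(n) {}
  WalProcessingOption LogRecord(const rocksdb::WriteBatch& /*batch*/,
                                rocksdb::WriteBatch* /*new_batch*/,
                                bool* /*batch_changed*/) const override {
    // Records [0, n) are replayed; everything after is dropped.
    return idx_++ < n_ ? WalProcessingOption::kContinueProcessing
                       : WalProcessingOption::kStopReplay;
  }
  const char* Name() const override { return "StopAfterN"; }

 private:
  const size_t n_;
  mutable size_t idx_ = 0;  // mutable because LogRecord() is const
};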
+ for (size_t i = 0; i < batch_keys.size(); i++) { + for (size_t j = 0; j < batch_keys[i].size(); j++) { + if (i >= apply_option_for_record_index) { + keys_must_not_exist.push_back(Slice(batch_keys[i][j])); + } else { + keys_must_exist.push_back(Slice(batch_keys[i][j])); + } } } + break; } - break; - } - default: - FAIL(); // unhandled case + default: + FAIL(); // unhandled case } bool checked_after_reopen = false; @@ -861,7 +857,7 @@ TEST_F(DBTest2, WalFilterTest) { //(even if they were skipped) // reopn database with option to use WAL filter options = OptionsForLogIterTest(); - ReopenWithColumnFamilies({ "default", "pikachu" }, options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); checked_after_reopen = true; } @@ -870,7 +866,7 @@ TEST_F(DBTest2, WalFilterTest) { TEST_F(DBTest2, WalFilterTestWithChangeBatch) { class ChangeBatchHandler : public WriteBatch::Handler { - private: + private: // Batch to insert keys in WriteBatch* new_write_batch_; // Number of keys to add in the new batch @@ -878,12 +874,12 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatch) { // Number of keys added to new batch size_t num_keys_added_; - public: + public: ChangeBatchHandler(WriteBatch* new_write_batch, - size_t num_keys_to_add_in_new_batch) - : new_write_batch_(new_write_batch), - num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch), - num_keys_added_(0) {} + size_t num_keys_to_add_in_new_batch) + : new_write_batch_(new_write_batch), + num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch), + num_keys_added_(0) {} void Put(const Slice& key, const Slice& value) override { if (num_keys_added_ < num_keys_to_add_in_new_batch_) { ASSERT_OK(new_write_batch_->Put(key, value)); @@ -893,7 +889,7 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatch) { }; class TestWalFilterWithChangeBatch : public WalFilter { - private: + private: // Index at which to start changing records size_t change_records_from_index_; // Number of keys to add in the new batch @@ -901,12 +897,12 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatch) { // Current record index, incremented with each record encountered. size_t current_record_index_; - public: + public: TestWalFilterWithChangeBatch(size_t change_records_from_index, - size_t num_keys_to_add_in_new_batch) - : change_records_from_index_(change_records_from_index), - num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch), - current_record_index_(0) {} + size_t num_keys_to_add_in_new_batch) + : change_records_from_index_(change_records_from_index), + num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch), + current_record_index_(0) {} WalProcessingOption LogRecord(const WriteBatch& batch, WriteBatch* new_batch, @@ -925,7 +921,7 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatch) { // object, however we modify it for our own purpose here and hence // cast the constness away. 
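Note: ChangeBatchHandler above is an instance of the WriteBatch::Handler visitor pattern: WriteBatch::Iterate() replays every record of a batch into the handler's callbacks. A self-contained sketch of the pattern, counting records instead of copying them:

#include "rocksdb/write_batch.h"

class CountingHandler : public rocksdb::WriteBatch::Handler {
 public:
  int puts = 0;
  void Put(const rocksdb::Slice& /*key*/,
           const rocksdb::Slice& /*value*/) override {
    ++puts;  // invoked once per Put record in the batch
  }
};

void CountPuts() {
  rocksdb::WriteBatch batch;
  batch.Put("a", "1");
  batch.Put("b", "2");
  CountingHandler handler;
  batch.Iterate(&handler);  // handler.puts == 2 afterwards
}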
(const_cast(this) - ->current_record_index_)++; + ->current_record_index_)++; return WalProcessingOption::kContinueProcessing; } @@ -944,7 +940,7 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatch) { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); - CreateAndReopenWithCF({ "pikachu" }, options); + CreateAndReopenWithCF({"pikachu"}, options); // Write given keys in given batches for (size_t i = 0; i < batch_keys.size(); i++) { @@ -960,12 +956,12 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatch) { size_t change_records_from_index = 1; size_t num_keys_to_add_in_new_batch = 1; TestWalFilterWithChangeBatch test_wal_filter_with_change_batch( - change_records_from_index, num_keys_to_add_in_new_batch); + change_records_from_index, num_keys_to_add_in_new_batch); // Reopen database with option to use WAL filter options = OptionsForLogIterTest(); options.wal_filter = &test_wal_filter_with_change_batch; - ReopenWithColumnFamilies({ "default", "pikachu" }, options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); // Ensure that all keys exist before change_records_from_index_ // And after that index only single key exists @@ -977,8 +973,7 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatch) { for (size_t j = 0; j < batch_keys[i].size(); j++) { if (i >= change_records_from_index && j >= num_keys_to_add_in_new_batch) { keys_must_not_exist.push_back(Slice(batch_keys[i][j])); - } - else { + } else { keys_must_exist.push_back(Slice(batch_keys[i][j])); } } @@ -999,7 +994,7 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatch) { //(even if they were skipped) // reopn database with option to use WAL filter options = OptionsForLogIterTest(); - ReopenWithColumnFamilies({ "default", "pikachu" }, options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); checked_after_reopen = true; } @@ -1007,22 +1002,23 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatch) { TEST_F(DBTest2, WalFilterTestWithChangeBatchExtraKeys) { class TestWalFilterWithChangeBatchAddExtraKeys : public WalFilter { - public: - WalProcessingOption LogRecord(const WriteBatch& batch, WriteBatch* new_batch, - bool* batch_changed) const override { - *new_batch = batch; - Status s = new_batch->Put("key_extra", "value_extra"); - if (s.ok()) { - *batch_changed = true; - } else { - assert(false); - } - return WalProcessingOption::kContinueProcessing; - } - - const char* Name() const override { - return "WalFilterTestWithChangeBatchExtraKeys"; - } + public: + WalProcessingOption LogRecord(const WriteBatch& batch, + WriteBatch* new_batch, + bool* batch_changed) const override { + *new_batch = batch; + Status s = new_batch->Put("key_extra", "value_extra"); + if (s.ok()) { + *batch_changed = true; + } else { + assert(false); + } + return WalProcessingOption::kContinueProcessing; + } + + const char* Name() const override { + return "WalFilterTestWithChangeBatchExtraKeys"; + } }; std::vector> batch_keys(3); @@ -1036,7 +1032,7 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatchExtraKeys) { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); - CreateAndReopenWithCF({ "pikachu" }, options); + CreateAndReopenWithCF({"pikachu"}, options); // Write given keys in given batches for (size_t i = 0; i < batch_keys.size(); i++) { @@ -1059,7 +1055,7 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatchExtraKeys) { // Reopen without filter, now reopen should succeed - previous // attempt to open must not have altered the db. 
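Note: the TestWalFilterWithChangeBatchAddExtraKeys filter above shows the other half of the LogRecord contract: a filter may hand back a replacement batch, provided it sets *batch_changed. A distilled sketch with illustrative names, mirroring the test's logic:

#include "rocksdb/wal_filter.h"
#include "rocksdb/write_batch.h"

class AddMarkerFilter : public rocksdb::WalFilter {
 public:
  WalProcessingOption LogRecord(const rocksdb::WriteBatch& batch,
                                rocksdb::WriteBatch* new_batch,
                                bool* batch_changed) const override {
    *new_batch = batch;  // start from the original records
    rocksdb::Status s = new_batch->Put("key_extra", "value_extra");
    if (s.ok()) {
      *batch_changed = true;  // recovery applies new_batch instead of batch
    }
    return WalProcessingOption::kContinueProcessing;
  }
  const char* Name() const override { return "AddMarkerFilter"; }
};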
options = OptionsForLogIterTest(); - ReopenWithColumnFamilies({ "default", "pikachu" }, options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); std::vector keys_must_exist; std::vector keys_must_not_exist; // empty vector @@ -1075,7 +1071,7 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatchExtraKeys) { TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { class TestWalFilterWithColumnFamilies : public WalFilter { - private: + private: // column_family_id -> log_number map (provided to WALFilter) std::map cf_log_number_map_; // column_family_name -> column_family_id map (provided to WALFilter) @@ -1085,31 +1081,34 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { // during recovery (i.e. aren't already flushed to SST file(s)) // for verification against the keys we expect. std::map> cf_wal_keys_; - public: - void ColumnFamilyLogNumberMap( - const std::map& cf_lognumber_map, - const std::map& cf_name_id_map) override { - cf_log_number_map_ = cf_lognumber_map; - cf_name_id_map_ = cf_name_id_map; - } - - WalProcessingOption LogRecordFound(unsigned long long log_number, - const std::string& /*log_file_name*/, - const WriteBatch& batch, - WriteBatch* /*new_batch*/, - bool* /*batch_changed*/) override { - class LogRecordBatchHandler : public WriteBatch::Handler { - private: - const std::map & cf_log_number_map_; - std::map> & cf_wal_keys_; + + public: + void ColumnFamilyLogNumberMap( + const std::map& cf_lognumber_map, + const std::map& cf_name_id_map) override { + cf_log_number_map_ = cf_lognumber_map; + cf_name_id_map_ = cf_name_id_map; + } + + WalProcessingOption LogRecordFound(unsigned long long log_number, + const std::string& /*log_file_name*/, + const WriteBatch& batch, + WriteBatch* /*new_batch*/, + bool* /*batch_changed*/) override { + class LogRecordBatchHandler : public WriteBatch::Handler { + private: + const std::map& cf_log_number_map_; + std::map>& cf_wal_keys_; unsigned long long log_number_; - public: - LogRecordBatchHandler(unsigned long long current_log_number, - const std::map & cf_log_number_map, - std::map> & cf_wal_keys) : - cf_log_number_map_(cf_log_number_map), - cf_wal_keys_(cf_wal_keys), - log_number_(current_log_number){} + + public: + LogRecordBatchHandler( + unsigned long long current_log_number, + const std::map& cf_log_number_map, + std::map>& cf_wal_keys) + : cf_log_number_map_(cf_log_number_map), + cf_wal_keys_(cf_wal_keys), + log_number_(current_log_number) {} Status PutCF(uint32_t column_family_id, const Slice& key, const Slice& /*value*/) override { @@ -1120,8 +1119,8 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { // (i.e. isn't flushed to SST file(s) for column_family_id) // add it to the cf_wal_keys_ map for verification. 
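Note: the per-CF staleness rule this handler applies distils to: a WAL record matters for a column family only if its log number is at least the CF's recorded log number, since older records are already durable in SST files. A standalone sketch of such a handler (hypothetical class; the maps match what ColumnFamilyLogNumberMap provides):

#include <cstdint>
#include <map>
#include <string>
#include <vector>
#include "rocksdb/write_batch.h"

class UnflushedKeyCollector : public rocksdb::WriteBatch::Handler {
 public:
  UnflushedKeyCollector(unsigned long long record_log_number,
                        const std::map<uint32_t, uint64_t>& cf_log_numbers,
                        std::map<uint32_t, std::vector<std::string>>* out)
      : log_number_(record_log_number),
        cf_log_numbers_(cf_log_numbers),
        out_(out) {}

  rocksdb::Status PutCF(uint32_t cf_id, const rocksdb::Slice& key,
                        const rocksdb::Slice& /*value*/) override {
    auto it = cf_log_numbers_.find(cf_id);
    if (it == cf_log_numbers_.end() || log_number_ >= it->second) {
      (*out_)[cf_id].emplace_back(key.data(), key.size());  // not yet flushed
    }
    return rocksdb::Status::OK();
  }

 private:
  unsigned long long log_number_;
  const std::map<uint32_t, uint64_t>& cf_log_numbers_;
  std::map<uint32_t, std::vector<std::string>>* out_;
};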
if (log_number_ >= log_number_for_cf) { - cf_wal_keys_[column_family_id].push_back(std::string(key.data(), - key.size())); + cf_wal_keys_[column_family_id].push_back( + std::string(key.data(), key.size())); } return Status::OK(); } @@ -1134,17 +1133,17 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { } return WalProcessingOption::kContinueProcessing; - } + } - const char* Name() const override { - return "WalFilterTestWithColumnFamilies"; - } + const char* Name() const override { + return "WalFilterTestWithColumnFamilies"; + } const std::map>& GetColumnFamilyKeys() { return cf_wal_keys_; } - const std::map & GetColumnFamilyNameIdMap() { + const std::map& GetColumnFamilyNameIdMap() { return cf_name_id_map_; } }; @@ -1160,7 +1159,7 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); - CreateAndReopenWithCF({ "pikachu" }, options); + CreateAndReopenWithCF({"pikachu"}, options); // Write given keys in given batches for (size_t i = 0; i < batch_keys_pre_flush.size(); i++) { @@ -1174,7 +1173,7 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { ASSERT_OK(dbfull()->Write(WriteOptions(), &batch)); } - //Flush default column-family + // Flush default column-family ASSERT_OK(db_->Flush(FlushOptions(), handles_[0])); // Do some more writes @@ -1208,8 +1207,7 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { // Reopen database with option to use WAL filter options = OptionsForLogIterTest(); options.wal_filter = &test_wal_filter_column_families; - Status status = - TryReopenWithColumnFamilies({ "default", "pikachu" }, options); + Status status = TryReopenWithColumnFamilies({"default", "pikachu"}, options); ASSERT_TRUE(status.ok()); // verify that handles_[0] only has post_flush keys @@ -1218,7 +1216,7 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { auto name_id_map = test_wal_filter_column_families.GetColumnFamilyNameIdMap(); size_t index = 0; auto keys_cf = cf_wal_keys[name_id_map[kDefaultColumnFamilyName]]; - //default column-family, only post_flush keys are expected + // default column-family, only post_flush keys are expected for (size_t i = 0; i < batch_keys_post_flush.size(); i++) { for (size_t j = 0; j < batch_keys_post_flush[i].size(); j++) { Slice key_from_the_log(keys_cf[index++]); @@ -1230,7 +1228,7 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { index = 0; keys_cf = cf_wal_keys[name_id_map["pikachu"]]; - //pikachu column-family, all keys are expected + // pikachu column-family, all keys are expected for (size_t i = 0; i < batch_keys_pre_flush.size(); i++) { for (size_t j = 0; j < batch_keys_pre_flush[i].size(); j++) { Slice key_from_the_log(keys_cf[index++]); @@ -1280,7 +1278,7 @@ TEST_F(DBTest2, PresetCompressionDict) { #if LZ4_VERSION_NUMBER >= 10400 // r124+ compression_types.push_back(kLZ4Compression); compression_types.push_back(kLZ4HCCompression); -#endif // LZ4_VERSION_NUMBER >= 10400 +#endif // LZ4_VERSION_NUMBER >= 10400 if (ZSTD_Supported()) { compression_types.push_back(kZSTD); } @@ -1960,7 +1958,8 @@ TEST_F(DBTest2, CompressionOptions) { class CompactionStallTestListener : public EventListener { public: - CompactionStallTestListener() : compacting_files_cnt_(0), compacted_files_cnt_(0) {} + CompactionStallTestListener() + : compacting_files_cnt_(0), compacted_files_cnt_(0) {} void OnCompactionBegin(DB* /*db*/, const CompactionJobInfo& ci) override { ASSERT_EQ(ci.cf_name, "default"); @@ -2039,7 +2038,8 @@ TEST_F(DBTest2, CompactionStall) { options.level0_file_num_compaction_trigger); 
ASSERT_GT(listener->compacted_files_cnt_.load(), 10 - options.level0_file_num_compaction_trigger); - ASSERT_EQ(listener->compacting_files_cnt_.load(), listener->compacted_files_cnt_.load()); + ASSERT_EQ(listener->compacting_files_cnt_.load(), + listener->compacted_files_cnt_.load()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); } @@ -2664,7 +2664,7 @@ namespace { void CountSyncPoint() { TEST_SYNC_POINT_CALLBACK("DBTest2::MarkedPoint", nullptr /* arg */); } -} // namespace +} // anonymous namespace TEST_F(DBTest2, SyncPointMarker) { std::atomic sync_point_called(0); @@ -2797,7 +2797,7 @@ TEST_F(DBTest2, ReadAmpBitmap) { } } -#ifndef OS_SOLARIS // GetUniqueIdFromFile is not implemented +#ifndef OS_SOLARIS // GetUniqueIdFromFile is not implemented TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) { { const int kIdBufLen = 100; @@ -2899,7 +2899,6 @@ TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) { size_t total_loaded_bytes_iter2 = options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES); - // Read amp is on average 100% since we read all what we loaded in memory if (k == 0) { ASSERT_EQ(total_useful_bytes_iter1 + total_useful_bytes_iter2, @@ -2911,7 +2910,7 @@ TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) { } } } -#endif // !OS_SOLARIS +#endif // !OS_SOLARIS #ifndef ROCKSDB_LITE TEST_F(DBTest2, AutomaticCompactionOverlapManualCompaction) { @@ -5192,7 +5191,7 @@ TEST_F(DBTest2, TraceWithFilter) { ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions())); handles.clear(); - DB* db3 = nullptr; + DB* db3 = nullptr; ASSERT_OK(DB::Open(db_opts, dbname3, column_families, &handles, &db3)); env_->SleepForMicroseconds(100); @@ -5200,12 +5199,12 @@ TEST_F(DBTest2, TraceWithFilter) { ASSERT_TRUE(db3->Get(ro, handles[0], "a", &value).IsNotFound()); ASSERT_TRUE(db3->Get(ro, handles[0], "g", &value).IsNotFound()); - //The tracer will not record the READ ops. + // The tracer will not record the READ ops. 
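Note: the filtered-tracing flow driven in the next lines can be summarized as: pick a filter mask, attach a file-backed TraceWriter, and bracket the workload with StartTrace/EndTrace. A sketch with an illustrative trace path:

#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/trace_reader_writer.h"

void TraceWritesOnly(rocksdb::DB* db, rocksdb::Env* env) {
  rocksdb::TraceOptions trace_opts;
  trace_opts.filter = rocksdb::TraceFilterType::kTraceFilterGet;  // drop Gets
  std::unique_ptr<rocksdb::TraceWriter> writer;
  rocksdb::NewFileTraceWriter(env, rocksdb::EnvOptions(),
                              "/tmp/rocksdb.trace", &writer);
  db->StartTrace(trace_opts, std::move(writer));
  // ... workload: Get()s are filtered out, Put()s are recorded ...
  db->EndTrace();
}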
trace_opts.filter = TraceFilterType::kTraceFilterGet; std::string trace_filename3 = dbname_ + "/rocksdb.trace_3"; std::unique_ptr trace_writer3; ASSERT_OK( - NewFileTraceWriter(env_, env_opts, trace_filename3, &trace_writer3)); + NewFileTraceWriter(env_, env_opts, trace_filename3, &trace_writer3)); ASSERT_OK(db3->StartTrace(trace_opts, std::move(trace_writer3))); ASSERT_OK(db3->Put(wo, handles[0], "a", "1")); @@ -5227,7 +5226,7 @@ TEST_F(DBTest2, TraceWithFilter) { std::unique_ptr trace_reader3; ASSERT_OK( - NewFileTraceReader(env_, env_opts, trace_filename3, &trace_reader3)); + NewFileTraceReader(env_, env_opts, trace_filename3, &trace_reader3)); // Count the number of records in the trace file; int count = 0; @@ -5503,16 +5502,20 @@ TEST_F(DBTest2, TestGetColumnFamilyHandleUnlocked) { port::Thread user_thread1([&]() { auto cfh = dbi->GetColumnFamilyHandleUnlocked(handles_[0]->GetID()); ASSERT_EQ(cfh->GetID(), handles_[0]->GetID()); - TEST_SYNC_POINT("TestGetColumnFamilyHandleUnlocked::GetColumnFamilyHandleUnlocked1"); - TEST_SYNC_POINT("TestGetColumnFamilyHandleUnlocked::ReadColumnFamilyHandle1"); + TEST_SYNC_POINT( + "TestGetColumnFamilyHandleUnlocked::GetColumnFamilyHandleUnlocked1"); + TEST_SYNC_POINT( + "TestGetColumnFamilyHandleUnlocked::ReadColumnFamilyHandle1"); ASSERT_EQ(cfh->GetID(), handles_[0]->GetID()); }); port::Thread user_thread2([&]() { - TEST_SYNC_POINT("TestGetColumnFamilyHandleUnlocked::PreGetColumnFamilyHandleUnlocked2"); + TEST_SYNC_POINT( + "TestGetColumnFamilyHandleUnlocked::PreGetColumnFamilyHandleUnlocked2"); auto cfh = dbi->GetColumnFamilyHandleUnlocked(handles_[1]->GetID()); ASSERT_EQ(cfh->GetID(), handles_[1]->GetID()); - TEST_SYNC_POINT("TestGetColumnFamilyHandleUnlocked::GetColumnFamilyHandleUnlocked2"); + TEST_SYNC_POINT( + "TestGetColumnFamilyHandleUnlocked::GetColumnFamilyHandleUnlocked2"); ASSERT_EQ(cfh->GetID(), handles_[1]->GetID()); }); @@ -5666,7 +5669,7 @@ class DummyOldStats : public Statistics { std::atomic num_rt{0}; std::atomic num_mt{0}; }; -} // namespace +} // anonymous namespace TEST_F(DBTest2, OldStatsInterface) { DummyOldStats* dos = new DummyOldStats(); diff --git a/db/db_test_util.cc b/db/db_test_util.cc index a6e36ad4c..d53bca51a 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -29,7 +29,7 @@ int64_t MaybeCurrentTime(Env* env) { env->GetCurrentTime(&time).PermitUncheckedError(); return time; } -} // namespace +} // anonymous namespace // Special Env used to delay background operations @@ -125,54 +125,54 @@ DBTestBase::~DBTestBase() { bool DBTestBase::ShouldSkipOptions(int option_config, int skip_mask) { #ifdef ROCKSDB_LITE - // These options are not supported in ROCKSDB_LITE - if (option_config == kHashSkipList || - option_config == kPlainTableFirstBytePrefix || - option_config == kPlainTableCappedPrefix || - option_config == kPlainTableCappedPrefixNonMmap || - option_config == kPlainTableAllBytesPrefix || - option_config == kVectorRep || option_config == kHashLinkList || - option_config == kUniversalCompaction || - option_config == kUniversalCompactionMultiLevel || - option_config == kUniversalSubcompactions || - option_config == kFIFOCompaction || - option_config == kConcurrentSkipList) { - return true; - } + // These options are not supported in ROCKSDB_LITE + if (option_config == kHashSkipList || + option_config == kPlainTableFirstBytePrefix || + option_config == kPlainTableCappedPrefix || + option_config == kPlainTableCappedPrefixNonMmap || + option_config == kPlainTableAllBytesPrefix || + option_config == kVectorRep || 
option_config == kHashLinkList || + option_config == kUniversalCompaction || + option_config == kUniversalCompactionMultiLevel || + option_config == kUniversalSubcompactions || + option_config == kFIFOCompaction || + option_config == kConcurrentSkipList) { + return true; + } #endif - if ((skip_mask & kSkipUniversalCompaction) && - (option_config == kUniversalCompaction || - option_config == kUniversalCompactionMultiLevel || - option_config == kUniversalSubcompactions)) { - return true; - } - if ((skip_mask & kSkipMergePut) && option_config == kMergePut) { - return true; - } - if ((skip_mask & kSkipNoSeekToLast) && - (option_config == kHashLinkList || option_config == kHashSkipList)) { - return true; - } - if ((skip_mask & kSkipPlainTable) && - (option_config == kPlainTableAllBytesPrefix || - option_config == kPlainTableFirstBytePrefix || - option_config == kPlainTableCappedPrefix || - option_config == kPlainTableCappedPrefixNonMmap)) { - return true; - } - if ((skip_mask & kSkipHashIndex) && - (option_config == kBlockBasedTableWithPrefixHashIndex || - option_config == kBlockBasedTableWithWholeKeyHashIndex)) { - return true; - } - if ((skip_mask & kSkipFIFOCompaction) && option_config == kFIFOCompaction) { - return true; - } - if ((skip_mask & kSkipMmapReads) && option_config == kWalDirAndMmapReads) { - return true; - } - return false; + if ((skip_mask & kSkipUniversalCompaction) && + (option_config == kUniversalCompaction || + option_config == kUniversalCompactionMultiLevel || + option_config == kUniversalSubcompactions)) { + return true; + } + if ((skip_mask & kSkipMergePut) && option_config == kMergePut) { + return true; + } + if ((skip_mask & kSkipNoSeekToLast) && + (option_config == kHashLinkList || option_config == kHashSkipList)) { + return true; + } + if ((skip_mask & kSkipPlainTable) && + (option_config == kPlainTableAllBytesPrefix || + option_config == kPlainTableFirstBytePrefix || + option_config == kPlainTableCappedPrefix || + option_config == kPlainTableCappedPrefixNonMmap)) { + return true; + } + if ((skip_mask & kSkipHashIndex) && + (option_config == kBlockBasedTableWithPrefixHashIndex || + option_config == kBlockBasedTableWithWholeKeyHashIndex)) { + return true; + } + if ((skip_mask & kSkipFIFOCompaction) && option_config == kFIFOCompaction) { + return true; + } + if ((skip_mask & kSkipMmapReads) && option_config == kWalDirAndMmapReads) { + return true; + } + return false; } // Switch to a fresh database with the next option configuration to @@ -424,13 +424,13 @@ Options DBTestBase::GetOptions( options.allow_concurrent_memtable_write = false; options.unordered_write = false; break; - case kDirectIO: { - options.use_direct_reads = true; - options.use_direct_io_for_flush_and_compaction = true; - options.compaction_readahead_size = 2 * 1024 * 1024; - SetupSyncPointsToMockDirectIO(); - break; - } + case kDirectIO: { + options.use_direct_reads = true; + options.use_direct_io_for_flush_and_compaction = true; + options.compaction_readahead_size = 2 * 1024 * 1024; + SetupSyncPointsToMockDirectIO(); + break; + } #endif // ROCKSDB_LITE case kMergePut: options.merge_operator = MergeOperators::CreatePutOperator(); @@ -1308,12 +1308,14 @@ void DBTestBase::GetSstFiles(Env* env, std::string path, std::vector* files) { EXPECT_OK(env->GetChildren(path, files)); - files->erase( - std::remove_if(files->begin(), files->end(), [](std::string name) { - uint64_t number; - FileType type; - return !(ParseFileName(name, &number, &type) && type == kTableFile); - }), files->end()); + 
files->erase(std::remove_if(files->begin(), files->end(), + [](std::string name) { + uint64_t number; + FileType type; + return !(ParseFileName(name, &number, &type) && + type == kTableFile); + }), + files->end()); } int DBTestBase::GetSstFileCount(std::string path) { @@ -1583,8 +1585,8 @@ void DBTestBase::VerifyDBFromMap(std::map true_data, iter_cnt++; total_reads++; } - ASSERT_EQ(data_iter, true_data.end()) << iter_cnt << " / " - << true_data.size(); + ASSERT_EQ(data_iter, true_data.end()) + << iter_cnt << " / " << true_data.size(); delete iter; // Verify Iterator::Prev() @@ -1606,8 +1608,8 @@ void DBTestBase::VerifyDBFromMap(std::map true_data, iter_cnt++; total_reads++; } - ASSERT_EQ(data_rev, true_data.rend()) << iter_cnt << " / " - << true_data.size(); + ASSERT_EQ(data_rev, true_data.rend()) + << iter_cnt << " / " << true_data.size(); // Verify Iterator::Seek() for (auto kv : true_data) { @@ -1637,8 +1639,8 @@ void DBTestBase::VerifyDBFromMap(std::map true_data, iter_cnt++; total_reads++; } - ASSERT_EQ(data_iter, true_data.end()) << iter_cnt << " / " - << true_data.size(); + ASSERT_EQ(data_iter, true_data.end()) + << iter_cnt << " / " << true_data.size(); // Verify ForwardIterator::Seek() for (auto kv : true_data) { diff --git a/db/db_test_util.h b/db/db_test_util.h index de3541a3c..c6f740c92 100644 --- a/db/db_test_util.h +++ b/db/db_test_util.h @@ -220,9 +220,7 @@ class SpecialEnv : public EnvWrapper { Env::IOPriority GetIOPriority() override { return base_->GetIOPriority(); } - bool use_direct_io() const override { - return base_->use_direct_io(); - } + bool use_direct_io() const override { return base_->use_direct_io(); } Status Allocate(uint64_t offset, uint64_t len) override { return base_->Allocate(offset, len); } diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc index 9b63e6e16..f53c36f22 100644 --- a/db/db_universal_compaction_test.cc +++ b/db/db_universal_compaction_test.cc @@ -39,8 +39,8 @@ class DBTestUniversalCompactionBase class DBTestUniversalCompaction : public DBTestUniversalCompactionBase { public: - DBTestUniversalCompaction() : - DBTestUniversalCompactionBase("/db_universal_compaction_test") {} + DBTestUniversalCompaction() + : DBTestUniversalCompactionBase("/db_universal_compaction_test") {} }; class DBTestUniversalCompaction2 : public DBTestBase { @@ -93,7 +93,7 @@ class KeepFilterFactory : public CompactionFilterFactory { std::atomic_bool expect_full_compaction_; std::atomic_bool expect_manual_compaction_; }; -} // namespace +} // anonymous namespace // Make sure we don't trigger a problem if the trigger condtion is given // to be 0, which is invalid. @@ -563,8 +563,7 @@ TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) { } if (compaction_input_file_names.size() == 0) { - compaction_input_file_names.push_back( - cf_meta.levels[0].files[0].name); + compaction_input_file_names.push_back(cf_meta.levels[0].files[0].name); } // expect fail since universal compaction only allow L0 output @@ -574,28 +573,23 @@ TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) { .ok()); // expect ok and verify the compacted files no longer exist. 
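Note: the manual-compaction API exercised here takes explicit input file names from the CF metadata; under universal compaction the output level must be 0. A sketch of the whole round trip, assuming an open DB and a ColumnFamilyHandle* cf:

#include <string>
#include <vector>
#include "rocksdb/db.h"

void CompactAllL0(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf) {
  rocksdb::ColumnFamilyMetaData meta;
  db->GetColumnFamilyMetaData(cf, &meta);
  std::vector<std::string> inputs;
  for (const auto& f : meta.levels[0].files) {
    inputs.push_back(f.name);  // e.g. "/000123.sst"
  }
  // Universal compaction only allows L0 output, hence output_level = 0.
  rocksdb::Status s = db->CompactFiles(rocksdb::CompactionOptions(), cf,
                                       inputs, /*output_level=*/0);
}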
- ASSERT_OK(dbfull()->CompactFiles( - CompactionOptions(), handles_[1], - compaction_input_file_names, 0)); + ASSERT_OK(dbfull()->CompactFiles(CompactionOptions(), handles_[1], + compaction_input_file_names, 0)); dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta); VerifyCompactionResult( - cf_meta, - std::set(compaction_input_file_names.begin(), - compaction_input_file_names.end())); + cf_meta, std::set(compaction_input_file_names.begin(), + compaction_input_file_names.end())); compaction_input_file_names.clear(); // Pick the first and the last file, expect everything is // compacted into one single file. + compaction_input_file_names.push_back(cf_meta.levels[0].files[0].name); compaction_input_file_names.push_back( - cf_meta.levels[0].files[0].name); - compaction_input_file_names.push_back( - cf_meta.levels[0].files[ - cf_meta.levels[0].files.size() - 1].name); - ASSERT_OK(dbfull()->CompactFiles( - CompactionOptions(), handles_[1], - compaction_input_file_names, 0)); + cf_meta.levels[0].files[cf_meta.levels[0].files.size() - 1].name); + ASSERT_OK(dbfull()->CompactFiles(CompactionOptions(), handles_[1], + compaction_input_file_names, 0)); dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta); ASSERT_EQ(cf_meta.levels[0].files.size(), 1U); @@ -604,7 +598,7 @@ TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) { TEST_P(DBTestUniversalCompaction, UniversalCompactionTargetLevel) { Options options = CurrentOptions(); options.compaction_style = kCompactionStyleUniversal; - options.write_buffer_size = 100 << 10; // 100KB + options.write_buffer_size = 100 << 10; // 100KB options.num_levels = 7; options.disable_auto_compactions = true; DestroyAndReopen(options); @@ -640,9 +634,9 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTargetLevel) { class DBTestUniversalCompactionMultiLevels : public DBTestUniversalCompactionBase { public: - DBTestUniversalCompactionMultiLevels() : - DBTestUniversalCompactionBase( - "/db_universal_compaction_multi_levels_test") {} + DBTestUniversalCompactionMultiLevels() + : DBTestUniversalCompactionBase( + "/db_universal_compaction_multi_levels_test") {} }; TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionMultiLevels) { @@ -725,12 +719,11 @@ INSTANTIATE_TEST_CASE_P(MultiLevels, DBTestUniversalCompactionMultiLevels, ::testing::Combine(::testing::Values(3, 20), ::testing::Bool())); -class DBTestUniversalCompactionParallel : - public DBTestUniversalCompactionBase { +class DBTestUniversalCompactionParallel : public DBTestUniversalCompactionBase { public: - DBTestUniversalCompactionParallel() : - DBTestUniversalCompactionBase( - "/db_universal_compaction_prallel_test") {} + DBTestUniversalCompactionParallel() + : DBTestUniversalCompactionBase("/db_universal_compaction_prallel_test") { + } }; TEST_P(DBTestUniversalCompactionParallel, UniversalCompactionParallel) { @@ -919,8 +912,8 @@ INSTANTIATE_TEST_CASE_P(Parallel, DBTestUniversalCompactionParallel, TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) { Options options = CurrentOptions(); options.compaction_style = kCompactionStyleUniversal; - options.write_buffer_size = 105 << 10; // 105KB - options.arena_block_size = 4 << 10; // 4KB + options.write_buffer_size = 105 << 10; // 105KB + options.arena_block_size = 4 << 10; // 4KB options.target_file_size_base = 32 << 10; // 32KB options.level0_file_num_compaction_trigger = 4; options.num_levels = num_levels_; @@ -951,8 +944,8 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) { 
TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) { Options options = CurrentOptions(); options.compaction_style = kCompactionStyleUniversal; - options.write_buffer_size = 105 << 10; // 105KB - options.arena_block_size = 4 << 10; // 4KB + options.write_buffer_size = 105 << 10; // 105KB + options.arena_block_size = 4 << 10; // 4KB options.target_file_size_base = 32 << 10; // 32KB // trigger compaction if there are >= 4 files options.level0_file_num_compaction_trigger = 4; @@ -1353,7 +1346,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionCFPathUse) { cf_opt1.cf_paths.emplace_back(dbname_ + "cf1_3", 500 * 1024); cf_opt1.cf_paths.emplace_back(dbname_ + "cf1_4", 1024 * 1024 * 1024); option_vector.emplace_back(DBOptions(options), cf_opt1); - CreateColumnFamilies({"one"},option_vector[1]); + CreateColumnFamilies({"one"}, option_vector[1]); // Configura CF2 specific paths. cf_opt2.cf_paths.emplace_back(dbname_ + "cf2", 300 * 1024); @@ -1361,7 +1354,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionCFPathUse) { cf_opt2.cf_paths.emplace_back(dbname_ + "cf2_3", 500 * 1024); cf_opt2.cf_paths.emplace_back(dbname_ + "cf2_4", 1024 * 1024 * 1024); option_vector.emplace_back(DBOptions(options), cf_opt2); - CreateColumnFamilies({"two"},option_vector[2]); + CreateColumnFamilies({"two"}, option_vector[2]); ReopenWithColumnFamilies({"default", "one", "two"}, option_vector); @@ -1567,7 +1560,6 @@ TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) { verify_func(max_key3); } - TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) { if (!Snappy_Supported()) { return; @@ -1829,9 +1821,9 @@ INSTANTIATE_TEST_CASE_P(NumLevels, DBTestUniversalCompaction, class DBTestUniversalManualCompactionOutputPathId : public DBTestUniversalCompactionBase { public: - DBTestUniversalManualCompactionOutputPathId() : - DBTestUniversalCompactionBase( - "/db_universal_compaction_manual_pid_test") {} + DBTestUniversalManualCompactionOutputPathId() + : DBTestUniversalCompactionBase( + "/db_universal_compaction_manual_pid_test") {} }; TEST_P(DBTestUniversalManualCompactionOutputPathId, @@ -2236,8 +2228,8 @@ int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); #else - (void) argc; - (void) argv; + (void)argc; + (void)argv; return 0; #endif } diff --git a/db/db_write_test.cc b/db/db_write_test.cc index 2c3781ae1..1011d5c9e 100644 --- a/db/db_write_test.cc +++ b/db/db_write_test.cc @@ -170,7 +170,8 @@ TEST_P(DBWriteTest, WriteStallRemoveNoSlowdownWrite) { TEST_P(DBWriteTest, WriteThreadHangOnWriteStall) { Options options = GetOptions(); - options.level0_stop_writes_trigger = options.level0_slowdown_writes_trigger = 4; + options.level0_stop_writes_trigger = options.level0_slowdown_writes_trigger = + 4; std::vector threads; std::atomic thread_num(0); port::Mutex mutex; @@ -195,7 +196,7 @@ TEST_P(DBWriteTest, WriteThreadHangOnWriteStall) { Status s = dbfull()->Put(wo, key, "bar"); ASSERT_TRUE(s.ok() || s.IsIncomplete()); }; - std::function unblock_main_thread_func = [&](void *) { + std::function unblock_main_thread_func = [&](void*) { mutex.Lock(); ++writers; cv.SignalAll(); @@ -254,8 +255,9 @@ TEST_P(DBWriteTest, WriteThreadHangOnWriteStall) { ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(nullptr)); // This would have triggered a write stall. Unblock the write group leader TEST_SYNC_POINT("DBWriteTest::WriteThreadHangOnWriteStall:2"); - // The leader is going to create missing newer links. 
When the leader finishes, - // the next leader is going to delay writes and fail writers with no_slowdown + // The leader is going to create missing newer links. When the leader + // finishes, the next leader is going to delay writes and fail writers with + // no_slowdown TEST_SYNC_POINT("DBWriteTest::WriteThreadHangOnWriteStall:3"); for (auto& t : threads) { @@ -623,42 +625,43 @@ TEST_P(DBWriteTest, LockWalInEffect) { } TEST_P(DBWriteTest, ConcurrentlyDisabledWAL) { - Options options = GetOptions(); - options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics(); - options.statistics->set_stats_level(StatsLevel::kAll); - Reopen(options); - std::string wal_key_prefix = "WAL_KEY_"; - std::string no_wal_key_prefix = "K_"; - // 100 KB value each for NO-WAL operation - std::string no_wal_value(1024 * 100, 'X'); - // 1B value each for WAL operation - std::string wal_value = "0"; - std::thread threads[10]; - for (int t = 0; t < 10; t++) { - threads[t] = std::thread([t, wal_key_prefix, wal_value, no_wal_key_prefix, no_wal_value, this] { - for(int i = 0; i < 10; i++) { - ROCKSDB_NAMESPACE::WriteOptions write_option_disable; - write_option_disable.disableWAL = true; - ROCKSDB_NAMESPACE::WriteOptions write_option_default; - std::string no_wal_key = no_wal_key_prefix + std::to_string(t) + - "_" + std::to_string(i); - ASSERT_OK( - this->Put(no_wal_key, no_wal_value, write_option_disable)); - std::string wal_key = - wal_key_prefix + std::to_string(i) + "_" + std::to_string(i); - ASSERT_OK(this->Put(wal_key, wal_value, write_option_default)); - ASSERT_OK(dbfull()->SyncWAL()); - } - return; - }); - } - for (auto& t: threads) { - t.join(); - } - uint64_t bytes_num = options.statistics->getTickerCount( - ROCKSDB_NAMESPACE::Tickers::WAL_FILE_BYTES); - // written WAL size should less than 100KB (even included HEADER & FOOTER overhead) - ASSERT_LE(bytes_num, 1024 * 100); + Options options = GetOptions(); + options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics(); + options.statistics->set_stats_level(StatsLevel::kAll); + Reopen(options); + std::string wal_key_prefix = "WAL_KEY_"; + std::string no_wal_key_prefix = "K_"; + // 100 KB value each for NO-WAL operation + std::string no_wal_value(1024 * 100, 'X'); + // 1B value each for WAL operation + std::string wal_value = "0"; + std::thread threads[10]; + for (int t = 0; t < 10; t++) { + threads[t] = std::thread([t, wal_key_prefix, wal_value, no_wal_key_prefix, + no_wal_value, this] { + for (int i = 0; i < 10; i++) { + ROCKSDB_NAMESPACE::WriteOptions write_option_disable; + write_option_disable.disableWAL = true; + ROCKSDB_NAMESPACE::WriteOptions write_option_default; + std::string no_wal_key = + no_wal_key_prefix + std::to_string(t) + "_" + std::to_string(i); + ASSERT_OK(this->Put(no_wal_key, no_wal_value, write_option_disable)); + std::string wal_key = + wal_key_prefix + std::to_string(i) + "_" + std::to_string(i); + ASSERT_OK(this->Put(wal_key, wal_value, write_option_default)); + ASSERT_OK(dbfull()->SyncWAL()); + } + return; + }); + } + for (auto& t : threads) { + t.join(); + } + uint64_t bytes_num = options.statistics->getTickerCount( + ROCKSDB_NAMESPACE::Tickers::WAL_FILE_BYTES); + // written WAL size should less than 100KB (even included HEADER & FOOTER + // overhead) + ASSERT_LE(bytes_num, 1024 * 100); } INSTANTIATE_TEST_CASE_P(DBWriteTestInstance, DBWriteTest, diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc index b52b0192c..8dc3387df 100644 --- a/db/dbformat_test.cc +++ b/db/dbformat_test.cc @@ -15,8 +15,7 @@ namespace ROCKSDB_NAMESPACE { 
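Note on the ConcurrentlyDisabledWAL test above: the assertion on WAL_FILE_BYTES works because disableWAL writes bypass the log entirely, while default-option writes go through it and SyncWAL() makes them durable. The pattern in isolation (keys and sizes are illustrative):

#include <string>
#include "rocksdb/db.h"

void MixedDurabilityWrites(rocksdb::DB* db) {
  rocksdb::WriteOptions no_wal;
  no_wal.disableWAL = true;  // memtable only; lost on crash before flush
  db->Put(no_wal, "bulk_key", std::string(100 * 1024, 'X'));
  db->Put(rocksdb::WriteOptions(), "logged_key", "0");  // goes to the WAL
  db->SyncWAL();  // fsync the log; covers only the logged write
}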
-static std::string IKey(const std::string& user_key, - uint64_t seq, +static std::string IKey(const std::string& user_key, uint64_t seq, ValueType vt) { std::string encoded; AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt)); @@ -37,9 +36,7 @@ static std::string ShortSuccessor(const std::string& s) { return result; } -static void TestKey(const std::string& key, - uint64_t seq, - ValueType vt) { +static void TestKey(const std::string& key, uint64_t seq, ValueType vt) { std::string encoded = IKey(key, seq, vt); Slice in(encoded); @@ -56,13 +53,19 @@ static void TestKey(const std::string& key, class FormatTest : public testing::Test {}; TEST_F(FormatTest, InternalKey_EncodeDecode) { - const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" }; - const uint64_t seq[] = { - 1, 2, 3, - (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1, - (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1, - (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1 - }; + const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"}; + const uint64_t seq[] = {1, + 2, + 3, + (1ull << 8) - 1, + 1ull << 8, + (1ull << 8) + 1, + (1ull << 16) - 1, + 1ull << 16, + (1ull << 16) + 1, + (1ull << 32) - 1, + 1ull << 32, + (1ull << 32) + 1}; for (unsigned int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) { for (unsigned int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) { TestKey(keys[k], seq[s], kTypeValue); @@ -74,27 +77,25 @@ TEST_F(FormatTest, InternalKey_EncodeDecode) { TEST_F(FormatTest, InternalKeyShortSeparator) { // When user keys are same ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("foo", 99, kTypeValue))); - ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("foo", 101, kTypeValue))); - ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("foo", 100, kTypeValue))); - ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("foo", 100, kTypeDeletion))); + Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue))); + ASSERT_EQ( + IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue))); + ASSERT_EQ( + IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue))); + ASSERT_EQ( + IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion))); // When user keys are misordered ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("bar", 99, kTypeValue))); + Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue))); // When user keys are different, but correctly ordered - ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek), - Shorten(IKey("foo", 100, kTypeValue), - IKey("hello", 200, kTypeValue))); + ASSERT_EQ( + IKey("g", kMaxSequenceNumber, kValueTypeForSeek), + Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue))); ASSERT_EQ(IKey("ABC2", kMaxSequenceNumber, kValueTypeForSeek), Shorten(IKey("ABC1AAAAA", 100, kTypeValue), @@ -121,14 +122,14 @@ TEST_F(FormatTest, InternalKeyShortSeparator) { Shorten(IKey("AAA1", 100, kTypeValue), IKey("AAA2", 200, kTypeValue))); // When start user key is prefix of limit user key - ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("foobar", 200, kTypeValue))); + ASSERT_EQ( + IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue))); // When limit user key 
is prefix of start user key - ASSERT_EQ(IKey("foobar", 100, kTypeValue), - Shorten(IKey("foobar", 100, kTypeValue), - IKey("foo", 200, kTypeValue))); + ASSERT_EQ( + IKey("foobar", 100, kTypeValue), + Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue))); } TEST_F(FormatTest, InternalKeyShortestSuccessor) { diff --git a/db/deletefile_test.cc b/db/deletefile_test.cc index 18f2577e9..34925e828 100644 --- a/db/deletefile_test.cc +++ b/db/deletefile_test.cc @@ -10,9 +10,11 @@ #ifndef ROCKSDB_LITE #include + #include #include #include + #include "db/db_impl/db_impl.h" #include "db/db_test_util.h" #include "db/version_set.h" @@ -55,7 +57,7 @@ class DeleteFileTest : public DBTestBase { WriteOptions options; options.sync = false; ReadOptions roptions; - for (int i = startkey; i < (numkeys + startkey) ; i++) { + for (int i = startkey; i < (numkeys + startkey); i++) { std::string temp = std::to_string(i); Slice key(temp); Slice value(temp); @@ -63,10 +65,8 @@ class DeleteFileTest : public DBTestBase { } } - int numKeysInLevels( - std::vector &metadata, - std::vector *keysperlevel = nullptr) { - + int numKeysInLevels(std::vector& metadata, + std::vector* keysperlevel = nullptr) { if (keysperlevel != nullptr) { keysperlevel->resize(numlevels_); } @@ -82,8 +82,7 @@ class DeleteFileTest : public DBTestBase { } fprintf(stderr, "level %d name %s smallest %s largest %s\n", metadata[i].level, metadata[i].name.c_str(), - metadata[i].smallestkey.c_str(), - metadata[i].largestkey.c_str()); + metadata[i].smallestkey.c_str(), metadata[i].largestkey.c_str()); } return numKeys; } @@ -214,7 +213,7 @@ TEST_F(DeleteFileTest, PurgeObsoleteFilesTest) { // this time, we keep an iterator alive Reopen(options); - Iterator *itr = nullptr; + Iterator* itr = nullptr; CreateTwoLevels(); itr = db_->NewIterator(ReadOptions()); ASSERT_OK(itr->status()); @@ -481,12 +480,12 @@ TEST_F(DeleteFileTest, DeleteFileWithIterator) { } Status status = db_->DeleteFile(level2file); - fprintf(stdout, "Deletion status %s: %s\n", - level2file.c_str(), status.ToString().c_str()); + fprintf(stdout, "Deletion status %s: %s\n", level2file.c_str(), + status.ToString().c_str()); ASSERT_OK(status); it->SeekToFirst(); int numKeysIterated = 0; - while(it->Valid()) { + while (it->Valid()) { numKeysIterated++; it->Next(); } diff --git a/db/error_handler.cc b/db/error_handler.cc index 1df01267f..7f68bb026 100644 --- a/db/error_handler.cc +++ b/db/error_handler.cc @@ -234,8 +234,8 @@ void ErrorHandler::CancelErrorRecovery() { // We'll release the lock before calling sfm, so make sure no new // recovery gets scheduled at that point auto_recovery_ = false; - SstFileManagerImpl* sfm = reinterpret_cast( - db_options_.sst_file_manager.get()); + SstFileManagerImpl* sfm = + reinterpret_cast(db_options_.sst_file_manager.get()); if (sfm) { // This may or may not cancel a pending recovery db_mutex_->Unlock(); @@ -292,8 +292,8 @@ const Status& ErrorHandler::HandleKnownErrors(const Status& bg_err, bool found = false; { - auto entry = ErrorSeverityMap.find(std::make_tuple(reason, bg_err.code(), - bg_err.subcode(), paranoid)); + auto entry = ErrorSeverityMap.find( + std::make_tuple(reason, bg_err.code(), bg_err.subcode(), paranoid)); if (entry != ErrorSeverityMap.end()) { sev = entry->second; found = true; @@ -301,8 +301,8 @@ const Status& ErrorHandler::HandleKnownErrors(const Status& bg_err, } if (!found) { - auto entry = DefaultErrorSeverityMap.find(std::make_tuple(reason, - bg_err.code(), paranoid)); + auto entry = DefaultErrorSeverityMap.find( + 
std::make_tuple(reason, bg_err.code(), paranoid)); if (entry != DefaultErrorSeverityMap.end()) { sev = entry->second; found = true; diff --git a/db/error_handler.h b/db/error_handler.h index e7c47b763..34e08a525 100644 --- a/db/error_handler.h +++ b/db/error_handler.h @@ -26,100 +26,99 @@ struct DBRecoverContext { }; class ErrorHandler { - public: - ErrorHandler(DBImpl* db, const ImmutableDBOptions& db_options, - InstrumentedMutex* db_mutex) - : db_(db), - db_options_(db_options), - cv_(db_mutex), - end_recovery_(false), - recovery_thread_(nullptr), - db_mutex_(db_mutex), - auto_recovery_(false), - recovery_in_prog_(false), - soft_error_no_bg_work_(false), - is_db_stopped_(false), - bg_error_stats_(db_options.statistics) { - // Clear the checked flag for uninitialized errors - bg_error_.PermitUncheckedError(); - recovery_error_.PermitUncheckedError(); - recovery_io_error_.PermitUncheckedError(); - } - - void EnableAutoRecovery() { auto_recovery_ = true; } - - Status::Severity GetErrorSeverity(BackgroundErrorReason reason, - Status::Code code, - Status::SubCode subcode); - - const Status& SetBGError(const Status& bg_err, BackgroundErrorReason reason); - - Status GetBGError() const { return bg_error_; } - - Status GetRecoveryError() const { return recovery_error_; } - - Status ClearBGError(); - - bool IsDBStopped() { return is_db_stopped_.load(std::memory_order_acquire); } - - bool IsBGWorkStopped() { - assert(db_mutex_); - db_mutex_->AssertHeld(); - return !bg_error_.ok() && - (bg_error_.severity() >= Status::Severity::kHardError || - !auto_recovery_ || soft_error_no_bg_work_); - } - - bool IsSoftErrorNoBGWork() { return soft_error_no_bg_work_; } - - bool IsRecoveryInProgress() { return recovery_in_prog_; } - - Status RecoverFromBGError(bool is_manual = false); - void CancelErrorRecovery(); - - void EndAutoRecovery(); - - private: - DBImpl* db_; - const ImmutableDBOptions& db_options_; - Status bg_error_; - // A separate Status variable used to record any errors during the - // recovery process from hard errors - Status recovery_error_; - // A separate IO Status variable used to record any IO errors during - // the recovery process. At the same time, recovery_error_ is also set. - IOStatus recovery_io_error_; - // The condition variable used with db_mutex during auto resume for time - // wait. - InstrumentedCondVar cv_; - bool end_recovery_; - std::unique_ptr recovery_thread_; - - InstrumentedMutex* db_mutex_; - // A flag indicating whether automatic recovery from errors is enabled - bool auto_recovery_; - bool recovery_in_prog_; - // A flag to indicate that for the soft error, we should not allow any - // background work except the work is from recovery. - bool soft_error_no_bg_work_; - - // Used to store the context for recover, such as flush reason. - DBRecoverContext recover_context_; - std::atomic is_db_stopped_; - - // The pointer of DB statistics. - std::shared_ptr bg_error_stats_; - - const Status& HandleKnownErrors(const Status& bg_err, - BackgroundErrorReason reason); - Status OverrideNoSpaceError(const Status& bg_error, bool* auto_recovery); - void RecoverFromNoSpace(); - const Status& StartRecoverFromRetryableBGIOError(const IOStatus& io_error); - void RecoverFromRetryableBGIOError(); - // First, if it is in recovery and the recovery_error is ok. Set the - // recovery_error_ to bg_err. Second, if the severity is higher than the - // current bg_error_, overwrite it. 
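Note: the two-step lookup in HandleKnownErrors above (an exact (reason, code, subcode, paranoid) entry first, then a (reason, code, paranoid) default) has roughly this shape; the table contents and the final fallback below are illustrative, not the real mappings:

#include <map>
#include <tuple>
#include "rocksdb/listener.h"
#include "rocksdb/status.h"

using rocksdb::BackgroundErrorReason;
using rocksdb::Status;

using ExactKey =
    std::tuple<BackgroundErrorReason, Status::Code, Status::SubCode, bool>;
using DefaultKey = std::tuple<BackgroundErrorReason, Status::Code, bool>;

Status::Severity LookupSeverity(
    const std::map<ExactKey, Status::Severity>& exact,
    const std::map<DefaultKey, Status::Severity>& defaults,
    BackgroundErrorReason reason, const Status& err, bool paranoid) {
  auto it =
      exact.find(std::make_tuple(reason, err.code(), err.subcode(), paranoid));
  if (it != exact.end()) return it->second;
  auto jt = defaults.find(std::make_tuple(reason, err.code(), paranoid));
  if (jt != defaults.end()) return jt->second;
  return Status::Severity::kFatalError;  // illustrative fallback only
}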
- void CheckAndSetRecoveryAndBGError(const Status& bg_err); + public: + ErrorHandler(DBImpl* db, const ImmutableDBOptions& db_options, + InstrumentedMutex* db_mutex) + : db_(db), + db_options_(db_options), + cv_(db_mutex), + end_recovery_(false), + recovery_thread_(nullptr), + db_mutex_(db_mutex), + auto_recovery_(false), + recovery_in_prog_(false), + soft_error_no_bg_work_(false), + is_db_stopped_(false), + bg_error_stats_(db_options.statistics) { + // Clear the checked flag for uninitialized errors + bg_error_.PermitUncheckedError(); + recovery_error_.PermitUncheckedError(); + recovery_io_error_.PermitUncheckedError(); + } + + void EnableAutoRecovery() { auto_recovery_ = true; } + + Status::Severity GetErrorSeverity(BackgroundErrorReason reason, + Status::Code code, Status::SubCode subcode); + + const Status& SetBGError(const Status& bg_err, BackgroundErrorReason reason); + + Status GetBGError() const { return bg_error_; } + + Status GetRecoveryError() const { return recovery_error_; } + + Status ClearBGError(); + + bool IsDBStopped() { return is_db_stopped_.load(std::memory_order_acquire); } + + bool IsBGWorkStopped() { + assert(db_mutex_); + db_mutex_->AssertHeld(); + return !bg_error_.ok() && + (bg_error_.severity() >= Status::Severity::kHardError || + !auto_recovery_ || soft_error_no_bg_work_); + } + + bool IsSoftErrorNoBGWork() { return soft_error_no_bg_work_; } + + bool IsRecoveryInProgress() { return recovery_in_prog_; } + + Status RecoverFromBGError(bool is_manual = false); + void CancelErrorRecovery(); + + void EndAutoRecovery(); + + private: + DBImpl* db_; + const ImmutableDBOptions& db_options_; + Status bg_error_; + // A separate Status variable used to record any errors during the + // recovery process from hard errors + Status recovery_error_; + // A separate IO Status variable used to record any IO errors during + // the recovery process. At the same time, recovery_error_ is also set. + IOStatus recovery_io_error_; + // The condition variable used with db_mutex during auto resume for time + // wait. + InstrumentedCondVar cv_; + bool end_recovery_; + std::unique_ptr recovery_thread_; + + InstrumentedMutex* db_mutex_; + // A flag indicating whether automatic recovery from errors is enabled + bool auto_recovery_; + bool recovery_in_prog_; + // A flag to indicate that for the soft error, we should not allow any + // background work except the work is from recovery. + bool soft_error_no_bg_work_; + + // Used to store the context for recover, such as flush reason. + DBRecoverContext recover_context_; + std::atomic is_db_stopped_; + + // The pointer of DB statistics. + std::shared_ptr bg_error_stats_; + + const Status& HandleKnownErrors(const Status& bg_err, + BackgroundErrorReason reason); + Status OverrideNoSpaceError(const Status& bg_error, bool* auto_recovery); + void RecoverFromNoSpace(); + const Status& StartRecoverFromRetryableBGIOError(const IOStatus& io_error); + void RecoverFromRetryableBGIOError(); + // First, if it is in recovery and the recovery_error is ok. Set the + // recovery_error_ to bg_err. Second, if the severity is higher than the + // current bg_error_, overwrite it. + void CheckAndSetRecoveryAndBGError(const Status& bg_err); }; } // namespace ROCKSDB_NAMESPACE diff --git a/db/event_helpers.cc b/db/event_helpers.cc index 6a5c93661..7987b8ec6 100644 --- a/db/event_helpers.cc +++ b/db/event_helpers.cc @@ -23,7 +23,7 @@ template inline T SafeDivide(T a, T b) { return b == 0 ? 
0 : a / b; } -} // namespace +} // anonymous namespace void EventHelpers::AppendCurrentTime(JSONWriter* jwriter) { *jwriter << "time_micros" diff --git a/db/event_helpers.h b/db/event_helpers.h index ad299670f..68d819fe6 100644 --- a/db/event_helpers.h +++ b/db/event_helpers.h @@ -39,9 +39,9 @@ class EventHelpers { const std::string& file_checksum, const std::string& file_checksum_func_name); static void LogAndNotifyTableFileDeletion( - EventLogger* event_logger, int job_id, - uint64_t file_number, const std::string& file_path, - const Status& status, const std::string& db_name, + EventLogger* event_logger, int job_id, uint64_t file_number, + const std::string& file_path, const Status& status, + const std::string& db_name, const std::vector>& listeners); static void NotifyOnErrorRecoveryEnd( const std::vector>& listeners, diff --git a/db/external_sst_file_ingestion_job.cc b/db/external_sst_file_ingestion_job.cc index 6648a76d9..ba1277eab 100644 --- a/db/external_sst_file_ingestion_job.cc +++ b/db/external_sst_file_ingestion_job.cc @@ -106,9 +106,8 @@ Status ExternalSstFileIngestionJob::Prepare( for (IngestedFileInfo& f : files_to_ingest_) { f.copy_file = false; const std::string path_outside_db = f.external_file_path; - const std::string path_inside_db = - TableFileName(cfd_->ioptions()->cf_paths, f.fd.GetNumber(), - f.fd.GetPathId()); + const std::string path_inside_db = TableFileName( + cfd_->ioptions()->cf_paths, f.fd.GetNumber(), f.fd.GetPathId()); if (ingestion_options_.move_files) { status = fs_->LinkFile(path_outside_db, path_inside_db, IOOptions(), nullptr); @@ -491,7 +490,8 @@ void ExternalSstFileIngestionJob::UpdateStats() { stream.StartArray(); for (IngestedFileInfo& f : files_to_ingest_) { - InternalStats::CompactionStats stats(CompactionReason::kExternalSstIngestion, 1); + InternalStats::CompactionStats stats( + CompactionReason::kExternalSstIngestion, 1); stats.micros = total_time; // If actual copy occurred for this file, then we need to count the file // size as the actual bytes written. 
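Note, for context on Prepare() above: move_files controls whether the ingestion job hard-links the external file into the DB directory, falling back to a copy across filesystems. From the user's side this is driven by IngestExternalFileOptions; a minimal sketch with an illustrative path:

#include <string>
#include <vector>
#include "rocksdb/db.h"

void IngestOne(rocksdb::DB* db) {
  rocksdb::IngestExternalFileOptions ifo;
  ifo.move_files = true;          // try LinkFile() instead of copying
  ifo.allow_global_seqno = true;  // permit seqno assignment on overlap
  rocksdb::Status s = db->IngestExternalFile({"/tmp/file1.sst"}, ifo);
}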
If the file was linked, then we ignore @@ -591,8 +591,8 @@ Status ExternalSstFileIngestionJob::GetIngestedFileInfo( std::unique_ptr sst_file; std::unique_ptr sst_file_reader; - status = fs_->NewRandomAccessFile(external_file, env_options_, - &sst_file, nullptr); + status = + fs_->NewRandomAccessFile(external_file, env_options_, &sst_file, nullptr); if (!status.ok()) { return status; } @@ -658,9 +658,9 @@ Status ExternalSstFileIngestionJob::GetIngestedFileInfo( assert(seqno_iter == uprops.end()); file_to_ingest->original_seqno = 0; if (ingestion_options_.allow_blocking_flush || - ingestion_options_.allow_global_seqno) { + ingestion_options_.allow_global_seqno) { return Status::InvalidArgument( - "External SST file V1 does not support global seqno"); + "External SST file V1 does not support global seqno"); } } else { return Status::InvalidArgument("External file version is not supported"); @@ -857,7 +857,7 @@ Status ExternalSstFileIngestionJob::AssignLevelAndSeqnoForIngestedFile( return status; } - TEST_SYNC_POINT_CALLBACK( + TEST_SYNC_POINT_CALLBACK( "ExternalSstFileIngestionJob::AssignLevelAndSeqnoForIngestedFile", &overlap_with_db); file_to_ingest->picked_level = target_level; @@ -872,10 +872,10 @@ Status ExternalSstFileIngestionJob::CheckLevelForIngestedBehindFile( auto* vstorage = cfd_->current()->storage_info(); // first check if new files fit in the bottommost level int bottom_lvl = cfd_->NumberLevels() - 1; - if(!IngestedFileFitInLevel(file_to_ingest, bottom_lvl)) { + if (!IngestedFileFitInLevel(file_to_ingest, bottom_lvl)) { return Status::InvalidArgument( - "Can't ingest_behind file as it doesn't fit " - "at the bottommost level!"); + "Can't ingest_behind file as it doesn't fit " + "at the bottommost level!"); } // second check if despite allow_ingest_behind=true we still have 0 seqnums @@ -884,8 +884,8 @@ Status ExternalSstFileIngestionJob::CheckLevelForIngestedBehindFile( for (auto file : vstorage->LevelFiles(lvl)) { if (file->fd.smallest_seqno == 0) { return Status::InvalidArgument( - "Can't ingest_behind file as despite allow_ingest_behind=true " - "there are files with 0 seqno in database at upper levels!"); + "Can't ingest_behind file as despite allow_ingest_behind=true " + "there are files with 0 seqno in database at upper levels!"); } } } @@ -912,9 +912,8 @@ Status ExternalSstFileIngestionJob::AssignGlobalSeqnoForIngestedFile( // If the file system does not support random write, then we should not. // Otherwise we should. std::unique_ptr rwfile; - Status status = - fs_->NewRandomRWFile(file_to_ingest->internal_file_path, env_options_, - &rwfile, nullptr); + Status status = fs_->NewRandomRWFile(file_to_ingest->internal_file_path, + env_options_, &rwfile, nullptr); TEST_SYNC_POINT_CALLBACK("ExternalSstFileIngestionJob::NewRandomRWFile", &status); if (status.ok()) { diff --git a/db/external_sst_file_test.cc b/db/external_sst_file_test.cc index 559f9957d..d16f6a58c 100644 --- a/db/external_sst_file_test.cc +++ b/db/external_sst_file_test.cc @@ -301,7 +301,8 @@ TEST_F(ExternalSSTFileTest, Basic) { SstFileWriter sst_file_writer(EnvOptions(), options); - // Current file size should be 0 after sst_file_writer init and before open a file. + // Current file size should be 0 after sst_file_writer init and before open + // a file. 
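Note: the comment just above reflects the SstFileWriter lifecycle: nothing is written until Open(), and the file only becomes ingestible after Finish(). A sketch of the flow (path is illustrative; keys must be added in comparator order, hence the zero-padding):

#include <cstdio>
#include "rocksdb/options.h"
#include "rocksdb/sst_file_writer.h"

void WriteDemoSst() {
  rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), rocksdb::Options());
  // writer.FileSize() == 0 here
  writer.Open("/tmp/file1.sst");
  char key[16];
  for (int i = 0; i < 100; ++i) {
    std::snprintf(key, sizeof(key), "%05d", i);  // keeps keys sorted
    writer.Put(key, "value");
  }
  writer.Finish();  // seals the footer and table properties
}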
ASSERT_EQ(sst_file_writer.FileSize(), 0); // file1.sst (0 => 99) @@ -2318,7 +2319,6 @@ TEST_F(ExternalSSTFileTest, SkipBloomFilter) { table_options.cache_index_and_filter_blocks = true; options.table_factory.reset(NewBlockBasedTableFactory(table_options)); - // Create external SST file and include bloom filters options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics(); DestroyAndReopen(options); diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index b2c7870d9..ddd4b47cc 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -338,8 +338,7 @@ class FaultInjectionTest FaultInjectionTest::kValExpectNoError)); } - void NoWriteTestPreFault() { - } + void NoWriteTestPreFault() {} void NoWriteTestReopenWithFault(ResetMethod reset_method) { CloseDB(); diff --git a/db/file_indexer.cc b/db/file_indexer.cc index 523cb3c16..608f1cb28 100644 --- a/db/file_indexer.cc +++ b/db/file_indexer.cc @@ -8,8 +8,10 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/file_indexer.h" + #include #include + #include "db/version_edit.h" #include "rocksdb/comparator.h" diff --git a/db/file_indexer.h b/db/file_indexer.h index fd889b031..45cb13615 100644 --- a/db/file_indexer.h +++ b/db/file_indexer.h @@ -12,6 +12,7 @@ #include #include #include + #include "memory/arena.h" #include "port/port.h" #include "util/autovector.h" @@ -66,7 +67,7 @@ class FileIndexer { struct IndexUnit { IndexUnit() - : smallest_lb(0), largest_lb(0), smallest_rb(-1), largest_rb(-1) {} + : smallest_lb(0), largest_lb(0), smallest_rb(-1), largest_rb(-1) {} // During file search, a key is compared against smallest and largest // from a FileMetaData. It can have 3 possible outcomes: // (1) key is smaller than smallest, implying it is also smaller than diff --git a/db/file_indexer_test.cc b/db/file_indexer_test.cc index 99ce93993..5c82189ef 100644 --- a/db/file_indexer_test.cc +++ b/db/file_indexer_test.cc @@ -8,7 +8,9 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. 
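Aside: the ingestion hunks above are easier to follow next to the public API they implement. A minimal sketch, assuming illustrative /tmp paths and key names (none of them from this diff): build an external file with SstFileWriter, then hand it to DB::IngestExternalFile(). Setting move_files corresponds to the fs_->LinkFile() fast path in Prepare(); ingest_behind (checked by CheckLevelForIngestedBehindFile() above) additionally requires the DB to be opened with allow_ingest_behind = true.

#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/sst_file_writer.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/ingest_demo_db", &db);
  assert(s.ok());

  rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), options);
  assert(writer.FileSize() == 0);  // the invariant asserted in Basic above
  s = writer.Open("/tmp/ingest_demo.sst");
  assert(s.ok());
  s = writer.Put("key1", "value1");  // keys must be added in ascending order
  assert(s.ok());
  s = writer.Finish();  // seals the file and writes its footer
  assert(s.ok());

  rocksdb::IngestExternalFileOptions ifo;
  ifo.move_files = true;  // prefer hard-linking into the DB dir over copying
  s = db->IngestExternalFile({"/tmp/ingest_demo.sst"}, ifo);
  assert(s.ok());

  delete db;
  return 0;
}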
#include "db/file_indexer.h" + #include + #include "db/dbformat.h" #include "db/version_edit.h" #include "port/stack_trace.h" @@ -73,8 +75,8 @@ class FileIndexerTest : public testing::Test { } void GetNextLevelIndex(const uint32_t level, const uint32_t file_index, - const int cmp_smallest, const int cmp_largest, int32_t* left_index, - int32_t* right_index) { + const int cmp_smallest, const int cmp_largest, + int32_t* left_index, int32_t* right_index) { *left_index = 100; *right_index = 100; indexer->GetNextLevelIndex(level, file_index, cmp_smallest, cmp_largest, diff --git a/db/filename_test.cc b/db/filename_test.cc index bba275dc0..04c81b333 100644 --- a/db/filename_test.cc +++ b/db/filename_test.cc @@ -69,35 +69,33 @@ TEST_F(FileNameTest, Parse) { } // Errors - static const char* errors[] = { - "", - "foo", - "foo-dx-100.log", - ".log", - "", - "manifest", - "CURREN", - "CURRENTX", - "MANIFES", - "MANIFEST", - "MANIFEST-", - "XMANIFEST-3", - "MANIFEST-3x", - "META", - "METADB", - "METADB-", - "XMETADB-3", - "METADB-3x", - "LOC", - "LOCKx", - "LO", - "LOGx", - "18446744073709551616.log", - "184467440737095516150.log", - "100", - "100.", - "100.lop" - }; + static const char* errors[] = {"", + "foo", + "foo-dx-100.log", + ".log", + "", + "manifest", + "CURREN", + "CURRENTX", + "MANIFES", + "MANIFEST", + "MANIFEST-", + "XMANIFEST-3", + "MANIFEST-3x", + "META", + "METADB", + "METADB-", + "XMETADB-3", + "METADB-3x", + "LOC", + "LOCKx", + "LO", + "LOGx", + "18446744073709551616.log", + "184467440737095516150.log", + "100", + "100.", + "100.lop"}; for (unsigned int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) { std::string f = errors[i]; ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f; diff --git a/db/flush_job.cc b/db/flush_job.cc index 1b28be08e..645e42f44 100644 --- a/db/flush_job.cc +++ b/db/flush_job.cc @@ -48,7 +48,7 @@ namespace ROCKSDB_NAMESPACE { -const char* GetFlushReasonString (FlushReason flush_reason) { +const char* GetFlushReasonString(FlushReason flush_reason) { switch (flush_reason) { case FlushReason::kOthers: return "Other Reasons"; @@ -136,17 +136,14 @@ FlushJob::FlushJob( TEST_SYNC_POINT("FlushJob::FlushJob()"); } -FlushJob::~FlushJob() { - ThreadStatusUtil::ResetThreadStatus(); -} +FlushJob::~FlushJob() { ThreadStatusUtil::ResetThreadStatus(); } void FlushJob::ReportStartedFlush() { ThreadStatusUtil::SetColumnFamily(cfd_, cfd_->ioptions()->env, db_options_.enable_thread_tracking); ThreadStatusUtil::SetThreadOperation(ThreadStatus::OP_FLUSH); - ThreadStatusUtil::SetThreadOperationProperty( - ThreadStatus::COMPACTION_JOB_ID, - job_context_->job_id); + ThreadStatusUtil::SetThreadOperationProperty(ThreadStatus::COMPACTION_JOB_ID, + job_context_->job_id); IOSTATS_RESET(bytes_written); } @@ -156,8 +153,7 @@ void FlushJob::ReportFlushInputSize(const autovector& mems) { input_size += mem->ApproximateMemoryUsage(); } ThreadStatusUtil::IncreaseThreadOperationProperty( - ThreadStatus::FLUSH_BYTES_MEMTABLES, - input_size); + ThreadStatus::FLUSH_BYTES_MEMTABLES, input_size); } void FlushJob::RecordFlushIOStats() { @@ -220,8 +216,7 @@ Status FlushJob::Run(LogsWithPrepTracker* prep_tracker, FileMetaData* file_meta, double mempurge_threshold = mutable_cf_options_.experimental_mempurge_threshold; - AutoThreadOperationStageUpdater stage_run( - ThreadStatus::STAGE_FLUSH_RUN); + AutoThreadOperationStageUpdater stage_run(ThreadStatus::STAGE_FLUSH_RUN); if (mems_.empty()) { ROCKS_LOG_BUFFER(log_buffer_, "[%s] Nothing in memtable to flush", cfd_->GetName().c_str()); @@ -906,8 +901,7 @@ 
Status FlushJob::WriteLevel0Table() { } const uint64_t current_time = static_cast(_current_time); - uint64_t oldest_key_time = - mems_.front()->ApproximateOldestKeyTime(); + uint64_t oldest_key_time = mems_.front()->ApproximateOldestKeyTime(); // It's not clear whether oldest_key_time is always available. In case // it is not available, use current_time. diff --git a/db/flush_job_test.cc b/db/flush_job_test.cc index c3275b856..f994b4e9b 100644 --- a/db/flush_job_test.cc +++ b/db/flush_job_test.cc @@ -214,8 +214,8 @@ TEST_F(FlushJobTest, NonEmpty) { // Note: the first two blob references will not be considered when resolving // the oldest blob file referenced (the first one is inlined TTL, while the // second one is TTL and thus points to a TTL blob file). - constexpr std::array blob_file_numbers{{ - kInvalidBlobFileNumber, 5, 103, 17, 102, 101}}; + constexpr std::array blob_file_numbers{ + {kInvalidBlobFileNumber, 5, 103, 17, 102, 101}}; for (size_t i = 0; i < blob_file_numbers.size(); ++i) { std::string key(std::to_string(i + 10001)); std::string blob_index; diff --git a/db/forward_iterator.cc b/db/forward_iterator.cc index 13a94cb81..3fbc2cf47 100644 --- a/db/forward_iterator.cc +++ b/db/forward_iterator.cc @@ -104,9 +104,7 @@ class ForwardLevelIterator : public InternalIterator { status_ = Status::NotSupported("ForwardLevelIterator::Prev()"); valid_ = false; } - bool Valid() const override { - return valid_; - } + bool Valid() const override { return valid_; } void SeekToFirst() override { assert(file_iter_ != nullptr); if (!status_.ok()) { @@ -249,9 +247,7 @@ ForwardIterator::ForwardIterator(DBImpl* db, const ReadOptions& read_options, immutable_status_.PermitUncheckedError(); } -ForwardIterator::~ForwardIterator() { - Cleanup(true); -} +ForwardIterator::~ForwardIterator() { Cleanup(true); } void ForwardIterator::SVCleanup(DBImpl* db, SuperVersion* sv, bool background_purge_on_iterator_cleanup) { @@ -284,13 +280,13 @@ struct SVCleanupParams { SuperVersion* sv; bool background_purge_on_iterator_cleanup; }; -} +} // anonymous namespace // Used in PinnedIteratorsManager to release pinned SuperVersion void ForwardIterator::DeferredSVCleanup(void* arg) { auto d = reinterpret_cast(arg); - ForwardIterator::SVCleanup( - d->db, d->sv, d->background_purge_on_iterator_cleanup); + ForwardIterator::SVCleanup(d->db, d->sv, + d->background_purge_on_iterator_cleanup); delete d; } @@ -547,8 +543,7 @@ void ForwardIterator::Next() { assert(valid_); bool update_prev_key = false; - if (sv_ == nullptr || - sv_->version_number != cfd_->GetSuperVersionNumber()) { + if (sv_ == nullptr || sv_->version_number != cfd_->GetSuperVersionNumber()) { std::string current_key = key().ToString(); Slice old_key(current_key.data(), current_key.size()); @@ -578,7 +573,6 @@ void ForwardIterator::Next() { update_prev_key = true; } - if (update_prev_key) { prev_key_.SetInternalKey(current_->key()); is_prev_set_ = true; @@ -635,7 +629,7 @@ bool ForwardIterator::PrepareValue() { assert(!current_->Valid()); assert(!current_->status().ok()); - assert(current_ != mutable_iter_); // memtable iterator can't fail + assert(current_ != mutable_iter_); // memtable iterator can't fail assert(immutable_status_.ok()); valid_ = false; @@ -950,11 +944,11 @@ bool ForwardIterator::NeedToSeekImmutable(const Slice& target) { } Slice prev_key = prev_key_.GetInternalKey(); if (prefix_extractor_ && prefix_extractor_->Transform(target).compare( - prefix_extractor_->Transform(prev_key)) != 0) { + prefix_extractor_->Transform(prev_key)) != 0) { return 
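Aside: FlushJob is internal machinery; the user-visible trigger for it is DB::Flush(). A minimal sketch (FlushNow is an illustrative name; db is assumed to be an open rocksdb::DB*):

#include "rocksdb/db.h"

rocksdb::Status FlushNow(rocksdb::DB* db) {
  rocksdb::FlushOptions fo;
  fo.wait = true;  // block until the memtable has been written out as L0
  return db->Flush(fo);
}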
true; } if (cfd_->internal_comparator().InternalKeyComparator::Compare( - prev_key, target) >= (is_prev_inclusive_ ? 1 : 0)) { + prev_key, target) >= (is_prev_inclusive_ ? 1 : 0)) { return true; } @@ -963,8 +957,8 @@ bool ForwardIterator::NeedToSeekImmutable(const Slice& target) { return false; } if (cfd_->internal_comparator().InternalKeyComparator::Compare( - target, current_ == mutable_iter_ ? immutable_min_heap_.top()->key() - : current_->key()) > 0) { + target, current_ == mutable_iter_ ? immutable_min_heap_.top()->key() + : current_->key()) > 0) { return true; } return false; @@ -1040,11 +1034,11 @@ uint32_t ForwardIterator::FindFileInRange( uint32_t left, uint32_t right) { auto cmp = [&](const FileMetaData* f, const Slice& k) -> bool { return cfd_->internal_comparator().InternalKeyComparator::Compare( - f->largest.Encode(), k) < 0; + f->largest.Encode(), k) < 0; }; - const auto &b = files.begin(); - return static_cast(std::lower_bound(b + left, - b + right, internal_key, cmp) - b); + const auto& b = files.begin(); + return static_cast( + std::lower_bound(b + left, b + right, internal_key, cmp) - b); } void ForwardIterator::DeleteIterator(InternalIterator* iter, bool is_arena) { diff --git a/db/forward_iterator.h b/db/forward_iterator.h index 21cbd7001..5a5c6f0f3 100644 --- a/db/forward_iterator.h +++ b/db/forward_iterator.h @@ -7,9 +7,9 @@ #include "rocksdb/comparator.h" #ifndef ROCKSDB_LITE +#include #include #include -#include #include "memory/arena.h" #include "rocksdb/db.h" @@ -35,6 +35,7 @@ class MinIterComparator { bool operator()(InternalIterator* a, InternalIterator* b) { return comparator_->Compare(a->key(), b->key()) > 0; } + private: const CompareInterface* comparator_; }; @@ -92,8 +93,8 @@ class ForwardIterator : public InternalIterator { // either done immediately or deferred until this iterator is unpinned by // PinnedIteratorsManager. 
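Aside: ForwardIterator, reformatted above, is the engine behind tailing iterators. A hedged sketch of the usage it enables (TailScan is an illustrative name; db assumed open):

#include <cassert>
#include <memory>

#include "rocksdb/db.h"

void TailScan(rocksdb::DB* db) {
  rocksdb::ReadOptions ro;
  ro.tailing = true;  // served by ForwardIterator internally
  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(ro));
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // process it->key() / it->value(); a later Seek() on the same iterator
    // also observes writes made after the iterator was created
  }
  assert(it->status().ok());
}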
void SVCleanup(); - static void SVCleanup( - DBImpl* db, SuperVersion* sv, bool background_purge_on_iterator_cleanup); + static void SVCleanup(DBImpl* db, SuperVersion* sv, + bool background_purge_on_iterator_cleanup); static void DeferredSVCleanup(void* arg); void RebuildIterators(bool refresh_sv); @@ -107,9 +108,9 @@ class ForwardIterator : public InternalIterator { void UpdateCurrent(); bool NeedToSeekImmutable(const Slice& internal_key); void DeleteCurrentIter(); - uint32_t FindFileInRange( - const std::vector& files, const Slice& internal_key, - uint32_t left, uint32_t right); + uint32_t FindFileInRange(const std::vector& files, + const Slice& internal_key, uint32_t left, + uint32_t right); bool IsOverUpperBound(const Slice& internal_key) const; diff --git a/db/forward_iterator_bench.cc b/db/forward_iterator_bench.cc index f03c734d6..325661cef 100644 --- a/db/forward_iterator_bench.cc +++ b/db/forward_iterator_bench.cc @@ -14,6 +14,7 @@ int main() { int main() { return 0; } #else #include + #include #include #include @@ -281,8 +282,9 @@ struct StatsThread { } auto now = std::chrono::steady_clock::now(); double elapsed = - std::chrono::duration_cast >( - now - tlast).count(); + std::chrono::duration_cast >(now - + tlast) + .count(); uint64_t w = ::stats.written.load(); uint64_t r = ::stats.read.load(); fprintf(stderr, diff --git a/db/import_column_family_job.cc b/db/import_column_family_job.cc index 0832ff571..34985666a 100644 --- a/db/import_column_family_job.cc +++ b/db/import_column_family_job.cc @@ -228,8 +228,8 @@ Status ImportColumnFamilyJob::GetIngestedFileInfo( std::unique_ptr sst_file; std::unique_ptr sst_file_reader; - status = fs_->NewRandomAccessFile(external_file, env_options_, - &sst_file, nullptr); + status = + fs_->NewRandomAccessFile(external_file, env_options_, &sst_file, nullptr); if (!status.ok()) { return status; } diff --git a/db/internal_stats.h b/db/internal_stats.h index cb29e82e9..b0cd5899b 100644 --- a/db/internal_stats.h +++ b/db/internal_stats.h @@ -353,7 +353,7 @@ class InternalStats { this->num_output_records += c.num_output_records; this->count += c.count; int num_of_reasons = static_cast(CompactionReason::kNumOfReasons); - for (int i = 0; i< num_of_reasons; i++) { + for (int i = 0; i < num_of_reasons; i++) { counts[i] += c.counts[i]; } } @@ -648,8 +648,8 @@ class InternalStats { struct CFStatsSnapshot { // ColumnFamily-level stats CompactionStats comp_stats; - uint64_t ingest_bytes_flush; // Bytes written to L0 (Flush) - uint64_t stall_count; // Stall count + uint64_t ingest_bytes_flush; // Bytes written to L0 (Flush) + uint64_t stall_count; // Stall count // Stats from compaction jobs - bytes written, bytes read, duration. uint64_t compact_bytes_write; uint64_t compact_bytes_read; @@ -691,10 +691,10 @@ class InternalStats { struct DBStatsSnapshot { // DB-level stats - uint64_t ingest_bytes; // Bytes written by user - uint64_t wal_bytes; // Bytes written to WAL - uint64_t wal_synced; // Number of times WAL is synced - uint64_t write_with_wal; // Number of writes that request WAL + uint64_t ingest_bytes; // Bytes written by user + uint64_t wal_bytes; // Bytes written to WAL + uint64_t wal_synced; // Number of times WAL is synced + uint64_t write_with_wal; // Number of writes that request WAL // These count the number of writes processed by the calling thread or // another thread. 
uint64_t write_other; @@ -980,13 +980,14 @@ class InternalStats { return false; } - bool GetIntProperty(const DBPropertyInfo& /*property_info*/, uint64_t* /*value*/, - DBImpl* /*db*/) const { + bool GetIntProperty(const DBPropertyInfo& /*property_info*/, + uint64_t* /*value*/, DBImpl* /*db*/) const { return false; } bool GetIntPropertyOutOfMutex(const DBPropertyInfo& /*property_info*/, - Version* /*version*/, uint64_t* /*value*/) const { + Version* /*version*/, + uint64_t* /*value*/) const { return false; } }; diff --git a/db/job_context.h b/db/job_context.h index d7d05b11a..352c58e82 100644 --- a/db/job_context.h +++ b/db/job_context.h @@ -35,7 +35,7 @@ struct SuperVersionContext { new_superversion; // if nullptr no new superversion explicit SuperVersionContext(bool create_superversion = false) - : new_superversion(create_superversion ? new SuperVersion() : nullptr) {} + : new_superversion(create_superversion ? new SuperVersion() : nullptr) {} explicit SuperVersionContext(SuperVersionContext&& other) noexcept : superversions_to_free(std::move(other.superversions_to_free)), @@ -54,8 +54,7 @@ struct SuperVersionContext { inline bool HaveSomethingToDelete() const { #ifndef ROCKSDB_DISABLE_STALL_NOTIFICATION - return !superversions_to_free.empty() || - !write_stall_notifications.empty(); + return !superversions_to_free.empty() || !write_stall_notifications.empty(); #else return !superversions_to_free.empty(); #endif @@ -77,7 +76,8 @@ struct SuperVersionContext { (void)new_cond; (void)name; (void)ioptions; -#endif // !defined(ROCKSDB_LITE) && !defined(ROCKSDB_DISABLE_STALL_NOTIFICATION) +#endif // !defined(ROCKSDB_LITE) && + // !defined(ROCKSDB_DISABLE_STALL_NOTIFICATION) } void Clean() { @@ -139,8 +139,7 @@ struct JobContext { CandidateFileInfo(std::string name, std::string path) : file_name(std::move(name)), file_path(std::move(path)) {} bool operator==(const CandidateFileInfo& other) const { - return file_name == other.file_name && - file_path == other.file_path; + return file_name == other.file_name && file_path == other.file_path; } }; diff --git a/db/listener_test.cc b/db/listener_test.cc index 0d88c8f55..160866bb7 100644 --- a/db/listener_test.cc +++ b/db/listener_test.cc @@ -89,7 +89,7 @@ class TestCompactionListener : public EventListener { public: explicit TestCompactionListener(EventListenerTest* test) : test_(test) {} - void OnCompactionCompleted(DB *db, const CompactionJobInfo& ci) override { + void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override { std::lock_guard lock(mutex_); compacted_dbs_.push_back(db); ASSERT_GT(ci.input_files.size(), 0U); @@ -172,9 +172,9 @@ TEST_F(EventListenerTest, OnSingleDBCompactionTest) { TestCompactionListener* listener = new TestCompactionListener(this); options.listeners.emplace_back(listener); - std::vector cf_names = { - "pikachu", "ilya", "muromec", "dobrynia", - "nikitich", "alyosha", "popovich"}; + std::vector cf_names = {"pikachu", "ilya", "muromec", + "dobrynia", "nikitich", "alyosha", + "popovich"}; CreateAndReopenWithCF(cf_names, options); ASSERT_OK(Put(1, "pikachu", std::string(90000, 'p'))); @@ -214,8 +214,7 @@ class TestFlushListener : public EventListener { virtual ~TestFlushListener() { prev_fc_info_.status.PermitUncheckedError(); // Ignore the status } - void OnTableFileCreated( - const TableFileCreationInfo& info) override { + void OnTableFileCreated(const TableFileCreationInfo& info) override { // remember the info for later checking the FlushJobInfo. 
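Aside: InternalStats backs the public property getters whose stubs appear above. A minimal sketch using two real property names (DumpStats is illustrative):

#include <cstdint>
#include <cstdio>
#include <string>

#include "rocksdb/db.h"

void DumpStats(rocksdb::DB* db) {
  std::string stats;
  if (db->GetProperty("rocksdb.stats", &stats)) {
    std::fprintf(stderr, "%s\n", stats.c_str());  // human-readable dump
  }
  uint64_t mem = 0;
  if (db->GetIntProperty("rocksdb.estimate-table-readers-mem", &mem)) {
    // estimated memory used by table readers, outside the block cache
  }
}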
prev_fc_info_ = info; ASSERT_GT(info.db_name.size(), 0U); @@ -250,8 +249,7 @@ class TestFlushListener : public EventListener { #endif // ROCKSDB_USING_THREAD_STATUS } - void OnFlushCompleted( - DB* db, const FlushJobInfo& info) override { + void OnFlushCompleted(DB* db, const FlushJobInfo& info) override { flushed_dbs_.push_back(db); flushed_column_family_names_.push_back(info.cf_name); if (info.triggered_writes_slowdown) { @@ -317,9 +315,9 @@ TEST_F(EventListenerTest, OnSingleDBFlushTest) { #endif // ROCKSDB_USING_THREAD_STATUS TestFlushListener* listener = new TestFlushListener(options.env, this); options.listeners.emplace_back(listener); - std::vector cf_names = { - "pikachu", "ilya", "muromec", "dobrynia", - "nikitich", "alyosha", "popovich"}; + std::vector cf_names = {"pikachu", "ilya", "muromec", + "dobrynia", "nikitich", "alyosha", + "popovich"}; options.table_properties_collector_factories.push_back( std::make_shared()); CreateAndReopenWithCF(cf_names, options); @@ -421,9 +419,9 @@ TEST_F(EventListenerTest, MultiDBMultiListeners) { listeners.emplace_back(new TestFlushListener(options.env, this)); } - std::vector cf_names = { - "pikachu", "ilya", "muromec", "dobrynia", - "nikitich", "alyosha", "popovich"}; + std::vector cf_names = {"pikachu", "ilya", "muromec", + "dobrynia", "nikitich", "alyosha", + "popovich"}; options.create_if_missing = true; for (int i = 0; i < kNumListeners; ++i) { @@ -433,7 +431,7 @@ TEST_F(EventListenerTest, MultiDBMultiListeners) { ColumnFamilyOptions cf_opts(options); std::vector dbs; - std::vector> vec_handles; + std::vector> vec_handles; for (int d = 0; d < kNumDBs; ++d) { ASSERT_OK(DestroyDB(dbname_ + std::to_string(d), options)); @@ -452,8 +450,8 @@ TEST_F(EventListenerTest, MultiDBMultiListeners) { for (int d = 0; d < kNumDBs; ++d) { for (size_t c = 0; c < cf_names.size(); ++c) { - ASSERT_OK(dbs[d]->Put(WriteOptions(), vec_handles[d][c], - cf_names[c], cf_names[c])); + ASSERT_OK(dbs[d]->Put(WriteOptions(), vec_handles[d][c], cf_names[c], + cf_names[c])); } } @@ -483,7 +481,6 @@ TEST_F(EventListenerTest, MultiDBMultiListeners) { } } - for (auto handles : vec_handles) { for (auto h : handles) { delete h; @@ -887,16 +884,17 @@ TEST_F(EventListenerTest, TableFileCreationListenersTest) { } class MemTableSealedListener : public EventListener { -private: + private: SequenceNumber latest_seq_number_; -public: + + public: MemTableSealedListener() {} void OnMemTableSealed(const MemTableInfo& info) override { latest_seq_number_ = info.first_seqno; } void OnFlushCompleted(DB* /*db*/, - const FlushJobInfo& flush_job_info) override { + const FlushJobInfo& flush_job_info) override { ASSERT_LE(flush_job_info.smallest_seqno, latest_seq_number_); } }; @@ -911,8 +909,8 @@ TEST_F(EventListenerTest, MemTableSealedListenerTest) { for (unsigned int i = 0; i < 10; i++) { std::string tag = std::to_string(i); - ASSERT_OK(Put("foo"+tag, "aaa")); - ASSERT_OK(Put("bar"+tag, "bbb")); + ASSERT_OK(Put("foo" + tag, "aaa")); + ASSERT_OK(Put("bar" + tag, "bbb")); ASSERT_OK(Flush()); } diff --git a/db/log_reader.cc b/db/log_reader.cc index eb5c88d25..a21868776 100644 --- a/db/log_reader.cc +++ b/db/log_reader.cc @@ -21,8 +21,7 @@ namespace ROCKSDB_NAMESPACE { namespace log { -Reader::Reporter::~Reporter() { -} +Reader::Reporter::~Reporter() {} Reader::Reader(std::shared_ptr info_log, std::unique_ptr&& _file, @@ -241,9 +240,8 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch, FALLTHROUGH_INTENDED; case kBadRecordChecksum: - if (recycled_ && - wal_recovery_mode == - 
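Aside: the listener_test.cc hunks above exercise the EventListener callback surface. A minimal sketch of a user listener (FlushLogger is an illustrative name, not part of RocksDB):

#include <cstdio>
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/listener.h"
#include "rocksdb/options.h"

class FlushLogger : public rocksdb::EventListener {
 public:
  void OnFlushCompleted(rocksdb::DB* /*db*/,
                        const rocksdb::FlushJobInfo& info) override {
    // Called on a background thread once a memtable flush finishes.
    std::fprintf(stderr, "flushed cf=%s\n", info.cf_name.c_str());
  }
};

// Registration happens before DB::Open():
//   options.listeners.emplace_back(std::make_shared<FlushLogger>());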
WALRecoveryMode::kTolerateCorruptedTailRecords) { + if (recycled_ && wal_recovery_mode == + WALRecoveryMode::kTolerateCorruptedTailRecords) { scratch->clear(); return false; } @@ -297,9 +295,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch, return false; } -uint64_t Reader::LastRecordOffset() { - return last_record_offset_; -} +uint64_t Reader::LastRecordOffset() { return last_record_offset_; } uint64_t Reader::LastRecordEnd() { return end_of_buffer_offset_ - buffer_.size(); @@ -361,11 +357,11 @@ void Reader::UnmarkEOFInternal() { if (read_buffer.data() != backing_store_ + eof_offset_) { // Read did not write to backing_store_ memmove(backing_store_ + eof_offset_, read_buffer.data(), - read_buffer.size()); + read_buffer.size()); } buffer_ = Slice(backing_store_ + consumed_bytes, - eof_offset_ + added - consumed_bytes); + eof_offset_ + added - consumed_bytes); if (added < remaining) { eof_ = true; @@ -385,7 +381,7 @@ void Reader::ReportDrop(size_t bytes, const Status& reason) { } } -bool Reader::ReadMore(size_t* drop_size, int *error) { +bool Reader::ReadMore(size_t* drop_size, int* error) { if (!eof_ && !read_error_) { // Last read was a full read, so this is a trailer to skip buffer_.clear(); diff --git a/db/log_reader.h b/db/log_reader.h index 2ebeaaca9..e3be1570e 100644 --- a/db/log_reader.h +++ b/db/log_reader.h @@ -85,9 +85,7 @@ class Reader { uint64_t LastRecordEnd(); // returns true if the reader has encountered an eof condition. - bool IsEOF() { - return eof_; - } + bool IsEOF() { return eof_; } // returns true if the reader has encountered read error. bool hasReadError() const { return read_error_; } @@ -122,8 +120,8 @@ class Reader { // Internal state variables used for reading records Slice buffer_; - bool eof_; // Last Read() indicated EOF by returning < kBlockSize - bool read_error_; // Error occurred while reading from file + bool eof_; // Last Read() indicated EOF by returning < kBlockSize + bool read_error_; // Error occurred while reading from file // Offset of the file position indicator within the last block when an // EOF was detected. 
@@ -182,7 +180,7 @@ class Reader { uint64_t* fragment_checksum = nullptr); // Read some more - bool ReadMore(size_t* drop_size, int *error); + bool ReadMore(size_t* drop_size, int* error); void UnmarkEOFInternal(); diff --git a/db/log_test.cc b/db/log_test.cc index a055d72f6..2a43dc152 100644 --- a/db/log_test.cc +++ b/db/log_test.cc @@ -128,7 +128,7 @@ class LogTest size_t dropped_bytes_; std::string message_; - ReportCollector() : dropped_bytes_(0) { } + ReportCollector() : dropped_bytes_(0) {} void Corruption(size_t bytes, const Status& status) override { dropped_bytes_ += bytes; message_.append(status.ToString()); @@ -185,9 +185,7 @@ class LogTest ASSERT_OK(writer_->AddRecord(Slice(msg))); } - size_t WrittenBytes() const { - return dest_contents().size(); - } + size_t WrittenBytes() const { return dest_contents().size(); } std::string Read(const WALRecoveryMode wal_recovery_mode = WALRecoveryMode::kTolerateCorruptedTailRecords) { @@ -235,13 +233,9 @@ class LogTest source_->force_error_position_ = position; } - size_t DroppedBytes() const { - return report_.dropped_bytes_; - } + size_t DroppedBytes() const { return report_.dropped_bytes_; } - std::string ReportMessage() const { - return report_.message_; - } + std::string ReportMessage() const { return report_.message_; } void ForceEOF(size_t position = 0) { source_->force_eof_ = true; @@ -389,7 +383,7 @@ TEST_P(LogTest, BadRecordType) { TEST_P(LogTest, TruncatedTrailingRecordIsIgnored) { Write("foo"); - ShrinkSize(4); // Drop all payload as well as a header byte + ShrinkSize(4); // Drop all payload as well as a header byte ASSERT_EQ("EOF", Read()); // Truncated last record is ignored, not treated as an error ASSERT_EQ(0U, DroppedBytes()); @@ -581,7 +575,7 @@ TEST_P(LogTest, ErrorJoinsRecords) { Write("correct"); // Wipe the middle block - for (unsigned int offset = kBlockSize; offset < 2*kBlockSize; offset++) { + for (unsigned int offset = kBlockSize; offset < 2 * kBlockSize; offset++) { SetByte(offset, 'x'); } diff --git a/db/log_writer.h b/db/log_writer.h index 4d0d49a86..5d266e434 100644 --- a/db/log_writer.h +++ b/db/log_writer.h @@ -100,7 +100,7 @@ class Writer { private: std::unique_ptr dest_; - size_t block_offset_; // Current offset in block + size_t block_offset_; // Current offset in block uint64_t log_number_; bool recycle_log_files_; diff --git a/db/logs_with_prep_tracker.h b/db/logs_with_prep_tracker.h index 7f9ece76b..f72f0ca07 100644 --- a/db/logs_with_prep_tracker.h +++ b/db/logs_with_prep_tracker.h @@ -58,6 +58,5 @@ class LogsWithPrepTracker { // both logs_with_prep_ and prepared_section_completed_. 
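Aside: kTolerateCorruptedTailRecords, referenced by both the Reader hunk and LogTest above, is one of the WALRecoveryMode values selectable at open time. A minimal sketch:

#include "rocksdb/options.h"

rocksdb::Options TolerantOpenOptions() {
  rocksdb::Options options;
  // Stop WAL replay at the first corrupted tail record instead of failing
  // the whole DB::Open().
  options.wal_recovery_mode =
      rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords;
  return options;
}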
std::unordered_map prepared_section_completed_; std::mutex prepared_section_completed_mutex_; - }; } // namespace ROCKSDB_NAMESPACE diff --git a/db/lookup_key.h b/db/lookup_key.h index 75686cc52..68851bddd 100644 --- a/db/lookup_key.h +++ b/db/lookup_key.h @@ -10,6 +10,7 @@ #pragma once #include #include + #include "rocksdb/slice.h" #include "rocksdb/types.h" diff --git a/db/malloc_stats.cc b/db/malloc_stats.cc index 8f58ab2cf..52f2e6e0f 100644 --- a/db/malloc_stats.cc +++ b/db/malloc_stats.cc @@ -10,9 +10,10 @@ #include "db/malloc_stats.h" #ifndef ROCKSDB_LITE -#include #include +#include + #include "port/jemalloc_helper.h" namespace ROCKSDB_NAMESPACE { diff --git a/db/manual_compaction_test.cc b/db/manual_compaction_test.cc index d41eca589..b92cb794b 100644 --- a/db/manual_compaction_test.cc +++ b/db/manual_compaction_test.cc @@ -42,9 +42,7 @@ std::string Key1(int i) { return buf; } -std::string Key2(int i) { - return Key1(i) + "_xxx"; -} +std::string Key2(int i) { return Key1(i) + "_xxx"; } class ManualCompactionTest : public testing::Test { public: @@ -102,10 +100,10 @@ TEST_F(ManualCompactionTest, CompactTouchesAllKeys) { for (int iter = 0; iter < 2; ++iter) { DB* db; Options options; - if (iter == 0) { // level compaction + if (iter == 0) { // level compaction options.num_levels = 3; options.compaction_style = CompactionStyle::kCompactionStyleLevel; - } else { // universal compaction + } else { // universal compaction options.compaction_style = CompactionStyle::kCompactionStyleUniversal; } options.create_if_missing = true; diff --git a/db/memtable.cc b/db/memtable.cc index 300f38eff..4d8640711 100644 --- a/db/memtable.cc +++ b/db/memtable.cc @@ -330,9 +330,8 @@ int MemTable::KeyComparator::operator()(const char* prefix_len_key1, return comparator.CompareKeySeq(k1, k2); } -int MemTable::KeyComparator::operator()(const char* prefix_len_key, - const KeyComparator::DecodedType& key) - const { +int MemTable::KeyComparator::operator()( + const char* prefix_len_key, const KeyComparator::DecodedType& key) const { // Internal keys are encoded as length-prefixed strings. Slice a = GetLengthPrefixedSlice(prefix_len_key); return comparator.CompareKeySeq(a, key); @@ -914,7 +913,7 @@ struct Saver { return true; } }; -} // namespace +} // anonymous namespace static bool SaveValue(void* arg, const char* entry) { TEST_SYNC_POINT_CALLBACK("Memtable::SaveValue:Begin:entry", &entry); diff --git a/db/memtable.h b/db/memtable.h index 546797e50..6db2721e4 100644 --- a/db/memtable.h +++ b/db/memtable.h @@ -88,7 +88,7 @@ class MemTable { public: struct KeyComparator : public MemTableRep::KeyComparator { const InternalKeyComparator comparator; - explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { } + explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {} virtual int operator()(const char* prefix_len_key1, const char* prefix_len_key2) const override; virtual int operator()(const char* prefix_len_key, @@ -448,9 +448,7 @@ class MemTable { // persisted. // REQUIRES: external synchronization to prevent simultaneous // operations on the same MemTable. - void MarkFlushed() { - table_->MarkFlushed(); - } + void MarkFlushed() { table_->MarkFlushed(); } // return true if the current MemTableRep supports merge operator. 
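Aside: manual_compaction_test.cc above drives DB::CompactRange(). A minimal sketch (CompactKeyRange and the bounds are illustrative):

#include "rocksdb/db.h"

rocksdb::Status CompactKeyRange(rocksdb::DB* db) {
  rocksdb::CompactRangeOptions cro;
  rocksdb::Slice begin("key1");
  rocksdb::Slice end("key2");
  // Passing nullptr for either bound extends the compaction to that end
  // of the keyspace.
  return db->CompactRange(cro, &begin, &end);
}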
bool IsMergeOperatorSupported() const { @@ -562,8 +560,8 @@ class MemTable { std::atomic write_buffer_size_; // These are used to manage memtable flushes to storage - bool flush_in_progress_; // started the flush - bool flush_completed_; // finished the flush + bool flush_in_progress_; // started the flush + bool flush_completed_; // finished the flush uint64_t file_number_; // filled up after flush is complete // The updates to be applied to the transaction log when this diff --git a/db/memtable_list_test.cc b/db/memtable_list_test.cc index 458dc18a3..258f02f96 100644 --- a/db/memtable_list_test.cc +++ b/db/memtable_list_test.cc @@ -4,9 +4,11 @@ // (found in the LICENSE.Apache file in the root directory). #include "db/memtable_list.h" + #include #include #include + #include "db/merge_context.h" #include "db/version_set.h" #include "db/write_controller.h" diff --git a/db/merge_context.h b/db/merge_context.h index 925bfc0e0..8a7b07290 100644 --- a/db/merge_context.h +++ b/db/merge_context.h @@ -8,6 +8,7 @@ #include #include #include + #include "rocksdb/slice.h" namespace ROCKSDB_NAMESPACE { diff --git a/db/merge_helper.h b/db/merge_helper.h index 1def78e6d..956e3ff78 100644 --- a/db/merge_helper.h +++ b/db/merge_helper.h @@ -167,7 +167,7 @@ class MergeHelper { const CompactionFilter* compaction_filter_; const std::atomic* shutting_down_; Logger* logger_; - bool assert_valid_internal_key_; // enforce no internal key corruption? + bool assert_valid_internal_key_; // enforce no internal key corruption? bool allow_single_operand_; SequenceNumber latest_snapshot_; const SnapshotChecker* const snapshot_checker_; diff --git a/db/merge_operator.cc b/db/merge_operator.cc index 75dea432c..d32585640 100644 --- a/db/merge_operator.cc +++ b/db/merge_operator.cc @@ -74,12 +74,11 @@ bool AssociativeMergeOperator::FullMergeV2( // Call the user defined simple merge on the operands; // NOTE: It is assumed that the client's merge-operator will handle any errors. 
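Aside: the flush_in_progress_/flush_completed_ flags above are driven by memtable sizing knobs that are public options. A hedged sketch of the usual trio (values are illustrative, not recommendations):

#include "rocksdb/options.h"

rocksdb::Options MemtableSizing() {
  rocksdb::Options options;
  options.write_buffer_size = 64 << 20;  // bytes per memtable before flush
  options.max_write_buffer_number = 3;   // memtables held before write stalls
  options.min_write_buffer_number_to_merge = 1;  // flush memtables one by one
  return options;
}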
-bool AssociativeMergeOperator::PartialMerge( - const Slice& key, - const Slice& left_operand, - const Slice& right_operand, - std::string* new_value, - Logger* logger) const { +bool AssociativeMergeOperator::PartialMerge(const Slice& key, + const Slice& left_operand, + const Slice& right_operand, + std::string* new_value, + Logger* logger) const { return Merge(key, &left_operand, right_operand, new_value, logger); } diff --git a/db/obsolete_files_test.cc b/db/obsolete_files_test.cc index 8fc47e3f5..8e9f28f65 100644 --- a/db/obsolete_files_test.cc +++ b/db/obsolete_files_test.cc @@ -10,10 +10,12 @@ #ifndef ROCKSDB_LITE #include + #include #include #include #include + #include "db/db_impl/db_impl.h" #include "db/db_test_util.h" #include "db/version_set.h" @@ -28,7 +30,6 @@ #include "test_util/testutil.h" #include "util/string_util.h" - namespace ROCKSDB_NAMESPACE { class ObsoleteFilesTest : public DBTestBase { @@ -40,7 +41,7 @@ class ObsoleteFilesTest : public DBTestBase { void AddKeys(int numkeys, int startkey) { WriteOptions options; options.sync = false; - for (int i = startkey; i < (numkeys + startkey) ; i++) { + for (int i = startkey; i < (numkeys + startkey); i++) { std::string temp = std::to_string(i); Slice key(temp); Slice value(temp); @@ -117,7 +118,7 @@ TEST_F(ObsoleteFilesTest, RaceForObsoleteFileDeletion) { "ObsoleteFilesTest::RaceForObsoleteFileDeletion:1"}, {"DBImpl::BackgroundCallCompaction:PurgedObsoleteFiles", "ObsoleteFilesTest::RaceForObsoleteFileDeletion:2"}, - }); + }); SyncPoint::GetInstance()->SetCallBack( "DBImpl::DeleteObsoleteFileImpl:AfterDeletion", [&](void* arg) { Status* p_status = reinterpret_cast(arg); diff --git a/db/options_file_test.cc b/db/options_file_test.cc index 283d19344..eb02e6ca4 100644 --- a/db/options_file_test.cc +++ b/db/options_file_test.cc @@ -59,7 +59,7 @@ void VerifyOptionsFileName( } } } -} // namespace +} // anonymous namespace TEST_F(OptionsFileTest, NumberOfOptionsFiles) { const int kReopenCount = 20; diff --git a/db/perf_context_test.cc b/db/perf_context_test.cc index 0416dee08..454d12dc5 100644 --- a/db/perf_context_test.cc +++ b/db/perf_context_test.cc @@ -39,31 +39,31 @@ const std::string kDbName = namespace ROCKSDB_NAMESPACE { std::shared_ptr OpenDb(bool read_only = false) { - DB* db; - Options options; - options.create_if_missing = true; - options.max_open_files = -1; - options.write_buffer_size = FLAGS_write_buffer_size; - options.max_write_buffer_number = FLAGS_max_write_buffer_number; - options.min_write_buffer_number_to_merge = + DB* db; + Options options; + options.create_if_missing = true; + options.max_open_files = -1; + options.write_buffer_size = FLAGS_write_buffer_size; + options.max_write_buffer_number = FLAGS_max_write_buffer_number; + options.min_write_buffer_number_to_merge = FLAGS_min_write_buffer_number_to_merge; - if (FLAGS_use_set_based_memetable) { + if (FLAGS_use_set_based_memetable) { #ifndef ROCKSDB_LITE - options.prefix_extractor.reset( - ROCKSDB_NAMESPACE::NewFixedPrefixTransform(0)); - options.memtable_factory.reset(NewHashSkipListRepFactory()); + options.prefix_extractor.reset( + ROCKSDB_NAMESPACE::NewFixedPrefixTransform(0)); + options.memtable_factory.reset(NewHashSkipListRepFactory()); #endif // ROCKSDB_LITE - } + } - Status s; - if (!read_only) { - s = DB::Open(options, kDbName, &db); - } else { - s = DB::OpenForReadOnly(options, kDbName, &db); - } - EXPECT_OK(s); - return std::shared_ptr(db); + Status s; + if (!read_only) { + s = DB::Open(options, kDbName, &db); + } else { + s = 
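Aside: the PartialMerge() reformatted above simply forwards to Merge(), which is the whole point of AssociativeMergeOperator: for associative operations, full and partial merge collapse into one callback. A hedged sketch (AppendOperator is illustrative, not part of RocksDB):

#include <string>

#include "rocksdb/merge_operator.h"
#include "rocksdb/slice.h"

class AppendOperator : public rocksdb::AssociativeMergeOperator {
 public:
  bool Merge(const rocksdb::Slice& /*key*/,
             const rocksdb::Slice* existing_value, const rocksdb::Slice& value,
             std::string* new_value,
             rocksdb::Logger* /*logger*/) const override {
    new_value->clear();
    if (existing_value != nullptr) {
      new_value->assign(existing_value->data(), existing_value->size());
      new_value->push_back(',');
    }
    new_value->append(value.data(), value.size());
    return true;  // false would report the merge as failed
  }
  const char* Name() const override { return "AppendOperator"; }
};

// usage: options.merge_operator = std::make_shared<AppendOperator>();
//        db->Merge(rocksdb::WriteOptions(), "k", "v");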
DB::OpenForReadOnly(options, kDbName, &db); + } + EXPECT_OK(s); + return std::shared_ptr(db); } class PerfContextTest : public testing::Test {}; @@ -81,7 +81,7 @@ TEST_F(PerfContextTest, SeekIntoDeletion) { ASSERT_OK(db->Put(write_options, key, value)); } - for (int i = 0; i < FLAGS_total_keys -1 ; ++i) { + for (int i = 0; i < FLAGS_total_keys - 1; ++i) { std::string key = "k" + std::to_string(i); ASSERT_OK(db->Delete(write_options, key)); } @@ -103,8 +103,9 @@ TEST_F(PerfContextTest, SeekIntoDeletion) { } if (FLAGS_verbose) { - std::cout << "Get user key comparison: \n" << hist_get.ToString() - << "Get time: \n" << hist_get_time.ToString(); + std::cout << "Get user key comparison: \n" + << hist_get.ToString() << "Get time: \n" + << hist_get_time.ToString(); } { @@ -139,7 +140,8 @@ TEST_F(PerfContextTest, SeekIntoDeletion) { hist_seek.Add(get_perf_context()->user_key_comparison_count); if (FLAGS_verbose) { std::cout << "seek cmp: " << get_perf_context()->user_key_comparison_count - << " ikey skipped " << get_perf_context()->internal_key_skipped_count + << " ikey skipped " + << get_perf_context()->internal_key_skipped_count << " idelete skipped " << get_perf_context()->internal_delete_skipped_count << " elapsed: " << elapsed_nanos << "ns\n"; @@ -322,7 +324,8 @@ void ProfileQueries(bool enabled_time = false) { hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time); hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time); hist_mget_files.Add(get_perf_context()->get_from_output_files_time); - hist_mget_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count); + hist_mget_num_memtable_checked.Add( + get_perf_context()->get_from_memtable_count); hist_mget_post_process.Add(get_perf_context()->get_post_process_time); hist_mget.Add(get_perf_context()->user_key_comparison_count); } @@ -337,12 +340,14 @@ void ProfileQueries(bool enabled_time = false) { << hist_write_wal_time.ToString() << "\n" << " Writing Mem Table time: \n" << hist_write_memtable_time.ToString() << "\n" - << " Write Delay: \n" << hist_write_delay_time.ToString() << "\n" + << " Write Delay: \n" + << hist_write_delay_time.ToString() << "\n" << " Waiting for Batch time: \n" << hist_write_thread_wait_nanos.ToString() << "\n" << " Scheduling Flushes and Compactions Time: \n" << hist_write_scheduling_time.ToString() << "\n" - << " Total DB mutex nanos: \n" << total_db_mutex_nanos << "\n"; + << " Total DB mutex nanos: \n" + << total_db_mutex_nanos << "\n"; std::cout << "Get(): Time to get snapshot: \n" << hist_get_snapshot.ToString() @@ -352,8 +357,8 @@ void ProfileQueries(bool enabled_time = false) { << hist_get_files.ToString() << "\n" << " Number of memtables checked: \n" << hist_num_memtable_checked.ToString() << "\n" - << " Time to post process: \n" << hist_get_post_process.ToString() - << "\n"; + << " Time to post process: \n" + << hist_get_post_process.ToString() << "\n"; std::cout << "MultiGet(): Time to get snapshot: \n" << hist_mget_snapshot.ToString() @@ -440,7 +445,8 @@ void ProfileQueries(bool enabled_time = false) { hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time); hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time); hist_mget_files.Add(get_perf_context()->get_from_output_files_time); - hist_mget_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count); + hist_mget_num_memtable_checked.Add( + get_perf_context()->get_from_memtable_count); hist_mget_post_process.Add(get_perf_context()->get_post_process_time); 
hist_mget.Add(get_perf_context()->user_key_comparison_count); } @@ -459,8 +465,8 @@ void ProfileQueries(bool enabled_time = false) { << hist_get_files.ToString() << "\n" << " Number of memtables checked: \n" << hist_num_memtable_checked.ToString() << "\n" - << " Time to post process: \n" << hist_get_post_process.ToString() - << "\n"; + << " Time to post process: \n" + << hist_get_post_process.ToString() << "\n"; std::cout << "ReadOnly MultiGet(): Time to get snapshot: \n" << hist_mget_snapshot.ToString() @@ -556,7 +562,8 @@ TEST_F(PerfContextTest, SeekKeyComparison) { } if (FLAGS_verbose) { - std::cout << "Put time:\n" << hist_put_time.ToString() << "WAL time:\n" + std::cout << "Put time:\n" + << hist_put_time.ToString() << "WAL time:\n" << hist_wal_time.ToString() << "time diff:\n" << hist_time_diff.ToString(); } @@ -584,7 +591,8 @@ TEST_F(PerfContextTest, SeekKeyComparison) { } ASSERT_OK(iter->status()); if (FLAGS_verbose) { - std::cout << "Seek:\n" << hist_seek.ToString() << "Next:\n" + std::cout << "Seek:\n" + << hist_seek.ToString() << "Next:\n" << hist_next.ToString(); } } @@ -614,7 +622,7 @@ TEST_F(PerfContextTest, DBMutexLockCounter) { SystemClock::Default()->SleepForMicroseconds(100); mutex.Unlock(); child_thread.join(); - } + } } } @@ -806,14 +814,18 @@ TEST_F(PerfContextTest, PerfContextByLevelGetSet) { .bloom_filter_full_positive); ASSERT_EQ(1, (*(get_perf_context()->level_to_perf_context))[2] .bloom_filter_full_true_positive); - ASSERT_EQ(1, (*(get_perf_context()->level_to_perf_context))[0] - .block_cache_hit_count); - ASSERT_EQ(5, (*(get_perf_context()->level_to_perf_context))[2] - .block_cache_hit_count); - ASSERT_EQ(2, (*(get_perf_context()->level_to_perf_context))[3] - .block_cache_miss_count); - ASSERT_EQ(4, (*(get_perf_context()->level_to_perf_context))[1] - .block_cache_miss_count); + ASSERT_EQ( + 1, + (*(get_perf_context()->level_to_perf_context))[0].block_cache_hit_count); + ASSERT_EQ( + 5, + (*(get_perf_context()->level_to_perf_context))[2].block_cache_hit_count); + ASSERT_EQ( + 2, + (*(get_perf_context()->level_to_perf_context))[3].block_cache_miss_count); + ASSERT_EQ( + 4, + (*(get_perf_context()->level_to_perf_context))[1].block_cache_miss_count); std::string zero_excluded = get_perf_context()->ToString(true); ASSERT_NE(std::string::npos, zero_excluded.find("bloom_filter_useful = 1@level5, 2@level7")); diff --git a/db/plain_table_db_test.cc b/db/plain_table_db_test.cc index a38ed8742..755b639b0 100644 --- a/db/plain_table_db_test.cc +++ b/db/plain_table_db_test.cc @@ -38,7 +38,6 @@ #include "util/string_util.h" #include "utilities/merge_operators.h" - namespace ROCKSDB_NAMESPACE { class PlainTableKeyDecoderTest : public testing::Test {}; @@ -148,9 +147,7 @@ class PlainTableDBTest : public testing::Test, DBImpl* dbfull() { return static_cast_with_check(db_); } - void Reopen(Options* options = nullptr) { - ASSERT_OK(TryReopen(options)); - } + void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); } void Close() { delete db_; @@ -160,7 +157,7 @@ class PlainTableDBTest : public testing::Test, bool mmap_mode() const { return mmap_mode_; } void DestroyAndReopen(Options* options = nullptr) { - //Destroy using last options + // Destroy using last options Destroy(&last_options_); ASSERT_OK(TryReopen(options)); } @@ -200,9 +197,7 @@ class PlainTableDBTest : public testing::Test, return db_->Put(WriteOptions(), k, v); } - Status Delete(const std::string& k) { - return db_->Delete(WriteOptions(), k); - } + Status Delete(const std::string& k) { return 
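Aside: the perf_context_test.cc hunks above all follow one measurement pattern: raise the perf level, reset the thread-local context, run the operation, read a counter. A minimal sketch (CountComparisons is illustrative):

#include <cstdint>

#include "rocksdb/perf_context.h"
#include "rocksdb/perf_level.h"

uint64_t CountComparisons() {
  rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTimeExceptForMutex);
  rocksdb::get_perf_context()->Reset();
  // ... run the Get()/Seek() being measured here ...
  uint64_t cmps = rocksdb::get_perf_context()->user_key_comparison_count;
  rocksdb::SetPerfLevel(rocksdb::PerfLevel::kDisable);
  return cmps;
}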
db_->Delete(WriteOptions(), k); } std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) { ReadOptions options; @@ -217,7 +212,6 @@ class PlainTableDBTest : public testing::Test, return result; } - int NumTableFilesAtLevel(int level) { std::string property; EXPECT_TRUE(db_->GetProperty( @@ -448,99 +442,100 @@ TEST_P(PlainTableDBTest, Flush) { for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; huge_page_tlb_size += 2 * 1024 * 1024) { for (EncodingType encoding_type : {kPlain, kPrefix}) { - for (int bloom = -1; bloom <= 117; bloom += 117) { - const int bloom_bits = std::max(bloom, 0); - const bool full_scan_mode = bloom < 0; - for (int total_order = 0; total_order <= 1; total_order++) { - for (int store_index_in_file = 0; store_index_in_file <= 1; - ++store_index_in_file) { - Options options = CurrentOptions(); - options.create_if_missing = true; - // Set only one bucket to force bucket conflict. - // Test index interval for the same prefix to be 1, 2 and 4 - if (total_order) { - options.prefix_extractor.reset(); - - PlainTableOptions plain_table_options; - plain_table_options.user_key_len = 0; - plain_table_options.bloom_bits_per_key = bloom_bits; - plain_table_options.hash_table_ratio = 0; - plain_table_options.index_sparseness = 2; - plain_table_options.huge_page_tlb_size = huge_page_tlb_size; - plain_table_options.encoding_type = encoding_type; - plain_table_options.full_scan_mode = full_scan_mode; - plain_table_options.store_index_in_file = store_index_in_file; - - options.table_factory.reset( - NewPlainTableFactory(plain_table_options)); - } else { - PlainTableOptions plain_table_options; - plain_table_options.user_key_len = 0; - plain_table_options.bloom_bits_per_key = bloom_bits; - plain_table_options.hash_table_ratio = 0.75; - plain_table_options.index_sparseness = 16; - plain_table_options.huge_page_tlb_size = huge_page_tlb_size; - plain_table_options.encoding_type = encoding_type; - plain_table_options.full_scan_mode = full_scan_mode; - plain_table_options.store_index_in_file = store_index_in_file; - - options.table_factory.reset( - NewPlainTableFactory(plain_table_options)); - } - DestroyAndReopen(&options); - uint64_t int_num; - ASSERT_TRUE(dbfull()->GetIntProperty( - "rocksdb.estimate-table-readers-mem", &int_num)); - ASSERT_EQ(int_num, 0U); - - ASSERT_OK(Put("1000000000000foo", "v1")); - ASSERT_OK(Put("0000000000000bar", "v2")); - ASSERT_OK(Put("1000000000000foo", "v3")); - ASSERT_OK(dbfull()->TEST_FlushMemTable()); - - ASSERT_TRUE(dbfull()->GetIntProperty( - "rocksdb.estimate-table-readers-mem", &int_num)); - ASSERT_GT(int_num, 0U); - - TablePropertiesCollection ptc; - ASSERT_OK( - reinterpret_cast(dbfull())->GetPropertiesOfAllTables(&ptc)); - ASSERT_EQ(1U, ptc.size()); - auto row = ptc.begin(); - auto tp = row->second; - - if (full_scan_mode) { - // Does not support Get/Seek - std::unique_ptr iter(dbfull()->NewIterator(ReadOptions())); - iter->SeekToFirst(); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("0000000000000bar", iter->key().ToString()); - ASSERT_EQ("v2", iter->value().ToString()); - iter->Next(); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("1000000000000foo", iter->key().ToString()); - ASSERT_EQ("v3", iter->value().ToString()); - iter->Next(); - ASSERT_TRUE(!iter->Valid()); - ASSERT_TRUE(iter->status().ok()); - } else { - if (!store_index_in_file) { - ASSERT_EQ(total_order ? 
"4" : "12", - (tp->user_collected_properties) - .at("plain_table_hash_table_size")); - ASSERT_EQ("0", (tp->user_collected_properties) - .at("plain_table_sub_index_size")); + for (int bloom = -1; bloom <= 117; bloom += 117) { + const int bloom_bits = std::max(bloom, 0); + const bool full_scan_mode = bloom < 0; + for (int total_order = 0; total_order <= 1; total_order++) { + for (int store_index_in_file = 0; store_index_in_file <= 1; + ++store_index_in_file) { + Options options = CurrentOptions(); + options.create_if_missing = true; + // Set only one bucket to force bucket conflict. + // Test index interval for the same prefix to be 1, 2 and 4 + if (total_order) { + options.prefix_extractor.reset(); + + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 0; + plain_table_options.bloom_bits_per_key = bloom_bits; + plain_table_options.hash_table_ratio = 0; + plain_table_options.index_sparseness = 2; + plain_table_options.huge_page_tlb_size = huge_page_tlb_size; + plain_table_options.encoding_type = encoding_type; + plain_table_options.full_scan_mode = full_scan_mode; + plain_table_options.store_index_in_file = store_index_in_file; + + options.table_factory.reset( + NewPlainTableFactory(plain_table_options)); + } else { + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 0; + plain_table_options.bloom_bits_per_key = bloom_bits; + plain_table_options.hash_table_ratio = 0.75; + plain_table_options.index_sparseness = 16; + plain_table_options.huge_page_tlb_size = huge_page_tlb_size; + plain_table_options.encoding_type = encoding_type; + plain_table_options.full_scan_mode = full_scan_mode; + plain_table_options.store_index_in_file = store_index_in_file; + + options.table_factory.reset( + NewPlainTableFactory(plain_table_options)); + } + DestroyAndReopen(&options); + uint64_t int_num; + ASSERT_TRUE(dbfull()->GetIntProperty( + "rocksdb.estimate-table-readers-mem", &int_num)); + ASSERT_EQ(int_num, 0U); + + ASSERT_OK(Put("1000000000000foo", "v1")); + ASSERT_OK(Put("0000000000000bar", "v2")); + ASSERT_OK(Put("1000000000000foo", "v3")); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); + + ASSERT_TRUE(dbfull()->GetIntProperty( + "rocksdb.estimate-table-readers-mem", &int_num)); + ASSERT_GT(int_num, 0U); + + TablePropertiesCollection ptc; + ASSERT_OK(reinterpret_cast(dbfull())->GetPropertiesOfAllTables( + &ptc)); + ASSERT_EQ(1U, ptc.size()); + auto row = ptc.begin(); + auto tp = row->second; + + if (full_scan_mode) { + // Does not support Get/Seek + std::unique_ptr iter( + dbfull()->NewIterator(ReadOptions())); + iter->SeekToFirst(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("0000000000000bar", iter->key().ToString()); + ASSERT_EQ("v2", iter->value().ToString()); + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000000foo", iter->key().ToString()); + ASSERT_EQ("v3", iter->value().ToString()); + iter->Next(); + ASSERT_TRUE(!iter->Valid()); + ASSERT_TRUE(iter->status().ok()); } else { - ASSERT_EQ("0", (tp->user_collected_properties) - .at("plain_table_hash_table_size")); - ASSERT_EQ("0", (tp->user_collected_properties) - .at("plain_table_sub_index_size")); + if (!store_index_in_file) { + ASSERT_EQ(total_order ? 
"4" : "12", + (tp->user_collected_properties) + .at("plain_table_hash_table_size")); + ASSERT_EQ("0", (tp->user_collected_properties) + .at("plain_table_sub_index_size")); + } else { + ASSERT_EQ("0", (tp->user_collected_properties) + .at("plain_table_hash_table_size")); + ASSERT_EQ("0", (tp->user_collected_properties) + .at("plain_table_sub_index_size")); + } + ASSERT_EQ("v3", Get("1000000000000foo")); + ASSERT_EQ("v2", Get("0000000000000bar")); } - ASSERT_EQ("v3", Get("1000000000000foo")); - ASSERT_EQ("v2", Get("0000000000000bar")); } } - } } } } @@ -550,79 +545,79 @@ TEST_P(PlainTableDBTest, Flush2) { for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; huge_page_tlb_size += 2 * 1024 * 1024) { for (EncodingType encoding_type : {kPlain, kPrefix}) { - for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) { - for (int total_order = 0; total_order <= 1; total_order++) { - for (int store_index_in_file = 0; store_index_in_file <= 1; - ++store_index_in_file) { - if (encoding_type == kPrefix && total_order) { - continue; - } - if (!bloom_bits && store_index_in_file) { - continue; - } - if (total_order && store_index_in_file) { - continue; - } - bool expect_bloom_not_match = false; - Options options = CurrentOptions(); - options.create_if_missing = true; - // Set only one bucket to force bucket conflict. - // Test index interval for the same prefix to be 1, 2 and 4 - PlainTableOptions plain_table_options; - if (total_order) { - options.prefix_extractor = nullptr; - plain_table_options.hash_table_ratio = 0; - plain_table_options.index_sparseness = 2; - } else { - plain_table_options.hash_table_ratio = 0.75; - plain_table_options.index_sparseness = 16; - } - plain_table_options.user_key_len = kPlainTableVariableLength; - plain_table_options.bloom_bits_per_key = bloom_bits; - plain_table_options.huge_page_tlb_size = huge_page_tlb_size; - plain_table_options.encoding_type = encoding_type; - plain_table_options.store_index_in_file = store_index_in_file; - options.table_factory.reset(new TestPlainTableFactory( - &expect_bloom_not_match, plain_table_options, - 0 /* column_family_id */, kDefaultColumnFamilyName)); - - DestroyAndReopen(&options); - ASSERT_OK(Put("0000000000000bar", "b")); - ASSERT_OK(Put("1000000000000foo", "v1")); - ASSERT_OK(dbfull()->TEST_FlushMemTable()); - - ASSERT_OK(Put("1000000000000foo", "v2")); - ASSERT_OK(dbfull()->TEST_FlushMemTable()); - ASSERT_EQ("v2", Get("1000000000000foo")); - - ASSERT_OK(Put("0000000000000eee", "v3")); - ASSERT_OK(dbfull()->TEST_FlushMemTable()); - ASSERT_EQ("v3", Get("0000000000000eee")); - - ASSERT_OK(Delete("0000000000000bar")); - ASSERT_OK(dbfull()->TEST_FlushMemTable()); - ASSERT_EQ("NOT_FOUND", Get("0000000000000bar")); - - ASSERT_OK(Put("0000000000000eee", "v5")); - ASSERT_OK(Put("9000000000000eee", "v5")); - ASSERT_OK(dbfull()->TEST_FlushMemTable()); - ASSERT_EQ("v5", Get("0000000000000eee")); - - // Test Bloom Filter - if (bloom_bits > 0) { - // Neither key nor value should exist. - expect_bloom_not_match = true; - ASSERT_EQ("NOT_FOUND", Get("5_not00000000bar")); - // Key doesn't exist any more but prefix exists. 
- if (total_order) { - ASSERT_EQ("NOT_FOUND", Get("1000000000000not")); - ASSERT_EQ("NOT_FOUND", Get("0000000000000not")); + for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) { + for (int total_order = 0; total_order <= 1; total_order++) { + for (int store_index_in_file = 0; store_index_in_file <= 1; + ++store_index_in_file) { + if (encoding_type == kPrefix && total_order) { + continue; + } + if (!bloom_bits && store_index_in_file) { + continue; + } + if (total_order && store_index_in_file) { + continue; + } + bool expect_bloom_not_match = false; + Options options = CurrentOptions(); + options.create_if_missing = true; + // Set only one bucket to force bucket conflict. + // Test index interval for the same prefix to be 1, 2 and 4 + PlainTableOptions plain_table_options; + if (total_order) { + options.prefix_extractor = nullptr; + plain_table_options.hash_table_ratio = 0; + plain_table_options.index_sparseness = 2; + } else { + plain_table_options.hash_table_ratio = 0.75; + plain_table_options.index_sparseness = 16; + } + plain_table_options.user_key_len = kPlainTableVariableLength; + plain_table_options.bloom_bits_per_key = bloom_bits; + plain_table_options.huge_page_tlb_size = huge_page_tlb_size; + plain_table_options.encoding_type = encoding_type; + plain_table_options.store_index_in_file = store_index_in_file; + options.table_factory.reset(new TestPlainTableFactory( + &expect_bloom_not_match, plain_table_options, + 0 /* column_family_id */, kDefaultColumnFamilyName)); + + DestroyAndReopen(&options); + ASSERT_OK(Put("0000000000000bar", "b")); + ASSERT_OK(Put("1000000000000foo", "v1")); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); + + ASSERT_OK(Put("1000000000000foo", "v2")); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); + ASSERT_EQ("v2", Get("1000000000000foo")); + + ASSERT_OK(Put("0000000000000eee", "v3")); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); + ASSERT_EQ("v3", Get("0000000000000eee")); + + ASSERT_OK(Delete("0000000000000bar")); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); + ASSERT_EQ("NOT_FOUND", Get("0000000000000bar")); + + ASSERT_OK(Put("0000000000000eee", "v5")); + ASSERT_OK(Put("9000000000000eee", "v5")); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); + ASSERT_EQ("v5", Get("0000000000000eee")); + + // Test Bloom Filter + if (bloom_bits > 0) { + // Neither key nor value should exist. + expect_bloom_not_match = true; + ASSERT_EQ("NOT_FOUND", Get("5_not00000000bar")); + // Key doesn't exist any more but prefix exists. + if (total_order) { + ASSERT_EQ("NOT_FOUND", Get("1000000000000not")); + ASSERT_EQ("NOT_FOUND", Get("0000000000000not")); + } + expect_bloom_not_match = false; + } } - expect_bloom_not_match = false; } } - } - } } } } @@ -675,129 +670,129 @@ TEST_P(PlainTableDBTest, Iterator) { for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; huge_page_tlb_size += 2 * 1024 * 1024) { for (EncodingType encoding_type : {kPlain, kPrefix}) { - for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) { - for (int total_order = 0; total_order <= 1; total_order++) { - if (encoding_type == kPrefix && total_order == 1) { - continue; - } - bool expect_bloom_not_match = false; - Options options = CurrentOptions(); - options.create_if_missing = true; - // Set only one bucket to force bucket conflict. 
- // Test index interval for the same prefix to be 1, 2 and 4 - if (total_order) { - options.prefix_extractor = nullptr; - - PlainTableOptions plain_table_options; - plain_table_options.user_key_len = 16; - plain_table_options.bloom_bits_per_key = bloom_bits; - plain_table_options.hash_table_ratio = 0; - plain_table_options.index_sparseness = 2; - plain_table_options.huge_page_tlb_size = huge_page_tlb_size; - plain_table_options.encoding_type = encoding_type; - - options.table_factory.reset(new TestPlainTableFactory( - &expect_bloom_not_match, plain_table_options, - 0 /* column_family_id */, kDefaultColumnFamilyName)); - } else { - PlainTableOptions plain_table_options; - plain_table_options.user_key_len = 16; - plain_table_options.bloom_bits_per_key = bloom_bits; - plain_table_options.hash_table_ratio = 0.75; - plain_table_options.index_sparseness = 16; - plain_table_options.huge_page_tlb_size = huge_page_tlb_size; - plain_table_options.encoding_type = encoding_type; - - options.table_factory.reset(new TestPlainTableFactory( - &expect_bloom_not_match, plain_table_options, - 0 /* column_family_id */, kDefaultColumnFamilyName)); - } - DestroyAndReopen(&options); - - ASSERT_OK(Put("1000000000foo002", "v_2")); - ASSERT_OK(Put("0000000000000bar", "random")); - ASSERT_OK(Put("1000000000foo001", "v1")); - ASSERT_OK(Put("3000000000000bar", "bar_v")); - ASSERT_OK(Put("1000000000foo003", "v__3")); - ASSERT_OK(Put("1000000000foo004", "v__4")); - ASSERT_OK(Put("1000000000foo005", "v__5")); - ASSERT_OK(Put("1000000000foo007", "v__7")); - ASSERT_OK(Put("1000000000foo008", "v__8")); - ASSERT_OK(dbfull()->TEST_FlushMemTable()); - ASSERT_EQ("v1", Get("1000000000foo001")); - ASSERT_EQ("v__3", Get("1000000000foo003")); - Iterator* iter = dbfull()->NewIterator(ReadOptions()); - iter->Seek("1000000000foo000"); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("1000000000foo001", iter->key().ToString()); - ASSERT_EQ("v1", iter->value().ToString()); - - iter->Next(); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("1000000000foo002", iter->key().ToString()); - ASSERT_EQ("v_2", iter->value().ToString()); - - iter->Next(); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("1000000000foo003", iter->key().ToString()); - ASSERT_EQ("v__3", iter->value().ToString()); - - iter->Next(); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("1000000000foo004", iter->key().ToString()); - ASSERT_EQ("v__4", iter->value().ToString()); - - iter->Seek("3000000000000bar"); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("3000000000000bar", iter->key().ToString()); - ASSERT_EQ("bar_v", iter->value().ToString()); - - iter->Seek("1000000000foo000"); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("1000000000foo001", iter->key().ToString()); - ASSERT_EQ("v1", iter->value().ToString()); - - iter->Seek("1000000000foo005"); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("1000000000foo005", iter->key().ToString()); - ASSERT_EQ("v__5", iter->value().ToString()); - - iter->Seek("1000000000foo006"); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("1000000000foo007", iter->key().ToString()); - ASSERT_EQ("v__7", iter->value().ToString()); - - iter->Seek("1000000000foo008"); - ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("1000000000foo008", iter->key().ToString()); - ASSERT_EQ("v__8", iter->value().ToString()); - - if (total_order == 0) { - iter->Seek("1000000000foo009"); + for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) { + for (int total_order = 0; total_order <= 1; total_order++) { + if (encoding_type == kPrefix && total_order == 1) { + continue; + } + bool expect_bloom_not_match = 
false; + Options options = CurrentOptions(); + options.create_if_missing = true; + // Set only one bucket to force bucket conflict. + // Test index interval for the same prefix to be 1, 2 and 4 + if (total_order) { + options.prefix_extractor = nullptr; + + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 16; + plain_table_options.bloom_bits_per_key = bloom_bits; + plain_table_options.hash_table_ratio = 0; + plain_table_options.index_sparseness = 2; + plain_table_options.huge_page_tlb_size = huge_page_tlb_size; + plain_table_options.encoding_type = encoding_type; + + options.table_factory.reset(new TestPlainTableFactory( + &expect_bloom_not_match, plain_table_options, + 0 /* column_family_id */, kDefaultColumnFamilyName)); + } else { + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 16; + plain_table_options.bloom_bits_per_key = bloom_bits; + plain_table_options.hash_table_ratio = 0.75; + plain_table_options.index_sparseness = 16; + plain_table_options.huge_page_tlb_size = huge_page_tlb_size; + plain_table_options.encoding_type = encoding_type; + + options.table_factory.reset(new TestPlainTableFactory( + &expect_bloom_not_match, plain_table_options, + 0 /* column_family_id */, kDefaultColumnFamilyName)); + } + DestroyAndReopen(&options); + + ASSERT_OK(Put("1000000000foo002", "v_2")); + ASSERT_OK(Put("0000000000000bar", "random")); + ASSERT_OK(Put("1000000000foo001", "v1")); + ASSERT_OK(Put("3000000000000bar", "bar_v")); + ASSERT_OK(Put("1000000000foo003", "v__3")); + ASSERT_OK(Put("1000000000foo004", "v__4")); + ASSERT_OK(Put("1000000000foo005", "v__5")); + ASSERT_OK(Put("1000000000foo007", "v__7")); + ASSERT_OK(Put("1000000000foo008", "v__8")); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); + ASSERT_EQ("v1", Get("1000000000foo001")); + ASSERT_EQ("v__3", Get("1000000000foo003")); + Iterator* iter = dbfull()->NewIterator(ReadOptions()); + iter->Seek("1000000000foo000"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo001", iter->key().ToString()); + ASSERT_EQ("v1", iter->value().ToString()); + + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo002", iter->key().ToString()); + ASSERT_EQ("v_2", iter->value().ToString()); + + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo003", iter->key().ToString()); + ASSERT_EQ("v__3", iter->value().ToString()); + + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo004", iter->key().ToString()); + ASSERT_EQ("v__4", iter->value().ToString()); + + iter->Seek("3000000000000bar"); ASSERT_TRUE(iter->Valid()); ASSERT_EQ("3000000000000bar", iter->key().ToString()); - } + ASSERT_EQ("bar_v", iter->value().ToString()); - // Test Bloom Filter - if (bloom_bits > 0) { - if (!total_order) { - // Neither key nor value should exist. 
- expect_bloom_not_match = true; - iter->Seek("2not000000000bar"); - ASSERT_TRUE(!iter->Valid()); - ASSERT_EQ("NOT_FOUND", Get("2not000000000bar")); - expect_bloom_not_match = false; - } else { - expect_bloom_not_match = true; - ASSERT_EQ("NOT_FOUND", Get("2not000000000bar")); - expect_bloom_not_match = false; + iter->Seek("1000000000foo000"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo001", iter->key().ToString()); + ASSERT_EQ("v1", iter->value().ToString()); + + iter->Seek("1000000000foo005"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo005", iter->key().ToString()); + ASSERT_EQ("v__5", iter->value().ToString()); + + iter->Seek("1000000000foo006"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo007", iter->key().ToString()); + ASSERT_EQ("v__7", iter->value().ToString()); + + iter->Seek("1000000000foo008"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo008", iter->key().ToString()); + ASSERT_EQ("v__8", iter->value().ToString()); + + if (total_order == 0) { + iter->Seek("1000000000foo009"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("3000000000000bar", iter->key().ToString()); + } + + // Test Bloom Filter + if (bloom_bits > 0) { + if (!total_order) { + // Neither key nor value should exist. + expect_bloom_not_match = true; + iter->Seek("2not000000000bar"); + ASSERT_TRUE(!iter->Valid()); + ASSERT_EQ("NOT_FOUND", Get("2not000000000bar")); + expect_bloom_not_match = false; + } else { + expect_bloom_not_match = true; + ASSERT_EQ("NOT_FOUND", Get("2not000000000bar")); + expect_bloom_not_match = false; + } } + ASSERT_OK(iter->status()); + delete iter; } - ASSERT_OK(iter->status()); - delete iter; } } - } } } @@ -863,7 +858,7 @@ namespace { std::string MakeLongKey(size_t length, char c) { return std::string(length, c); } -} // namespace +} // anonymous namespace TEST_P(PlainTableDBTest, IteratorLargeKeys) { Options options = CurrentOptions(); @@ -878,15 +873,10 @@ TEST_P(PlainTableDBTest, IteratorLargeKeys) { options.prefix_extractor.reset(); DestroyAndReopen(&options); - std::string key_list[] = { - MakeLongKey(30, '0'), - MakeLongKey(16, '1'), - MakeLongKey(32, '2'), - MakeLongKey(60, '3'), - MakeLongKey(90, '4'), - MakeLongKey(50, '5'), - MakeLongKey(26, '6') - }; + std::string key_list[] = {MakeLongKey(30, '0'), MakeLongKey(16, '1'), + MakeLongKey(32, '2'), MakeLongKey(60, '3'), + MakeLongKey(90, '4'), MakeLongKey(50, '5'), + MakeLongKey(26, '6')}; for (size_t i = 0; i < 7; i++) { ASSERT_OK(Put(key_list[i], std::to_string(i))); @@ -913,7 +903,7 @@ namespace { std::string MakeLongKeyWithPrefix(size_t length, char c) { return "00000000" + std::string(length - 8, c); } -} // namespace +} // anonymous namespace TEST_P(PlainTableDBTest, IteratorLargeKeysWithPrefix) { Options options = CurrentOptions(); @@ -1275,7 +1265,7 @@ TEST_P(PlainTableDBTest, CompactionTrigger) { Random rnd(301); for (int num = 0; num < options.level0_file_num_compaction_trigger - 1; - num++) { + num++) { std::vector<std::string> values; // Write 120KB (10 values, each 12K) for (int i = 0; i < 10; i++) { @@ -1287,7 +1277,7 @@ TEST_P(PlainTableDBTest, CompactionTrigger) { ASSERT_EQ(NumTableFilesAtLevel(0), num + 1); } - //generate one more file in level-0, and should trigger level-0 compaction + // generate one more file in level-0, and should trigger level-0 compaction std::vector<std::string> values; for (int i = 0; i < 12; i++) { values.push_back(rnd.RandomString(10000)); @@ -1315,8 +1305,7 @@ TEST_P(PlainTableDBTest, AdaptiveTable) { options.create_if_missing = false; std::shared_ptr<TableFactory> block_based_factory(
NewBlockBasedTableFactory()); - std::shared_ptr<TableFactory> plain_table_factory( - NewPlainTableFactory()); + std::shared_ptr<TableFactory> plain_table_factory(NewPlainTableFactory()); std::shared_ptr<TableFactory> dummy_factory; options.table_factory.reset(NewAdaptiveTableFactory( block_based_factory, block_based_factory, plain_table_factory)); diff --git a/db/prefix_test.cc b/db/prefix_test.cc index 74f0ce6be..8592b8f31 100644 --- a/db/prefix_test.cc +++ b/db/prefix_test.cc @@ -69,7 +69,7 @@ struct TestKey { }; // return a slice backed by test_key -inline Slice TestKeyToSlice(std::string &s, const TestKey& test_key) { +inline Slice TestKeyToSlice(std::string& s, const TestKey& test_key) { s.clear(); PutFixed64(&s, test_key.prefix); PutFixed64(&s, test_key.sorted); @@ -77,20 +77,18 @@ inline Slice TestKeyToSlice(std::string &s, const TestKey& test_key) { } inline const TestKey SliceToTestKey(const Slice& slice) { - return TestKey(DecodeFixed64(slice.data()), - DecodeFixed64(slice.data() + 8)); + return TestKey(DecodeFixed64(slice.data()), DecodeFixed64(slice.data() + 8)); } class TestKeyComparator : public Comparator { public: - // Compare needs to be aware of the possibility of a and/or b is // prefix only int Compare(const Slice& a, const Slice& b) const override { const TestKey kkey_a = SliceToTestKey(a); const TestKey kkey_b = SliceToTestKey(b); - const TestKey *key_a = &kkey_a; - const TestKey *key_b = &kkey_b; + const TestKey* key_a = &kkey_a; + const TestKey* key_b = &kkey_b; if (key_a->prefix != key_b->prefix) { if (key_a->prefix < key_b->prefix) return -1; if (key_a->prefix > key_b->prefix) return 1; @@ -215,7 +213,7 @@ class SamePrefixTransform : public SliceTransform { bool FullLengthEnabled(size_t* /*len*/) const override { return false; } }; -} // namespace +} // anonymous namespace class PrefixTest : public testing::Test { public: @@ -226,7 +224,7 @@ class PrefixTest : public testing::Test { options.write_buffer_size = FLAGS_write_buffer_size; options.max_write_buffer_number = FLAGS_max_write_buffer_number; options.min_write_buffer_number_to_merge = - FLAGS_min_write_buffer_number_to_merge; + FLAGS_min_write_buffer_number_to_merge; options.memtable_prefix_bloom_size_ratio = FLAGS_memtable_prefix_bloom_size_ratio; @@ -239,21 +237,19 @@ class PrefixTest : public testing::Test { options.table_factory.reset(NewBlockBasedTableFactory(bbto)); options.allow_concurrent_memtable_write = false; - Status s = DB::Open(options, kDbName, &db); + Status s = DB::Open(options, kDbName, &db); EXPECT_OK(s); return std::shared_ptr<DB>(db); } - void FirstOption() { - option_config_ = kBegin; - } + void FirstOption() { option_config_ = kBegin; } bool NextOptions(int bucket_count) { // skip some options option_config_++; if (option_config_ < kEnd) { options.prefix_extractor.reset(NewFixedPrefixTransform(8)); - switch(option_config_) { + switch (option_config_) { case kHashSkipList: options.memtable_factory.reset( NewHashSkipListRepFactory(bucket_count, FLAGS_skiplist_height)); @@ -350,8 +346,7 @@ TEST_F(PrefixTest, TestResult) { FirstOption(); while (NextOptions(num_buckets)) { std::cout << "*** Mem table: " << options.memtable_factory->Name() - << " number of buckets: " << num_buckets - << std::endl; + << " number of buckets: " << num_buckets << std::endl; ASSERT_OK(DestroyDB(kDbName, Options())); auto db = OpenDb(); WriteOptions write_options; @@ -581,7 +576,7 @@ TEST_F(PrefixTest, PrefixValid) { TEST_F(PrefixTest, DynamicPrefixIterator) { while (NextOptions(FLAGS_bucket_count)) { std::cout << "*** Mem table: " << 
options.memtable_factory->Name() - << std::endl; + << std::endl; ASSERT_OK(DestroyDB(kDbName, Options())); auto db = OpenDb(); WriteOptions write_options; @@ -600,7 +595,7 @@ TEST_F(PrefixTest, DynamicPrefixIterator) { HistogramImpl hist_put_comparison; // insert x random prefix, each with y continuous element. for (auto prefix : prefixes) { - for (uint64_t sorted = 0; sorted < FLAGS_items_per_prefix; sorted++) { + for (uint64_t sorted = 0; sorted < FLAGS_items_per_prefix; sorted++) { TestKey test_key(prefix, sorted); std::string s; @@ -615,8 +610,9 @@ TEST_F(PrefixTest, DynamicPrefixIterator) { } } - std::cout << "Put key comparison: \n" << hist_put_comparison.ToString() - << "Put time: \n" << hist_put_time.ToString(); + std::cout << "Put key comparison: \n" + << hist_put_comparison.ToString() << "Put time: \n" + << hist_put_time.ToString(); // test seek existing keys HistogramImpl hist_seek_time; @@ -635,8 +631,7 @@ TEST_F(PrefixTest, DynamicPrefixIterator) { auto key_prefix = options.prefix_extractor->Transform(key); uint64_t total_keys = 0; for (iter->Seek(key); - iter->Valid() && iter->key().starts_with(key_prefix); - iter->Next()) { + iter->Valid() && iter->key().starts_with(key_prefix); iter->Next()) { if (FLAGS_trigger_deadlock) { std::cout << "Behold the deadlock!\n"; db->Delete(write_options, iter->key()); @@ -645,12 +640,12 @@ TEST_F(PrefixTest, DynamicPrefixIterator) { } hist_seek_time.Add(timer.ElapsedNanos()); hist_seek_comparison.Add(get_perf_context()->user_key_comparison_count); - ASSERT_EQ(total_keys, FLAGS_items_per_prefix - FLAGS_items_per_prefix/2); + ASSERT_EQ(total_keys, + FLAGS_items_per_prefix - FLAGS_items_per_prefix / 2); } std::cout << "Seek key comparison: \n" - << hist_seek_comparison.ToString() - << "Seek time: \n" + << hist_seek_comparison.ToString() << "Seek time: \n" << hist_seek_time.ToString(); // test non-existing keys @@ -658,8 +653,7 @@ TEST_F(PrefixTest, DynamicPrefixIterator) { HistogramImpl hist_no_seek_comparison; for (auto prefix = FLAGS_total_prefixes; - prefix < FLAGS_total_prefixes + 10000; - prefix++) { + prefix < FLAGS_total_prefixes + 10000; prefix++) { TestKey test_key(prefix, 0); std::string s; Slice key = TestKeyToSlice(s, test_key); @@ -668,7 +662,8 @@ TEST_F(PrefixTest, DynamicPrefixIterator) { StopWatchNano timer(SystemClock::Default().get(), true); iter->Seek(key); hist_no_seek_time.Add(timer.ElapsedNanos()); - hist_no_seek_comparison.Add(get_perf_context()->user_key_comparison_count); + hist_no_seek_comparison.Add( + get_perf_context()->user_key_comparison_count); ASSERT_TRUE(!iter->Valid()); ASSERT_OK(iter->status()); } diff --git a/db/range_del_aggregator.cc b/db/range_del_aggregator.cc index b45d5b4d4..c03efa11f 100644 --- a/db/range_del_aggregator.cc +++ b/db/range_del_aggregator.cc @@ -502,7 +502,7 @@ class TruncatedRangeDelMergingIter : public InternalIterator { size_t ts_sz_; }; -} // namespace +} // anonymous namespace std::unique_ptr CompactionRangeDelAggregator::NewIterator(const Slice* lower_bound, diff --git a/db/range_del_aggregator_test.cc b/db/range_del_aggregator_test.cc index 3541fafd1..7fe35276a 100644 --- a/db/range_del_aggregator_test.cc +++ b/db/range_del_aggregator_test.cc @@ -192,7 +192,7 @@ void VerifyFragmentedRangeDels( EXPECT_FALSE(iter->Valid()); } -} // namespace +} // anonymous namespace TEST_F(RangeDelAggregatorTest, EmptyTruncatedIter) { auto range_del_iter = MakeRangeDelIter({}); diff --git a/db/repair.cc b/db/repair.cc index 34da5ba05..1829a79f2 100644 --- a/db/repair.cc +++ b/db/repair.cc @@ -281,7 
+281,7 @@ class Repairer { std::vector<std::string> to_search_paths; for (size_t path_id = 0; path_id < db_options_.db_paths.size(); path_id++) { - to_search_paths.push_back(db_options_.db_paths[path_id].path); + to_search_paths.push_back(db_options_.db_paths[path_id].path); } // search wal_dir if user uses a customize wal_dir @@ -332,7 +332,8 @@ class Repairer { void ConvertLogFilesToTables() { const auto& wal_dir = immutable_db_options_.GetWalDir(); for (size_t i = 0; i < logs_.size(); i++) { - // we should use LogFileName(wal_dir, logs_[i]) here. user might uses wal_dir option. + // we should use LogFileName(wal_dir, logs_[i]) here. user might use + // wal_dir option. std::string logname = LogFileName(wal_dir, logs_[i]); Status status = ConvertLogToTable(wal_dir, logs_[i]); if (!status.ok()) { @@ -393,8 +394,8 @@ class Repairer { int counter = 0; while (reader.ReadRecord(&record, &scratch)) { if (record.size() < WriteBatchInternal::kHeader) { - reporter.Corruption( - record.size(), Status::Corruption("log record too small")); + reporter.Corruption(record.size(), + Status::Corruption("log record too small")); continue; } Status record_status = WriteBatchInternal::SetContents(&batch, record); @@ -715,8 +716,7 @@ Status GetDefaultCFOptions( } // anonymous namespace Status RepairDB(const std::string& dbname, const DBOptions& db_options, - const std::vector<ColumnFamilyDescriptor>& column_families - ) { + const std::vector<ColumnFamilyDescriptor>& column_families) { ColumnFamilyOptions default_cf_opts; Status status = GetDefaultCFOptions(column_families, &default_cf_opts); if (!status.ok()) { @@ -756,8 +756,7 @@ Status RepairDB(const std::string& dbname, const Options& options) { DBOptions db_options(opts); ColumnFamilyOptions cf_options(opts); - Repairer repairer(dbname, db_options, - {}, cf_options /* default_cf_opts */, + Repairer repairer(dbname, db_options, {}, cf_options /* default_cf_opts */, cf_options /* unknown_cf_opts */, true /* create_unknown_cfs */); Status status = repairer.Run(); diff --git a/db/repair_test.cc b/db/repair_test.cc index b93f1f951..644a9270d 100644 --- a/db/repair_test.cc +++ b/db/repair_test.cc @@ -279,7 +279,7 @@ TEST_F(RepairTest, SeparateWalDir) { ASSERT_EQ(total_ssts_size, 0); } std::string manifest_path = - DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo()); + DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo()); Close(); ASSERT_OK(env_->FileExists(manifest_path)); @@ -301,7 +301,7 @@ TEST_F(RepairTest, SeparateWalDir) { ASSERT_EQ(Get("key"), "val"); ASSERT_EQ(Get("foo"), "bar"); - } while(ChangeWalOptions()); + } while (ChangeWalOptions()); } TEST_F(RepairTest, RepairMultipleColumnFamilies) { @@ -387,8 +387,7 @@ TEST_F(RepairTest, RepairColumnFamilyOptions) { ASSERT_EQ(fname_to_props.size(), 2U); for (const auto& fname_and_props : fname_to_props) { std::string comparator_name(rev_opts.comparator->Name()); - ASSERT_EQ(comparator_name, - fname_and_props.second->comparator_name); + ASSERT_EQ(comparator_name, fname_and_props.second->comparator_name); } Close(); diff --git a/db/snapshot_impl.cc b/db/snapshot_impl.cc index b9228c797..98b475463 100644 --- a/db/snapshot_impl.cc +++ b/db/snapshot_impl.cc @@ -3,14 +3,13 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
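The snapshot_impl changes below concern ManagedSnapshot, the RAII wrapper declared in include/rocksdb/snapshot.h: the constructor acquires (or adopts) a snapshot and the destructor releases it. A minimal usage sketch (illustrative only, not part of the patch; assumes an already-open db):

#include <string>

#include "rocksdb/db.h"
#include "rocksdb/snapshot.h"

// Read `key` at a fixed point in time; the snapshot is released on scope exit.
std::string ReadAtSnapshot(rocksdb::DB* db, const std::string& key) {
  rocksdb::ManagedSnapshot snap(db);  // constructor calls db->GetSnapshot()
  rocksdb::ReadOptions read_options;
  read_options.snapshot = snap.snapshot();
  std::string value;
  rocksdb::Status s = db->Get(read_options, key, &value);
  return s.ok() ? value : std::string();
}  // ~ManagedSnapshot() calls db->ReleaseSnapshot()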
-#include "rocksdb/snapshot.h" - #include "rocksdb/db.h" +#include "rocksdb/snapshot.h" namespace ROCKSDB_NAMESPACE { -ManagedSnapshot::ManagedSnapshot(DB* db) : db_(db), - snapshot_(db->GetSnapshot()) {} +ManagedSnapshot::ManagedSnapshot(DB* db) + : db_(db), snapshot_(db->GetSnapshot()) {} ManagedSnapshot::ManagedSnapshot(DB* db, const Snapshot* _snapshot) : db_(db), snapshot_(_snapshot) {} @@ -21,6 +20,6 @@ ManagedSnapshot::~ManagedSnapshot() { } } -const Snapshot* ManagedSnapshot::snapshot() { return snapshot_;} +const Snapshot* ManagedSnapshot::snapshot() { return snapshot_; } } // namespace ROCKSDB_NAMESPACE diff --git a/db/snapshot_impl.h b/db/snapshot_impl.h index 59f491615..23e5e98cd 100644 --- a/db/snapshot_impl.h +++ b/db/snapshot_impl.h @@ -41,7 +41,7 @@ class SnapshotImpl : public Snapshot { SnapshotImpl* prev_; SnapshotImpl* next_; - SnapshotList* list_; // just for sanity checks + SnapshotList* list_; // just for sanity checks int64_t unix_time_; @@ -56,7 +56,7 @@ class SnapshotList { SnapshotList() { list_.prev_ = &list_; list_.next_ = &list_; - list_.number_ = 0xFFFFFFFFL; // placeholder marker, for debugging + list_.number_ = 0xFFFFFFFFL; // placeholder marker, for debugging // Set all the variables to make UBSAN happy. list_.list_ = nullptr; list_.unix_time_ = 0; @@ -72,8 +72,14 @@ class SnapshotList { assert(list_.next_ != &list_ || 0 == count_); return list_.next_ == &list_; } - SnapshotImpl* oldest() const { assert(!empty()); return list_.next_; } - SnapshotImpl* newest() const { assert(!empty()); return list_.prev_; } + SnapshotImpl* oldest() const { + assert(!empty()); + return list_.next_; + } + SnapshotImpl* newest() const { + assert(!empty()); + return list_.prev_; + } SnapshotImpl* New(SnapshotImpl* s, SequenceNumber seq, uint64_t unix_time, bool is_write_conflict_boundary, diff --git a/db/table_cache.cc b/db/table_cache.cc index c44578f8b..a9ea14348 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -38,7 +38,7 @@ static void DeleteEntry(const Slice& /*key*/, void* value) { T* typed_value = reinterpret_cast<T*>(value); delete typed_value; } -} // namespace +} // anonymous namespace } // namespace ROCKSDB_NAMESPACE // Generate the regular and coroutine versions of some methods by @@ -79,7 +79,7 @@ void AppendVarint64(IterKey* key, uint64_t v) { #endif // ROCKSDB_LITE -} // namespace +} // anonymous namespace const int kLoadConcurency = 128; @@ -103,8 +103,7 @@ TableCache::TableCache(const ImmutableOptions& ioptions, } } -TableCache::~TableCache() { -} +TableCache::~TableCache() {} TableReader* TableCache::GetTableReaderFromHandle(Cache::Handle* handle) { return reinterpret_cast<TableReader*>(cache_->Value(handle)); } diff --git a/db/table_properties_collector.cc b/db/table_properties_collector.cc index 591c1d04a..edb9a1b63 100644 --- a/db/table_properties_collector.cc +++ b/db/table_properties_collector.cc @@ -27,7 +27,7 @@ uint64_t GetUint64Property(const UserCollectedProperties& props, return GetVarint64(&raw, &val) ? 
val : 0; } -} // namespace +} // anonymous namespace Status UserKeyTablePropertiesCollector::InternalAdd(const Slice& key, const Slice& value, @@ -54,13 +54,12 @@ Status UserKeyTablePropertiesCollector::Finish( return collector_->Finish(properties); } -UserCollectedProperties -UserKeyTablePropertiesCollector::GetReadableProperties() const { +UserCollectedProperties UserKeyTablePropertiesCollector::GetReadableProperties() + const { return collector_->GetReadableProperties(); } -uint64_t GetDeletedKeys( - const UserCollectedProperties& props) { +uint64_t GetDeletedKeys(const UserCollectedProperties& props) { bool property_present_ignored; return GetUint64Property(props, TablePropertiesNames::kDeletedKeys, &property_present_ignored); @@ -68,8 +67,8 @@ uint64_t GetDeletedKeys( uint64_t GetMergeOperands(const UserCollectedProperties& props, bool* property_present) { - return GetUint64Property( - props, TablePropertiesNames::kMergeOperands, property_present); + return GetUint64Property(props, TablePropertiesNames::kMergeOperands, + property_present); } } // namespace ROCKSDB_NAMESPACE diff --git a/db/table_properties_collector_test.cc b/db/table_properties_collector_test.cc index 4098677b1..5f0f205da 100644 --- a/db/table_properties_collector_test.cc +++ b/db/table_properties_collector_test.cc @@ -61,30 +61,30 @@ void MakeBuilder( } // namespace // Collects keys that starts with "A" in a table. -class RegularKeysStartWithA: public TablePropertiesCollector { +class RegularKeysStartWithA : public TablePropertiesCollector { public: const char* Name() const override { return "RegularKeysStartWithA"; } Status Finish(UserCollectedProperties* properties) override { - std::string encoded; - std::string encoded_num_puts; - std::string encoded_num_deletes; - std::string encoded_num_single_deletes; - std::string encoded_num_size_changes; - PutVarint32(&encoded, count_); - PutVarint32(&encoded_num_puts, num_puts_); - PutVarint32(&encoded_num_deletes, num_deletes_); - PutVarint32(&encoded_num_single_deletes, num_single_deletes_); - PutVarint32(&encoded_num_size_changes, num_size_changes_); - *properties = UserCollectedProperties{ - {"TablePropertiesTest", message_}, - {"Count", encoded}, - {"NumPuts", encoded_num_puts}, - {"NumDeletes", encoded_num_deletes}, - {"NumSingleDeletes", encoded_num_single_deletes}, - {"NumSizeChanges", encoded_num_size_changes}, - }; - return Status::OK(); + std::string encoded; + std::string encoded_num_puts; + std::string encoded_num_deletes; + std::string encoded_num_single_deletes; + std::string encoded_num_size_changes; + PutVarint32(&encoded, count_); + PutVarint32(&encoded_num_puts, num_puts_); + PutVarint32(&encoded_num_deletes, num_deletes_); + PutVarint32(&encoded_num_single_deletes, num_single_deletes_); + PutVarint32(&encoded_num_size_changes, num_size_changes_); + *properties = UserCollectedProperties{ + {"TablePropertiesTest", message_}, + {"Count", encoded}, + {"NumPuts", encoded_num_puts}, + {"NumDeletes", encoded_num_deletes}, + {"NumSingleDeletes", encoded_num_single_deletes}, + {"NumSizeChanges", encoded_num_size_changes}, + }; + return Status::OK(); } Status AddUserKey(const Slice& user_key, const Slice& /*value*/, @@ -338,7 +338,7 @@ void TestCustomizedTablePropertiesCollector( TEST_P(TablePropertiesTest, CustomizedTablePropertiesCollector) { // Test properties collectors with internal keys or regular keys // for block based table - for (bool encode_as_internal : { true, false }) { + for (bool encode_as_internal : {true, false}) { Options options; 
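The collectors in this file publish counters into UserCollectedProperties as varint-encoded strings; GetUint64Property(), reformatted above, decodes them. A minimal round-trip sketch using the internal helpers from util/coding.h (illustrative only, not part of the patch; ReadCounter is a hypothetical name):

#include <cstdint>
#include <string>

#include "rocksdb/table_properties.h"
#include "util/coding.h"

// Decode one varint64-encoded counter out of a table's user properties.
uint64_t ReadCounter(const rocksdb::UserCollectedProperties& props,
                     const std::string& name) {
  auto pos = props.find(name);
  if (pos == props.end()) {
    return 0;  // property absent
  }
  uint64_t value = 0;
  rocksdb::Slice raw = pos->second;
  // GetVarint64 advances `raw` and returns false on a malformed encoding.
  return rocksdb::GetVarint64(&raw, &value) ? value : 0;
}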
BlockBasedTableOptions table_options; table_options.flush_block_policy_factory = @@ -404,7 +404,7 @@ void TestInternalKeyPropertiesCollector( // HACK: Set options.info_log to avoid writing log in // SanitizeOptions(). options.info_log = std::make_shared<test::NullLogger>(); - options = SanitizeOptions("db", // just a place holder + options = SanitizeOptions("db", // just a place holder options); ImmutableOptions ioptions(options); GetIntTblPropCollectorFactory(ioptions, &int_tbl_prop_collector_factories); diff --git a/db/transaction_log_impl.cc b/db/transaction_log_impl.cc index 044adc2c5..3878b428a 100644 --- a/db/transaction_log_impl.cc +++ b/db/transaction_log_impl.cc @@ -41,7 +41,7 @@ TransactionLogIteratorImpl::TransactionLogIteratorImpl( current_status_.PermitUncheckedError(); // Clear on start reporter_.env = options_->env; reporter_.info_log = options_->info_log.get(); - SeekToStartSequence(); // Seek till starting sequence + SeekToStartSequence(); // Seek till starting sequence } Status TransactionLogIteratorImpl::OpenLogFile( @@ -62,8 +62,7 @@ Status TransactionLogIteratorImpl::OpenLogFile( // If cannot open file in DB directory. // Try the archive dir, as it could have moved in the meanwhile. fname = ArchivedLogFileName(dir_, log_file->LogNumber()); - s = fs->NewSequentialFile(fname, optimized_env_options, - &file, nullptr); + s = fs->NewSequentialFile(fname, optimized_env_options, &file, nullptr); } } if (s.ok()) { @@ -74,7 +73,7 @@ Status TransactionLogIteratorImpl::OpenLogFile( return s; } -BatchResult TransactionLogIteratorImpl::GetBatch() { +BatchResult TransactionLogIteratorImpl::GetBatch() { assert(is_valid_); // cannot call in a non valid state. BatchResult result; result.sequence = current_batch_seq_; @@ -124,8 +123,8 @@ void TransactionLogIteratorImpl::SeekToStartSequence(uint64_t start_file_index, } while (RestrictedRead(&record)) { if (record.size() < WriteBatchInternal::kHeader) { - reporter_.Corruption( - record.size(), Status::Corruption("very small log record")); + reporter_.Corruption(record.size(), + Status::Corruption("very small log record")); continue; } UpdateCurrentWriteBatch(record); @@ -137,11 +136,12 @@ void TransactionLogIteratorImpl::SeekToStartSequence(uint64_t start_file_index, reporter_.Info(current_status_.ToString().c_str()); return; } else if (strict) { - reporter_.Info("Could seek required sequence number. Iterator will " - "continue."); + reporter_.Info( + "Could seek required sequence number. 
Iterator will " + "continue."); } is_valid_ = true; - started_ = true; // set started_ as we could seek till starting sequence + started_ = true; // set started_ as we could seek till starting sequence return; } else { is_valid_ = false; @@ -182,15 +182,15 @@ void TransactionLogIteratorImpl::NextImpl(bool internal) { // Runs every time until we can seek to the start sequence SeekToStartSequence(); } - while(true) { + while (true) { assert(current_log_reader_); if (current_log_reader_->IsEOF()) { current_log_reader_->UnmarkEOF(); } while (RestrictedRead(&record)) { if (record.size() < WriteBatchInternal::kHeader) { - reporter_.Corruption( - record.size(), Status::Corruption("very small log record")); + reporter_.Corruption(record.size(), + Status::Corruption("very small log record")); continue; } else { // started_ should be true if called by application diff --git a/db/transaction_log_impl.h b/db/transaction_log_impl.h index 6ec7b14e1..e8c6efc02 100644 --- a/db/transaction_log_impl.h +++ b/db/transaction_log_impl.h @@ -23,12 +23,11 @@ namespace ROCKSDB_NAMESPACE { class LogFileImpl : public LogFile { public: LogFileImpl(uint64_t logNum, WalFileType logType, SequenceNumber startSeq, - uint64_t sizeBytes) : - logNumber_(logNum), - type_(logType), - startSequence_(startSeq), - sizeFileBytes_(sizeBytes) { - } + uint64_t sizeBytes) + : logNumber_(logNum), + type_(logType), + startSequence_(startSeq), + sizeFileBytes_(sizeBytes) {} std::string PathName() const override { if (type_ == kArchivedLogFile) { @@ -45,7 +44,7 @@ class LogFileImpl : public LogFile { uint64_t SizeFileBytes() const override { return sizeFileBytes_; } - bool operator < (const LogFile& that) const { + bool operator<(const LogFile& that) const { return LogNumber() < that.LogNumber(); } @@ -54,7 +53,6 @@ class LogFileImpl : public LogFile { WalFileType type_; SequenceNumber startSequence_; uint64_t sizeFileBytes_; - }; class TransactionLogIteratorImpl : public TransactionLogIterator { diff --git a/db/trim_history_scheduler.h b/db/trim_history_scheduler.h index b17f6170f..252802a7a 100644 --- a/db/trim_history_scheduler.h +++ b/db/trim_history_scheduler.h @@ -6,8 +6,10 @@ #pragma once #include <stdint.h> + #include <atomic> #include <cstdlib> + #include "util/autovector.h" namespace ROCKSDB_NAMESPACE { diff --git a/db/version_builder_test.cc b/db/version_builder_test.cc index a751b697f..ee5c3f2e3 100644 --- a/db/version_builder_test.cc +++ b/db/version_builder_test.cc @@ -1656,10 +1656,10 @@ TEST_F(VersionBuilderTest, CheckConsistencyForFileDeletedTwice) { UpdateVersionStorageInfo(&new_vstorage); VersionBuilder version_builder2(env_options, &ioptions_, table_cache, - &new_vstorage, version_set); + &new_vstorage, version_set); VersionStorageInfo new_vstorage2(&icmp_, ucmp_, options_.num_levels, - kCompactionStyleLevel, nullptr, - true /* force_consistency_checks */); + kCompactionStyleLevel, nullptr, + true /* force_consistency_checks */); ASSERT_NOK(version_builder2.Apply(&version_edit)); UnrefFilesInVersion(&new_vstorage); diff --git a/db/version_edit.cc b/db/version_edit.cc index fbf83f9db..c763d98e8 100644 --- a/db/version_edit.cc +++ b/db/version_edit.cc @@ -20,9 +20,7 @@ namespace ROCKSDB_NAMESPACE { -namespace { - -} // anonymous namespace +namespace {} // anonymous namespace uint64_t PackFileNumberAndPathId(uint64_t number, uint64_t path_id) { assert(number <= kFileNumberMask); @@ -501,8 +499,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) { break; case kCompactCursor: - if (GetLevel(&input, &level, &msg) && - GetInternalKey(&input, &key)) { + 
if (GetLevel(&input, &level, &msg) && GetInternalKey(&input, &key)) { // Here we re-use the output format of compact pointer in LevelDB // to persist compact_cursors_ compact_cursors_.push_back(std::make_pair(level, key)); diff --git a/db/version_edit.h b/db/version_edit.h index aba9c0957..c9800a3c0 100644 --- a/db/version_edit.h +++ b/db/version_edit.h @@ -114,7 +114,7 @@ struct FileDescriptor { // Table reader in table_reader_handle TableReader* table_reader; uint64_t packed_number_and_path_id; - uint64_t file_size; // File size in bytes + uint64_t file_size; // File size in bytes SequenceNumber smallest_seqno; // The smallest seqno in this file SequenceNumber largest_seqno; // The largest seqno in this file @@ -146,8 +146,8 @@ struct FileDescriptor { return packed_number_and_path_id & kFileNumberMask; } uint32_t GetPathId() const { - return static_cast<uint32_t>( - packed_number_and_path_id / (kFileNumberMask + 1)); + return static_cast<uint32_t>(packed_number_and_path_id / + (kFileNumberMask + 1)); } uint64_t GetFileSize() const { return file_size; } }; @@ -166,8 +166,8 @@ struct FileSampledStats { struct FileMetaData { FileDescriptor fd; - InternalKey smallest; // Smallest internal key served by table - InternalKey largest; // Largest internal key served by table + InternalKey smallest; // Smallest internal key served by table + InternalKey largest; // Largest internal key served by table // Needs to be disposed when refs becomes 0. Cache::Handle* table_reader_handle = nullptr; @@ -312,15 +312,11 @@ struct FileMetaData { struct FdWithKeyRange { FileDescriptor fd; FileMetaData* file_metadata; // Point to all metadata - Slice smallest_key; // slice that contain smallest key - Slice largest_key; // slice that contain largest key + Slice smallest_key; // slice that contain smallest key + Slice largest_key; // slice that contain largest key FdWithKeyRange() - : fd(), - file_metadata(nullptr), - smallest_key(), - largest_key() { - } + : fd(), file_metadata(nullptr), smallest_key(), largest_key() {} FdWithKeyRange(FileDescriptor _fd, Slice _smallest_key, Slice _largest_key, FileMetaData* _file_metadata) diff --git a/db/version_set.cc b/db/version_set.cc index 8e5438d8c..c27933deb 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -93,23 +93,19 @@ namespace { // Find File in LevelFilesBrief data structure // Within an index range defined by left and right int FindFileInRange(const InternalKeyComparator& icmp, - const LevelFilesBrief& file_level, - const Slice& key, - uint32_t left, - uint32_t right) { + const LevelFilesBrief& file_level, const Slice& key, + uint32_t left, uint32_t right) { auto cmp = [&](const FdWithKeyRange& f, const Slice& k) -> bool { return icmp.InternalKeyComparator::Compare(f.largest_key, k) < 0; }; - const auto &b = file_level.files; - return static_cast<int>(std::lower_bound(b + left, - b + right, key, cmp) - b); + const auto& b = file_level.files; + return static_cast<int>(std::lower_bound(b + left, b + right, key, cmp) - b); } Status OverlapWithIterator(const Comparator* ucmp, - const Slice& smallest_user_key, - const Slice& largest_user_key, - InternalIterator* iter, - bool* overlap) { + const Slice& smallest_user_key, + const Slice& largest_user_key, + InternalIterator* iter, bool* overlap) { InternalKey range_start(smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek); iter->Seek(range_start.Encode()); @@ -187,9 +183,9 @@ class FilePicker { // Do key range filtering of files or/and fractional cascading if: // (1) not all the files are in level 0, or // (2) there are more than 3 
current level files - // If there are only 3 or less current level files in the system, we skip - // the key range filtering. In this case, more likely, the system is - // highly tuned to minimize number of tables queried by each query, + // If there are only 3 or less current level files in the system, we + // skip the key range filtering. In this case, more likely, the system + // is highly tuned to minimize number of tables queried by each query, // so it is unlikely that key range filtering is more efficient than // querying the files. if (num_levels_ > 1 || curr_file_level_->num_files > 3) { @@ -211,11 +207,9 @@ class FilePicker { // Setup file search bound for the next level based on the // comparison results if (curr_level_ > 0) { - file_indexer_->GetNextLevelIndex(curr_level_, - curr_index_in_curr_level_, - cmp_smallest, cmp_largest, - &search_left_bound_, - &search_right_bound_); + file_indexer_->GetNextLevelIndex( + curr_level_, curr_index_in_curr_level_, cmp_smallest, + cmp_largest, &search_left_bound_, &search_right_bound_); } // Key falls out of current file's range if (cmp_smallest < 0 || cmp_largest > 0) { @@ -846,22 +840,21 @@ Version::~Version() { } int FindFile(const InternalKeyComparator& icmp, - const LevelFilesBrief& file_level, - const Slice& key) { + const LevelFilesBrief& file_level, const Slice& key) { return FindFileInRange(icmp, file_level, key, 0, static_cast<uint32_t>(file_level.num_files)); } void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level, - const std::vector<FileMetaData*>& files, - Arena* arena) { + const std::vector<FileMetaData*>& files, + Arena* arena) { assert(file_level); assert(arena); size_t num = files.size(); file_level->num_files = num; char* mem = arena->AllocateAligned(num * sizeof(FdWithKeyRange)); - file_level->files = new (mem)FdWithKeyRange[num]; + file_level->files = new (mem) FdWithKeyRange[num]; for (size_t i = 0; i < num; i++) { Slice smallest_key = files[i]->smallest.Encode(); @@ -882,28 +875,27 @@ void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level, } } -static bool AfterFile(const Comparator* ucmp, - const Slice* user_key, const FdWithKeyRange* f) { +static bool AfterFile(const Comparator* ucmp, const Slice* user_key, + const FdWithKeyRange* f) { // nullptr user_key occurs before all keys and is therefore never after *f return (user_key != nullptr && ucmp->CompareWithoutTimestamp(*user_key, ExtractUserKey(f->largest_key)) > 0); } -static bool BeforeFile(const Comparator* ucmp, - const Slice* user_key, const FdWithKeyRange* f) { +static bool BeforeFile(const Comparator* ucmp, const Slice* user_key, + const FdWithKeyRange* f) { // nullptr user_key occurs after all keys and is therefore never before *f return (user_key != nullptr && ucmp->CompareWithoutTimestamp(*user_key, ExtractUserKey(f->smallest_key)) < 0); } -bool SomeFileOverlapsRange( - const InternalKeyComparator& icmp, - bool disjoint_sorted_files, - const LevelFilesBrief& file_level, - const Slice* smallest_user_key, - const Slice* largest_user_key) { +bool SomeFileOverlapsRange(const InternalKeyComparator& icmp, + bool disjoint_sorted_files, + const LevelFilesBrief& file_level, + const Slice* smallest_user_key, + const Slice* largest_user_key) { const Comparator* ucmp = icmp.user_comparator(); if (!disjoint_sorted_files) { // Need to check against all files @@ -1025,9 +1017,7 @@ class LevelIterator final : public InternalIterator { return file_iter_.iter() ? 
file_iter_.status() : Status::OK(); } - bool PrepareValue() override { - return file_iter_.PrepareValue(); - } + bool PrepareValue() override { return file_iter_.PrepareValue(); } inline bool MayBeOutOfLowerBound() override { assert(Valid()); @@ -1561,9 +1551,8 @@ Status Version::GetTableProperties(std::shared_ptr<const TableProperties>* tp, if (fname != nullptr) { file_name = *fname; } else { - file_name = - TableFileName(ioptions->cf_paths, file_meta->fd.GetNumber(), - file_meta->fd.GetPathId()); + file_name = TableFileName(ioptions->cf_paths, file_meta->fd.GetNumber(), + file_meta->fd.GetPathId()); } s = ioptions->fs->NewRandomAccessFile(file_name, file_options_, &file, nullptr); @@ -1690,8 +1679,8 @@ Status Version::GetPropertiesOfTablesInRange( false); for (const auto& file_meta : files) { auto fname = - TableFileName(cfd_->ioptions()->cf_paths, - file_meta->fd.GetNumber(), file_meta->fd.GetPathId()); + TableFileName(cfd_->ioptions()->cf_paths, file_meta->fd.GetNumber(), + file_meta->fd.GetPathId()); if (props->count(fname) == 0) { // 1. If the table is already present in table cache, load table // properties from there. @@ -1788,8 +1777,7 @@ void Version::GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta) { files.back().num_deletions = file->num_deletions; level_size += file->fd.GetFileSize(); } - cf_meta->levels.emplace_back( - level, level_size, std::move(files)); + cf_meta->levels.emplace_back(level, level_size, std::move(files)); cf_meta->size += level_size; } for (const auto& meta : vstorage->GetBlobFiles()) { @@ -1880,10 +1868,8 @@ uint64_t VersionStorageInfo::GetEstimatedActiveKeys() const { if (current_num_samples_ < file_count) { // casting to avoid overflowing - return - static_cast<uint64_t>( - (est * static_cast<double>(file_count) / current_num_samples_) - ); + return static_cast<uint64_t>( + (est * static_cast<double>(file_count) / current_num_samples_)); } else { return est; } @@ -2020,8 +2006,8 @@ Status Version::OverlapWithLevelIterator(const ReadOptions& read_options, /*smallest_compaction_key=*/nullptr, /*largest_compaction_key=*/nullptr, /*allow_unprepared_value=*/false)); - status = OverlapWithIterator( - ucmp, smallest_user_key, largest_user_key, iter.get(), overlap); + status = OverlapWithIterator(ucmp, smallest_user_key, largest_user_key, + iter.get(), overlap); if (!status.ok() || *overlap) { break; } @@ -2035,8 +2021,8 @@ Status Version::OverlapWithLevelIterator(const ReadOptions& read_options, cfd_->internal_stats()->GetFileReadHist(level), TableReaderCaller::kUserIterator, IsFilterSkipped(level), level, &range_del_agg)); - status = OverlapWithIterator( - ucmp, smallest_user_key, largest_user_key, iter.get(), overlap); + status = OverlapWithIterator(ucmp, smallest_user_key, largest_user_key, + iter.get(), overlap); } if (status.ok() && *overlap == false && @@ -2396,7 +2382,7 @@ void Version::Get(const ReadOptions& read_options, const LookupKey& k, return; } if (!merge_operator_) { - *status = Status::InvalidArgument( + *status = Status::InvalidArgument( "merge_operator is not properly initialized."); return; } @@ -2418,7 +2404,7 @@ void Version::Get(const ReadOptions& read_options, const LookupKey& k, if (key_exists != nullptr) { *key_exists = false; } - *status = Status::NotFound(); // Use an empty error message for speed + *status = Status::NotFound(); // Use an empty error message for speed } } @@ -2905,8 +2891,8 @@ bool Version::IsFilterSkipped(int level, bool is_file_last_in_level) { void VersionStorageInfo::GenerateLevelFilesBrief() { level_files_brief_.resize(num_non_empty_levels_); for (int level = 0; 
level < num_non_empty_levels_; level++) { - DoGenerateLevelFilesBrief( - &level_files_brief_[level], files_[level], &arena_); + DoGenerateLevelFilesBrief(&level_files_brief_[level], files_[level], + &arena_); } } @@ -2940,8 +2926,7 @@ void Version::PrepareAppend(const MutableCFOptions& mutable_cf_options, } bool Version::MaybeInitializeFileMetaData(FileMetaData* file_meta) { - if (file_meta->init_stats_from_file || - file_meta->compensated_file_size > 0) { + if (file_meta->init_stats_from_file || file_meta->compensated_file_size > 0) { return false; } std::shared_ptr<const TableProperties> tp; @@ -3608,9 +3593,9 @@ struct Fsize { // In normal mode: descending size bool CompareCompensatedSizeDescending(const Fsize& first, const Fsize& second) { return (first.file->compensated_file_size > - second.file->compensated_file_size); + second.file->compensated_file_size); } -} // anonymous namespace +} // anonymous namespace void VersionStorageInfo::AddFile(int level, FileMetaData* f) { auto& level_files = files_[level]; @@ -3806,7 +3791,7 @@ void SortFileByRoundRobin(const InternalKeyComparator& icmp, } } } -} // namespace +} // anonymous namespace void VersionStorageInfo::UpdateFilesByCompactionPri( const ImmutableOptions& ioptions, const MutableCFOptions& options) { @@ -3978,9 +3963,7 @@ void VersionStorageInfo::ComputeBottommostFilesMarkedForCompaction() { } } -void Version::Ref() { - ++refs_; -} +void Version::Ref() { ++refs_; } bool Version::Unref() { assert(refs_ >= 1); @@ -4112,9 +4095,8 @@ void VersionStorageInfo::GetCleanInputsWithinInterval( return; } - GetOverlappingInputsRangeBinarySearch(level, begin, end, inputs, - hint_index, file_index, - true /* within_interval */); + GetOverlappingInputsRangeBinarySearch(level, begin, end, inputs, hint_index, + file_index, true /* within_interval */); } // Store in "*inputs" all files in "level" that overlap [begin,end] @@ -4277,8 +4259,7 @@ const char* VersionStorageInfo::LevelFileSummary(FileSummaryStorage* scratch, "#%" PRIu64 "(seq=%" PRIu64 ",sz=%s,%d) ", f->fd.GetNumber(), f->fd.smallest_seqno, sztxt, static_cast<int>(f->being_compacted)); - if (ret < 0 || ret >= sz) - break; + if (ret < 0 || ret >= sz) break; len += ret; } // overwrite the last space (only if files_[level].size() is non-zero) @@ -4456,13 +4437,13 @@ uint64_t VersionStorageInfo::EstimateLiveDataSize() const { // no potential overlap, we can safely insert the rest of this level // (if the level is not 0) into the map without checking again because // the elements in the level are sorted and non-overlapping. - auto lb = (found_end && l != 0) ? 
ranges.end() + : ranges.lower_bound(&file->smallest); found_end = (lb == ranges.end()); if (found_end || internal_comparator_->Compare( - file->largest, (*lb).second->smallest) < 0) { - ranges.emplace_hint(lb, &file->largest, file); - size += file->fd.file_size; + file->largest, (*lb).second->smallest) < 0) { + ranges.emplace_hint(lb, &file->largest, file); + size += file->fd.file_size; } } } @@ -5674,7 +5655,7 @@ std::string ManifestPicker::GetNextManifest(uint64_t* number, } return ret; } -} // namespace +} // anonymous namespace Status VersionSet::TryRecover( const std::vector<ColumnFamilyDescriptor>& column_families, bool read_only, @@ -5876,7 +5857,7 @@ Status VersionSet::ReduceNumberOfLevels(const std::string& dbname, } } - delete[] vstorage -> files_; + delete[] vstorage->files_; vstorage->files_ = new_files_list; vstorage->num_levels_ = new_levels; vstorage->ResizeCompactCursors(new_levels); @@ -5885,9 +5866,9 @@ Status VersionSet::ReduceNumberOfLevels(const std::string& dbname, VersionEdit ve; InstrumentedMutex dummy_mutex; InstrumentedMutexLock l(&dummy_mutex); - return versions.LogAndApply( - versions.GetColumnFamilySet()->GetDefault(), - mutable_cf_options, &ve, &dummy_mutex, nullptr, true); + return versions.LogAndApply(versions.GetColumnFamilySet()->GetDefault(), + mutable_cf_options, &ve, &dummy_mutex, nullptr, + true); } // Get the checksum information including the checksum and checksum function @@ -5973,9 +5954,7 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname, std::unique_ptr<FSSequentialFile> file; const std::shared_ptr<FileSystem>& fs = options.env->GetFileSystem(); s = fs->NewSequentialFile( - dscname, - fs->OptimizeForManifestRead(file_options_), &file, - nullptr); + dscname, fs->OptimizeForManifestRead(file_options_), &file, nullptr); if (!s.ok()) { return s; } @@ -6078,8 +6057,8 @@ Status VersionSet::WriteCurrentStateToManifest( cfd->internal_comparator().user_comparator()->Name()); std::string record; if (!edit.EncodeTo(&record)) { - return Status::Corruption( - "Unable to Encode VersionEdit:" + edit.DebugString(true)); + return Status::Corruption("Unable to Encode VersionEdit:" + + edit.DebugString(true)); } io_s = log->AddRecord(record); if (!io_s.ok()) { @@ -6137,9 +6116,10 @@ Status VersionSet::WriteCurrentStateToManifest( edit.SetLogNumber(log_number); if (cfd->GetID() == 0) { - // min_log_number_to_keep is for the whole db, not for specific column family. - // So it does not need to be set for every column family, just need to be set once. - // Since default CF can never be dropped, we set the min_log to the default CF here. + // min_log_number_to_keep is for the whole db, not for specific column + // family. So it does not need to be set for every column family, it + // just needs to be set once. Since default CF can never be dropped, we + // set the min_log to the default CF here. uint64_t min_log = min_log_number_to_keep(); if (min_log != 0) { edit.SetMinLogNumberToKeep(min_log); @@ -6155,8 +6135,8 @@ Status VersionSet::WriteCurrentStateToManifest( std::string record; if (!edit.EncodeTo(&record)) { - return Status::Corruption( - "Unable to Encode VersionEdit:" + edit.DebugString(true)); + return Status::Corruption("Unable to Encode VersionEdit:" + + edit.DebugString(true)); } io_s = log->AddRecord(record); if (!io_s.ok()) { @@ -6479,7 +6459,7 @@ InternalIterator* VersionSet::MakeInputIterator( const size_t space = (c->level() == 0 ? 
c->input_levels(0)->num_files + c->num_input_levels() - 1 : c->num_input_levels()); - InternalIterator** list = new InternalIterator* [space]; + InternalIterator** list = new InternalIterator*[space]; size_t num = 0; for (size_t which = 0; which < c->num_input_levels(); which++) { if (c->input_levels(which)->num_files != 0) { @@ -6588,8 +6568,8 @@ void VersionSet::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) { filemetadata.largestkey = file->largest.user_key().ToString(); filemetadata.smallest_seqno = file->fd.smallest_seqno; filemetadata.largest_seqno = file->fd.largest_seqno; - filemetadata.num_reads_sampled = file->stats.num_reads_sampled.load( - std::memory_order_relaxed); + filemetadata.num_reads_sampled = + file->stats.num_reads_sampled.load(std::memory_order_relaxed); filemetadata.being_compacted = file->being_compacted; filemetadata.num_entries = file->num_entries; filemetadata.num_deletions = file->num_deletions; diff --git a/db/version_set.h b/db/version_set.h index 4d546e8d9..cf6f9af36 100644 --- a/db/version_set.h +++ b/db/version_set.h @@ -230,9 +230,7 @@ class VersionStorageInfo { double blob_garbage_collection_age_cutoff, double blob_garbage_collection_force_threshold); - bool level0_non_overlapping() const { - return level0_non_overlapping_; - } + bool level0_non_overlapping() const { return level0_non_overlapping_; } // Updates the oldest snapshot and related internal state, like the bottommost // files marked for compaction. @@ -814,8 +812,8 @@ class Version { Status OverlapWithLevelIterator(const ReadOptions&, const FileOptions&, const Slice& smallest_user_key, - const Slice& largest_user_key, - int level, bool* overlap); + const Slice& largest_user_key, int level, + bool* overlap); // Lookup the value for key or get all merge operands for key. // If do_merge = true (default) then lookup value for key. @@ -1032,10 +1030,10 @@ class Version { const MergeOperator* merge_operator_; VersionStorageInfo storage_info_; - VersionSet* vset_; // VersionSet to which this Version belongs - Version* next_; // Next version in linked list - Version* prev_; // Previous version in linked list - int refs_; // Number of live refs to this version + VersionSet* vset_; // VersionSet to which this Version belongs + Version* next_; // Next version in linked list + Version* prev_; // Previous version in linked list + int refs_; // Number of live refs to this version const FileOptions file_options_; const MutableCFOptions mutable_cf_options_; // Cached value to avoid recomputing it on every read. @@ -1398,7 +1396,7 @@ class VersionSet { FileMetaData** metadata, ColumnFamilyData** cfd); // This function doesn't support leveldb SST filenames - void GetLiveFilesMetaData(std::vector<LiveFileMetaData> *metadata); + void GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata); void AddObsoleteBlobFile(uint64_t blob_file_number, std::string path) { assert(table_cache_); diff --git a/db/version_set_test.cc b/db/version_set_test.cc index d38e3ad73..7d17406c1 100644 --- a/db/version_set_test.cc +++ b/db/version_set_test.cc @@ -32,7 +32,7 @@ class GenerateLevelFilesBriefTest : public testing::Test { LevelFilesBrief file_level_; Arena arena_; - GenerateLevelFilesBriefTest() { } + GenerateLevelFilesBriefTest() {} ~GenerateLevelFilesBriefTest() override { for (size_t i = 0; i < files_.size(); i++) { @@ -481,7 +481,8 @@ TEST_F(VersionStorageInfoTest, EstimateLiveDataSize2) { TEST_F(VersionStorageInfoTest, GetOverlappingInputs) { // Two files that overlap at the range deletion tombstone sentinel. 
- Add(1, 1U, {"a", 0, kTypeValue}, {"b", kMaxSequenceNumber, kTypeRangeDeletion}, 1); + Add(1, 1U, {"a", 0, kTypeValue}, + {"b", kMaxSequenceNumber, kTypeRangeDeletion}, 1); Add(1, 2U, {"b", 0, kTypeValue}, {"c", 0, kTypeValue}, 1); // Two files that overlap at the same user key. Add(1, 3U, {"d", 0, kTypeValue}, {"e", kMaxSequenceNumber, kTypeValue}, 1); @@ -492,24 +493,26 @@ TEST_F(VersionStorageInfoTest, GetOverlappingInputs) { UpdateVersionStorageInfo(); - ASSERT_EQ("1,2", GetOverlappingFiles( - 1, {"a", 0, kTypeValue}, {"b", 0, kTypeValue})); - ASSERT_EQ("1", GetOverlappingFiles( - 1, {"a", 0, kTypeValue}, {"b", kMaxSequenceNumber, kTypeRangeDeletion})); - ASSERT_EQ("2", GetOverlappingFiles( - 1, {"b", kMaxSequenceNumber, kTypeValue}, {"c", 0, kTypeValue})); - ASSERT_EQ("3,4", GetOverlappingFiles( - 1, {"d", 0, kTypeValue}, {"e", 0, kTypeValue})); - ASSERT_EQ("3", GetOverlappingFiles( - 1, {"d", 0, kTypeValue}, {"e", kMaxSequenceNumber, kTypeRangeDeletion})); - ASSERT_EQ("3,4", GetOverlappingFiles( - 1, {"e", kMaxSequenceNumber, kTypeValue}, {"f", 0, kTypeValue})); - ASSERT_EQ("3,4", GetOverlappingFiles( - 1, {"e", 0, kTypeValue}, {"f", 0, kTypeValue})); - ASSERT_EQ("5", GetOverlappingFiles( - 1, {"g", 0, kTypeValue}, {"h", 0, kTypeValue})); - ASSERT_EQ("6", GetOverlappingFiles( - 1, {"i", 0, kTypeValue}, {"j", 0, kTypeValue})); + ASSERT_EQ("1,2", + GetOverlappingFiles(1, {"a", 0, kTypeValue}, {"b", 0, kTypeValue})); + ASSERT_EQ("1", + GetOverlappingFiles(1, {"a", 0, kTypeValue}, + {"b", kMaxSequenceNumber, kTypeRangeDeletion})); + ASSERT_EQ("2", GetOverlappingFiles(1, {"b", kMaxSequenceNumber, kTypeValue}, + {"c", 0, kTypeValue})); + ASSERT_EQ("3,4", + GetOverlappingFiles(1, {"d", 0, kTypeValue}, {"e", 0, kTypeValue})); + ASSERT_EQ("3", + GetOverlappingFiles(1, {"d", 0, kTypeValue}, + {"e", kMaxSequenceNumber, kTypeRangeDeletion})); + ASSERT_EQ("3,4", GetOverlappingFiles(1, {"e", kMaxSequenceNumber, kTypeValue}, + {"f", 0, kTypeValue})); + ASSERT_EQ("3,4", + GetOverlappingFiles(1, {"e", 0, kTypeValue}, {"f", 0, kTypeValue})); + ASSERT_EQ("5", + GetOverlappingFiles(1, {"g", 0, kTypeValue}, {"h", 0, kTypeValue})); + ASSERT_EQ("6", + GetOverlappingFiles(1, {"i", 0, kTypeValue}, {"j", 0, kTypeValue})); } TEST_F(VersionStorageInfoTest, FileLocationAndMetaDataByNumber) { @@ -925,13 +928,13 @@ class FindLevelFileTest : public testing::Test { bool disjoint_sorted_files_; Arena arena_; - FindLevelFileTest() : disjoint_sorted_files_(true) { } + FindLevelFileTest() : disjoint_sorted_files_(true) {} ~FindLevelFileTest() override {} void LevelFileInit(size_t num = 0) { char* mem = arena_.AllocateAligned(num * sizeof(FdWithKeyRange)); - file_level_.files = new (mem)FdWithKeyRange[num]; + file_level_.files = new (mem) FdWithKeyRange[num]; file_level_.num_files = 0; } @@ -944,19 +947,18 @@ class FindLevelFileTest : public testing::Test { Slice smallest_slice = smallest_key.Encode(); Slice largest_slice = largest_key.Encode(); - char* mem = arena_.AllocateAligned( - smallest_slice.size() + largest_slice.size()); + char* mem = + arena_.AllocateAligned(smallest_slice.size() + largest_slice.size()); memcpy(mem, smallest_slice.data(), smallest_slice.size()); memcpy(mem + smallest_slice.size(), largest_slice.data(), - largest_slice.size()); + largest_slice.size()); // add to file_level_ size_t num = file_level_.num_files; auto& file = file_level_.files[num]; file.fd = FileDescriptor(num + 1, 0, 0); file.smallest_key = Slice(mem, smallest_slice.size()); - file.largest_key = Slice(mem + 
smallest_slice.size(), - largest_slice.size()); + file.largest_key = Slice(mem + smallest_slice.size(), largest_slice.size()); file_level_.num_files++; } @@ -980,10 +982,10 @@ TEST_F(FindLevelFileTest, LevelEmpty) { LevelFileInit(0); ASSERT_EQ(0, Find("foo")); - ASSERT_TRUE(! Overlaps("a", "z")); - ASSERT_TRUE(! Overlaps(nullptr, "z")); - ASSERT_TRUE(! Overlaps("a", nullptr)); - ASSERT_TRUE(! Overlaps(nullptr, nullptr)); + ASSERT_TRUE(!Overlaps("a", "z")); + ASSERT_TRUE(!Overlaps(nullptr, "z")); + ASSERT_TRUE(!Overlaps("a", nullptr)); + ASSERT_TRUE(!Overlaps(nullptr, nullptr)); } TEST_F(FindLevelFileTest, LevelSingle) { @@ -997,8 +999,8 @@ TEST_F(FindLevelFileTest, LevelSingle) { ASSERT_EQ(1, Find("q1")); ASSERT_EQ(1, Find("z")); - ASSERT_TRUE(! Overlaps("a", "b")); - ASSERT_TRUE(! Overlaps("z1", "z2")); + ASSERT_TRUE(!Overlaps("a", "b")); + ASSERT_TRUE(!Overlaps("z1", "z2")); ASSERT_TRUE(Overlaps("a", "p")); ASSERT_TRUE(Overlaps("a", "q")); ASSERT_TRUE(Overlaps("a", "z")); @@ -1010,8 +1012,8 @@ TEST_F(FindLevelFileTest, LevelSingle) { ASSERT_TRUE(Overlaps("q", "q")); ASSERT_TRUE(Overlaps("q", "q1")); - ASSERT_TRUE(! Overlaps(nullptr, "j")); - ASSERT_TRUE(! Overlaps("r", nullptr)); + ASSERT_TRUE(!Overlaps(nullptr, "j")); + ASSERT_TRUE(!Overlaps("r", nullptr)); ASSERT_TRUE(Overlaps(nullptr, "p")); ASSERT_TRUE(Overlaps(nullptr, "p1")); ASSERT_TRUE(Overlaps("q", nullptr)); @@ -1043,10 +1045,10 @@ TEST_F(FindLevelFileTest, LevelMultiple) { ASSERT_EQ(3, Find("450")); ASSERT_EQ(4, Find("451")); - ASSERT_TRUE(! Overlaps("100", "149")); - ASSERT_TRUE(! Overlaps("251", "299")); - ASSERT_TRUE(! Overlaps("451", "500")); - ASSERT_TRUE(! Overlaps("351", "399")); + ASSERT_TRUE(!Overlaps("100", "149")); + ASSERT_TRUE(!Overlaps("251", "299")); + ASSERT_TRUE(!Overlaps("451", "500")); + ASSERT_TRUE(!Overlaps("351", "399")); ASSERT_TRUE(Overlaps("100", "150")); ASSERT_TRUE(Overlaps("100", "200")); @@ -1065,8 +1067,8 @@ TEST_F(FindLevelFileTest, LevelMultipleNullBoundaries) { Add("200", "250"); Add("300", "350"); Add("400", "450"); - ASSERT_TRUE(! Overlaps(nullptr, "149")); - ASSERT_TRUE(! Overlaps("451", nullptr)); + ASSERT_TRUE(!Overlaps(nullptr, "149")); + ASSERT_TRUE(!Overlaps("451", nullptr)); ASSERT_TRUE(Overlaps(nullptr, nullptr)); ASSERT_TRUE(Overlaps(nullptr, "150")); ASSERT_TRUE(Overlaps(nullptr, "199")); @@ -1084,8 +1086,8 @@ TEST_F(FindLevelFileTest, LevelOverlapSequenceChecks) { LevelFileInit(1); Add("200", "200", 5000, 3000); - ASSERT_TRUE(! Overlaps("199", "199")); - ASSERT_TRUE(! Overlaps("201", "300")); + ASSERT_TRUE(!Overlaps("199", "199")); + ASSERT_TRUE(!Overlaps("201", "300")); ASSERT_TRUE(Overlaps("200", "200")); ASSERT_TRUE(Overlaps("190", "200")); ASSERT_TRUE(Overlaps("200", "210")); @@ -1097,8 +1099,8 @@ TEST_F(FindLevelFileTest, LevelOverlappingFiles) { Add("150", "600"); Add("400", "500"); disjoint_sorted_files_ = false; - ASSERT_TRUE(! Overlaps("100", "149")); - ASSERT_TRUE(! Overlaps("601", "700")); + ASSERT_TRUE(!Overlaps("100", "149")); + ASSERT_TRUE(!Overlaps("601", "700")); ASSERT_TRUE(Overlaps("100", "150")); ASSERT_TRUE(Overlaps("100", "200")); ASSERT_TRUE(Overlaps("100", "300")); diff --git a/db/wal_manager.cc b/db/wal_manager.cc index ed76905d4..a6060235f 100644 --- a/db/wal_manager.cc +++ b/db/wal_manager.cc @@ -355,7 +355,8 @@ Status WalManager::RetainProbableWalFiles(VectorLogPtr& all_logs, // Binary Search. avoid opening all files. while (end >= start) { int64_t mid = start + (end - start) / 2; // Avoid overflow. 
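The midpoint above is written start + (end - start) / 2 to honor the "Avoid overflow" note: with 0 <= start <= end the intermediate value never exceeds end, whereas the naive (start + end) / 2 can overflow int64_t. A self-contained sketch of the same retention search (illustrative only, not part of the patch; the log-file type is simplified to a vector of start sequence numbers):

#include <algorithm>
#include <cstdint>
#include <vector>

// Return the index of the oldest WAL that could still contain `target`,
// mirroring the search and clamping in WalManager::RetainProbableWalFiles().
size_t FirstProbableWal(const std::vector<uint64_t>& start_seqs,
                        uint64_t target) {
  int64_t start = 0;
  int64_t end = static_cast<int64_t>(start_seqs.size()) - 1;
  while (end >= start) {
    int64_t mid = start + (end - start) / 2;  // overflow-safe midpoint
    uint64_t current = start_seqs[static_cast<size_t>(mid)];
    if (current == target) {
      end = mid;
      break;
    } else if (current < target) {
      start = mid + 1;
    } else {
      end = mid - 1;
    }
  }
  // end may be -1 if every file starts after target; clamp to 0 so at
  // least one file is always retained.
  return static_cast<size_t>(std::max(static_cast<int64_t>(0), end));
}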
- SequenceNumber current_seq_num = all_logs.at(static_cast<size_t>(mid))->StartSequence(); + SequenceNumber current_seq_num = + all_logs.at(static_cast<size_t>(mid))->StartSequence(); if (current_seq_num == target) { end = mid; break; @@ -366,7 +367,8 @@ Status WalManager::RetainProbableWalFiles(VectorLogPtr& all_logs, } } // end could be -ve. - size_t start_index = static_cast<size_t>(std::max(static_cast<int64_t>(0), end)); + size_t start_index = + static_cast<size_t>(std::max(static_cast<int64_t>(0), end)); // The last wal file is always included all_logs.erase(all_logs.begin(), all_logs.begin() + start_index); return Status::OK(); @@ -468,9 +470,8 @@ Status WalManager::ReadFirstLine(const std::string& fname, }; std::unique_ptr<FSSequentialFile> file; - Status status = fs_->NewSequentialFile(fname, - fs_->OptimizeForLogRead(file_options_), - &file, nullptr); + Status status = fs_->NewSequentialFile( + fname, fs_->OptimizeForLogRead(file_options_), &file, nullptr); std::unique_ptr<SequentialFileReader> file_reader( new SequentialFileReader(std::move(file), fname, io_tracer_)); diff --git a/db/wal_manager.h b/db/wal_manager.h index 771c48495..8cc067935 100644 --- a/db/wal_manager.h +++ b/db/wal_manager.h @@ -11,11 +11,11 @@ #include #include #include +#include #include +#include #include #include -#include -#include #include "db/version_set.h" #include "file/file_util.h" diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc index e3c96c90c..4ad4e9749 100644 --- a/db/wal_manager_test.cc +++ b/db/wal_manager_test.cc @@ -69,7 +69,7 @@ class WalManagerTest : public testing::Test { // NOT thread safe void Put(const std::string& key, const std::string& value) { assert(current_log_writer_.get() != nullptr); - uint64_t seq = versions_->LastSequence() + 1; + uint64_t seq = versions_->LastSequence() + 1; WriteBatch batch; ASSERT_OK(batch.Put(key, value)); WriteBatchInternal::SetSequence(&batch, seq); @@ -88,7 +88,8 @@ class WalManagerTest : public testing::Test { std::unique_ptr<WritableFileWriter> file_writer; ASSERT_OK(WritableFileWriter::Create(fs, fname, env_options_, &file_writer, nullptr)); - current_log_writer_.reset(new log::Writer(std::move(file_writer), 0, false)); + current_log_writer_.reset( + new log::Writer(std::move(file_writer), 0, false)); } void CreateArchiveLogs(int num_logs, int entries_per_log) { @@ -215,7 +216,7 @@ int CountRecords(TransactionLogIterator* iter) { EXPECT_OK(iter->status()); return count; } -} // namespace +} // anonymous namespace TEST_F(WalManagerTest, WALArchivalSizeLimit) { db_options_.WAL_ttl_seconds = 0; diff --git a/db/write_batch.cc b/db/write_batch.cc index 7b63e59b2..c5042acf0 100644 --- a/db/write_batch.cc +++ b/db/write_batch.cc @@ -158,7 +158,7 @@ struct BatchContentClassifier : public WriteBatch::Handler { } }; -} // anon namespace +} // anonymous namespace struct SavePoints { std::stack<SavePoint, autovector<SavePoint>> stack; }; @@ -231,18 +231,16 @@ WriteBatch& WriteBatch::operator=(WriteBatch&& src) { return *this; } -WriteBatch::~WriteBatch() { } +WriteBatch::~WriteBatch() {} -WriteBatch::Handler::~Handler() { } +WriteBatch::Handler::~Handler() {} void WriteBatch::Handler::LogData(const Slice& /*blob*/) { // If the user has not specified something to do with blobs, then we ignore // them. 
diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc
index e3c96c90c..4ad4e9749 100644
--- a/db/wal_manager_test.cc
+++ b/db/wal_manager_test.cc
@@ -69,7 +69,7 @@ class WalManagerTest : public testing::Test {
   // NOT thread safe
   void Put(const std::string& key, const std::string& value) {
     assert(current_log_writer_.get() != nullptr);
-    uint64_t seq =  versions_->LastSequence() + 1;
+    uint64_t seq = versions_->LastSequence() + 1;
     WriteBatch batch;
     ASSERT_OK(batch.Put(key, value));
     WriteBatchInternal::SetSequence(&batch, seq);
@@ -88,7 +88,8 @@ class WalManagerTest : public testing::Test {
     std::unique_ptr<WritableFileWriter> file_writer;
     ASSERT_OK(WritableFileWriter::Create(fs, fname, env_options_, &file_writer,
                                          nullptr));
-    current_log_writer_.reset(new log::Writer(std::move(file_writer), 0, false));
+    current_log_writer_.reset(
+        new log::Writer(std::move(file_writer), 0, false));
   }

   void CreateArchiveLogs(int num_logs, int entries_per_log) {
@@ -215,7 +216,7 @@ int CountRecords(TransactionLogIterator* iter) {
   EXPECT_OK(iter->status());
   return count;
 }
-}  // namespace
+}  // anonymous namespace

 TEST_F(WalManagerTest, WALArchivalSizeLimit) {
   db_options_.WAL_ttl_seconds = 0;
diff --git a/db/write_batch.cc b/db/write_batch.cc
index 7b63e59b2..c5042acf0 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -158,7 +158,7 @@ struct BatchContentClassifier : public WriteBatch::Handler {
   }
 };

-}  // anon namespace
+}  // anonymous namespace

 struct SavePoints {
   std::stack<SavePoint, autovector<SavePoint>> stack;
@@ -231,18 +231,16 @@ WriteBatch& WriteBatch::operator=(WriteBatch&& src) {
   return *this;
 }

-WriteBatch::~WriteBatch() { }
+WriteBatch::~WriteBatch() {}

-WriteBatch::Handler::~Handler() { }
+WriteBatch::Handler::~Handler() {}

 void WriteBatch::Handler::LogData(const Slice& /*blob*/) {
   // If the user has not specified something to do with blobs, then we ignore
   // them.
 }

-bool WriteBatch::Handler::Continue() {
-  return true;
-}
+bool WriteBatch::Handler::Continue() { return true; }

 void WriteBatch::Clear() {
   rep_.clear();
@@ -779,7 +777,7 @@ Status CheckColumnFamilyTimestampSize(ColumnFamilyHandle* column_family,
   }
   return Status::OK();
 }
-}  // namespace
+}  // anonymous namespace

 Status WriteBatchInternal::Put(WriteBatch* b, uint32_t column_family_id,
                                const Slice& key, const Slice& value) {
@@ -1746,7 +1744,6 @@ Status WriteBatch::VerifyChecksum() const {
 namespace {

 class MemTableInserter : public WriteBatch::Handler {
-
   SequenceNumber sequence_;
   ColumnFamilyMemTables* const cf_mems_;
   FlushScheduler* const flush_scheduler_;
@@ -1757,7 +1754,7 @@ class MemTableInserter : public WriteBatch::Handler {
   uint64_t log_number_ref_;
   DBImpl* db_;
   const bool concurrent_memtable_writes_;
-  bool       post_info_created_;
+  bool post_info_created_;
   const WriteBatch::ProtectionInfo* prot_info_;
   size_t prot_info_idx_;
@@ -1783,8 +1780,8 @@ class MemTableInserter : public WriteBatch::Handler {
   // Whether this batch was unprepared or not
   bool unprepared_batch_;
   using DupDetector = std::aligned_storage<sizeof(DuplicateDetector)>::type;
-  DupDetector       duplicate_detector_;
-  bool              dup_dectector_on_;
+  DupDetector duplicate_detector_;
+  bool dup_dectector_on_;

   bool hint_per_batch_;
   bool hint_created_;
@@ -1804,7 +1801,7 @@ class MemTableInserter : public WriteBatch::Handler {

   MemPostInfoMap& GetPostMap() {
     assert(concurrent_memtable_writes_);
-    if(!post_info_created_) {
+    if (!post_info_created_) {
       new (&mem_post_info_map_) MemPostInfoMap();
       post_info_created_ = true;
     }
@@ -1818,8 +1815,8 @@ class MemTableInserter : public WriteBatch::Handler {
       new (&duplicate_detector_) DuplicateDetector(db_);
       dup_dectector_on_ = true;
     }
-    return reinterpret_cast<DuplicateDetector*>
-        (&duplicate_detector_)->IsDuplicateKeySeq(column_family_id, key, sequence_);
+    return reinterpret_cast<DuplicateDetector*>(&duplicate_detector_)
+        ->IsDuplicateKeySeq(column_family_id, key, sequence_);
   }

   const ProtectionInfoKVOC64* NextProtectionInfo() {
@@ -1895,12 +1892,11 @@ class MemTableInserter : public WriteBatch::Handler {

   ~MemTableInserter() override {
     if (dup_dectector_on_) {
-      reinterpret_cast<DuplicateDetector*>
-          (&duplicate_detector_)->~DuplicateDetector();
+      reinterpret_cast<DuplicateDetector*>(&duplicate_detector_)
+          ->~DuplicateDetector();
     }
     if (post_info_created_) {
-      reinterpret_cast<MemPostInfoMap*>
-          (&mem_post_info_map_)->~MemPostInfoMap();
+      reinterpret_cast<MemPostInfoMap*>(&mem_post_info_map_)->~MemPostInfoMap();
     }
     if (hint_created_) {
       for (auto iter : GetHintMap()) {
@@ -1942,7 +1938,7 @@ class MemTableInserter : public WriteBatch::Handler {
     assert(concurrent_memtable_writes_);
     // If post info was not created there is nothing
     // to process and no need to create on demand
-    if(post_info_created_) {
+    if (post_info_created_) {
       for (auto& pair : GetPostMap()) {
         pair.first->BatchPostProcess(pair.second);
       }
@@ -2864,7 +2860,7 @@ class MemTableInserter : public WriteBatch::Handler {
   }
 };

-}  // namespace
+}  // anonymous namespace

 // This function can only be called in these conditions:
 // 1) During Recovery()
@@ -3035,7 +3031,7 @@ class ProtectionInfoUpdater : public WriteBatch::Handler {
   WriteBatch::ProtectionInfo* const prot_info_ = nullptr;
 };

-}  // namespace
+}  // anonymous namespace

 Status WriteBatchInternal::SetContents(WriteBatch* b, const Slice& contents) {
   assert(contents.size() >= WriteBatchInternal::kHeader);
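The `duplicate_detector_` and `mem_post_info_map_` hunks above all exercise one idiom: reserve raw `std::aligned_storage` inline, construct the object with placement `new` only on first use, and destroy it with an explicit destructor call in `~MemTableInserter()`, so batches that never need the feature never pay for its construction. A minimal self-contained sketch of the same idiom; the class and member names here are illustrative, not RocksDB's:

#include <new>
#include <string>
#include <type_traits>

class LazyMember {
  using Str = std::string;

 public:
  LazyMember() = default;
  ~LazyMember() {
    if (created_) {
      // Explicit destructor call, mirroring ~MemTableInserter().
      reinterpret_cast<Str*>(&storage_)->~Str();
    }
  }

  Str& Get() {
    if (!created_) {
      new (&storage_) Str("built on first use");  // placement new
      created_ = true;
    }
    return *reinterpret_cast<Str*>(&storage_);
  }

 private:
  std::aligned_storage<sizeof(Str), alignof(Str)>::type storage_;
  bool created_ = false;
};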
diff --git a/db/write_batch_internal.h b/db/write_batch_internal.h
index f4ea75893..1be0bd140 100644
--- a/db/write_batch_internal.h
+++ b/db/write_batch_internal.h
@@ -77,7 +77,6 @@ struct WriteBatch::ProtectionInfo {
 // WriteBatch that we don't want in the public WriteBatch interface.
 class WriteBatchInternal {
  public:
-
   // WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
   static constexpr size_t kHeader = 12;
@@ -149,13 +148,9 @@ class WriteBatchInternal {
   // This offset is only valid if the batch is not empty.
   static size_t GetFirstOffset(WriteBatch* batch);

-  static Slice Contents(const WriteBatch* batch) {
-    return Slice(batch->rep_);
-  }
+  static Slice Contents(const WriteBatch* batch) { return Slice(batch->rep_); }

-  static size_t ByteSize(const WriteBatch* batch) {
-    return batch->rep_.size();
-  }
+  static size_t ByteSize(const WriteBatch* batch) { return batch->rep_.size(); }

   static Status SetContents(WriteBatch* batch, const Slice& contents);
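The `kHeader` comment retained above fixes the wire layout: every serialized `WriteBatch` begins with an 8-byte sequence number followed by a 4-byte entry count, 12 bytes total. A hedged sketch of decoding that prefix; `DecodeBatchHeader` is a hypothetical helper, with `memcpy` on a little-endian host standing in for RocksDB's fixed-width little-endian `DecodeFixed64`/`DecodeFixed32`:

#include <cstdint>
#include <cstring>
#include <string>

struct BatchHeader {
  uint64_t sequence;  // first 8 bytes
  uint32_t count;     // next 4 bytes
};

// Hypothetical helper: reads the 12-byte header off a batch's backing
// string (what WriteBatchInternal::Contents() exposes as a Slice).
bool DecodeBatchHeader(const std::string& rep, BatchHeader* out) {
  if (rep.size() < 12) {  // WriteBatchInternal::kHeader
    return false;
  }
  std::memcpy(&out->sequence, rep.data(), sizeof(out->sequence));
  std::memcpy(&out->count, rep.data() + 8, sizeof(out->count));
  return true;
}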
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index 2714d7a01..d233853e2 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -174,9 +174,10 @@ TEST_F(WriteBatchTest, Corruption) {
   Slice contents = WriteBatchInternal::Contents(&batch);
   ASSERT_OK(WriteBatchInternal::SetContents(
       &batch, Slice(contents.data(), contents.size() - 1)));
-  ASSERT_EQ("Put(foo, bar)@200"
-            "Corruption: bad WriteBatch Delete",
-            PrintContents(&batch));
+  ASSERT_EQ(
+      "Put(foo, bar)@200"
+      "Corruption: bad WriteBatch Delete",
+      PrintContents(&batch));
 }

 TEST_F(WriteBatchTest, Append) {
@@ -184,28 +185,28 @@ TEST_F(WriteBatchTest, Append) {
   WriteBatchInternal::SetSequence(&b1, 200);
   WriteBatchInternal::SetSequence(&b2, 300);
   ASSERT_OK(WriteBatchInternal::Append(&b1, &b2));
-  ASSERT_EQ("",
-            PrintContents(&b1));
+  ASSERT_EQ("", PrintContents(&b1));
   ASSERT_EQ(0u, b1.Count());
   ASSERT_OK(b2.Put("a", "va"));
   ASSERT_OK(WriteBatchInternal::Append(&b1, &b2));
-  ASSERT_EQ("Put(a, va)@200",
-            PrintContents(&b1));
+  ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
   ASSERT_EQ(1u, b1.Count());
   b2.Clear();
   ASSERT_OK(b2.Put("b", "vb"));
   ASSERT_OK(WriteBatchInternal::Append(&b1, &b2));
-  ASSERT_EQ("Put(a, va)@200"
-            "Put(b, vb)@201",
-            PrintContents(&b1));
+  ASSERT_EQ(
+      "Put(a, va)@200"
+      "Put(b, vb)@201",
+      PrintContents(&b1));
   ASSERT_EQ(2u, b1.Count());
   ASSERT_OK(b2.Delete("foo"));
   ASSERT_OK(WriteBatchInternal::Append(&b1, &b2));
-  ASSERT_EQ("Put(a, va)@200"
-            "Put(b, vb)@202"
-            "Put(b, vb)@201"
-            "Delete(foo)@203",
-            PrintContents(&b1));
+  ASSERT_EQ(
+      "Put(a, va)@200"
+      "Put(b, vb)@202"
+      "Put(b, vb)@201"
+      "Delete(foo)@203",
+      PrintContents(&b1));
   ASSERT_EQ(4u, b1.Count());
   b2.Clear();
   ASSERT_OK(b2.Put("c", "cc"));
@@ -247,89 +248,88 @@ TEST_F(WriteBatchTest, SingleDeletion) {
 }

 namespace {
-  struct TestHandler : public WriteBatch::Handler {
-    std::string seen;
-    Status PutCF(uint32_t column_family_id, const Slice& key,
-                 const Slice& value) override {
-      if (column_family_id == 0) {
-        seen += "Put(" + key.ToString() + ", " + value.ToString() + ")";
-      } else {
-        seen += "PutCF(" + std::to_string(column_family_id) + ", " +
-                key.ToString() + ", " + value.ToString() + ")";
-      }
-      return Status::OK();
-    }
-    Status DeleteCF(uint32_t column_family_id, const Slice& key) override {
-      if (column_family_id == 0) {
-        seen += "Delete(" + key.ToString() + ")";
-      } else {
-        seen += "DeleteCF(" + std::to_string(column_family_id) + ", " +
-                key.ToString() + ")";
-      }
-      return Status::OK();
-    }
-    Status SingleDeleteCF(uint32_t column_family_id,
-                          const Slice& key) override {
-      if (column_family_id == 0) {
-        seen += "SingleDelete(" + key.ToString() + ")";
-      } else {
-        seen += "SingleDeleteCF(" + std::to_string(column_family_id) + ", " +
-                key.ToString() + ")";
-      }
-      return Status::OK();
-    }
-    Status DeleteRangeCF(uint32_t column_family_id, const Slice& begin_key,
-                         const Slice& end_key) override {
-      if (column_family_id == 0) {
-        seen += "DeleteRange(" + begin_key.ToString() + ", " +
-                end_key.ToString() + ")";
-      } else {
-        seen += "DeleteRangeCF(" + std::to_string(column_family_id) + ", " +
-                begin_key.ToString() + ", " + end_key.ToString() + ")";
-      }
-      return Status::OK();
-    }
-    Status MergeCF(uint32_t column_family_id, const Slice& key,
-                   const Slice& value) override {
-      if (column_family_id == 0) {
-        seen += "Merge(" + key.ToString() + ", " + value.ToString() + ")";
-      } else {
-        seen += "MergeCF(" + std::to_string(column_family_id) + ", " +
-                key.ToString() + ", " + value.ToString() + ")";
-      }
-      return Status::OK();
-    }
-    void LogData(const Slice& blob) override {
-      seen += "LogData(" + blob.ToString() + ")";
-    }
-    Status MarkBeginPrepare(bool unprepare) override {
-      seen +=
-          "MarkBeginPrepare(" + std::string(unprepare ? "true" : "false") + ")";
-      return Status::OK();
-    }
-    Status MarkEndPrepare(const Slice& xid) override {
-      seen += "MarkEndPrepare(" + xid.ToString() + ")";
-      return Status::OK();
+struct TestHandler : public WriteBatch::Handler {
+  std::string seen;
+  Status PutCF(uint32_t column_family_id, const Slice& key,
+               const Slice& value) override {
+    if (column_family_id == 0) {
+      seen += "Put(" + key.ToString() + ", " + value.ToString() + ")";
+    } else {
+      seen += "PutCF(" + std::to_string(column_family_id) + ", " +
+              key.ToString() + ", " + value.ToString() + ")";
     }
-    Status MarkNoop(bool empty_batch) override {
-      seen += "MarkNoop(" + std::string(empty_batch ? "true" : "false") + ")";
-      return Status::OK();
+    return Status::OK();
+  }
+  Status DeleteCF(uint32_t column_family_id, const Slice& key) override {
+    if (column_family_id == 0) {
+      seen += "Delete(" + key.ToString() + ")";
+    } else {
+      seen += "DeleteCF(" + std::to_string(column_family_id) + ", " +
+              key.ToString() + ")";
     }
-    Status MarkCommit(const Slice& xid) override {
-      seen += "MarkCommit(" + xid.ToString() + ")";
-      return Status::OK();
+    return Status::OK();
+  }
+  Status SingleDeleteCF(uint32_t column_family_id, const Slice& key) override {
+    if (column_family_id == 0) {
+      seen += "SingleDelete(" + key.ToString() + ")";
+    } else {
+      seen += "SingleDeleteCF(" + std::to_string(column_family_id) + ", " +
+              key.ToString() + ")";
    }
-    Status MarkCommitWithTimestamp(const Slice& xid, const Slice& ts) override {
-      seen += "MarkCommitWithTimestamp(" + xid.ToString() + ", " +
-              ts.ToString(true) + ")";
-      return Status::OK();
+    return Status::OK();
+  }
+  Status DeleteRangeCF(uint32_t column_family_id, const Slice& begin_key,
+                       const Slice& end_key) override {
+    if (column_family_id == 0) {
+      seen += "DeleteRange(" + begin_key.ToString() + ", " +
+              end_key.ToString() + ")";
+    } else {
+      seen += "DeleteRangeCF(" + std::to_string(column_family_id) + ", " +
+              begin_key.ToString() + ", " + end_key.ToString() + ")";
    }
-    Status MarkRollback(const Slice& xid) override {
-      seen += "MarkRollback(" + xid.ToString() + ")";
-      return Status::OK();
+    return Status::OK();
+  }
+  Status MergeCF(uint32_t column_family_id, const Slice& key,
+                 const Slice& value) override {
+    if (column_family_id == 0) {
+      seen += "Merge(" + key.ToString() + ", " + value.ToString() + ")";
+    } else {
+      seen += "MergeCF(" + std::to_string(column_family_id) + ", " +
+              key.ToString() + ", " + value.ToString() + ")";
    }
-  };
-}
+    return Status::OK();
+  }
+  void LogData(const Slice& blob) override {
+    seen += "LogData(" + blob.ToString() + ")";
+  }
+  Status MarkBeginPrepare(bool unprepare) override {
+    seen +=
+        "MarkBeginPrepare(" + std::string(unprepare ? "true" : "false") + ")";
+    return Status::OK();
+  }
+  Status MarkEndPrepare(const Slice& xid) override {
+    seen += "MarkEndPrepare(" + xid.ToString() + ")";
+    return Status::OK();
+  }
+  Status MarkNoop(bool empty_batch) override {
+    seen += "MarkNoop(" + std::string(empty_batch ? "true" : "false") + ")";
+    return Status::OK();
+  }
+  Status MarkCommit(const Slice& xid) override {
+    seen += "MarkCommit(" + xid.ToString() + ")";
+    return Status::OK();
+  }
+  Status MarkCommitWithTimestamp(const Slice& xid, const Slice& ts) override {
+    seen += "MarkCommitWithTimestamp(" + xid.ToString() + ", " +
+            ts.ToString(true) + ")";
+    return Status::OK();
+  }
+  Status MarkRollback(const Slice& xid) override {
+    seen += "MarkRollback(" + xid.ToString() + ")";
+    return Status::OK();
+  }
+};
+}  // anonymous namespace
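`TestHandler`, reindented above, is an instance of the visitor pattern that `WriteBatch` exposes: subclass `WriteBatch::Handler`, override only the callbacks you care about (the base class keeps no-op or not-supported defaults, as the `LogData` comment earlier notes), and replay a batch through `WriteBatch::Iterate()`. A minimal sketch, assuming only the public `rocksdb/write_batch.h` API:

#include "rocksdb/write_batch.h"

// Counts default-column-family operations in a batch. Put()/Delete() are
// the legacy default-CF callbacks that PutCF()/DeleteCF() forward to.
class CountingHandler : public ROCKSDB_NAMESPACE::WriteBatch::Handler {
 public:
  int puts = 0;
  int deletes = 0;
  void Put(const ROCKSDB_NAMESPACE::Slice& /*key*/,
           const ROCKSDB_NAMESPACE::Slice& /*value*/) override {
    ++puts;
  }
  void Delete(const ROCKSDB_NAMESPACE::Slice& /*key*/) override { ++deletes; }
};

// Usage:
//   ROCKSDB_NAMESPACE::WriteBatch batch;
//   batch.Put("key", "value");
//   CountingHandler handler;
//   ROCKSDB_NAMESPACE::Status s = batch.Iterate(&handler);
//   // On success, handler.puts == 1 and handler.deletes == 0.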

 TEST_F(WriteBatchTest, PutNotImplemented) {
   WriteBatch batch;
@@ -609,24 +609,25 @@ TEST_F(WriteBatchTest, PutGatherSlices) {
   {
     // Try a write where the key is one slice but the value is two
     Slice key_slice("baz");
-    Slice value_slices[2] = { Slice("header"), Slice("payload") };
+    Slice value_slices[2] = {Slice("header"), Slice("payload")};
     ASSERT_OK(
         batch.Put(SliceParts(&key_slice, 1), SliceParts(value_slices, 2)));
   }

   {
     // One where the key is composite but the value is a single slice
-    Slice key_slices[3] = { Slice("key"), Slice("part2"), Slice("part3") };
+    Slice key_slices[3] = {Slice("key"), Slice("part2"), Slice("part3")};
     Slice value_slice("value");
     ASSERT_OK(
         batch.Put(SliceParts(key_slices, 3), SliceParts(&value_slice, 1)));
   }

   WriteBatchInternal::SetSequence(&batch, 100);
-  ASSERT_EQ("Put(baz, headerpayload)@101"
-            "Put(foo, bar)@100"
-            "Put(keypart2part3, value)@102",
-            PrintContents(&batch));
+  ASSERT_EQ(
+      "Put(baz, headerpayload)@101"
+      "Put(foo, bar)@100"
+      "Put(keypart2part3, value)@102",
+      PrintContents(&batch));
   ASSERT_EQ(3u, batch.Count());
 }

@@ -646,7 +647,7 @@ class ColumnFamilyHandleImplDummy : public ColumnFamilyHandleImpl {
   uint32_t id_;
   const Comparator* const ucmp_ = BytewiseComparator();
 };
-}  // namespace anonymous
+}  // anonymous namespace

 TEST_F(WriteBatchTest, ColumnFamiliesBatchTest) {
   WriteBatch batch;
@@ -948,7 +949,7 @@ Status CheckTimestampsInWriteBatch(
   TimestampChecker ts_checker(cf_to_ucmps, timestamp);
   return wb.Iterate(&ts_checker);
 }
-}  // namespace
+}  // anonymous namespace

 TEST_F(WriteBatchTest, SanityChecks) {
   ColumnFamilyHandleImplDummy cf0(0,
diff --git a/db/write_callback_test.cc b/db/write_callback_test.cc
index 16061739e..e6ebaae08 100644
--- a/db/write_callback_test.cc
+++ b/db/write_callback_test.cc
@@ -5,6 +5,8 @@

 #ifndef ROCKSDB_LITE

+#include "db/write_callback.h"
+
 #include <atomic>
 #include <functional>
 #include <string>
@@ -12,7 +14,6 @@
 #include <vector>

 #include "db/db_impl/db_impl.h"
-#include "db/write_callback.h"
 #include "port/port.h"
 #include "rocksdb/db.h"
 #include "rocksdb/write_batch.h"
@@ -37,11 +38,11 @@ class WriteCallbackTestWriteCallback1 : public WriteCallback {
  public:
   bool was_called = false;

-  Status Callback(DB *db) override {
+  Status Callback(DB* db) override {
     was_called = true;

     // Make sure db is a DBImpl
-    DBImpl* db_impl = dynamic_cast<DBImpl*> (db);
+    DBImpl* db_impl = dynamic_cast<DBImpl*>(db);
     if (db_impl == nullptr) {
       return Status::InvalidArgument("");
     }
@@ -397,7 +398,7 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) {
     Status s = DB::Open(options, dbname, &db);
     ASSERT_OK(s);
-    db_impl = dynamic_cast<DBImpl*> (db);
+    db_impl = dynamic_cast<DBImpl*>(db);
     ASSERT_TRUE(db_impl);

     WriteBatch wb;
diff --git a/db/write_controller.h b/db/write_controller.h
index c32b70b94..bcead165b 100644
--- a/db/write_controller.h
+++ b/db/write_controller.h
@@ -9,6 +9,7 @@

 #include <atomic>
 #include <memory>
+
 #include "rocksdb/rate_limiter.h"

 namespace ROCKSDB_NAMESPACE {
diff --git a/db/write_controller_test.cc b/db/write_controller_test.cc
index 69c2418e9..b6321a3bc 100644
--- a/db/write_controller_test.cc
+++ b/db/write_controller_test.cc
@@ -20,7 +20,7 @@ class TimeSetClock : public SystemClockWrapper {
   uint64_t now_micros_ = 6666;
   uint64_t NowNanos() override { return now_micros_ * std::milli::den; }
 };
-}  // namespace
+}  // anonymous namespace
 class WriteControllerTest : public testing::Test {
  public:
  WriteControllerTest() { clock_ = std::make_shared<TimeSetClock>(); }
diff --git a/db/write_thread.cc b/db/write_thread.cc
index 40580710a..cc8645f37 100644
--- a/db/write_thread.cc
+++ b/db/write_thread.cc
@@ -397,8 +397,9 @@ void WriteThread::JoinBatchGroup(Writer* w) {
    * writes in parallel.
    */
   TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:BeganWaiting", w);
-  AwaitState(w, STATE_GROUP_LEADER | STATE_MEMTABLE_WRITER_LEADER |
-                    STATE_PARALLEL_MEMTABLE_WRITER | STATE_COMPLETED,
+  AwaitState(w,
+             STATE_GROUP_LEADER | STATE_MEMTABLE_WRITER_LEADER |
+                 STATE_PARALLEL_MEMTABLE_WRITER | STATE_COMPLETED,
              &jbg_ctx);
   TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:DoneWaiting", w);
 }
@@ -595,10 +596,10 @@ void WriteThread::LaunchParallelMemTableWriters(WriteGroup* write_group) {
   }
 }

-static WriteThread::AdaptationContext cpmtw_ctx("CompleteParallelMemTableWriter");
+static WriteThread::AdaptationContext cpmtw_ctx(
+    "CompleteParallelMemTableWriter");

 // This method is called by both the leader and parallel followers
 bool WriteThread::CompleteParallelMemTableWriter(Writer* w) {
-
   auto* write_group = w->write_group;
   if (!w->status.ok()) {
     std::lock_guard<std::mutex> guard(write_group->leader->StateMutex());
@@ -718,8 +719,9 @@ void WriteThread::ExitAsBatchGroupLeader(WriteGroup& write_group,
       SetState(new_leader, STATE_GROUP_LEADER);
     }

-    AwaitState(leader, STATE_MEMTABLE_WRITER_LEADER |
-                           STATE_PARALLEL_MEMTABLE_WRITER | STATE_COMPLETED,
+    AwaitState(leader,
+               STATE_MEMTABLE_WRITER_LEADER | STATE_PARALLEL_MEMTABLE_WRITER |
+                   STATE_COMPLETED,
                &eabgl_ctx);
   } else {
     Writer* head = newest_writer_.load(std::memory_order_acquire);
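The `AwaitState()` calls reformatted above all follow one protocol: a writer blocks until its state becomes any of the states OR'd into the goal mask, and the `AdaptationContext` arguments tune how long it spins before sleeping. A condensed sketch of just the blocking core of that any-of-mask wait; RocksDB's real `AwaitState` layers adaptive busy-spin and yield phases on top:

#include <condition_variable>
#include <cstdint>
#include <mutex>

class StateWaiter {
 public:
  // Publishes a new state and wakes any waiter whose mask it matches.
  void SetState(uint32_t state) {
    std::lock_guard<std::mutex> lock(mu_);
    state_ = state;
    cv_.notify_all();
  }

  // Blocks until the state matches at least one bit in goal_mask, then
  // returns the state that satisfied the wait.
  uint32_t AwaitState(uint32_t goal_mask) {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [&] { return (state_ & goal_mask) != 0; });
    return state_;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  uint32_t state_ = 0;
};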