@ -70,6 +70,7 @@ namespace rocksdb {
// Sentinel pointer values for SuperVersion bookkeeping. `dummy` exists only
// to provide a unique, stable address that is distinct from nullptr.
int DBImpl::SuperVersion::dummy = 0;
// Marks a SuperVersion slot as currently in use (borrowed).
void* const DBImpl::SuperVersion::kSVInUse = &DBImpl::SuperVersion::dummy;
// Marks a SuperVersion slot as obsolete/empty.
void* const DBImpl::SuperVersion::kSVObsolete = nullptr;
// Sentinel used for CompactionState::cur_prefix_ before the first key of a
// compaction-filter-V2 run has been seen (see DoCompactionWork).
const std::string kNullString = "NULL";
void DumpLeveldbBuildVersion ( Logger * log ) ;
@ -118,12 +119,129 @@ struct DBImpl::CompactionState {
}
// Create a client visible context of this compaction
CompactionFilter : : Context GetFilterContext ( ) {
CompactionFilter : : Context context ;
CompactionFilterContext GetFilterContext ( ) {
CompactionFilterContext context ;
context . is_full_compaction = compaction - > IsFullCompaction ( ) ;
context . is_manual_compaction = compaction - > IsManualCompaction ( ) ;
return context ;
}
// ---- Buffers for batching kv-pairs through CompactionFilterV2 ----
// The Slice vectors below are views into the strings owned by the matching
// *_str_buf_ vectors; they own no bytes themselves.
std::vector<Slice> key_buf_;
std::vector<Slice> existing_value_buf_;
std::vector<std::string> key_str_buf_;
std::vector<std::string> existing_value_str_buf_;
// new_value_buf_ will only be appended if a value changes
std::vector<std::string> new_value_buf_;
// if values_changed_buf_[i] is true
// new_value_buf_ will add a new entry with the changed value
std::vector<bool> value_changed_buf_;
// to_delete_buf_[i] is true iff key_buf_[i] is deleted
std::vector<bool> to_delete_buf_;
// buffer for the parsed internal keys, the string buffer is backed
// by key_str_buf_
std::vector<ParsedInternalKey> ikey_buf_;
// kv-pairs that bypass the V2 filter; Slices view the *_str_buf_ strings.
std::vector<Slice> other_key_buf_;
std::vector<Slice> other_value_buf_;
std::vector<std::string> other_key_str_buf_;
std::vector<std::string> other_value_str_buf_;
// Merged, sorted view over key_buf_ and other_key_buf_; aliases the batch
// buffers above (see MergeKeyValueSliceBuffer), owns no storage.
std::vector<Slice> combined_key_buf_;
std::vector<Slice> combined_value_buf_;
// Prefix of the batch currently being buffered; kNullString until the
// first key of the compaction has been seen.
std::string cur_prefix_;
// Buffers the kv-pair that will be run through compaction filter V2
// in the future. Copies the key/value bytes into the string buffers and
// records Slice views plus the parsed internal key.
// NOTE(review): key_buf_/existing_value_buf_ point into strings stored in
// key_str_buf_/existing_value_str_buf_; if those vectors reallocate on a
// later emplace_back, short (SSO) strings move and earlier Slices dangle —
// confirm batch sizes/usage make this safe.
void BufferKeyValueSlices(const Slice& key, const Slice& value) {
  key_str_buf_.emplace_back(key.ToString());
  existing_value_str_buf_.emplace_back(value.ToString());
  key_buf_.emplace_back(Slice(key_str_buf_.back()));
  existing_value_buf_.emplace_back(Slice(existing_value_str_buf_.back()));
  ParsedInternalKey ikey;
  // Return value ignored: a corrupt key would leave `ikey` unspecified —
  // TODO(review): confirm upstream guarantees well-formed internal keys.
  ParseInternalKey(key_buf_.back(), &ikey);
  ikey_buf_.emplace_back(ikey);
}
// Buffers the kv-pair that will not be run through compaction filter V2
// in the future.
// Buffers a kv-pair that will NOT be run through compaction filter V2.
// The bytes are copied into the *_str_buf_ vectors; the Slice vectors
// hold views into those copies.
void BufferOtherKeyValueSlices(const Slice& key, const Slice& value) {
  other_key_str_buf_.push_back(key.ToString());
  other_key_buf_.push_back(Slice(other_key_str_buf_.back()));
  other_value_str_buf_.push_back(value.ToString());
  other_value_buf_.push_back(Slice(other_value_str_buf_.back()));
}
// Add a kv-pair to the combined buffer
// Appends one kv-pair to the merged output buffers. Only Slice views are
// stored; the underlying bytes stay owned by the batch string buffers.
void AddToCombinedKeyValueSlices(const Slice& key, const Slice& value) {
  combined_key_buf_.push_back(key);
  combined_value_buf_.push_back(value);
}
// Merging the two buffers
void MergeKeyValueSliceBuffer ( const InternalKeyComparator * comparator ) {
size_t i = 0 ;
size_t j = 0 ;
size_t total_size = key_buf_ . size ( ) + other_key_buf_ . size ( ) ;
combined_key_buf_ . reserve ( total_size ) ;
combined_value_buf_ . reserve ( total_size ) ;
while ( i + j < total_size ) {
int comp_res = 0 ;
if ( i < key_buf_ . size ( ) & & j < other_key_buf_ . size ( ) ) {
comp_res = comparator - > Compare ( key_buf_ [ i ] , other_key_buf_ [ j ] ) ;
} else if ( i > = key_buf_ . size ( ) & & j < other_key_buf_ . size ( ) ) {
comp_res = 1 ;
} else if ( j > = other_key_buf_ . size ( ) & & i < key_buf_ . size ( ) ) {
comp_res = - 1 ;
}
if ( comp_res > 0 ) {
AddToCombinedKeyValueSlices ( other_key_buf_ [ j ] , other_value_buf_ [ j ] ) ;
j + + ;
} else if ( comp_res < 0 ) {
AddToCombinedKeyValueSlices ( key_buf_ [ i ] , existing_value_buf_ [ i ] ) ;
i + + ;
}
}
}
// Drops all per-batch buffering state. Each vector is cleared and its
// capacity released (clear() alone keeps capacity), so memory is not
// retained across prefix batches.
void CleanupBatchBuffer() {
  to_delete_buf_.clear();
  to_delete_buf_.shrink_to_fit();
  key_buf_.clear();
  key_buf_.shrink_to_fit();
  existing_value_buf_.clear();
  existing_value_buf_.shrink_to_fit();
  key_str_buf_.clear();
  key_str_buf_.shrink_to_fit();
  existing_value_str_buf_.clear();
  existing_value_str_buf_.shrink_to_fit();
  new_value_buf_.clear();
  new_value_buf_.shrink_to_fit();
  value_changed_buf_.clear();
  value_changed_buf_.shrink_to_fit();
  ikey_buf_.clear();
  ikey_buf_.shrink_to_fit();
  other_key_buf_.clear();
  other_key_buf_.shrink_to_fit();
  other_value_buf_.clear();
  other_value_buf_.shrink_to_fit();
  other_key_str_buf_.clear();
  other_key_str_buf_.shrink_to_fit();
  other_value_str_buf_.clear();
  other_value_str_buf_.shrink_to_fit();
}
// Drops the merged kv-pair view and releases its capacity.
void CleanupMergedBuffer() {
  combined_key_buf_.clear();
  combined_key_buf_.shrink_to_fit();
  combined_value_buf_.clear();
  combined_value_buf_.shrink_to_fit();
}
} ;
// Fix user-supplied options to be reasonable
@ -2401,66 +2519,27 @@ inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
return 0 ;
}
Status DBImpl : : DoCompactionWork ( CompactionState * compact ,
DeletionState & deletion_state ,
LogBuffer * log_buffer ) {
assert ( compact ) ;
int64_t imm_micros = 0 ; // Micros spent doing imm_ compactions
Log ( options_ . info_log ,
" Compacting %d@%d + %d@%d files, score %.2f slots available %d " ,
compact - > compaction - > num_input_files ( 0 ) ,
compact - > compaction - > level ( ) ,
compact - > compaction - > num_input_files ( 1 ) ,
compact - > compaction - > output_level ( ) ,
compact - > compaction - > score ( ) ,
options_ . max_background_compactions - bg_compaction_scheduled_ ) ;
char scratch [ 2345 ] ;
compact - > compaction - > Summary ( scratch , sizeof ( scratch ) ) ;
Log ( options_ . info_log , " Compaction start summary: %s \n " , scratch ) ;
assert ( versions_ - > current ( ) - > NumLevelFiles ( compact - > compaction - > level ( ) ) > 0 ) ;
assert ( compact - > builder = = nullptr ) ;
assert ( ! compact - > outfile ) ;
SequenceNumber visible_at_tip = 0 ;
SequenceNumber earliest_snapshot ;
SequenceNumber latest_snapshot = 0 ;
snapshots_ . getAll ( compact - > existing_snapshots ) ;
if ( compact - > existing_snapshots . size ( ) = = 0 ) {
// optimize for fast path if there are no snapshots
visible_at_tip = versions_ - > LastSequence ( ) ;
earliest_snapshot = visible_at_tip ;
} else {
latest_snapshot = compact - > existing_snapshots . back ( ) ;
// Add the current seqno as the 'latest' virtual
// snapshot to the end of this list.
compact - > existing_snapshots . push_back ( versions_ - > LastSequence ( ) ) ;
earliest_snapshot = compact - > existing_snapshots [ 0 ] ;
}
// Is this compaction producing files at the bottommost level?
bool bottommost_level = compact - > compaction - > BottomMostLevel ( ) ;
// Allocate the output file numbers before we release the lock
AllocateCompactionOutputFileNumbers ( compact ) ;
// Release mutex while we're actually doing the compaction work
mutex_ . Unlock ( ) ;
// flush log buffer immediately after releasing the mutex
log_buffer - > FlushBufferToLog ( ) ;
const uint64_t start_micros = env_ - > NowMicros ( ) ;
unique_ptr < Iterator > input ( versions_ - > MakeInputIterator ( compact - > compaction ) ) ;
input - > SeekToFirst ( ) ;
Status DBImpl : : ProcessKeyValueCompaction (
SequenceNumber visible_at_tip ,
SequenceNumber earliest_snapshot ,
SequenceNumber latest_snapshot ,
DeletionState & deletion_state ,
bool bottommost_level ,
int64_t & imm_micros ,
Iterator * input ,
CompactionState * compact ,
bool is_compaction_v2 ,
LogBuffer * log_buffer ) {
size_t combined_idx = 0 ;
Status status ;
std : : string compaction_filter_value ;
ParsedInternalKey ikey ;
std : : string current_user_key ;
bool has_current_user_key = false ;
std : : vector < char > delete_key ; // for compaction filter
SequenceNumber last_sequence_for_key __attribute__ ( ( unused ) ) =
kMaxSequenceNumber ;
SequenceNumber visible_in_snapshot = kMaxSequenceNumber ;
std : : string compaction_filter_value ;
std : : vector < char > delete_key ; // for compaction filter
MergeHelper merge ( user_comparator ( ) , options_ . merge_operator . get ( ) ,
options_ . info_log . get ( ) ,
options_ . min_partial_merge_operands ,
@ -2490,12 +2569,31 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
imm_micros + = ( env_ - > NowMicros ( ) - imm_start ) ;
}
Slice key = input - > key ( ) ;
Slice value = input - > value ( ) ;
Slice key ;
Slice value ;
// If is_compaction_v2 is on, kv-pairs are reset to the prefix batch.
// This prefix batch should contain results after calling
// compaction_filter_v2.
//
// If is_compaction_v2 is off, this function will go through all the
// kv-pairs in input.
if ( ! is_compaction_v2 ) {
key = input - > key ( ) ;
value = input - > value ( ) ;
} else {
if ( combined_idx > = compact - > combined_key_buf_ . size ( ) ) {
break ;
}
assert ( combined_idx < compact - > combined_key_buf_ . size ( ) ) ;
key = compact - > combined_key_buf_ [ combined_idx ] ;
value = compact - > combined_value_buf_ [ combined_idx ] ;
+ + combined_idx ;
}
if ( compact - > compaction - > ShouldStopBefore ( key ) & &
compact - > builder ! = nullptr ) {
status = FinishCompactionOutputFile ( compact , input . get ( ) ) ;
status = FinishCompactionOutputFile ( compact , input ) ;
if ( ! status . ok ( ) ) {
break ;
}
@ -2515,15 +2613,14 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
} else {
if ( ! has_current_user_key | |
user_comparator ( ) - > Compare ( ikey . user_key ,
Slice ( current_user_key ) ) ! = 0 ) {
Slice ( current_user_key ) ) ! = 0 ) {
// First occurrence of this user key
current_user_key . assign ( ikey . user_key . data ( ) , ikey . user_key . size ( ) ) ;
has_current_user_key = true ;
last_sequence_for_key = kMaxSequenceNumber ;
visible_in_snapshot = kMaxSequenceNumber ;
// apply the compaction filter to the first occurrence of the user key
if ( compaction_filter & &
if ( compaction_filter & & ! is_compaction_v2 & &
ikey . type = = kTypeValue & &
( visible_at_tip | | ikey . sequence > latest_snapshot ) ) {
// If the user has specified a compaction filter and the sequence
@ -2535,15 +2632,15 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
compaction_filter_value . clear ( ) ;
bool to_delete =
compaction_filter - > Filter ( compact - > compaction - > level ( ) ,
ikey . user_key , value ,
& compaction_filter_value ,
& value_changed ) ;
ikey . user_key , value ,
& compaction_filter_value ,
& value_changed ) ;
if ( to_delete ) {
// make a copy of the original key
delete_key . assign ( key . data ( ) , key . data ( ) + key . size ( ) ) ;
// convert it to a delete
UpdateInternalKey ( & delete_key [ 0 ] , delete_key . size ( ) ,
ikey . sequence , kTypeDeletion ) ;
ikey . sequence , kTypeDeletion ) ;
// anchor the key again
key = Slice ( & delete_key [ 0 ] , delete_key . size ( ) ) ;
// needed because ikey is backed by key
@ -2565,8 +2662,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
SequenceNumber visible = visible_at_tip ?
visible_at_tip :
findEarliestVisibleSnapshot ( ikey . sequence ,
compact - > existing_snapshots ,
& prev_snapshot ) ;
compact - > existing_snapshots ,
& prev_snapshot ) ;
if ( visible_in_snapshot = = visible ) {
// If the earliest snapshot is which this key is visible in
@ -2578,8 +2675,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
drop = true ; // (A)
RecordTick ( options_ . statistics . get ( ) , COMPACTION_KEY_DROP_NEWER_ENTRY ) ;
} else if ( ikey . type = = kTypeDeletion & &
ikey . sequence < = earliest_snapshot & &
compact - > compaction - > IsBaseLevelForKey ( ikey . user_key ) ) {
ikey . sequence < = earliest_snapshot & &
compact - > compaction - > IsBaseLevelForKey ( ikey . user_key ) ) {
// For this user key:
// (1) there is no data in higher levels
// (2) data in lower levels will have larger sequence numbers
@ -2596,8 +2693,12 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
// object to minimize change to the existing flow. Turn out this
// logic could also be nicely re-used for memtable flush purge
// optimization in BuildTable.
merge . MergeUntil ( input . get ( ) , prev_snapshot , bottommost_level ,
options_ . statistics . get ( ) ) ;
int steps = 0 ;
merge . MergeUntil ( input , prev_snapshot , bottommost_level ,
options_ . statistics . get ( ) , & steps ) ;
// Skip the Merge ops
combined_idx = combined_idx - 1 + steps ;
current_entry_is_merging = true ;
if ( merge . IsSuccess ( ) ) {
// Successfully found Put/Delete/(end-of-key-range) while merging
@ -2699,7 +2800,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
// Close output file if it is big enough
if ( compact - > builder - > FileSize ( ) > =
compact - > compaction - > MaxOutputFileSize ( ) ) {
status = FinishCompactionOutputFile ( compact , input . get ( ) ) ;
status = FinishCompactionOutputFile ( compact , input ) ;
if ( ! status . ok ( ) ) {
break ;
}
@ -2736,6 +2837,278 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
}
}
return status ;
}
void DBImpl : : CallCompactionFilterV2 ( CompactionState * compact ,
CompactionFilterV2 * compaction_filter_v2 ) {
if ( compact = = nullptr | | compaction_filter_v2 = = nullptr ) {
return ;
}
std : : vector < Slice > user_key_buf ;
for ( const auto & key : compact - > ikey_buf_ ) {
user_key_buf . emplace_back ( key . user_key ) ;
}
// If the user has specified a compaction filter and the sequence
// number is greater than any external snapshot, then invoke the
// filter.
// If the return value of the compaction filter is true, replace
// the entry with a delete marker.
compact - > to_delete_buf_ = compaction_filter_v2 - > Filter (
compact - > compaction - > level ( ) ,
user_key_buf , compact - > existing_value_buf_ ,
& compact - > new_value_buf_ ,
& compact - > value_changed_buf_ ) ;
// new_value_buf_.size() <= to_delete__buf_.size(). "=" iff all
// kv-pairs in this compaction run needs to be deleted.
assert ( compact - > to_delete_buf_ . size ( ) = =
compact - > key_buf_ . size ( ) ) ;
assert ( compact - > to_delete_buf_ . size ( ) = =
compact - > existing_value_buf_ . size ( ) ) ;
assert ( compact - > to_delete_buf_ . size ( ) = =
compact - > value_changed_buf_ . size ( ) ) ;
int new_value_idx = 0 ;
for ( unsigned int i = 0 ; i < compact - > to_delete_buf_ . size ( ) ; + + i ) {
if ( compact - > to_delete_buf_ [ i ] ) {
// update the string buffer directly
// the Slice buffer points to the updated buffer
UpdateInternalKey ( & compact - > key_str_buf_ [ i ] [ 0 ] ,
compact - > key_str_buf_ [ i ] . size ( ) ,
compact - > ikey_buf_ [ i ] . sequence ,
kTypeDeletion ) ;
// no value associated with delete
compact - > existing_value_buf_ [ i ] . clear ( ) ;
RecordTick ( options_ . statistics . get ( ) , COMPACTION_KEY_DROP_USER ) ;
} else if ( compact - > value_changed_buf_ [ i ] ) {
compact - > existing_value_buf_ [ i ] =
Slice ( compact - > new_value_buf_ [ new_value_idx + + ] ) ;
}
} // for
}
Status DBImpl : : DoCompactionWork ( CompactionState * compact ,
DeletionState & deletion_state ,
LogBuffer * log_buffer ) {
assert ( compact ) ;
compact - > CleanupBatchBuffer ( ) ;
compact - > CleanupMergedBuffer ( ) ;
compact - > cur_prefix_ = kNullString ;
int64_t imm_micros = 0 ; // Micros spent doing imm_ compactions
Log ( options_ . info_log ,
" Compacting %d@%d + %d@%d files, score %.2f slots available %d " ,
compact - > compaction - > num_input_files ( 0 ) ,
compact - > compaction - > level ( ) ,
compact - > compaction - > num_input_files ( 1 ) ,
compact - > compaction - > output_level ( ) ,
compact - > compaction - > score ( ) ,
options_ . max_background_compactions - bg_compaction_scheduled_ ) ;
char scratch [ 2345 ] ;
compact - > compaction - > Summary ( scratch , sizeof ( scratch ) ) ;
Log ( options_ . info_log , " Compaction start summary: %s \n " , scratch ) ;
assert ( versions_ - > current ( ) - > NumLevelFiles ( compact - > compaction - > level ( ) ) > 0 ) ;
assert ( compact - > builder = = nullptr ) ;
assert ( ! compact - > outfile ) ;
SequenceNumber visible_at_tip = 0 ;
SequenceNumber earliest_snapshot ;
SequenceNumber latest_snapshot = 0 ;
snapshots_ . getAll ( compact - > existing_snapshots ) ;
if ( compact - > existing_snapshots . size ( ) = = 0 ) {
// optimize for fast path if there are no snapshots
visible_at_tip = versions_ - > LastSequence ( ) ;
earliest_snapshot = visible_at_tip ;
} else {
latest_snapshot = compact - > existing_snapshots . back ( ) ;
// Add the current seqno as the 'latest' virtual
// snapshot to the end of this list.
compact - > existing_snapshots . push_back ( versions_ - > LastSequence ( ) ) ;
earliest_snapshot = compact - > existing_snapshots [ 0 ] ;
}
// Is this compaction producing files at the bottommost level?
bool bottommost_level = compact - > compaction - > BottomMostLevel ( ) ;
// Allocate the output file numbers before we release the lock
AllocateCompactionOutputFileNumbers ( compact ) ;
// Release mutex while we're actually doing the compaction work
mutex_ . Unlock ( ) ;
const uint64_t start_micros = env_ - > NowMicros ( ) ;
unique_ptr < Iterator > input ( versions_ - > MakeInputIterator ( compact - > compaction ) ) ;
input - > SeekToFirst ( ) ;
shared_ptr < Iterator > backup_input (
versions_ - > MakeInputIterator ( compact - > compaction ) ) ;
backup_input - > SeekToFirst ( ) ;
Status status ;
ParsedInternalKey ikey ;
std : : unique_ptr < CompactionFilterV2 > compaction_filter_from_factory_v2
= nullptr ;
auto context = compact - > GetFilterContext ( ) ;
compaction_filter_from_factory_v2 =
options_ . compaction_filter_factory_v2 - > CreateCompactionFilterV2 ( context ) ;
auto compaction_filter_v2 =
compaction_filter_from_factory_v2 . get ( ) ;
// temp_backup_input always point to the start of the current buffer
// temp_backup_input = backup_input;
// iterate through input,
// 1) buffer ineligible keys and value keys into 2 separate buffers;
// 2) send value_buffer to compaction filter and alternate the values;
// 3) merge value_buffer with ineligible_value_buffer;
// 4) run the modified "compaction" using the old for loop.
if ( compaction_filter_v2 ) {
for ( ; backup_input - > Valid ( ) & & ! shutting_down_ . Acquire_Load ( ) ; ) {
// Prioritize immutable compaction work
if ( imm_ . imm_flush_needed . NoBarrier_Load ( ) ! = nullptr ) {
const uint64_t imm_start = env_ - > NowMicros ( ) ;
LogFlush ( options_ . info_log ) ;
mutex_ . Lock ( ) ;
if ( imm_ . IsFlushPending ( ) ) {
FlushMemTableToOutputFile ( nullptr , deletion_state , log_buffer ) ;
bg_cv_ . SignalAll ( ) ; // Wakeup MakeRoomForWrite() if necessary
}
mutex_ . Unlock ( ) ;
imm_micros + = ( env_ - > NowMicros ( ) - imm_start ) ;
}
Slice key = backup_input - > key ( ) ;
Slice value = backup_input - > value ( ) ;
const SliceTransform * transformer =
options_ . compaction_filter_factory_v2 - > GetPrefixExtractor ( ) ;
std : : string key_prefix = transformer - > Transform ( key ) . ToString ( ) ;
if ( compact - > cur_prefix_ = = kNullString ) {
compact - > cur_prefix_ = key_prefix ;
}
if ( ! ParseInternalKey ( key , & ikey ) ) {
// log error
Log ( options_ . info_log , " Failed to parse key: %s " ,
key . ToString ( ) . c_str ( ) ) ;
continue ;
} else {
// If the prefix remains the same, keep buffering
if ( key_prefix = = compact - > cur_prefix_ ) {
// Apply the compaction filter V2 to all the kv pairs sharing
// the same prefix
if ( ikey . type = = kTypeValue & &
( visible_at_tip | | ikey . sequence > latest_snapshot ) ) {
// Buffer all keys sharing the same prefix for CompactionFilterV2
// Iterate through keys to check prefix
compact - > BufferKeyValueSlices ( key , value ) ;
} else {
// buffer ineligible keys
compact - > BufferOtherKeyValueSlices ( key , value ) ;
}
backup_input - > Next ( ) ;
continue ;
// finish changing values for eligible keys
} else {
// Now prefix changes, this batch is done.
// Call compaction filter on the buffered values to change the value
if ( compact - > key_buf_ . size ( ) > 0 ) {
CallCompactionFilterV2 ( compact , compaction_filter_v2 ) ;
}
compact - > cur_prefix_ = key_prefix ;
}
}
// Merge this batch of data (values + ineligible keys)
compact - > MergeKeyValueSliceBuffer ( & internal_comparator_ ) ;
// Done buffering for the current prefix. Spit it out to disk
// Now just iterate through all the kv-pairs
status = ProcessKeyValueCompaction (
visible_at_tip ,
earliest_snapshot ,
latest_snapshot ,
deletion_state ,
bottommost_level ,
imm_micros ,
input . get ( ) ,
compact ,
true ,
log_buffer ) ;
if ( ! status . ok ( ) ) {
break ;
}
// After writing the kv-pairs, we can safely remove the reference
// to the string buffer and clean them up
compact - > CleanupBatchBuffer ( ) ;
compact - > CleanupMergedBuffer ( ) ;
// Buffer the key that triggers the mismatch in prefix
if ( ikey . type = = kTypeValue & &
( visible_at_tip | | ikey . sequence > latest_snapshot ) ) {
compact - > BufferKeyValueSlices ( key , value ) ;
} else {
compact - > BufferOtherKeyValueSlices ( key , value ) ;
}
backup_input - > Next ( ) ;
if ( ! backup_input - > Valid ( ) ) {
// If this is the single last value, we need to merge it.
if ( compact - > key_buf_ . size ( ) > 0 ) {
CallCompactionFilterV2 ( compact , compaction_filter_v2 ) ;
}
compact - > MergeKeyValueSliceBuffer ( & internal_comparator_ ) ;
status = ProcessKeyValueCompaction (
visible_at_tip ,
earliest_snapshot ,
latest_snapshot ,
deletion_state ,
bottommost_level ,
imm_micros ,
input . get ( ) ,
compact ,
true ,
log_buffer ) ;
compact - > CleanupBatchBuffer ( ) ;
compact - > CleanupMergedBuffer ( ) ;
}
} // done processing all prefix batches
// finish the last batch
if ( compact - > key_buf_ . size ( ) > 0 ) {
CallCompactionFilterV2 ( compact , compaction_filter_v2 ) ;
}
compact - > MergeKeyValueSliceBuffer ( & internal_comparator_ ) ;
status = ProcessKeyValueCompaction (
visible_at_tip ,
earliest_snapshot ,
latest_snapshot ,
deletion_state ,
bottommost_level ,
imm_micros ,
input . get ( ) ,
compact ,
true ,
log_buffer ) ;
} // checking for compaction filter v2
if ( ! compaction_filter_v2 ) {
status = ProcessKeyValueCompaction (
visible_at_tip ,
earliest_snapshot ,
latest_snapshot ,
deletion_state ,
bottommost_level ,
imm_micros ,
input . get ( ) ,
compact ,
false ,
log_buffer ) ;
}
if ( status . ok ( ) & & shutting_down_ . Acquire_Load ( ) ) {
status = Status : : ShutdownInProgress (
" Database shutdown started during compaction " ) ;