@@ -7,8 +7,8 @@
 #include "rocksdb/utilities/write_batch_with_index.h"
+#include <limits>
 #include <memory>
 #include <vector>
 #include "db/column_family.h"
 #include "db/db_impl.h"
@@ -399,6 +399,7 @@ struct WriteBatchWithIndex::Rep {
   WriteBatchEntrySkipList skip_list;
   bool overwrite_key;
   size_t last_entry_offset;
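+  // Offsets into write_batch of records that a later update to the same
+  // key has superseded; consumed in order by Collapse().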
+  std::vector<size_t> obsolete_offsets;
   // Remember current offset of internal write batch, which is used as
   // the starting offset of the next record.
@@ -450,6 +451,7 @@ bool WriteBatchWithIndex::Rep::UpdateExistingEntryWithCfId(
   }
   WriteBatchIndexEntry* non_const_entry =
       const_cast<WriteBatchIndexEntry*>(iter.GetRawEntry());
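+  // Record the superseded entry's old offset so Collapse() can later drop
+  // the stale record from the underlying batch.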
+  obsolete_offsets.push_back(non_const_entry->offset);
   non_const_entry->offset = last_entry_offset;
   return true;
 }
@@ -576,6 +578,66 @@ void WriteBatchWithIndex::Rep::AddNewEntry(uint32_t column_family_id) {
 WriteBatch* WriteBatchWithIndex::GetWriteBatch() { return &rep->write_batch; }
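+
+// Rewrites the underlying WriteBatch without the records that a later
+// update to the same key made obsolete. Returns false when there is
+// nothing to collapse. A hypothetical usage sketch:
+//   batch.Put(handle, "key", "v1");
+//   batch.Put(handle, "key", "v2");  // makes the "v1" record obsolete
+//   batch.Collapse();                // drops the "v1" record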
+bool WriteBatchWithIndex::Collapse() {
+  if (rep->obsolete_offsets.size() == 0) {
+    return false;
+  }
+  WriteBatch& write_batch = rep->write_batch;
+  assert(write_batch.Count() != 0);
+  size_t offset = WriteBatchInternal::GetFirstOffset(&write_batch);
+  Slice input(write_batch.Data());
+  input.remove_prefix(offset);
+  std::string collapsed_buf;
+  collapsed_buf.resize(WriteBatchInternal::kHeader);
+  size_t count = 0;
+  Status s;
+  // Loop through all entries in the write batch and keep them only if they
+  // have not been made obsolete by a newer entry.
+  while (s.ok() && !input.empty()) {
+    Slice key, value, blob, xid;
+    uint32_t column_family_id = 0;  // default
+    char tag = 0;
+    // set offset of current entry for call to AddNewEntry()
+    size_t last_entry_offset = input.data() - write_batch.Data().data();
+    s = ReadRecordFromWriteBatch(&input, &tag, &column_family_id, &key,
+                                 &value, &blob, &xid);
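+    // obsolete_offsets holds offsets in increasing record order, so only
+    // its front can match the record just read; skip the record if it does.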
+    if (!rep->obsolete_offsets.empty() &&
+        rep->obsolete_offsets.front() == last_entry_offset) {
+      rep->obsolete_offsets.erase(rep->obsolete_offsets.begin());
+      continue;
+    }
+    switch (tag) {
+      case kTypeColumnFamilyValue:
+      case kTypeValue:
+      case kTypeColumnFamilyDeletion:
+      case kTypeDeletion:
+      case kTypeColumnFamilySingleDeletion:
+      case kTypeSingleDeletion:
+      case kTypeColumnFamilyMerge:
+      case kTypeMerge:
+        count++;
+        break;
+      case kTypeLogData:
+      case kTypeBeginPrepareXID:
+      case kTypeEndPrepareXID:
+      case kTypeCommitXID:
+      case kTypeRollbackXID:
+      case kTypeNoop:
+        break;
+      default:
+        assert(0);
+    }
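+    // Keep this record: copy its raw bytes into the collapsed buffer.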
+    size_t entry_offset = input.data() - write_batch.Data().data();
+    const std::string& wb_data = write_batch.Data();
+    Slice entry_ptr = Slice(wb_data.data() + last_entry_offset,
+                            entry_offset - last_entry_offset);
+    collapsed_buf.append(entry_ptr.data(), entry_ptr.size());
+  }
+  write_batch.rep_ = std::move(collapsed_buf);
+  WriteBatchInternal::SetCount(&write_batch, static_cast<int>(count));
+  return true;
+}
+
 WBWIIterator* WriteBatchWithIndex::NewIterator() {
   return new WBWIIteratorImpl(0, &(rep->skip_list), &rep->write_batch);
 }
@@ -689,7 +751,15 @@ Status WriteBatchWithIndex::Merge(ColumnFamilyHandle* column_family,
   rep->SetLastEntryOffset();
   auto s = rep->write_batch.Merge(column_family, key, value);
   if (s.ok()) {
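+    // AddOrUpdateIndex() grows obsolete_offsets only when it replaces an
+    // existing entry for this key, so a size change flags a duplicate merge.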
+    auto size_before = rep->obsolete_offsets.size();
     rep->AddOrUpdateIndex(column_family, key);
+    auto size_after = rep->obsolete_offsets.size();
+    bool duplicate_key = size_before != size_after;
+    if (!allow_dup_merge_ && duplicate_key) {
+      assert(0);
+      return Status::NotSupported(
+          "Duplicate key with merge value is not supported yet");
+    }
   }
   return s;
 }
@@ -698,7 +768,15 @@ Status WriteBatchWithIndex::Merge(const Slice& key, const Slice& value) {
   rep->SetLastEntryOffset();
   auto s = rep->write_batch.Merge(key, value);
   if (s.ok()) {
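+    // Same duplicate-merge guard as the column-family overload above.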
+    auto size_before = rep->obsolete_offsets.size();
     rep->AddOrUpdateIndex(key);
+    auto size_after = rep->obsolete_offsets.size();
+    bool duplicate_key = size_before != size_after;
+    if (!allow_dup_merge_ && duplicate_key) {
+      assert(0);
+      return Status::NotSupported(
+          "Duplicate key with merge value is not supported yet");
+    }
   }
   return s;
 }
}