@@ -104,15 +104,15 @@ FilterBlockBuilder* CreateFilterBlockBuilder(
   }
 }

-bool GoodCompressionRatio(size_t compressed_size, size_t raw_size) {
+bool GoodCompressionRatio(size_t compressed_size, size_t uncomp_size) {
   // Check to see if compressed less than 12.5%
-  return compressed_size < raw_size - (raw_size / 8u);
+  return compressed_size < uncomp_size - (uncomp_size / 8u);
 }

 }  // namespace

 // format_version is the block format as defined in include/rocksdb/table.h
-Slice CompressBlock(const Slice& raw, const CompressionInfo& info,
+Slice CompressBlock(const Slice& uncompressed_data, const CompressionInfo& info,
                     CompressionType* type, uint32_t format_version,
                     bool do_sample, std::string* compressed_output,
                     std::string* sampled_output_fast,
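
For context, GoodCompressionRatio (arguments renamed above) accepts the compressed form only if it saves at least one eighth of the input, i.e. the compressed size must be strictly below 87.5% of the uncompressed size. A minimal standalone sketch of that predicate and its boundary; the sketch's names are hypothetical and not part of the patch:

    #include <cassert>
    #include <cstddef>

    // Same predicate as GoodCompressionRatio above: keep the compressed
    // form only if it is smaller than uncomp_size - uncomp_size / 8.
    bool GoodCompressionRatioSketch(size_t compressed_size, size_t uncomp_size) {
      return compressed_size < uncomp_size - (uncomp_size / 8u);
    }

    int main() {
      // For a 4096-byte block the threshold is 4096 - 512 = 3584 bytes.
      assert(GoodCompressionRatioSketch(3583, 4096));   // kept
      assert(!GoodCompressionRatioSketch(3584, 4096));  // rejected
    }
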
@@ -139,7 +139,8 @@ Slice CompressBlock(const Slice& raw, const CompressionInfo& info,
                                 CompressionDict::GetEmptyDict(), c,
                                 info.SampleForCompression());
-      CompressData(raw, info_tmp, GetCompressFormatForVersion(format_version),
+      CompressData(uncompressed_data, info_tmp,
+                   GetCompressFormatForVersion(format_version),
                    sampled_output_fast);
     }
@@ -152,29 +153,32 @@ Slice CompressBlock(const Slice& raw, const CompressionInfo& info,
                               CompressionDict::GetEmptyDict(), c,
                               info.SampleForCompression());
-    CompressData(raw, info_tmp, GetCompressFormatForVersion(format_version),
+    CompressData(uncompressed_data, info_tmp,
+                 GetCompressFormatForVersion(format_version),
                  sampled_output_slow);
     }
   }

   if (info.type() == kNoCompression) {
     *type = kNoCompression;
-    return raw;
+    return uncompressed_data;
   }

   // Actually compress the data; if the compression method is not supported,
   // or the compression fails etc., just fall back to uncompressed
-  if (!CompressData(raw, info, GetCompressFormatForVersion(format_version),
-                    compressed_output)) {
+  if (!CompressData(uncompressed_data, info,
+                    GetCompressFormatForVersion(format_version),
+                    compressed_output)) {
     *type = kNoCompression;
-    return raw;
+    return uncompressed_data;
   }

   // Check the compression ratio; if it's not good enough, just fall back to
   // uncompressed
-  if (!GoodCompressionRatio(compressed_output->size(), raw.size())) {
+  if (!GoodCompressionRatio(compressed_output->size(),
+                            uncompressed_data.size())) {
     *type = kNoCompression;
-    return raw;
+    return uncompressed_data;
   }

   *type = info.type();
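
The rename does not change CompressBlock's fall-back contract: whenever compression is disabled, fails, or saves too little, the function sets *type to kNoCompression and returns the input slice untouched, so the caller can always write whatever slice comes back. A hedged caller-side sketch of that contract with a stubbed codec; the stub and names are illustrative, not RocksDB's API:

    #include <string>
    #include <string_view>

    enum CompressionType { kNoCompression, kSnappyCompression };

    // Stand-in codec; RocksDB would call the real compressor here. It
    // fails on purpose so the fall-back path below is exercised.
    bool StubCompress(std::string_view /*in*/, std::string* /*out*/) {
      return false;
    }

    // Mirrors the contract above: on any failure, report kNoCompression
    // and hand the caller back the uncompressed input unchanged.
    std::string_view CompressBlockSketch(std::string_view uncompressed_data,
                                         CompressionType requested,
                                         CompressionType* type,
                                         std::string* compressed_output) {
      if (requested == kNoCompression ||
          !StubCompress(uncompressed_data, compressed_output) ||
          compressed_output->size() >=
              uncompressed_data.size() - uncompressed_data.size() / 8) {
        *type = kNoCompression;
        return uncompressed_data;
      }
      *type = requested;
      return *compressed_output;
    }

    int main() {
      std::string out;
      CompressionType type;
      std::string_view result =
          CompressBlockSketch("payload", kSnappyCompression, &type, &out);
      return (type == kNoCompression && result == "payload") ? 0 : 1;
    }
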
@@ -216,7 +220,7 @@ class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector
     return Status::OK();
   }

-  virtual void BlockAdd(uint64_t /* block_raw_bytes */,
+  virtual void BlockAdd(uint64_t /* block_uncomp_bytes */,
                         uint64_t /* block_compressed_bytes_fast */,
                         uint64_t /* block_compressed_bytes_slow */) override {
     // Intentionally left blank. No interest in collecting stats for
@@ -665,21 +669,21 @@ struct BlockBasedTableBuilder::ParallelCompressionRep {
   class FileSizeEstimator {
    public:
     explicit FileSizeEstimator()
-        : raw_bytes_compressed(0),
-          raw_bytes_curr_block(0),
-          raw_bytes_curr_block_set(false),
-          raw_bytes_inflight(0),
+        : uncomp_bytes_compressed(0),
+          uncomp_bytes_curr_block(0),
+          uncomp_bytes_curr_block_set(false),
+          uncomp_bytes_inflight(0),
           blocks_inflight(0),
           curr_compression_ratio(0),
           estimated_file_size(0) {}

     // Estimate file size when a block is about to be emitted to
     // compression thread
-    void EmitBlock(uint64_t raw_block_size, uint64_t curr_file_size) {
-      uint64_t new_raw_bytes_inflight =
-          raw_bytes_inflight.fetch_add(raw_block_size,
-                                       std::memory_order_relaxed) +
-          raw_block_size;
+    void EmitBlock(uint64_t uncomp_block_size, uint64_t curr_file_size) {
+      uint64_t new_uncomp_bytes_inflight =
+          uncomp_bytes_inflight.fetch_add(uncomp_block_size,
+                                          std::memory_order_relaxed) +
+          uncomp_block_size;

       uint64_t new_blocks_inflight =
           blocks_inflight.fetch_add(1, std::memory_order_relaxed) + 1;
@@ -687,7 +691,7 @@ struct BlockBasedTableBuilder::ParallelCompressionRep {
       estimated_file_size.store(
           curr_file_size +
               static_cast<uint64_t>(
-                  static_cast<double>(new_raw_bytes_inflight) *
+                  static_cast<double>(new_uncomp_bytes_inflight) *
                   curr_compression_ratio.load(std::memory_order_relaxed)) +
               new_blocks_inflight * kBlockTrailerSize,
           std::memory_order_relaxed);
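
The store above encodes: estimated file size = bytes already written + (uncompressed bytes in flight × compression ratio observed so far) + one block trailer per in-flight block; kBlockTrailerSize is 5 bytes in this format (a 1-byte compression type plus a 4-byte checksum). A small worked restatement with hypothetical inputs:

    #include <cstdint>
    #include <cstdio>

    // Same arithmetic as the estimated_file_size.store(...) above.
    uint64_t EstimateSketch(uint64_t curr_file_size, uint64_t uncomp_inflight,
                            double ratio, uint64_t blocks_inflight) {
      const uint64_t kTrailer = 5;  // 1 type byte + 4 checksum bytes
      return curr_file_size +
             static_cast<uint64_t>(static_cast<double>(uncomp_inflight) * ratio) +
             blocks_inflight * kTrailer;
    }

    int main() {
      // 1 MiB written, 4 MiB in flight compressing ~2:1, 8 blocks queued:
      // 1048576 + 2097152 + 40 = 3145768 bytes estimated.
      std::printf("%llu\n", static_cast<unsigned long long>(
                                EstimateSketch(1 << 20, 4 << 20, 0.5, 8)));
    }
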
@@ -696,24 +700,24 @@ struct BlockBasedTableBuilder::ParallelCompressionRep {
     // Estimate file size when a block is already reaped from
     // compression thread
     void ReapBlock(uint64_t compressed_block_size, uint64_t curr_file_size) {
-      assert(raw_bytes_curr_block_set);
+      assert(uncomp_bytes_curr_block_set);

-      uint64_t new_raw_bytes_compressed =
-          raw_bytes_compressed + raw_bytes_curr_block;
-      assert(new_raw_bytes_compressed > 0);
+      uint64_t new_uncomp_bytes_compressed =
+          uncomp_bytes_compressed + uncomp_bytes_curr_block;
+      assert(new_uncomp_bytes_compressed > 0);

       curr_compression_ratio.store(
           (curr_compression_ratio.load(std::memory_order_relaxed) *
-               raw_bytes_compressed +
+               uncomp_bytes_compressed +
            compressed_block_size) /
-              static_cast<double>(new_raw_bytes_compressed),
+              static_cast<double>(new_uncomp_bytes_compressed),
           std::memory_order_relaxed);
-      raw_bytes_compressed = new_raw_bytes_compressed;
+      uncomp_bytes_compressed = new_uncomp_bytes_compressed;

-      uint64_t new_raw_bytes_inflight =
-          raw_bytes_inflight.fetch_sub(raw_bytes_curr_block,
-                                       std::memory_order_relaxed) -
-          raw_bytes_curr_block;
+      uint64_t new_uncomp_bytes_inflight =
+          uncomp_bytes_inflight.fetch_sub(uncomp_bytes_curr_block,
+                                          std::memory_order_relaxed) -
+          uncomp_bytes_curr_block;

       uint64_t new_blocks_inflight =
           blocks_inflight.fetch_sub(1, std::memory_order_relaxed) - 1;
@@ -721,12 +725,12 @@ struct BlockBasedTableBuilder::ParallelCompressionRep {
       estimated_file_size.store(
           curr_file_size +
               static_cast<uint64_t>(
-                  static_cast<double>(new_raw_bytes_inflight) *
+                  static_cast<double>(new_uncomp_bytes_inflight) *
                   curr_compression_ratio.load(std::memory_order_relaxed)) +
               new_blocks_inflight * kBlockTrailerSize,
           std::memory_order_relaxed);

-      raw_bytes_curr_block_set = false;
+      uncomp_bytes_curr_block_set = false;
     }

     void SetEstimatedFileSize(uint64_t size) {
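
The ratio update in ReapBlock keeps the invariant curr_compression_ratio == total compressed bytes / total uncompressed bytes: multiplying the old ratio by the old uncompressed total recovers the compressed total, the new block's compressed size is added, and the sum is divided by the new uncompressed total. A single-threaded sketch of the same update, with the atomics dropped for brevity:

    #include <cstdint>
    #include <cstdio>

    struct RatioSketch {
      uint64_t uncomp_bytes_compressed = 0;
      double curr_compression_ratio = 0.0;

      // Same update as ReapBlock above, minus the atomics.
      void Reap(uint64_t uncomp_block, uint64_t compressed_block) {
        uint64_t new_total = uncomp_bytes_compressed + uncomp_block;
        curr_compression_ratio =
            (curr_compression_ratio * uncomp_bytes_compressed +
             compressed_block) /
            static_cast<double>(new_total);
        uncomp_bytes_compressed = new_total;
      }
    };

    int main() {
      RatioSketch r;
      r.Reap(4096, 1024);  // first block: 1024/4096 = 0.25
      r.Reap(4096, 3072);  // overall: (1024 + 3072) / 8192 = 0.50
      std::printf("%.2f\n", r.curr_compression_ratio);
    }
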
@@ -737,24 +741,24 @@ struct BlockBasedTableBuilder::ParallelCompressionRep {
       return estimated_file_size.load(std::memory_order_relaxed);
     }

-    void SetCurrBlockRawSize(uint64_t size) {
-      raw_bytes_curr_block = size;
-      raw_bytes_curr_block_set = true;
+    void SetCurrBlockUncompSize(uint64_t size) {
+      uncomp_bytes_curr_block = size;
+      uncomp_bytes_curr_block_set = true;
     }

    private:
-    // Raw bytes compressed so far.
-    uint64_t raw_bytes_compressed;
+    // Input bytes compressed so far.
+    uint64_t uncomp_bytes_compressed;
     // Size of current block being appended.
-    uint64_t raw_bytes_curr_block;
-    // Whether raw_bytes_curr_block has been set for next
+    uint64_t uncomp_bytes_curr_block;
+    // Whether uncomp_bytes_curr_block has been set for next
     // ReapBlock call.
-    bool raw_bytes_curr_block_set;
-    // Raw bytes under compression and not appended yet.
-    std::atomic<uint64_t> raw_bytes_inflight;
+    bool uncomp_bytes_curr_block_set;
+    // Input bytes under compression and not appended yet.
+    std::atomic<uint64_t> uncomp_bytes_inflight;
     // Number of blocks under compression and not appended yet.
     std::atomic<uint64_t> blocks_inflight;
-    // Current compression ratio, maintained by BGWorkWriteRawBlock.
+    // Current compression ratio, maintained by BGWorkWriteMaybeCompressedBlock.
     std::atomic<double> curr_compression_ratio;
     // Estimated SST file size.
     std::atomic<uint64_t> estimated_file_size;
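
Read together with the renames above, the estimator is driven from two sides under parallel compression: the emitting side calls EmitBlock when a block is queued for compression, and the write thread calls SetCurrBlockUncompSize and then ReapBlock when the compressed result is appended. A hedged call-order sketch (threads and sizes illustrative, not taken from the patch):

    // Hypothetical sequence for one data block, using the estimator above:
    //
    //   estimator.EmitBlock(uncomp_size, curr_file_size);      // emit thread
    //   ... block compressed on a worker thread ...
    //   estimator.SetCurrBlockUncompSize(uncomp_size);         // write thread
    //   estimator.ReapBlock(compressed_size, curr_file_size);  // write thread
    //
    // EmitBlock grows the in-flight byte/block counts; ReapBlock shrinks
    // them, folds the block into curr_compression_ratio, and refreshes
    // estimated_file_size.
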
@@ -1040,19 +1044,19 @@ void BlockBasedTableBuilder::WriteBlock(BlockBuilder* block,
                                         BlockHandle* handle,
                                         BlockType block_type) {
   block->Finish();
-  std::string raw_block_contents;
-  raw_block_contents.reserve(rep_->table_options.block_size);
-  block->SwapAndReset(raw_block_contents);
+  std::string uncompressed_block_data;
+  uncompressed_block_data.reserve(rep_->table_options.block_size);
+  block->SwapAndReset(uncompressed_block_data);

   if (rep_->state == Rep::State::kBuffered) {
     assert(block_type == BlockType::kData);
-    rep_->data_block_buffers.emplace_back(std::move(raw_block_contents));
+    rep_->data_block_buffers.emplace_back(std::move(uncompressed_block_data));
     rep_->data_begin_offset += rep_->data_block_buffers.back().size();
     return;
   }

-  WriteBlock(raw_block_contents, handle, block_type);
+  WriteBlock(uncompressed_block_data, handle, block_type);
 }

-void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents,
+void BlockBasedTableBuilder::WriteBlock(const Slice& uncompressed_block_data,
                                         BlockHandle* handle,
                                         BlockType block_type) {
   Rep* r = rep_;
@@ -1061,7 +1065,7 @@ void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents,
   CompressionType type;
   Status compress_status;
   bool is_data_block = block_type == BlockType::kData;
-  CompressAndVerifyBlock(raw_block_contents, is_data_block,
+  CompressAndVerifyBlock(uncompressed_block_data, is_data_block,
                          *(r->compression_ctxs[0]), r->verify_ctxs[0].get(),
                          &(r->compressed_output), &(block_contents), &type,
                          &compress_status);
@@ -1070,7 +1074,8 @@ void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents,
     return;
   }

-  WriteRawBlock(block_contents, type, handle, block_type, &raw_block_contents);
+  WriteMaybeCompressedBlock(block_contents, type, handle, block_type,
+                            &uncompressed_block_data);
   r->compressed_output.clear();
   if (is_data_block) {
     r->props.data_size = r->get_offset();
@@ -1094,7 +1099,7 @@ void BlockBasedTableBuilder::BGWorkCompression(
 }

 void BlockBasedTableBuilder::CompressAndVerifyBlock(
-    const Slice& raw_block_contents, bool is_data_block,
+    const Slice& uncompressed_block_data, bool is_data_block,
     const CompressionContext& compression_ctx, UncompressionContext* verify_ctx,
     std::string* compressed_output, Slice* block_contents,
     CompressionType* type, Status* out_status) {
@@ -1116,9 +1121,9 @@ void BlockBasedTableBuilder::CompressAndVerifyBlock(
       r->ioptions.clock,
       ShouldReportDetailedTime(r->ioptions.env, r->ioptions.stats));

-  if (is_status_ok && raw_block_contents.size() < kCompressionSizeLimit) {
+  if (is_status_ok && uncompressed_block_data.size() < kCompressionSizeLimit) {
     if (is_data_block) {
-      r->compressible_input_data_bytes.fetch_add(raw_block_contents.size(),
+      r->compressible_input_data_bytes.fetch_add(uncompressed_block_data.size(),
                                                  std::memory_order_relaxed);
     }
     const CompressionDict* compression_dict;
@@ -1135,14 +1140,14 @@ void BlockBasedTableBuilder::CompressAndVerifyBlock(
     std::string sampled_output_fast;
     std::string sampled_output_slow;

     *block_contents = CompressBlock(
-        raw_block_contents, compression_info, type,
+        uncompressed_block_data, compression_info, type,
         r->table_options.format_version, is_data_block /* do_sample */,
         compressed_output, &sampled_output_fast, &sampled_output_slow);

     if (sampled_output_slow.size() > 0 || sampled_output_fast.size() > 0) {
       // Currently compression sampling is only enabled for data block.
       assert(is_data_block);
-      r->sampled_input_data_bytes.fetch_add(raw_block_contents.size(),
+      r->sampled_input_data_bytes.fetch_add(uncompressed_block_data.size(),
                                             std::memory_order_relaxed);
       r->sampled_output_slow_data_bytes.fetch_add(sampled_output_slow.size(),
                                                   std::memory_order_relaxed);
@@ -1151,7 +1156,7 @@ void BlockBasedTableBuilder::CompressAndVerifyBlock(
     }

     // notify collectors on block add
     NotifyCollectTableCollectorsOnBlockAdd(
-        r->table_properties_collectors, raw_block_contents.size(),
+        r->table_properties_collectors, uncompressed_block_data.size(),
         sampled_output_fast.size(), sampled_output_slow.size());

     // Some of the compression algorithms are known to be unreliable. If
@@ -1169,19 +1174,20 @@ void BlockBasedTableBuilder::CompressAndVerifyBlock(
         BlockContents contents;
         UncompressionInfo uncompression_info(*verify_ctx, *verify_dict,
                                              r->compression_type);
-        Status stat = UncompressBlockContentsForCompressionType(
+        Status stat = UncompressBlockData(
             uncompression_info, block_contents->data(), block_contents->size(),
             &contents, r->table_options.format_version, r->ioptions);

         if (stat.ok()) {
-          bool compressed_ok = contents.data.compare(raw_block_contents) == 0;
+          bool compressed_ok =
+              contents.data.compare(uncompressed_block_data) == 0;
           if (!compressed_ok) {
             // The result of the compression was invalid. abort.
             abort_compression = true;
-            ROCKS_LOG_ERROR(r->ioptions.logger,
-                            "Decompressed block did not match raw block");
-            *out_status =
-                Status::Corruption("Decompressed block did not match raw block");
+            const char* const msg =
+                "Decompressed block did not match pre-compression block";
+            ROCKS_LOG_ERROR(r->ioptions.logger, "%s", msg);
+            *out_status = Status::Corruption(msg);
           }
         } else {
           // Decompression reported an error. abort.
@@ -1193,8 +1199,8 @@ void BlockBasedTableBuilder::CompressAndVerifyBlock(
   } else {
     // Block is too big to be compressed.
     if (is_data_block) {
-      r->uncompressible_input_data_bytes.fetch_add(raw_block_contents.size(),
-                                                   std::memory_order_relaxed);
+      r->uncompressible_input_data_bytes.fetch_add(
+          uncompressed_block_data.size(), std::memory_order_relaxed);
     }
     abort_compression = true;
   }
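
The verify path above (note the UncompressBlockData rename) guards against unreliable compressors by decompressing the block it just compressed and requiring an exact byte match before the compressed form is written. The same round-trip idea in a self-contained sketch, using a trivial reversible stand-in codec rather than a real one:

    #include <cassert>
    #include <string>

    // Trivial reversible "codec" so the example runs anywhere; RocksDB
    // would call the real compressor and UncompressBlockData here.
    std::string ToyCompress(const std::string& in) {
      return std::string(in.rbegin(), in.rend());
    }
    std::string ToyUncompress(const std::string& in) { return ToyCompress(in); }

    int main() {
      const std::string uncompressed_block_data = "example block payload";
      const std::string compressed = ToyCompress(uncompressed_block_data);

      // As in CompressAndVerifyBlock: decompress what was just compressed
      // and require an exact match; otherwise abort compression and store
      // the block uncompressed.
      const bool compressed_ok =
          ToyUncompress(compressed) == uncompressed_block_data;
      assert(compressed_ok);
      return compressed_ok ? 0 : 1;
    }
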
@@ -1208,27 +1214,26 @@ void BlockBasedTableBuilder::CompressAndVerifyBlock(
   if (abort_compression) {
     RecordTick(r->ioptions.stats, NUMBER_BLOCK_NOT_COMPRESSED);
     *type = kNoCompression;
-    *block_contents = raw_block_contents;
+    *block_contents = uncompressed_block_data;
   } else if (*type != kNoCompression) {
     if (ShouldReportDetailedTime(r->ioptions.env, r->ioptions.stats)) {
       RecordTimeToHistogram(r->ioptions.stats, COMPRESSION_TIMES_NANOS,
                             timer.ElapsedNanos());
     }
     RecordInHistogram(r->ioptions.stats, BYTES_COMPRESSED,
-                      raw_block_contents.size());
+                      uncompressed_block_data.size());
     RecordTick(r->ioptions.stats, NUMBER_BLOCK_COMPRESSED);
   } else if (*type != r->compression_type) {
     RecordTick(r->ioptions.stats, NUMBER_BLOCK_NOT_COMPRESSED);
   }
 }

-void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
-                                           CompressionType type,
-                                           BlockHandle* handle,
-                                           BlockType block_type,
-                                           const Slice* raw_block_contents) {
+void BlockBasedTableBuilder::WriteMaybeCompressedBlock(
+    const Slice& block_contents, CompressionType type, BlockHandle* handle,
+    BlockType block_type, const Slice* uncompressed_block_data) {
   Rep* r = rep_;
   bool is_data_block = block_type == BlockType::kData;
+  // Old, misleading name of this function: WriteRawBlock
   StopWatch sw(r->ioptions.clock, r->ioptions.stats, WRITE_RAW_BLOCK_MICROS);
   handle->set_offset(r->get_offset());
   handle->set_size(block_contents.size());
@@ -1259,7 +1264,7 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
     EncodeFixed32(trailer.data() + 1, checksum);
     TEST_SYNC_POINT_CALLBACK(
-        "BlockBasedTableBuilder::WriteRawBlock:TamperWithChecksum",
+        "BlockBasedTableBuilder::WriteMaybeCompressedBlock:TamperWithChecksum",
         trailer.data());
     {
       IOStatus io_s = r->file->Append(Slice(trailer.data(), trailer.size()));
@@ -1287,8 +1292,9 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
     if (warm_cache) {
       if (type == kNoCompression) {
         s = InsertBlockInCacheHelper(block_contents, handle, block_type);
-      } else if (raw_block_contents != nullptr) {
-        s = InsertBlockInCacheHelper(*raw_block_contents, handle, block_type);
+      } else if (uncompressed_block_data != nullptr) {
+        s = InsertBlockInCacheHelper(*uncompressed_block_data, handle,
+                                     block_type);
       }
       if (!s.ok()) {
         r->SetStatus(s);
@@ -1327,7 +1333,7 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
   }
 }

-void BlockBasedTableBuilder::BGWorkWriteRawBlock() {
+void BlockBasedTableBuilder::BGWorkWriteMaybeCompressedBlock() {
   Rep* r = rep_;
   ParallelCompressionRep::BlockRepSlot* slot = nullptr;
   ParallelCompressionRep::BlockRep* block_rep = nullptr;
@@ -1354,9 +1360,11 @@ void BlockBasedTableBuilder::BGWorkWriteRawBlock() {
       r->index_builder->OnKeyAdded(key);
     }

-    r->pc_rep->file_size_estimator.SetCurrBlockRawSize(block_rep->data->size());
-    WriteRawBlock(block_rep->compressed_contents, block_rep->compression_type,
-                  &r->pending_handle, BlockType::kData, &block_rep->contents);
+    r->pc_rep->file_size_estimator.SetCurrBlockUncompSize(
+        block_rep->data->size());
+    WriteMaybeCompressedBlock(block_rep->compressed_contents,
+                              block_rep->compression_type, &r->pending_handle,
+                              BlockType::kData, &block_rep->contents);
     if (!ok()) {
       break;
     }
@@ -1391,7 +1399,7 @@ void BlockBasedTableBuilder::StartParallelCompression() {
     });
   }
   rep_->pc_rep->write_thread.reset(
-      new port::Thread([this] { BGWorkWriteRawBlock(); }));
+      new port::Thread([this] { BGWorkWriteMaybeCompressedBlock(); }));
 }

 void BlockBasedTableBuilder::StopParallelCompression() {
@@ -1438,7 +1446,7 @@ Status BlockBasedTableBuilder::InsertBlockInCompressedCache(
   BlockContents* block_contents_to_cache =
       new BlockContents(std::move(ubuf), size);
 #ifndef NDEBUG
-  block_contents_to_cache->is_raw_block = true;
+  block_contents_to_cache->has_trailer = true;
 #endif  // NDEBUG

   CacheKey key = BlockBasedTable::GetCacheKey(rep_->base_cache_key, *handle);
@@ -1567,8 +1575,8 @@ void BlockBasedTableBuilder::WriteFilterBlock(
       BlockType btype = is_partitioned_filter && /* last */ s.ok()
                             ? BlockType::kFilterPartitionIndex
                             : BlockType::kFilter;
-      WriteRawBlock(filter_content, kNoCompression, &filter_block_handle, btype,
-                    nullptr /*raw_contents*/);
+      WriteMaybeCompressedBlock(filter_content, kNoCompression,
+                                &filter_block_handle, btype);
     }
     rep_->filter_builder->ResetFilterBitsBuilder();
   }
@@ -1613,8 +1621,9 @@ void BlockBasedTableBuilder::WriteIndexBlock(
       WriteBlock(index_blocks.index_block_contents, index_block_handle,
                  BlockType::kIndex);
     } else {
-      WriteRawBlock(index_blocks.index_block_contents, kNoCompression,
-                    index_block_handle, BlockType::kIndex);
+      WriteMaybeCompressedBlock(index_blocks.index_block_contents,
+                                kNoCompression, index_block_handle,
+                                BlockType::kIndex);
     }
   }
   // If there are more index partitions, finish them and write them out
@@ -1638,8 +1647,9 @@
       WriteBlock(index_blocks.index_block_contents, index_block_handle,
                  BlockType::kIndex);
     } else {
-      WriteRawBlock(index_blocks.index_block_contents, kNoCompression,
-                    index_block_handle, BlockType::kIndex);
+      WriteMaybeCompressedBlock(index_blocks.index_block_contents,
+                                kNoCompression, index_block_handle,
+                                BlockType::kIndex);
     }
     // The last index_block_handle will be for the partition index block
   }
@@ -1727,8 +1737,8 @@ void BlockBasedTableBuilder::WritePropertiesBlock(
     Slice block_data = property_block_builder.Finish();
     TEST_SYNC_POINT_CALLBACK(
         "BlockBasedTableBuilder::WritePropertiesBlock:BlockData", &block_data);
-    WriteRawBlock(block_data, kNoCompression, &properties_block_handle,
-                  BlockType::kProperties);
+    WriteMaybeCompressedBlock(block_data, kNoCompression,
+                              &properties_block_handle, BlockType::kProperties);
   }
   if (ok()) {
 #ifndef NDEBUG
@@ -1758,9 +1768,9 @@ void BlockBasedTableBuilder::WriteCompressionDictBlock(
       rep_->compression_dict->GetRawDict().size()) {
     BlockHandle compression_dict_block_handle;
     if (ok()) {
-      WriteRawBlock(rep_->compression_dict->GetRawDict(), kNoCompression,
-                    &compression_dict_block_handle,
-                    BlockType::kCompressionDictionary);
+      WriteMaybeCompressedBlock(rep_->compression_dict->GetRawDict(),
+                                kNoCompression, &compression_dict_block_handle,
+                                BlockType::kCompressionDictionary);
 #ifndef NDEBUG
       Slice compression_dict = rep_->compression_dict->GetRawDict();
       TEST_SYNC_POINT_CALLBACK(
@@ -1779,8 +1789,9 @@ void BlockBasedTableBuilder::WriteRangeDelBlock(
     MetaIndexBuilder* meta_index_builder) {
   if (ok() && !rep_->range_del_block.empty()) {
     BlockHandle range_del_block_handle;
-    WriteRawBlock(rep_->range_del_block.Finish(), kNoCompression,
-                  &range_del_block_handle, BlockType::kRangeDeletion);
+    WriteMaybeCompressedBlock(rep_->range_del_block.Finish(), kNoCompression,
+                              &range_del_block_handle,
+                              BlockType::kRangeDeletion);
     meta_index_builder->Add(kRangeDelBlockName, range_del_block_handle);
   }
 }
@@ -2001,8 +2012,8 @@ Status BlockBasedTableBuilder::Finish() {
     WritePropertiesBlock(&meta_index_builder);
     if (ok()) {
       // flush the meta index block
-      WriteRawBlock(meta_index_builder.Finish(), kNoCompression,
-                    &metaindex_block_handle, BlockType::kMetaIndex);
+      WriteMaybeCompressedBlock(meta_index_builder.Finish(), kNoCompression,
+                                &metaindex_block_handle, BlockType::kMetaIndex);
     }
     if (ok()) {
       WriteFooter(metaindex_block_handle, index_block_handle);
@@ -2036,7 +2047,7 @@ uint64_t BlockBasedTableBuilder::FileSize() const { return rep_->offset; }
 uint64_t BlockBasedTableBuilder::EstimatedFileSize() const {
   if (rep_->IsParallelCompressionEnabled()) {
-    // Use compression ratio so far and inflight raw bytes to estimate
+    // Use compression ratio so far and inflight uncompressed bytes to estimate
     // final SST size.
     return rep_->pc_rep->file_size_estimator.GetEstimatedFileSize();
   } else {