From fb6456b00d2f8b22bf538fd14a37632c53289524 Mon Sep 17 00:00:00 2001
From: Torrie Fischer
Date: Fri, 15 Aug 2014 15:05:09 -0700
Subject: [PATCH] Replace naked calls to operator new and delete (Fixes #222)

This replaces a mishmash of pointers in the Block and BlockContents classes
with std::unique_ptr. It also changes the semantics of BlockContents to be
limited to use as a constructor parameter for Block objects, as it owns any
block buffers handed to it.
---
 table/block.cc                     |  11 +-
 table/block.h                      |  15 +--
 table/block_based_filter_block.cc  |  13 ++-
 table/block_based_filter_block.h   |   8 +-
 table/block_based_table_builder.cc |  14 +--
 table/block_based_table_reader.cc  |  40 ++-----
 table/block_test.cc                |   6 +-
 table/filter_block.h               |   5 +
 table/format.cc                    | 167 +++++++++--------------------
 table/format.h                     |  31 ++++--
 table/full_filter_block.cc         |  13 ++-
 table/full_filter_block.h          |   8 +-
 table/meta_blocks.cc               |  44 ++++----
 table/plain_table_reader.cc        |   1 +
 table/table_test.cc                |   3 +-
 15 files changed, 153 insertions(+), 226 deletions(-)

diff --git a/table/block.cc b/table/block.cc
index 0db23a1bd..1a1accb2f 100644
--- a/table/block.cc
+++ b/table/block.cc
@@ -299,10 +299,7 @@ uint32_t Block::NumRestarts() const {
 
 Block::Block(const BlockContents& contents)
     : data_(contents.data.data()),
-      size_(contents.data.size()),
-      owned_(contents.heap_allocated),
-      cachable_(contents.cachable),
-      compression_type_(contents.compression_type) {
+      size_(contents.data.size()) {
   if (size_ < sizeof(uint32_t)) {
     size_ = 0;  // Error marker
   } else {
@@ -315,10 +312,8 @@ Block::Block(const BlockContents& contents)
   }
 }
 
-Block::~Block() {
-  if (owned_) {
-    delete[] data_;
-  }
+Block::Block(BlockContents&& contents) : Block(contents) {
+  contents_ = std::move(contents);
 }
 
 Iterator* Block::NewIterator(
diff --git a/table/block.h b/table/block.h
index 49bcf12cf..21dacc395 100644
--- a/table/block.h
+++ b/table/block.h
@@ -14,6 +14,10 @@
 #include "rocksdb/iterator.h"
 #include "rocksdb/options.h"
 #include "db/dbformat.h"
+#include "table/block_prefix_index.h"
+#include "table/block_hash_index.h"
+
+#include "format.h"
 
 namespace rocksdb {
 
@@ -26,15 +30,16 @@ class BlockPrefixIndex;
 
 class Block {
  public:
   // Initialize the block with the specified contents.
+  explicit Block(BlockContents&& contents);
   explicit Block(const BlockContents& contents);
-  ~Block();
+  ~Block() = default;
 
   size_t size() const { return size_; }
   const char* data() const { return data_; }
-  bool cachable() const { return cachable_; }
+  bool cachable() const { return contents_.cachable; }
   uint32_t NumRestarts() const;
-  CompressionType compression_type() const { return compression_type_; }
+  CompressionType compression_type() const { return contents_.compression_type; }
 
   // If hash index lookup is enabled and `use_hash_index` is true. This block
   // will do hash lookup for the key prefix.
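[editor's note] The block.cc hunks above are the core of the change: Block no longer hand-rolls a destructor around an owned_ flag. Instead, the new rvalue-reference constructor delegates to the existing const& constructor (which records the data pointer and size) and then moves the whole BlockContents into a member, so any heap buffer is released by the unique_ptr it carries. The following is a minimal sketch of that pattern only; OwnedContents and BlockView are illustrative stand-ins, not the actual RocksDB classes.

#include <cstddef>
#include <cstring>
#include <memory>

// Stand-in for BlockContents: a raw view (data/size) plus an optional owned
// heap buffer. When `allocation` is empty, the memory is not owned.
struct OwnedContents {
  const char* data = nullptr;
  size_t size = 0;
  std::unique_ptr<char[]> allocation;

  OwnedContents() = default;
  OwnedContents(std::unique_ptr<char[]>&& buf, size_t n)
      : data(buf.get()), size(n), allocation(std::move(buf)) {}
};

// Stand-in for Block: the rvalue constructor delegates to the const& one
// (which captures the raw pointer and size), then takes ownership by moving
// the contents in. Moving the unique_ptr does not relocate the buffer, so the
// pointer captured by the delegated constructor stays valid.
class BlockView {
 public:
  explicit BlockView(const OwnedContents& c) : data_(c.data), size_(c.size) {}
  explicit BlockView(OwnedContents&& c) : BlockView(c) {
    contents_ = std::move(c);  // buffer now lives and dies with the BlockView
  }
  const char* data() const { return data_; }
  size_t size() const { return size_; }

 private:
  OwnedContents contents_;
  const char* data_;
  size_t size_;
};

int main() {
  const char payload[] = "hello";
  auto buf = std::unique_ptr<char[]>(new char[sizeof(payload)]);
  std::memcpy(buf.get(), payload, sizeof(payload));
  OwnedContents contents(std::move(buf), sizeof(payload));
  BlockView block(std::move(contents));  // no explicit delete[] anywhere
  return block.size() == sizeof(payload) ? 0 : 1;
}

The ordering is the point: the delegated constructor reads the raw pointer before the move, and moving the unique_ptr transfers ownership without moving the bytes, so that pointer remains valid for the lifetime of the object.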
@@ -58,12 +63,10 @@ class Block { size_t ApproximateMemoryUsage() const; private: + BlockContents contents_; const char* data_; size_t size_; uint32_t restart_offset_; // Offset in data_ of restart array - bool owned_; // Block owns data_[] - bool cachable_; - CompressionType compression_type_; std::unique_ptr hash_index_; std::unique_ptr prefix_index_; diff --git a/table/block_based_filter_block.cc b/table/block_based_filter_block.cc index c2c34c628..bed605a68 100644 --- a/table/block_based_filter_block.cc +++ b/table/block_based_filter_block.cc @@ -138,7 +138,7 @@ void BlockBasedFilterBlockBuilder::GenerateFilter() { BlockBasedFilterBlockReader::BlockBasedFilterBlockReader( const SliceTransform* prefix_extractor, const BlockBasedTableOptions& table_opt, - const Slice& contents, bool delete_contents_after_use) + const Slice& contents) : policy_(table_opt.filter_policy.get()), prefix_extractor_(prefix_extractor), whole_key_filtering_(table_opt.whole_key_filtering), @@ -155,9 +155,14 @@ BlockBasedFilterBlockReader::BlockBasedFilterBlockReader( data_ = contents.data(); offset_ = data_ + last_word; num_ = (n - 5 - last_word) / 4; - if (delete_contents_after_use) { - filter_data.reset(contents.data()); - } +} + +BlockBasedFilterBlockReader::BlockBasedFilterBlockReader( + const SliceTransform* prefix_extractor, + const BlockBasedTableOptions& table_opt, + BlockContents &&contents) + : BlockBasedFilterBlockReader (prefix_extractor, table_opt, contents.data) { + contents_ = std::move(contents); } bool BlockBasedFilterBlockReader::KeyMayMatch(const Slice& key, diff --git a/table/block_based_filter_block.h b/table/block_based_filter_block.h index 9bbc93531..856b88910 100644 --- a/table/block_based_filter_block.h +++ b/table/block_based_filter_block.h @@ -74,8 +74,10 @@ class BlockBasedFilterBlockReader : public FilterBlockReader { // REQUIRES: "contents" and *policy must stay live while *this is live. 
BlockBasedFilterBlockReader(const SliceTransform* prefix_extractor, const BlockBasedTableOptions& table_opt, - const Slice& contents, - bool delete_contents_after_use = false); + const Slice& contents); + BlockBasedFilterBlockReader(const SliceTransform* prefix_extractor, + const BlockBasedTableOptions& table_opt, + BlockContents&& contents); virtual bool IsBlockBased() override { return true; } virtual bool KeyMayMatch(const Slice& key, uint64_t block_offset = kNotValid) override; @@ -91,7 +93,7 @@ class BlockBasedFilterBlockReader : public FilterBlockReader { const char* offset_; // Pointer to beginning of offset array (at block-end) size_t num_; // Number of entries in offset array size_t base_lg_; // Encoding parameter (see kFilterBaseLg in .cc file) - std::unique_ptr filter_data; + BlockContents contents_; bool MayMatch(const Slice& entry, uint64_t block_offset); diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc index 7fb662d88..eb32e9942 100644 --- a/table/block_based_table_builder.cc +++ b/table/block_based_table_builder.cc @@ -17,6 +17,7 @@ #include #include #include +#include #include "db/dbformat.h" @@ -634,18 +635,13 @@ Status BlockBasedTableBuilder::InsertBlockInCache(const Slice& block_contents, Cache::Handle* cache_handle = nullptr; size_t size = block_contents.size(); - char* ubuf = new char[size + 1]; // make a new copy - memcpy(ubuf, block_contents.data(), size); + std::unique_ptr ubuf(new char[size+1]); + memcpy(ubuf.get(), block_contents.data(), size); ubuf[size] = type; - BlockContents results; - Slice sl(ubuf, size); - results.data = sl; - results.cachable = true; // XXX - results.heap_allocated = true; - results.compression_type = type; + BlockContents results(std::move(ubuf), size, true, type); - Block* block = new Block(results); + Block* block = new Block(std::move(results)); // make cache key by appending the file offset to the cache prefix id char* end = EncodeVarint64( diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index b38f88588..1b41085af 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -66,7 +66,7 @@ Status ReadBlockFromFile(RandomAccessFile* file, const Footer& footer, Status s = ReadBlockContents(file, footer, options, handle, &contents, env, do_uncompress); if (s.ok()) { - *result = new Block(contents); + *result = new Block(std::move(contents)); } return s; @@ -252,9 +252,6 @@ class HashIndexReader : public IndexReader { &prefixes_meta_contents, env, true /* do decompression */); if (!s.ok()) { - if (prefixes_contents.heap_allocated) { - delete[] prefixes_contents.data.data(); - } // TODO: log error return Status::OK(); } @@ -269,7 +266,7 @@ class HashIndexReader : public IndexReader { // TODO: log error if (s.ok()) { new_index_reader->index_block_->SetBlockHashIndex(hash_index); - new_index_reader->OwnPrefixesContents(prefixes_contents); + new_index_reader->OwnPrefixesContents(std::move(prefixes_contents)); } } else { BlockPrefixIndex* prefix_index = nullptr; @@ -283,18 +280,6 @@ class HashIndexReader : public IndexReader { } } - // Always release prefix meta block - if (prefixes_meta_contents.heap_allocated) { - delete[] prefixes_meta_contents.data.data(); - } - - // Release prefix content block if we don't own it. 
- if (!new_index_reader->own_prefixes_contents_) { - if (prefixes_contents.heap_allocated) { - delete[] prefixes_contents.data.data(); - } - } - return Status::OK(); } @@ -314,24 +299,18 @@ class HashIndexReader : public IndexReader { private: HashIndexReader(const Comparator* comparator, Block* index_block) : IndexReader(comparator), - index_block_(index_block), - own_prefixes_contents_(false) { + index_block_(index_block) { assert(index_block_ != nullptr); } ~HashIndexReader() { - if (own_prefixes_contents_ && prefixes_contents_.heap_allocated) { - delete[] prefixes_contents_.data.data(); - } } - void OwnPrefixesContents(const BlockContents& prefixes_contents) { - prefixes_contents_ = prefixes_contents; - own_prefixes_contents_ = true; + void OwnPrefixesContents(BlockContents&& prefixes_contents) { + prefixes_contents_ = std::move(prefixes_contents); } std::unique_ptr index_block_; - bool own_prefixes_contents_; BlockContents prefixes_contents_; }; @@ -677,7 +656,7 @@ Status BlockBasedTable::GetDataBlockFromCache( // Insert uncompressed block into block cache if (s.ok()) { - block->value = new Block(contents); // uncompressed block + block->value = new Block(std::move(contents)); // uncompressed block assert(block->value->compression_type() == kNoCompression); if (block_cache != nullptr && block->value->cachable() && read_options.fill_cache) { @@ -715,7 +694,7 @@ Status BlockBasedTable::PutDataBlockToCache( } if (raw_block->compression_type() != kNoCompression) { - block->value = new Block(contents); // uncompressed block + block->value = new Block(std::move(contents)); // uncompressed block } else { block->value = raw_block; raw_block = nullptr; @@ -768,15 +747,14 @@ FilterBlockReader* BlockBasedTable::ReadFilter( assert(rep->filter_policy); if (kFilterBlockPrefix == filter_block_prefix) { return new BlockBasedFilterBlockReader(rep->ioptions.prefix_extractor, - rep->table_options, block.data, block.heap_allocated); + rep->table_options, std::move(block)); } else if (kFullFilterBlockPrefix == filter_block_prefix) { auto filter_bits_reader = rep->filter_policy-> GetFilterBitsReader(block.data); if (filter_bits_reader != nullptr) { return new FullFilterBlockReader(rep->ioptions.prefix_extractor, - rep->table_options, block.data, filter_bits_reader, - block.heap_allocated); + rep->table_options, std::move(block), filter_bits_reader); } } return nullptr; diff --git a/table/block_test.cc b/table/block_test.cc index b36787f8f..c341617a7 100644 --- a/table/block_test.cc +++ b/table/block_test.cc @@ -92,8 +92,7 @@ TEST(BlockTest, SimpleTest) { BlockContents contents; contents.data = rawblock; contents.cachable = false; - contents.heap_allocated = false; - Block reader(contents); + Block reader(std::move(contents)); // read contents of block sequentially int count = 0; @@ -143,12 +142,11 @@ BlockContents GetBlockContents(std::unique_ptr *builder, BlockContents contents; contents.data = rawblock; contents.cachable = false; - contents.heap_allocated = false; return contents; } -void CheckBlockContents(BlockContents contents, const int max_key, +void CheckBlockContents(const BlockContents &contents, const int max_key, const std::vector &keys, const std::vector &values) { const size_t prefix_size = 6; diff --git a/table/filter_block.h b/table/filter_block.h index adbb7c496..197676827 100644 --- a/table/filter_block.h +++ b/table/filter_block.h @@ -18,17 +18,22 @@ #pragma once +#include #include #include #include #include #include "rocksdb/options.h" #include "rocksdb/slice.h" +#include 
"rocksdb/slice_transform.h" #include "rocksdb/table.h" +#include "util/hash.h" +#include "format.h" namespace rocksdb { const uint64_t kNotValid = ULLONG_MAX; +class FilterPolicy; // A FilterBlockBuilder is used to construct all of the filters for a // particular Table. It generates a single string which is stored as diff --git a/table/format.cc b/table/format.cc index 70cc6eb83..255e1e834 100644 --- a/table/format.cc +++ b/table/format.cc @@ -255,112 +255,53 @@ Status ReadBlock(RandomAccessFile* file, const Footer& footer, return s; } -// Decompress a block according to params -// May need to malloc a space for cache usage -Status DecompressBlock(BlockContents* result, size_t block_size, - bool do_uncompress, const char* buf, - const Slice& contents, bool use_stack_buf) { - Status s; - size_t n = block_size; - const char* data = contents.data(); - - result->data = Slice(); - result->cachable = false; - result->heap_allocated = false; - - PERF_TIMER_GUARD(block_decompress_time); - rocksdb::CompressionType compression_type = - static_cast(data[n]); - // If the caller has requested that the block not be uncompressed - if (!do_uncompress || compression_type == kNoCompression) { - if (data != buf) { - // File implementation gave us pointer to some other data. - // Use it directly under the assumption that it will be live - // while the file is open. - result->data = Slice(data, n); - result->heap_allocated = false; - result->cachable = false; // Do not double-cache - } else { - if (use_stack_buf) { - // Need to allocate space in heap for cache usage - char* new_buf = new char[n]; - memcpy(new_buf, buf, n); - result->data = Slice(new_buf, n); - } else { - result->data = Slice(buf, n); - } - - result->heap_allocated = true; - result->cachable = true; - } - result->compression_type = compression_type; - s = Status::OK(); +Status ReadBlockContents(RandomAccessFile* file, const Footer& footer, + const ReadOptions& options, const BlockHandle& handle, + BlockContents *contents, Env* env, + bool decompression_requested) { + Status status; + Slice slice; + size_t n = static_cast(handle.size()); + std::unique_ptr heap_buf; + char stack_buf[DefaultStackBufferSize]; + char *used_buf = nullptr; + rocksdb::CompressionType compression_type; + + if (decompression_requested && n + kBlockTrailerSize < DefaultStackBufferSize) { + //If we've got a small enough hunk of data, read it in to the + //trivially allocated stack buffer instead of needing a full malloc() + used_buf = &stack_buf[0]; } else { - s = UncompressBlockContents(data, n, result); + heap_buf = std::unique_ptr(new char[n + kBlockTrailerSize]); + used_buf = heap_buf.get(); } - return s; -} -// Read and Decompress block -// Use buf in stack as temp reading buffer -Status ReadAndDecompressFast(RandomAccessFile* file, const Footer& footer, - const ReadOptions& options, - const BlockHandle& handle, BlockContents* result, - Env* env, bool do_uncompress) { - Status s; - Slice contents; - size_t n = static_cast(handle.size()); - char buf[DefaultStackBufferSize]; + status = ReadBlock(file, footer, options, handle, &slice, used_buf); - s = ReadBlock(file, footer, options, handle, &contents, buf); - if (!s.ok()) { - return s; - } - s = DecompressBlock(result, n, do_uncompress, buf, contents, true); - if (!s.ok()) { - return s; + if (!status.ok()) { + return status; } - return s; -} -// Read and Decompress block -// Use buf in heap as temp reading buffer -Status ReadAndDecompress(RandomAccessFile* file, const Footer& footer, - const ReadOptions& options, const 
BlockHandle& handle, - BlockContents* result, Env* env, bool do_uncompress) { - Status s; - Slice contents; - size_t n = static_cast(handle.size()); - char* buf = new char[n + kBlockTrailerSize]; + PERF_TIMER_GUARD(block_decompress_time); - s = ReadBlock(file, footer, options, handle, &contents, buf); - if (!s.ok()) { - delete[] buf; - return s; - } - s = DecompressBlock(result, n, do_uncompress, buf, contents, false); - if (!s.ok()) { - delete[] buf; - return s; + compression_type = static_cast(slice.data()[n]); + + if (decompression_requested && compression_type != kNoCompression) { + return UncompressBlockContents(slice.data(), n, contents); } - if (result->data.data() != buf) { - delete[] buf; + if (slice.data() != used_buf) { + *contents = BlockContents(Slice(slice.data(), n), false, compression_type); + return status; } - return s; -} -Status ReadBlockContents(RandomAccessFile* file, const Footer& footer, - const ReadOptions& options, const BlockHandle& handle, - BlockContents* result, Env* env, bool do_uncompress) { - size_t n = static_cast(handle.size()); - if (do_uncompress && n + kBlockTrailerSize < DefaultStackBufferSize) { - return ReadAndDecompressFast(file, footer, options, handle, result, env, - do_uncompress); - } else { - return ReadAndDecompress(file, footer, options, handle, result, env, - do_uncompress); + if (used_buf == &stack_buf[0]) { + heap_buf = std::unique_ptr(new char[n]); + memcpy(heap_buf.get(), stack_buf, n); } + + *contents = BlockContents(std::move(heap_buf), n, true, compression_type); + return status; } // @@ -370,8 +311,8 @@ Status ReadBlockContents(RandomAccessFile* file, const Footer& footer, // buffer is returned via 'result' and it is upto the caller to // free this buffer. Status UncompressBlockContents(const char* data, size_t n, - BlockContents* result) { - char* ubuf = nullptr; + BlockContents* contents) { + std::unique_ptr ubuf; int decompress_size = 0; assert(data[n] != kNoCompression); switch (data[n]) { @@ -382,64 +323,52 @@ Status UncompressBlockContents(const char* data, size_t n, if (!port::Snappy_GetUncompressedLength(data, n, &ulength)) { return Status::Corruption(snappy_corrupt_msg); } - ubuf = new char[ulength]; - if (!port::Snappy_Uncompress(data, n, ubuf)) { - delete[] ubuf; + ubuf = std::unique_ptr(new char[ulength]); + if (!port::Snappy_Uncompress(data, n, ubuf.get())) { return Status::Corruption(snappy_corrupt_msg); } - result->data = Slice(ubuf, ulength); - result->heap_allocated = true; - result->cachable = true; + *contents = BlockContents(std::move(ubuf), ulength, true, kNoCompression); break; } case kZlibCompression: - ubuf = port::Zlib_Uncompress(data, n, &decompress_size); + ubuf = std::unique_ptr(port::Zlib_Uncompress(data, n, &decompress_size)); static char zlib_corrupt_msg[] = "Zlib not supported or corrupted Zlib compressed block contents"; if (!ubuf) { return Status::Corruption(zlib_corrupt_msg); } - result->data = Slice(ubuf, decompress_size); - result->heap_allocated = true; - result->cachable = true; + *contents = BlockContents(std::move(ubuf), decompress_size, true, kNoCompression); break; case kBZip2Compression: - ubuf = port::BZip2_Uncompress(data, n, &decompress_size); + ubuf = std::unique_ptr(port::BZip2_Uncompress(data, n, &decompress_size)); static char bzip2_corrupt_msg[] = "Bzip2 not supported or corrupted Bzip2 compressed block contents"; if (!ubuf) { return Status::Corruption(bzip2_corrupt_msg); } - result->data = Slice(ubuf, decompress_size); - result->heap_allocated = true; - result->cachable = true; + 
*contents = BlockContents(std::move(ubuf), decompress_size, true, kNoCompression); break; case kLZ4Compression: - ubuf = port::LZ4_Uncompress(data, n, &decompress_size); + ubuf = std::unique_ptr(port::LZ4_Uncompress(data, n, &decompress_size)); static char lz4_corrupt_msg[] = "LZ4 not supported or corrupted LZ4 compressed block contents"; if (!ubuf) { return Status::Corruption(lz4_corrupt_msg); } - result->data = Slice(ubuf, decompress_size); - result->heap_allocated = true; - result->cachable = true; + *contents = BlockContents(std::move(ubuf), decompress_size, true, kNoCompression); break; case kLZ4HCCompression: - ubuf = port::LZ4_Uncompress(data, n, &decompress_size); + ubuf = std::unique_ptr(port::LZ4_Uncompress(data, n, &decompress_size)); static char lz4hc_corrupt_msg[] = "LZ4HC not supported or corrupted LZ4HC compressed block contents"; if (!ubuf) { return Status::Corruption(lz4hc_corrupt_msg); } - result->data = Slice(ubuf, decompress_size); - result->heap_allocated = true; - result->cachable = true; + *contents = BlockContents(std::move(ubuf), decompress_size, true, kNoCompression); break; default: return Status::Corruption("bad block type"); } - result->compression_type = kNoCompression; // not compressed any more return Status::OK(); } diff --git a/table/format.h b/table/format.h index a971c1a67..9f5d6ce89 100644 --- a/table/format.h +++ b/table/format.h @@ -160,28 +160,39 @@ static const size_t kBlockTrailerSize = 5; struct BlockContents { Slice data; // Actual contents of data bool cachable; // True iff data can be cached - bool heap_allocated; // True iff caller should delete[] data.data() CompressionType compression_type; + std::unique_ptr allocation; + + BlockContents() + : cachable(false), + compression_type(kNoCompression) {} + + BlockContents(const Slice &_data, bool _cachable, CompressionType _compression_type) + : data(_data), + cachable(_cachable), + compression_type(_compression_type) {} + + BlockContents(std::unique_ptr &&_data, size_t _size, bool _cachable, CompressionType _compression_type) + : data(_data.get(), _size), + cachable(_cachable), + compression_type(_compression_type), + allocation(std::move(_data)) {} }; // Read the block identified by "handle" from "file". On failure // return non-OK. On success fill *result and return OK. -extern Status ReadBlockContents(RandomAccessFile* file, - const Footer& footer, +extern Status ReadBlockContents(RandomAccessFile* file, const Footer& footer, const ReadOptions& options, - const BlockHandle& handle, - BlockContents* result, - Env* env, - bool do_uncompress); + const BlockHandle& handle, BlockContents* contents, + Env* env, bool do_uncompress); // The 'data' points to the raw block contents read in from file. // This method allocates a new heap buffer and the raw block // contents are uncompresed into this buffer. This buffer is // returned via 'result' and it is upto the caller to // free this buffer. -extern Status UncompressBlockContents(const char* data, - size_t n, - BlockContents* result); +extern Status UncompressBlockContents(const char* data, size_t n, + BlockContents* contents); // Implementation details follow. 
Clients should ignore, diff --git a/table/full_filter_block.cc b/table/full_filter_block.cc index 4ccc2e2b4..b0efef39a 100644 --- a/table/full_filter_block.cc +++ b/table/full_filter_block.cc @@ -56,16 +56,21 @@ FullFilterBlockReader::FullFilterBlockReader( const SliceTransform* prefix_extractor, const BlockBasedTableOptions& table_opt, const Slice& contents, - FilterBitsReader* filter_bits_reader, bool delete_contents_after_use) + FilterBitsReader* filter_bits_reader) : prefix_extractor_(prefix_extractor), whole_key_filtering_(table_opt.whole_key_filtering), contents_(contents) { assert(filter_bits_reader != nullptr); filter_bits_reader_.reset(filter_bits_reader); +} - if (delete_contents_after_use) { - filter_data_.reset(contents.data()); - } +FullFilterBlockReader::FullFilterBlockReader( + const SliceTransform* prefix_extractor, + const BlockBasedTableOptions& table_opt, + BlockContents&& contents, + FilterBitsReader* filter_bits_reader) + : FullFilterBlockReader(prefix_extractor, table_opt, contents.data, filter_bits_reader) { + block_contents_ = std::move(contents); } bool FullFilterBlockReader::KeyMayMatch(const Slice& key, diff --git a/table/full_filter_block.h b/table/full_filter_block.h index 46ba5d1de..6d6294cf2 100644 --- a/table/full_filter_block.h +++ b/table/full_filter_block.h @@ -75,8 +75,11 @@ class FullFilterBlockReader : public FilterBlockReader { explicit FullFilterBlockReader(const SliceTransform* prefix_extractor, const BlockBasedTableOptions& table_opt, const Slice& contents, - FilterBitsReader* filter_bits_reader, - bool delete_contents_after_use = false); + FilterBitsReader* filter_bits_reader); + explicit FullFilterBlockReader(const SliceTransform* prefix_extractor, + const BlockBasedTableOptions& table_opt, + BlockContents&& contents, + FilterBitsReader* filter_bits_reader); // bits_reader is created in filter_policy, it should be passed in here // directly. 
and be deleted here @@ -95,6 +98,7 @@ class FullFilterBlockReader : public FilterBlockReader { std::unique_ptr filter_bits_reader_; Slice contents_; + BlockContents block_contents_; std::unique_ptr filter_data_; bool MayMatch(const Slice& entry); diff --git a/table/meta_blocks.cc b/table/meta_blocks.cc index d9d0ed6c9..ebbe0f5a5 100644 --- a/table/meta_blocks.cc +++ b/table/meta_blocks.cc @@ -141,14 +141,15 @@ Status ReadProperties(const Slice &handle_value, RandomAccessFile *file, BlockContents block_contents; ReadOptions read_options; read_options.verify_checksums = false; - Status s = ReadBlockContents(file, footer, read_options, handle, - &block_contents, env, false); + Status s; + s = ReadBlockContents(file, footer, read_options, handle, &block_contents, + env, false); if (!s.ok()) { return s; } - Block properties_block(block_contents); + Block properties_block(std::move(block_contents)); std::unique_ptr iter( properties_block.NewIterator(BytewiseComparator())); @@ -228,12 +229,12 @@ Status ReadTableProperties(RandomAccessFile* file, uint64_t file_size, BlockContents metaindex_contents; ReadOptions read_options; read_options.verify_checksums = false; - s = ReadBlockContents(file, footer, read_options, metaindex_handle, - &metaindex_contents, env, false); + s = ReadBlockContents(file, footer, read_options, metaindex_handle, &metaindex_contents, + env, false); if (!s.ok()) { return s; } - Block metaindex_block(metaindex_contents); + Block metaindex_block(std::move(metaindex_contents)); std::unique_ptr meta_iter( metaindex_block.NewIterator(BytewiseComparator())); @@ -287,7 +288,7 @@ Status FindMetaBlock(RandomAccessFile* file, uint64_t file_size, if (!s.ok()) { return s; } - Block metaindex_block(metaindex_contents); + Block metaindex_block(std::move(metaindex_contents)); std::unique_ptr meta_iter; meta_iter.reset(metaindex_block.NewIterator(BytewiseComparator())); @@ -299,41 +300,36 @@ Status ReadMetaBlock(RandomAccessFile* file, uint64_t file_size, uint64_t table_magic_number, Env* env, const std::string& meta_block_name, BlockContents* contents) { + Status status; Footer footer(table_magic_number); - auto s = ReadFooterFromFile(file, file_size, &footer); - if (!s.ok()) { - return s; - } + status = ReadFooterFromFile(file, file_size, &footer); + if (!status.ok()) return status; // Reading metaindex block auto metaindex_handle = footer.metaindex_handle(); BlockContents metaindex_contents; ReadOptions read_options; read_options.verify_checksums = false; - s = ReadBlockContents(file, footer, read_options, metaindex_handle, - &metaindex_contents, env, false); - if (!s.ok()) { - return s; - } + status = ReadBlockContents(file, footer, read_options, metaindex_handle, + &metaindex_contents, env, false); + if (!status.ok()) return status; // Finding metablock - Block metaindex_block(metaindex_contents); + Block metaindex_block(std::move(metaindex_contents)); std::unique_ptr meta_iter; meta_iter.reset(metaindex_block.NewIterator(BytewiseComparator())); BlockHandle block_handle; - s = FindMetaBlock(meta_iter.get(), meta_block_name, &block_handle); + status = FindMetaBlock(meta_iter.get(), meta_block_name, &block_handle); - if (!s.ok()) { - return s; + if (!status.ok()) { + return status; } // Reading metablock - s = ReadBlockContents(file, footer, read_options, block_handle, contents, env, - false); - - return s; + return ReadBlockContents(file, footer, read_options, block_handle, contents, + env, false); } } // namespace rocksdb diff --git a/table/plain_table_reader.cc 
b/table/plain_table_reader.cc
index 23e53bcf7..3a6d48be8 100644
--- a/table/plain_table_reader.cc
+++ b/table/plain_table_reader.cc
@@ -20,6 +20,7 @@
 #include "table/block.h"
 #include "table/bloom_block.h"
+#include "table/filter_block.h"
 #include "table/format.h"
 #include "table/meta_blocks.h"
 #include "table/two_level_iterator.h"
diff --git a/table/table_test.cc b/table/table_test.cc
index 118291daa..0dd8fd7d4 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -265,8 +265,7 @@ class BlockConstructor: public Constructor {
     BlockContents contents;
     contents.data = data_;
     contents.cachable = false;
-    contents.heap_allocated = false;
-    block_ = new Block(contents);
+    block_ = new Block(std::move(contents));
    return Status::OK();
  }
  virtual Iterator* NewIterator() const {
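[editor's note] One piece of the patch worth spelling out is the consolidated ReadBlockContents() in format.cc, which folds the old ReadAndDecompressFast/ReadAndDecompress pair into a single routine: small blocks are read into a stack buffer and copied to a heap allocation only when the caller ends up owning the bytes, larger blocks go straight to a heap buffer, and ownership is always expressed through the unique_ptr now carried inside BlockContents. Below is a rough sketch of that buffer strategy under assumed names; OwnedBlock, ReadSmallOrLarge, kStackBufferSize, and the fake_read callback are inventions for the example, not RocksDB APIs.

#include <cstddef>
#include <cstring>
#include <memory>
#include <string>

constexpr size_t kStackBufferSize = 5000;  // illustrative threshold

struct OwnedBlock {
  std::unique_ptr<char[]> allocation;
  const char* data = nullptr;
  size_t size = 0;
};

// ReadFn: size_t(char* dst, size_t capacity) -- fills dst, returns bytes read.
template <typename ReadFn>
OwnedBlock ReadSmallOrLarge(size_t expected_size, ReadFn&& read) {
  char stack_buf[kStackBufferSize];
  std::unique_ptr<char[]> heap_buf;
  char* used_buf = nullptr;

  if (expected_size < sizeof(stack_buf)) {
    used_buf = stack_buf;  // common small case: no allocation for the read
  } else {
    heap_buf.reset(new char[expected_size]);
    used_buf = heap_buf.get();
  }

  size_t n = read(used_buf, expected_size);

  OwnedBlock result;
  if (used_buf == stack_buf) {
    // The stack buffer dies with this frame, so copy into an owned buffer.
    heap_buf.reset(new char[n]);
    std::memcpy(heap_buf.get(), stack_buf, n);
  }
  result.data = heap_buf.get();
  result.size = n;
  result.allocation = std::move(heap_buf);
  return result;
}

int main() {
  const std::string payload = "block bytes";
  auto fake_read = [&](char* dst, size_t cap) {
    size_t n = payload.size() < cap ? payload.size() : cap;
    std::memcpy(dst, payload.data(), n);
    return n;
  };
  OwnedBlock block = ReadSmallOrLarge(payload.size(), fake_read);
  return block.size == payload.size() ? 0 : 1;
}

The real function in the diff has two additional early returns that the sketch omits: compressed data is handed to UncompressBlockContents(), and memory owned by the file implementation is wrapped without copying (and marked non-cachable). The sketch only shows the stack-versus-heap decision and the final transfer of ownership into the result.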