Use a different approach to make sure BlockBasedTableReader can use hash index on older files

Summary:
A recent commit e37dd216f9 makes sure the hash index can be used when reading existing files. This patch achieves the same goal in a different way:
(1) For now, always write kBinarySearch to files, regardless of the BlockBasedTableOptions.IndexType setting.
(2) When reading a file, read the index-type field back and make sure it is kBinarySearch, while always building the index type configured by the user.

The reason for doing this is to reserve the kHashSearch value on disk for the future. Today we write a binary search index for both kHashSearch and kBinarySearch; if we labeled those files kHashSearch now, we would have to introduce a new flag later for a real on-disk hash index, otherwise compatibility would break. We also want the real index type and the type shown in the properties block to be consistent.
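
With this change an application can keep requesting the hash index through the table options while the files it writes still record kBinarySearch on disk. Below is a minimal sketch of that configuration, assuming RocksDB's public Options / BlockBasedTableOptions / NewBlockBasedTableFactory / NewFixedPrefixTransform API; exact member types (e.g. prefix_extractor) may differ slightly between versions.

#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"

// Sketch only: opens a DB that builds hash indexes in memory while the
// files it writes still store kBinarySearch in their properties block.
rocksdb::Status OpenWithHashIndex(const std::string& path, rocksdb::DB** db) {
  rocksdb::Options options;
  options.create_if_missing = true;
  // The hash index needs a prefix extractor to group keys; a 4-byte fixed
  // prefix is an arbitrary choice for this example.
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(4));

  rocksdb::BlockBasedTableOptions table_options;
  table_options.index_type = rocksdb::BlockBasedTableOptions::kHashSearch;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));

  return rocksdb::DB::Open(options, path, db);
}

Files written this way carry kBinarySearch in the properties block, so the reader-side check added in block_based_table_reader.cc accepts them and then builds whichever index type the user configured.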

Test Plan: make all check

Reviewers: haobo, kailiu

Reviewed By: kailiu

CC: igor, ljin, yhchiang, xjin, dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D18009
Branch: main
Author: sdong
Parent: 35c968f3a5
Commit: 27d3bc184e
  1. table/block_based_table_builder.cc (8 changed lines)
  2. table/block_based_table_reader.cc (19 changed lines)

@@ -159,9 +159,6 @@ IndexBuilder* CreateIndexBuilder(IndexType type, const Comparator* comparator) {
     case BlockBasedTableOptions::kBinarySearch: {
       return new ShortenedIndexBuilder(comparator);
     }
-    case BlockBasedTableOptions::kHashSearch: {
-      return new ShortenedIndexBuilder(comparator);
-    }
     default: {
       assert(!"Do not recognize the index type ");
       return nullptr;
@@ -324,13 +321,16 @@ struct BlockBasedTableBuilder::Rep {
   }
 };
 
+// TODO(sdong): Currently only write out binary search index. In
+// BlockBasedTableReader, Hash index will be built using binary search index.
 BlockBasedTableBuilder::BlockBasedTableBuilder(
     const Options& options, const BlockBasedTableOptions& table_options,
     const InternalKeyComparator& internal_comparator, WritableFile* file,
     CompressionType compression_type)
     : rep_(new Rep(options, internal_comparator, file,
                    table_options.flush_block_policy_factory.get(),
-                   compression_type, table_options.index_type)) {
+                   compression_type,
+                   BlockBasedTableOptions::IndexType::kBinarySearch)) {
   if (rep_->filter_block != nullptr) {
     rep_->filter_block->StartBlock(0);
   }

@@ -1030,14 +1030,29 @@ bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
 Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader) {
   // Some old version of block-based tables don't have index type present in
   // table properties. If that's the case we can safely use the kBinarySearch.
-  auto index_type = rep_->index_type;
+  auto index_type_on_file = BlockBasedTableOptions::kBinarySearch;
+  if (rep_->table_properties) {
+    auto& props = rep_->table_properties->user_collected_properties;
+    auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
+    if (pos != props.end()) {
+      index_type_on_file = static_cast<BlockBasedTableOptions::IndexType>(
+          DecodeFixed32(pos->second.c_str()));
+    }
+  }
+  // TODO(sdong): Currently binary index is the only index type we support in
+  // files. Hash index is built on top of binary index too.
+  if (index_type_on_file != BlockBasedTableOptions::kBinarySearch) {
+    return Status::NotSupported("File Contains not supported index type: ",
+                                std::to_string(index_type_on_file));
+  }
+
   auto file = rep_->file.get();
   const auto& index_handle = rep_->index_handle;
   auto env = rep_->options.env;
   auto comparator = &rep_->internal_comparator;
-  switch (index_type) {
+  switch (rep_->index_type) {
     case BlockBasedTableOptions::kBinarySearch: {
       return BinarySearchIndexReader::Create(file, index_handle, env,
                                              comparator, index_reader);
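
For reference on the property decoded in the hunk above: the index type is stored in the user_collected_properties map under BlockBasedTablePropertyNames::kIndexType as a fixed 32-bit value. The sketch below shows that round trip in isolation, assuming RocksDB's internal PutFixed32 / DecodeFixed32 coding helpers and approximate header paths; the property-collector plumbing that writes it into real tables is omitted.

#include <cassert>
#include <map>
#include <string>

#include "rocksdb/table.h"             // BlockBasedTableOptions
#include "rocksdb/table_properties.h"  // BlockBasedTablePropertyNames
#include "util/coding.h"               // PutFixed32 / DecodeFixed32 (internal)

// Sketch only: encode and decode the index-type property the way the
// builder and reader exchange it through user_collected_properties.
void IndexTypePropertyRoundTrip() {
  using rocksdb::BlockBasedTableOptions;
  using rocksdb::BlockBasedTablePropertyNames;

  // Builder side: after this patch the stored value is always kBinarySearch,
  // no matter which index_type the user configured.
  std::string encoded;
  rocksdb::PutFixed32(
      &encoded, static_cast<uint32_t>(BlockBasedTableOptions::kBinarySearch));

  std::map<std::string, std::string> props;
  props[BlockBasedTablePropertyNames::kIndexType] = encoded;

  // Reader side: decode and verify before choosing an IndexReader,
  // mirroring the check added to CreateIndexReader above.
  auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
  assert(pos != props.end());
  auto index_type_on_file = static_cast<BlockBasedTableOptions::IndexType>(
      rocksdb::DecodeFixed32(pos->second.c_str()));
  assert(index_type_on_file == BlockBasedTableOptions::kBinarySearch);
}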
