print table options

Summary: Add a virtual function to the table factory that will print its table options

Test Plan: make release

Reviewers: igor, yhchiang, sdong

Reviewed By: sdong

Subscribers: leveldb

Differential Revision: https://reviews.facebook.net/D22149
Branch: main
Author: Lei Jin, 10 years ago
parent 66f62e5c78
commit a98badff16
Changed files:
  1. db/simple_table_db_test.cc (4 changed lines)
  2. include/rocksdb/table.h (88 changed lines)
  3. table/adaptive_table_factory.cc (33 changed lines)
  4. table/adaptive_table_factory.h (2 changed lines)
  5. table/block_based_table_factory.cc (60 changed lines)
  6. table/block_based_table_factory.h (2 changed lines)
  7. table/cuckoo_table_factory.cc (15 changed lines)
  8. table/cuckoo_table_factory.h (3 changed lines)
  9. table/plain_table_factory.cc (33 changed lines)
  10. table/plain_table_factory.h (2 changed lines)
  11. util/options.cc (2 changed lines)
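The virtual added by this change can be exercised directly on any of the built-in factories. Below is a minimal sketch, not part of the patch, that builds a block-based factory with default options and prints the string returned by the new GetPrintableTableOptions(); it assumes the RocksDB headers and library from this revision are available.

#include <iostream>
#include <memory>
#include "rocksdb/table.h"

int main() {
  // Default block-based options; this is the factory whose
  // GetPrintableTableOptions() implementation is added in this patch.
  rocksdb::BlockBasedTableOptions table_options;
  std::unique_ptr<rocksdb::TableFactory> factory(
      rocksdb::NewBlockBasedTableFactory(table_options));

  // Each option is formatted as one "  name: value" line by the factory.
  std::cout << factory->Name() << " options:\n"
            << factory->GetPrintableTableOptions();
  return 0;
}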

@@ -559,6 +559,10 @@ public:
virtual Status SanitizeDBOptions(DBOptions* db_opts) const override {
return Status::OK();
}
virtual std::string GetPrintableTableOptions() const override {
return std::string();
}
};
Status SimpleTableFactory::NewTableReader(

@@ -166,47 +166,49 @@ struct PlainTablePropertyNames {
const uint32_t kPlainTableVariableLength = 0;
struct PlainTableOptions {
// @user_key_len: plain table has optimization for fixed-size keys, which can
// be specified via user_key_len. Alternatively, you can pass
// `kPlainTableVariableLength` if your keys have variable
// lengths.
uint32_t user_key_len = kPlainTableVariableLength;
// @bloom_bits_per_key: the number of bits used for bloom filter per prefix.
// You may disable it by passing a zero.
int bloom_bits_per_key = 10;
// @hash_table_ratio: the desired utilization of the hash table used for
// prefix hashing.
// hash_table_ratio = number of prefixes / #buckets in the
// hash table
double hash_table_ratio = 0.75;
// @index_sparseness: inside each prefix, need to build one index record for
// how many keys for binary search inside each hash bucket.
// For encoding type kPrefix, the value will be used when
// writing to determine an interval to rewrite the full
// key. It will also be used as a suggestion and satisfied
// when possible.
size_t index_sparseness = 16;
// @huge_page_tlb_size: if <=0, allocate hash indexes and blooms from malloc.
// Otherwise from huge page TLB. The user needs to
// reserve huge pages for it to be allocated, like:
// sysctl -w vm.nr_hugepages=20
// See linux doc Documentation/vm/hugetlbpage.txt
size_t huge_page_tlb_size = 0;
// @encoding_type: how to encode the keys. See enum EncodingType above for
// the choices. The value will determine how to encode keys
// when writing to a new SST file. This value will be stored
// inside the SST file which will be used when reading from
// the file, which makes it possible for users to choose
// different encoding type when reopening a DB. Files with
// different encoding types can co-exist in the same DB and
// can be read.
EncodingType encoding_type = kPlain;
// @full_scan_mode: mode for reading the whole file one record at a time
// without using the index.
bool full_scan_mode = false;
// @store_index_in_file: compute plain table index and bloom filter during
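The fields documented above map one-to-one onto the members printed by PlainTableFactory::GetPrintableTableOptions() later in this diff. A small sketch, not part of the patch, of filling them in (the 16-byte key length is just an example value):

#include <memory>
#include "rocksdb/table.h"

std::shared_ptr<rocksdb::TableFactory> MakePlainTableFactory() {
  rocksdb::PlainTableOptions opts;
  opts.user_key_len = 16;        // example: fixed 16-byte user keys
  opts.bloom_bits_per_key = 10;  // 0 disables the bloom filter
  opts.hash_table_ratio = 0.75;  // prefixes / hash buckets
  opts.index_sparseness = 16;    // one index record per 16 keys in a bucket
  return std::shared_ptr<rocksdb::TableFactory>(
      rocksdb::NewPlainTableFactory(opts));
}

A DB that uses this factory typically also sets Options::prefix_extractor, which is unrelated to this diff.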
@@ -299,6 +301,10 @@ class TableFactory {
// If the function cannot find a way to sanitize the input DB Options,
// a non-ok Status will be returned.
virtual Status SanitizeDBOptions(DBOptions* db_opts) const = 0;
// Return a string that contains printable format of table configurations.
// RocksDB prints configurations at DB Open().
virtual std::string GetPrintableTableOptions() const = 0;
};
#ifndef ROCKSDB_LITE
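As the comment above notes, RocksDB prints these configurations at DB Open(); the util/options.cc hunk at the bottom of this diff is what writes them to the info LOG. A hypothetical usage sketch (the DB path is made up):

#include <cassert>
#include "rocksdb/db.h"
#include "rocksdb/table.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(
      rocksdb::BlockBasedTableOptions()));

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/print_table_options_demo", &db);
  assert(s.ok());
  // The LOG file in the DB directory now contains a
  // "table_factory options:" entry followed by the block-based settings.
  delete db;
  return 0;
}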

@@ -70,6 +70,39 @@ TableBuilder* AdaptiveTableFactory::NewTableBuilder(
file, compression_type);
}
std::string AdaptiveTableFactory::GetPrintableTableOptions() const {
std::string ret;
ret.reserve(20000);
const int kBufferSize = 200;
char buffer[kBufferSize];
if (table_factory_to_write_) {
snprintf(buffer, kBufferSize, " write factory (%s) options:\n%s\n",
table_factory_to_write_->Name(),
table_factory_to_write_->GetPrintableTableOptions().c_str());
ret.append(buffer);
}
if (plain_table_factory_) {
snprintf(buffer, kBufferSize, " %s options:\n%s\n",
plain_table_factory_->Name(),
plain_table_factory_->GetPrintableTableOptions().c_str());
ret.append(buffer);
}
if (block_based_table_factory_) {
snprintf(buffer, kBufferSize, " %s options:\n%s\n",
block_based_table_factory_->Name(),
block_based_table_factory_->GetPrintableTableOptions().c_str());
ret.append(buffer);
}
if (cuckoo_table_factory_) {
snprintf(buffer, kBufferSize, " %s options:\n%s\n",
cuckoo_table_factory_->Name(),
cuckoo_table_factory_->GetPrintableTableOptions().c_str());
ret.append(buffer);
}
return ret;
}
extern TableFactory* NewAdaptiveTableFactory(
std::shared_ptr<TableFactory> table_factory_to_write,
std::shared_ptr<TableFactory> block_based_table_factory,

@@ -6,6 +6,7 @@
#ifndef ROCKSDB_LITE
#include <string>
#include "rocksdb/options.h" #include "rocksdb/options.h"
#include "rocksdb/table.h" #include "rocksdb/table.h"
@@ -50,6 +51,7 @@ class AdaptiveTableFactory : public TableFactory {
return Status::OK();
}
std::string GetPrintableTableOptions() const override;
private:
std::shared_ptr<TableFactory> table_factory_to_write_;

@@ -64,6 +64,66 @@ TableBuilder* BlockBasedTableFactory::NewTableBuilder(
return table_builder;
}
std::string BlockBasedTableFactory::GetPrintableTableOptions() const {
std::string ret;
ret.reserve(20000);
const int kBufferSize = 200;
char buffer[kBufferSize];
snprintf(buffer, kBufferSize, " flush_block_policy_factory: %s (%p)\n",
table_options_.flush_block_policy_factory->Name(),
table_options_.flush_block_policy_factory.get());
ret.append(buffer);
snprintf(buffer, kBufferSize, " cache_index_and_filter_blocks: %d\n",
table_options_.cache_index_and_filter_blocks);
ret.append(buffer);
snprintf(buffer, kBufferSize, " index_type: %d\n",
table_options_.index_type);
ret.append(buffer);
snprintf(buffer, kBufferSize, " hash_index_allow_collision: %d\n",
table_options_.hash_index_allow_collision);
ret.append(buffer);
snprintf(buffer, kBufferSize, " checksum: %d\n",
table_options_.checksum);
ret.append(buffer);
snprintf(buffer, kBufferSize, " no_block_cache: %d\n",
table_options_.no_block_cache);
ret.append(buffer);
snprintf(buffer, kBufferSize, " block_cache: %p\n",
table_options_.block_cache.get());
ret.append(buffer);
if (table_options_.block_cache) {
snprintf(buffer, kBufferSize, " block_cache_size: %zd\n",
table_options_.block_cache->GetCapacity());
ret.append(buffer);
}
snprintf(buffer, kBufferSize, " block_cache_compressed: %p\n",
table_options_.block_cache_compressed.get());
ret.append(buffer);
if (table_options_.block_cache_compressed) {
snprintf(buffer, kBufferSize, " block_cache_compressed_size: %zd\n",
table_options_.block_cache_compressed->GetCapacity());
ret.append(buffer);
}
snprintf(buffer, kBufferSize, " block_size: %zd\n",
table_options_.block_size);
ret.append(buffer);
snprintf(buffer, kBufferSize, " block_size_deviation: %d\n",
table_options_.block_size_deviation);
ret.append(buffer);
snprintf(buffer, kBufferSize, " block_restart_interval: %d\n",
table_options_.block_restart_interval);
ret.append(buffer);
snprintf(buffer, kBufferSize, " filter_policy: %s\n",
table_options_.filter_policy == nullptr ?
"nullptr" : table_options_.filter_policy->Name());
ret.append(buffer);
snprintf(buffer, kBufferSize, " whole_key_filtering: %d\n",
table_options_.whole_key_filtering);
ret.append(buffer);
return ret;
}
TableFactory* NewBlockBasedTableFactory(
const BlockBasedTableOptions& table_options) {
return new BlockBasedTableFactory(table_options);

@@ -49,6 +49,8 @@ class BlockBasedTableFactory : public TableFactory {
return Status::OK();
}
std::string GetPrintableTableOptions() const override;
private:
BlockBasedTableOptions table_options_;
};

@@ -51,6 +51,21 @@ TableBuilder* CuckooTableFactory::NewTableBuilder(
max_search_depth_, GetSliceMurmurHash);
}
std::string CuckooTableFactory::GetPrintableTableOptions() const {
std::string ret;
ret.reserve(2000);
const int kBufferSize = 200;
char buffer[kBufferSize];
snprintf(buffer, kBufferSize, " hash_table_ratio: %lf\n",
hash_table_ratio_);
ret.append(buffer);
snprintf(buffer, kBufferSize, " max_search_depth: %u\n",
max_search_depth_);
ret.append(buffer);
return ret;
}
TableFactory* NewCuckooTableFactory(double hash_table_ratio,
uint32_t max_search_depth) {
return new CuckooTableFactory(hash_table_ratio, max_search_depth);

@@ -6,6 +6,7 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <string>
#include "rocksdb/table.h" #include "rocksdb/table.h"
namespace rocksdb { namespace rocksdb {
@@ -45,6 +46,8 @@ class CuckooTableFactory : public TableFactory {
return Status::OK();
}
std::string GetPrintableTableOptions() const override;
private:
const double hash_table_ratio_;
const uint32_t max_search_depth_;

@@ -35,6 +35,39 @@ TableBuilder* PlainTableFactory::NewTableBuilder(
store_index_in_file_);
}
std::string PlainTableFactory::GetPrintableTableOptions() const {
std::string ret;
ret.reserve(20000);
const int kBufferSize = 200;
char buffer[kBufferSize];
snprintf(buffer, kBufferSize, " user_key_len: %u\n",
user_key_len_);
ret.append(buffer);
snprintf(buffer, kBufferSize, " bloom_bits_per_key: %d\n",
bloom_bits_per_key_);
ret.append(buffer);
snprintf(buffer, kBufferSize, " hash_table_ratio: %lf\n",
hash_table_ratio_);
ret.append(buffer);
snprintf(buffer, kBufferSize, " index_sparseness: %zd\n",
index_sparseness_);
ret.append(buffer);
snprintf(buffer, kBufferSize, " huge_page_tlb_size: %zd\n",
huge_page_tlb_size_);
ret.append(buffer);
snprintf(buffer, kBufferSize, " encoding_type: %d\n",
encoding_type_);
ret.append(buffer);
snprintf(buffer, kBufferSize, " full_scan_mode: %d\n",
full_scan_mode_);
ret.append(buffer);
snprintf(buffer, kBufferSize, " store_index_in_file: %d\n",
store_index_in_file_);
ret.append(buffer);
return ret;
}
extern TableFactory* NewPlainTableFactory(const PlainTableOptions& options) {
return new PlainTableFactory(options);
}

@@ -164,6 +164,8 @@ class PlainTableFactory : public TableFactory {
CompressionType compression_type) const
override;
std::string GetPrintableTableOptions() const override;
static const char kValueTypeSeqId0 = 0xFF;
// Sanitizes the specified DB Options.

@@ -305,6 +305,8 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
compaction_filter_factory_v2->Name());
Log(log, " Options.memtable_factory: %s", memtable_factory->Name());
Log(log, " Options.table_factory: %s", table_factory->Name());
Log(log, " table_factory options: %s",
table_factory->GetPrintableTableOptions().c_str());
Log(log, " Options.write_buffer_size: %zd", write_buffer_size); Log(log, " Options.write_buffer_size: %zd", write_buffer_size);
Log(log, " Options.max_write_buffer_number: %d", max_write_buffer_number); Log(log, " Options.max_write_buffer_number: %d", max_write_buffer_number);
if (!compression_per_level.empty()) { if (!compression_per_level.empty()) {
