Fail DB::Open() when the requested compression is not available

Summary:
Currently, when the compression type requested in the options is not linked into the binary, RocksDB silently ignores it and doesn't compress the data. Based on discussion, we agree that this is pretty bad because it can cause confusion for our users.

This patch fails DB::Open() if we don't support the compression that is specified in the options.
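
For illustration only (not part of the patch), here is a minimal sketch of what a caller sees after this change, assuming the binary was built without LZ4; the database path and the fallback to kNoCompression are arbitrary example choices:

#include <cassert>
#include "rocksdb/db.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Request a compression type whose library (in this example) is not linked in.
  options.compression = rocksdb::kLZ4Compression;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/compression_example", &db);
  if (!s.ok()) {
    // With this patch, Open() reports InvalidArgument instead of silently
    // writing uncompressed data.
    assert(s.IsInvalidArgument());
    // One possible application-level fallback: retry without compression.
    options.compression = rocksdb::kNoCompression;
    s = rocksdb::DB::Open(options, "/tmp/compression_example", &db);
  }
  assert(s.ok());
  delete db;
  return 0;
}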

Test Plan: make check with LZ4 not present. If Snappy is not present, all tests will just fail because Snappy is our default compression library. We should make Snappy a requirement, since without it our default DB::Open() fails.

Reviewers: sdong, MarkCallaghan, rven, yhchiang

Reviewed By: yhchiang

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D39687
Branch: main
Author: Igor Canadi, 10 years ago
Parent: 69bb210d58
Commit: 760e9a94de
Changed files:
  1. HISTORY.md (1)
  2. db/column_family.cc (41)
  3. db/column_family.h (2)
  4. db/db_impl.cc (22)
  5. db/db_test.cc (29)
  6. include/rocksdb/options.h (10)
  7. util/compression.h (40)
  8. util/options.cc (46)
  9. utilities/spatialdb/spatial_db_test.cc (16)

@@ -19,6 +19,7 @@
* DB::CompactRange() now accept CompactRangeOptions instead of multiple paramters. CompactRangeOptions is defined in include/rocksdb/options.h.
* Add force_bottommost_level_compaction option to CompactRangeOptions, which prevent compaction from skipping compacting bottommost level.
* Add Cache.GetPinnedUsage() to get the size of memory occupied by entries that are in use by the system.
* DB::Open() will fail if the compression specified in Options is not linked with the binary. If you see this failure, recompile RocksDB with the compression libraries present on your system. Also, the default compression used to be Snappy unconditionally; now the default is Snappy only if it's available on the system, and kNoCompression otherwise.
## 3.11.0 (5/19/2015)
### New Features

@@ -29,6 +29,7 @@
#include "db/version_set.h"
#include "db/write_controller.h"
#include "util/autovector.h"
#include "util/compression.h"
#include "util/hash_skiplist_rep.h" #include "util/hash_skiplist_rep.h"
#include "util/options_helper.h" #include "util/options_helper.h"
#include "util/thread_status_util.h" #include "util/thread_status_util.h"
@@ -87,6 +88,28 @@ void GetIntTblPropCollectorFactory(
      new InternalKeyPropertiesCollectorFactory);
}
Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options) {
  if (!cf_options.compression_per_level.empty()) {
    for (size_t level = 0; level < cf_options.compression_per_level.size();
         ++level) {
      if (!CompressionTypeSupported(cf_options.compression_per_level[level])) {
        return Status::InvalidArgument(
            "Compression type " +
            CompressionTypeToString(cf_options.compression_per_level[level]) +
            " is not linked with the binary.");
      }
    }
  } else {
    if (!CompressionTypeSupported(cf_options.compression)) {
      return Status::InvalidArgument(
          "Compression type " +
          CompressionTypeToString(cf_options.compression) +
          " is not linked with the binary.");
    }
  }
  return Status::OK();
}
ColumnFamilyOptions SanitizeOptions(const DBOptions& db_options,
                                    const InternalKeyComparator* icmp,
                                    const ColumnFamilyOptions& src) {
@@ -141,24 +164,6 @@ ColumnFamilyOptions SanitizeOptions(const DBOptions& db_options,
    }
  }
  if (!src.compression_per_level.empty()) {
    for (size_t level = 0; level < src.compression_per_level.size(); ++level) {
      if (!CompressionTypeSupported(src.compression_per_level[level])) {
        Log(InfoLogLevel::WARN_LEVEL, db_options.info_log,
            "Compression type chosen for level %zu is not supported: %s. "
            "RocksDB "
            "will not compress data on level %zu.",
            level, CompressionTypeToString(src.compression_per_level[level]),
            level);
      }
    }
  } else if (!CompressionTypeSupported(src.compression)) {
    Log(InfoLogLevel::WARN_LEVEL, db_options.info_log,
        "Compression type chosen is not supported: %s. RocksDB will not "
        "compress data.",
        CompressionTypeToString(src.compression));
  }
  if (result.compaction_style == kCompactionStyleFIFO) {
    result.num_levels = 1;
    // since we delete level0 files in FIFO compaction when there are too many

@@ -130,6 +130,8 @@ struct SuperVersion {
  autovector<MemTable*> to_delete;
};
extern Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options);
extern ColumnFamilyOptions SanitizeOptions(const DBOptions& db_options,
                                           const InternalKeyComparator* icmp,
                                           const ColumnFamilyOptions& src);

@@ -2918,6 +2918,12 @@ Status DBImpl::CreateColumnFamily(const ColumnFamilyOptions& cf_options,
                                  ColumnFamilyHandle** handle) {
  Status s;
  *handle = nullptr;

  s = CheckCompressionSupported(cf_options);
  if (!s.ok()) {
    return s;
  }

  {
    InstrumentedMutexLock l(&mutex_);
@@ -4154,8 +4160,12 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
    return s;
  }
  for (auto& cfd : column_families) {
    s = CheckCompressionSupported(cfd.options);
    if (!s.ok()) {
      return s;
    }
    if (db_options.db_paths.size() > 1) {
      if ((cfd.options.compaction_style != kCompactionStyleUniversal) &&
          (cfd.options.compaction_style != kCompactionStyleLevel)) {
        return Status::NotSupported(
@@ -4163,11 +4173,11 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
            "universal and level compaction styles. ");
      }
    }
  }
  if (db_options.db_paths.size() > 4) {
    return Status::NotSupported(
        "More than four DB paths are not supported yet. ");
  }
  }
  *dbptr = nullptr;

@@ -11354,6 +11354,9 @@ TEST_F(DBTest, FlushOnDestroy) {
}

TEST_F(DBTest, DynamicLevelMaxBytesBase) {
  if (!Snappy_Supported() || !LZ4_Supported()) {
    return;
  }
  // Use InMemoryEnv, or it would be too slow.
  unique_ptr<Env> env(new MockEnv(env_));
@@ -11925,6 +11928,9 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
}

TEST_F(DBTest, DynamicLevelCompressionPerLevel2) {
  if (!Snappy_Supported() || !LZ4_Supported() || !Zlib_Supported()) {
    return;
  }
  const int kNKeys = 500;
  int keys[kNKeys];
  for (int i = 0; i < kNKeys; i++) {
@@ -12707,6 +12713,9 @@ TEST_F(DBTest, EncodeDecompressedBlockSizeTest) {
  CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
                                    kLZ4Compression, kLZ4HCCompression};
  for (int iter = 0; iter < 4; ++iter) {
    if (!CompressionTypeSupported(compressions[iter])) {
      continue;
    }
    // first_table_version 1 -- generate with table_version == 1, read with
    // table_version == 2
    // first_table_version 2 -- generate with table_version == 2, read with
@@ -13859,6 +13868,26 @@ TEST_F(DBTest, ForceBottommostLevelCompaction) {
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

TEST_F(DBTest, FailWhenCompressionNotSupportedTest) {
  CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
                                    kLZ4Compression, kLZ4HCCompression};
  for (int iter = 0; iter < 4; ++iter) {
    if (!CompressionTypeSupported(compressions[iter])) {
      // not supported, we should fail the Open()
      Options options = CurrentOptions();
      options.compression = compressions[iter];
      ASSERT_TRUE(!TryReopen(options).ok());
      // Try if CreateColumnFamily also fails
      options.compression = kNoCompression;
      ASSERT_OK(TryReopen(options));
      ColumnFamilyOptions cf_options(options);
      cf_options.compression = compressions[iter];
      ColumnFamilyHandle* handle;
      ASSERT_TRUE(!db_->CreateColumnFamily(cf_options, "name", &handle).ok());
    }
  }
}
}  // namespace rocksdb

int main(int argc, char** argv) {

@@ -55,12 +55,6 @@ enum CompressionType : char {
  kBZip2Compression = 0x3, kLZ4Compression = 0x4, kLZ4HCCompression = 0x5
};
// returns true if RocksDB was correctly linked with compression library and
// supports the compression type
extern bool CompressionTypeSupported(CompressionType compression_type);
// Returns a human-readable name of the compression type
extern const char* CompressionTypeToString(CompressionType compression_type);
enum CompactionStyle : char {
  // level based compaction style
  kCompactionStyleLevel = 0x0,
@@ -260,8 +254,8 @@ struct ColumnFamilyOptions {
  // Compress blocks using the specified compression algorithm. This
  // parameter can be changed dynamically.
  //
  // Default: kSnappyCompression, which gives lightweight but fast
  // compression.
  // Default: kSnappyCompression, if it's supported. If snappy is not linked
  // with the library, the default is kNoCompression.
  //
  // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
  //    ~200-500MB/s compression

@@ -62,6 +62,46 @@ inline bool LZ4_Supported() {
  return false;
}

inline bool CompressionTypeSupported(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression:
      return true;
    case kSnappyCompression:
      return Snappy_Supported();
    case kZlibCompression:
      return Zlib_Supported();
    case kBZip2Compression:
      return BZip2_Supported();
    case kLZ4Compression:
      return LZ4_Supported();
    case kLZ4HCCompression:
      return LZ4_Supported();
    default:
      assert(false);
      return false;
  }
}

inline std::string CompressionTypeToString(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression:
      return "NoCompression";
    case kSnappyCompression:
      return "Snappy";
    case kZlibCompression:
      return "Zlib";
    case kBZip2Compression:
      return "BZip2";
    case kLZ4Compression:
      return "LZ4";
    case kLZ4HCCompression:
      return "LZ4HC";
    default:
      assert(false);
      return "";
  }
}

// compress_format_version can have two values:
// 1 -- decompressed sizes for BZip2 and Zlib are not included in the compressed
// block. Also, decompressed sizes for LZ4 are encoded in platform-dependent

@@ -84,7 +84,7 @@ ColumnFamilyOptions::ColumnFamilyOptions()
      max_write_buffer_number(2),
      min_write_buffer_number_to_merge(1),
      max_write_buffer_number_to_maintain(0),
      compression(kSnappyCompression),
      compression(Snappy_Supported() ? kSnappyCompression : kNoCompression),
      prefix_extractor(nullptr),
      num_levels(7),
      level0_file_num_compaction_trigger(4),
@@ -380,11 +380,11 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
  if (!compression_per_level.empty()) {
    for (unsigned int i = 0; i < compression_per_level.size(); i++) {
      Warn(log, " Options.compression[%d]: %s", i,
           CompressionTypeToString(compression_per_level[i]));
           CompressionTypeToString(compression_per_level[i]).c_str());
    }
  } else {
    Warn(log, " Options.compression: %s",
         CompressionTypeToString(compression));
         CompressionTypeToString(compression).c_str());
  }
  Warn(log, " Options.prefix_extractor: %s",
       prefix_extractor == nullptr ? "nullptr" : prefix_extractor->Name());
@@ -546,46 +546,6 @@ Options::PrepareForBulkLoad()
  return this;
}

const char* CompressionTypeToString(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression:
      return "NoCompression";
    case kSnappyCompression:
      return "Snappy";
    case kZlibCompression:
      return "Zlib";
    case kBZip2Compression:
      return "BZip2";
    case kLZ4Compression:
      return "LZ4";
    case kLZ4HCCompression:
      return "LZ4HC";
    default:
      assert(false);
      return "";
  }
}

bool CompressionTypeSupported(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression:
      return true;
    case kSnappyCompression:
      return Snappy_Supported();
    case kZlibCompression:
      return Zlib_Supported();
    case kBZip2Compression:
      return BZip2_Supported();
    case kLZ4Compression:
      return LZ4_Supported();
    case kLZ4HCCompression:
      return LZ4_Supported();
    default:
      assert(false);
      return false;
  }
}

#ifndef ROCKSDB_LITE
// Optimization functions
ColumnFamilyOptions* ColumnFamilyOptions::OptimizeForPointLookup(

@@ -8,6 +8,7 @@
#include <set>
#include "rocksdb/utilities/spatial_db.h"
#include "util/compression.h"
#include "util/testharness.h" #include "util/testharness.h"
#include "util/testutil.h" #include "util/testutil.h"
#include "util/random.h" #include "util/random.h"
@ -47,6 +48,9 @@ class SpatialDBTest : public testing::Test {
}; };
TEST_F(SpatialDBTest, FeatureSetSerializeTest) { TEST_F(SpatialDBTest, FeatureSetSerializeTest) {
if (!LZ4_Supported()) {
return;
}
FeatureSet fs; FeatureSet fs;
fs.Set("a", std::string("b")); fs.Set("a", std::string("b"));
@@ -94,6 +98,9 @@ TEST_F(SpatialDBTest, FeatureSetSerializeTest) {
}

TEST_F(SpatialDBTest, TestNextID) {
  if (!LZ4_Supported()) {
    return;
  }
  ASSERT_OK(SpatialDB::Create(
      SpatialDBOptions(), dbname_,
      {SpatialIndexOptions("simple", BoundingBox<double>(0, 0, 100, 100), 2)}));
@@ -117,6 +124,9 @@ TEST_F(SpatialDBTest, TestNextID) {
}

TEST_F(SpatialDBTest, FeatureSetTest) {
  if (!LZ4_Supported()) {
    return;
  }
  ASSERT_OK(SpatialDB::Create(
      SpatialDBOptions(), dbname_,
      {SpatialIndexOptions("simple", BoundingBox<double>(0, 0, 100, 100), 2)}));
@@ -151,6 +161,9 @@ TEST_F(SpatialDBTest, FeatureSetTest) {
}

TEST_F(SpatialDBTest, SimpleTest) {
  if (!LZ4_Supported()) {
    return;
  }
  // iter 0 -- not read only
  // iter 1 -- read only
  for (int iter = 0; iter < 2; ++iter) {
@@ -227,6 +240,9 @@ BoundingBox<double> ScaleBB(BoundingBox<int> b, double step) {
}  // namespace

TEST_F(SpatialDBTest, RandomizedTest) {
  if (!LZ4_Supported()) {
    return;
  }
  Random rnd(301);
  std::vector<std::pair<std::string, BoundingBox<int>>> elements;
