Mark files for compaction in stress/crash tests (#7231)

Summary:
The mechanism to mark files for compaction is most commonly used in
delete-triggered compaction. This PR adds an option to exercise the
marking mechanism on random files created by db_stress. This PR also
enables that option in db_crashtest.py on its db_stress runs at random.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7231

Test Plan:
- ran some minified crash tests; verified they succeed and we see `"compaction_reason": "FilesMarkedForCompaction"` regularly in the logs.

```
$ TEST_TMPDIR=/dev/shm python tools/db_crashtest.py blackbox --duration=600 --interval=30 --max_key=10000000 --write_buffer_size=1048576 --target_file_size_base=1048576 --max_bytes_for_level_base=4194304 --value_size_mult=33
$ TEST_TMPDIR=/dev/shm python tools/db_crashtest.py whitebox --duration=600 --interval=30 --max_key=1000000 --write_buffer_size=1048576 --target_file_size_base=1048576 --max_bytes_for_level_base=4194304 --value_size_mult=33 --random_kill_odd=8887
```

Reviewed By: anand1976

Differential Revision: D23025156

Pulled By: ajkr

fbshipit-source-id: a404c467ebc12afa94dae35956ea9b372f592a96
main
Andrew Kryczka 4 years ago committed by Facebook GitHub Bot
parent f308da5273
commit 7eebe6d38a
  1. 1
      db_stress_tool/db_stress_common.h
  2. 6
      db_stress_tool/db_stress_gflags.cc
  3. 64
      db_stress_tool/db_stress_table_properties_collector.h
  4. 3
      db_stress_tool/db_stress_test_base.cc
  5. 1
      tools/db_crashtest.py

@ -179,6 +179,7 @@ DECLARE_int32(ingest_external_file_one_in);
DECLARE_int32(ingest_external_file_width);
DECLARE_int32(compact_files_one_in);
DECLARE_int32(compact_range_one_in);
DECLARE_int32(mark_for_compaction_one_file_in);
DECLARE_int32(flush_one_in);
DECLARE_int32(pause_background_one_in);
DECLARE_int32(compact_range_width);

@ -500,6 +500,12 @@ DEFINE_int32(compact_range_one_in, 0,
"If non-zero, then CompactRange() will be called once for every N "
"operations on average. 0 indicates CompactRange() is disabled.");
DEFINE_int32(mark_for_compaction_one_file_in, 0,
"A `TablePropertiesCollectorFactory` will be registered, which "
"creates a `TablePropertiesCollector` with `NeedCompact()` "
"returning true once for every N files on average. 0 or negative "
"mean `NeedCompact()` always returns false.");
DEFINE_int32(flush_one_in, 0,
"If non-zero, then Flush() will be called once for every N ops "
"on average. 0 indicates calls to Flush() are disabled.");

@ -0,0 +1,64 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once
#include "rocksdb/table.h"
#include "util/gflags_compat.h"
DECLARE_int32(mark_for_compaction_one_file_in);
namespace ROCKSDB_NAMESPACE {
// A `DbStressTablePropertiesCollector` ignores what keys/values were added to
// the table, adds no properties to the table, and decides at random whether the
// table will be marked for compaction according to
// `FLAGS_mark_for_compaction_one_file_in`.
class DbStressTablePropertiesCollector : public TablePropertiesCollector {
public:
DbStressTablePropertiesCollector()
: need_compact_(Random::GetTLSInstance()->OneInOpt(
FLAGS_mark_for_compaction_one_file_in)) {}
virtual Status AddUserKey(const Slice& /* key */, const Slice& /* value */,
EntryType /*type*/, SequenceNumber /*seq*/,
uint64_t /*file_size*/) override {
return Status::OK();
}
virtual Status Finish(UserCollectedProperties* /* properties */) override {
return Status::OK();
}
virtual UserCollectedProperties GetReadableProperties() const override {
return UserCollectedProperties{};
}
virtual const char* Name() const override {
return "DbStressTablePropertiesCollector";
}
virtual bool NeedCompact() const override { return need_compact_; }
private:
const bool need_compact_;
};
// A `DbStressTablePropertiesCollectorFactory` creates
// `DbStressTablePropertiesCollectorFactory`s.
class DbStressTablePropertiesCollectorFactory
: public TablePropertiesCollectorFactory {
public:
virtual TablePropertiesCollector* CreateTablePropertiesCollector(
TablePropertiesCollectorFactory::Context /* context */) override {
return new DbStressTablePropertiesCollector();
}
virtual const char* Name() const override {
return "DbStressTablePropertiesCollectorFactory";
}
};
} // namespace ROCKSDB_NAMESPACE

@ -12,6 +12,7 @@
#include "db_stress_tool/db_stress_common.h"
#include "db_stress_tool/db_stress_compaction_filter.h"
#include "db_stress_tool/db_stress_driver.h"
#include "db_stress_tool/db_stress_table_properties_collector.h"
#include "rocksdb/convenience.h"
#include "rocksdb/sst_file_manager.h"
#include "util/cast_util.h"
@ -2015,6 +2016,8 @@ void StressTest::Open() {
options_.compaction_filter_factory =
    std::make_shared<DbStressCompactionFilterFactory>();
}
options_.table_properties_collector_factories.emplace_back(
    std::make_shared<DbStressTablePropertiesCollectorFactory>());
options_.best_efforts_recovery = FLAGS_best_efforts_recovery;

@ -66,6 +66,7 @@ default_params = {
# Temporarily disable hash index
"index_type": lambda: random.choice([0, 0, 0, 2, 2, 3]),
"iterpercent": 10,
"mark_for_compaction_one_file_in": lambda: 10 * random.randint(0, 1),
"max_background_compactions": 20,
"max_bytes_for_level_base": 10485760,
"max_key": 100000000,

Loading…
Cancel
Save