load("@fbcode_macros//build_defs:auto_headers.bzl", "AutoHeaders")
load("@fbcode_macros//build_defs:cpp_library.bzl", "cpp_library")
load(":defs.bzl", "test_binary")
REPO_PATH = package_name() + "/"
ROCKSDB_COMPILER_FLAGS = [
"-fno-builtin-memcmp",
"-DROCKSDB_PLATFORM_POSIX",
"-DROCKSDB_LIB_IO_POSIX",
"-DROCKSDB_FALLOCATE_PRESENT",
"-DROCKSDB_MALLOC_USABLE_SIZE",
"-DROCKSDB_RANGESYNC_PRESENT",
"-DROCKSDB_SCHED_GETCPU_PRESENT",
"-DROCKSDB_SUPPORT_THREAD_LOCAL",
"-DOS_LINUX",
# Flags to enable libs we include
"-DSNAPPY",
"-DZLIB",
"-DBZIP2",
"-DLZ4",
"-DZSTD",
"-DZSTD_STATIC_LINKING_ONLY",
"-DGFLAGS=gflags",
"-DNUMA",
"-DTBB",
# Needed to compile in fbcode
"-Wno-expansion-to-defined",
# Added missing flags from output of build_detect_platform
"-DROCKSDB_PTHREAD_ADAPTIVE_MUTEX",
"-DROCKSDB_BACKTRACE",
"-Wnarrowing",
]
ROCKSDB_EXTERNAL_DEPS = [
("bzip2", None, "bz2"),
("snappy", None, "snappy"),
("zlib", None, "z"),
("gflags", None, "gflags"),
("lz4", None, "lz4"),
("zstd", None),
("tbb", None),
("numa", None, "numa"),
("googletest", None, "gtest"),
]
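# Each entry above follows the fbcode external_deps tuple convention:
# (project, version, lib name), with the version left as None to take the
# platform default. The two-element entries for zstd and tbb simply omit the
# explicit lib name and use the project's default library.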
ROCKSDB_PREPROCESSOR_FLAGS = [
# Directories with files for #include
"-I" + REPO_PATH + "include/",
"-I" + REPO_PATH,
]
ROCKSDB_ARCH_PREPROCESSOR_FLAGS = {
"x86_64": [
"-DHAVE_SSE42",
"-DHAVE_PCLMUL",
],
}
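# These arch-specific defines only apply to x86_64 builds; they enable the
# SSE4.2/PCLMUL fast paths (e.g. the hardware CRC32c in util/crc32c.cc), while
# other architectures fall back to the portable implementations.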
build_mode = read_config("fbcode", "build_mode")
is_opt_mode = build_mode.startswith("opt")
# -DNDEBUG is added by default in opt mode in fbcode. Adding it again does no
# harm and guards against forgetting it.
ROCKSDB_COMPILER_FLAGS += (["-DNDEBUG"] if is_opt_mode else [])
sanitizer = read_config("fbcode", "sanitizer")
# Do not enable jemalloc when a sanitizer is present. RocksDB further detects
# at runtime whether the binary is actually linked with jemalloc.
ROCKSDB_COMPILER_FLAGS += (["-DROCKSDB_JEMALLOC"] if sanitizer == "" else [])
ROCKSDB_EXTERNAL_DEPS += ([("jemalloc", None, "headers")] if sanitizer == "" else [])
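# For example, a sanitized build (e.g. sanitizer = "address") gets neither
# -DROCKSDB_JEMALLOC nor the jemalloc headers dependency.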
cpp_library(
name = "rocksdb_lib",
srcs = [
"cache/clock_cache.cc",
"cache/lru_cache.cc",
"cache/sharded_cache.cc",
"db/builder.cc",
"db/c.cc",
"db/column_family.cc",
"db/compacted_db_impl.cc",
"db/compaction.cc",
"db/compaction_iterator.cc",
"db/compaction_job.cc",
"db/compaction_picker.cc",
"db/compaction_picker_fifo.cc",
"db/compaction_picker_universal.cc",
"db/convenience.cc",
"db/db_filesnapshot.cc",
"db/db_impl.cc",
"db/db_impl_compaction_flush.cc",
"db/db_impl_debug.cc",
"db/db_impl_experimental.cc",
"db/db_impl_files.cc",
"db/db_impl_open.cc",
"db/db_impl_readonly.cc",
"db/db_impl_secondary.cc",
"db/db_impl_write.cc",
"db/db_info_dumper.cc",
"db/db_iter.cc",
"db/dbformat.cc",
"db/error_handler.cc",
"db/event_helpers.cc",
"db/experimental.cc",
"db/external_sst_file_ingestion_job.cc",
"db/file_indexer.cc",
"db/flush_job.cc",
"db/flush_scheduler.cc",
"db/forward_iterator.cc",
"db/in_memory_stats_history.cc",
"db/internal_stats.cc",
"db/log_reader.cc",
"db/log_writer.cc",
"db/logs_with_prep_tracker.cc",
"db/malloc_stats.cc",
"db/memtable.cc",
"db/memtable_list.cc",
"db/merge_helper.cc",
"db/merge_operator.cc",
"db/range_del_aggregator.cc",
Use only "local" range tombstones during Get (#4449) Summary: Previously, range tombstones were accumulated from every level, which was necessary if a range tombstone in a higher level covered a key in a lower level. However, RangeDelAggregator::AddTombstones's complexity is based on the number of tombstones that are currently stored in it, which is wasteful in the Get case, where we only need to know the highest sequence number of range tombstones that cover the key from higher levels, and compute the highest covering sequence number at the current level. This change introduces this optimization, and removes the use of RangeDelAggregator from the Get path. In the benchmark results, the following command was used to initialize the database: ``` ./db_bench -db=/dev/shm/5k-rts -use_existing_db=false -benchmarks=filluniquerandom -write_buffer_size=1048576 -compression_type=lz4 -target_file_size_base=1048576 -max_bytes_for_level_base=4194304 -value_size=112 -key_size=16 -block_size=4096 -level_compaction_dynamic_level_bytes=true -num=5000000 -max_background_jobs=12 -benchmark_write_rate_limit=20971520 -range_tombstone_width=100 -writes_per_range_tombstone=100 -max_num_range_tombstones=50000 -bloom_bits=8 ``` ...and the following command was used to measure read throughput: ``` ./db_bench -db=/dev/shm/5k-rts/ -use_existing_db=true -benchmarks=readrandom -disable_auto_compactions=true -num=5000000 -reads=100000 -threads=32 ``` The filluniquerandom command was only run once, and the resulting database was used to measure read performance before and after the PR. Both binaries were compiled with `DEBUG_LEVEL=0`. Readrandom results before PR: ``` readrandom : 4.544 micros/op 220090 ops/sec; 16.9 MB/s (63103 of 100000 found) ``` Readrandom results after PR: ``` readrandom : 11.147 micros/op 89707 ops/sec; 6.9 MB/s (63103 of 100000 found) ``` So it's actually slower right now, but this PR paves the way for future optimizations (see #4493). ---- Pull Request resolved: https://github.com/facebook/rocksdb/pull/4449 Differential Revision: D10370575 Pulled By: abhimadan fbshipit-source-id: 9a2e152be1ef36969055c0e9eb4beb0d96c11f4d
6 years ago
"db/range_tombstone_fragmenter.cc",
"db/repair.cc",
"db/snapshot_impl.cc",
"db/table_cache.cc",
"db/table_properties_collector.cc",
"db/transaction_log_impl.cc",
"db/version_builder.cc",
"db/version_edit.cc",
"db/version_set.cc",
"db/wal_manager.cc",
"db/write_batch.cc",
"db/write_batch_base.cc",
"db/write_controller.cc",
"db/write_thread.cc",
"env/env.cc",
"env/env_chroot.cc",
"env/env_encryption.cc",
"env/env_hdfs.cc",
"env/env_posix.cc",
"env/io_posix.cc",
"env/mock_env.cc",
"file/delete_scheduler.cc",
"file/file_util.cc",
"file/filename.cc",
"file/sst_file_manager_impl.cc",
"memtable/alloc_tracker.cc",
"memtable/hash_linklist_rep.cc",
"memtable/hash_skiplist_rep.cc",
"memtable/skiplistrep.cc",
"memtable/vectorrep.cc",
"memtable/write_buffer_manager.cc",
"monitoring/histogram.cc",
"monitoring/histogram_windowing.cc",
"monitoring/instrumented_mutex.cc",
"monitoring/iostats_context.cc",
"monitoring/perf_context.cc",
"monitoring/perf_level.cc",
"monitoring/statistics.cc",
"monitoring/thread_status_impl.cc",
"monitoring/thread_status_updater.cc",
"monitoring/thread_status_updater_debug.cc",
"monitoring/thread_status_util.cc",
"monitoring/thread_status_util_debug.cc",
"options/cf_options.cc",
"options/db_options.cc",
"options/options.cc",
"options/options_helper.cc",
"options/options_parser.cc",
"options/options_sanity_check.cc",
"port/port_posix.cc",
"port/stack_trace.cc",
"table/adaptive_table_factory.cc",
"table/block.cc",
"table/block_based_filter_block.cc",
"table/block_based_table_builder.cc",
"table/block_based_table_factory.cc",
"table/block_based_table_reader.cc",
"table/block_builder.cc",
"table/block_fetcher.cc",
"table/block_prefix_index.cc",
"table/bloom_block.cc",
"table/cuckoo_table_builder.cc",
"table/cuckoo_table_factory.cc",
"table/cuckoo_table_reader.cc",
"table/data_block_footer.cc",
"table/data_block_hash_index.cc",
"table/flush_block_policy.cc",
"table/format.cc",
"table/full_filter_block.cc",
"table/get_context.cc",
"table/index_builder.cc",
"table/iterator.cc",
"table/merging_iterator.cc",
"table/meta_blocks.cc",
"table/partitioned_filter_block.cc",
"table/persistent_cache_helper.cc",
"table/plain_table_builder.cc",
"table/plain_table_factory.cc",
"table/plain_table_index.cc",
"table/plain_table_key_coding.cc",
"table/plain_table_reader.cc",
"table/sst_file_reader.cc",
"table/sst_file_writer.cc",
"table/table_properties.cc",
"table/two_level_iterator.cc",
"tools/dump/db_dump_tool.cc",
"tools/ldb_cmd.cc",
"tools/ldb_tool.cc",
"tools/sst_dump_tool.cc",
"util/arena.cc",
"util/auto_roll_logger.cc",
"util/bloom.cc",
"util/build_version.cc",
"util/coding.cc",
"util/compaction_job_stats_impl.cc",
"util/comparator.cc",
"util/compression_context_cache.cc",
"util/concurrent_arena.cc",
"util/concurrent_task_limiter_impl.cc",
"util/crc32c.cc",
"util/dynamic_bloom.cc",
"util/event_logger.cc",
"util/file_reader_writer.cc",
"util/filter_policy.cc",
"util/hash.cc",
"util/jemalloc_nodump_allocator.cc",
"util/log_buffer.cc",
"util/murmurhash.cc",
"util/random.cc",
"util/rate_limiter.cc",
"util/slice.cc",
"util/status.cc",
"util/string_util.cc",
"util/sync_point.cc",
"util/sync_point_impl.cc",
"util/thread_local.cc",
"util/threadpool_imp.cc",
"util/trace_replay.cc",
"util/transaction_test_util.cc",
"util/xxhash.cc",
"utilities/backupable/backupable_db.cc",
"utilities/blob_db/blob_compaction_filter.cc",
"utilities/blob_db/blob_db.cc",
"utilities/blob_db/blob_db_impl.cc",
"utilities/blob_db/blob_db_impl_filesnapshot.cc",
"utilities/blob_db/blob_dump_tool.cc",
"utilities/blob_db/blob_file.cc",
"utilities/blob_db/blob_log_format.cc",
"utilities/blob_db/blob_log_reader.cc",
"utilities/blob_db/blob_log_writer.cc",
"utilities/cassandra/cassandra_compaction_filter.cc",
"utilities/cassandra/format.cc",
"utilities/cassandra/merge_operator.cc",
"utilities/checkpoint/checkpoint_impl.cc",
"utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc",
"utilities/convenience/info_log_finder.cc",
"utilities/debug.cc",
"utilities/env_mirror.cc",
"utilities/env_timed.cc",
"utilities/leveldb_options/leveldb_options.cc",
"utilities/memory/memory_util.cc",
"utilities/merge_operators/bytesxor.cc",
"utilities/merge_operators/max.cc",
"utilities/merge_operators/put.cc",
"utilities/merge_operators/string_append/stringappend.cc",
"utilities/merge_operators/string_append/stringappend2.cc",
"utilities/merge_operators/uint64add.cc",
"utilities/option_change_migration/option_change_migration.cc",
"utilities/options/options_util.cc",
"utilities/persistent_cache/block_cache_tier.cc",
"utilities/persistent_cache/block_cache_tier_file.cc",
"utilities/persistent_cache/block_cache_tier_metadata.cc",
"utilities/persistent_cache/persistent_cache_tier.cc",
"utilities/persistent_cache/volatile_tier_impl.cc",
"utilities/simulator_cache/sim_cache.cc",
"utilities/table_properties_collectors/compact_on_deletion_collector.cc",
"utilities/trace/file_trace_reader_writer.cc",
"utilities/transactions/optimistic_transaction.cc",
"utilities/transactions/optimistic_transaction_db_impl.cc",
"utilities/transactions/pessimistic_transaction.cc",
"utilities/transactions/pessimistic_transaction_db.cc",
"utilities/transactions/snapshot_checker.cc",
"utilities/transactions/transaction_base.cc",
"utilities/transactions/transaction_db_mutex_impl.cc",
"utilities/transactions/transaction_lock_mgr.cc",
"utilities/transactions/transaction_util.cc",
"utilities/transactions/write_prepared_txn.cc",
"utilities/transactions/write_prepared_txn_db.cc",
"utilities/transactions/write_unprepared_txn.cc",
"utilities/transactions/write_unprepared_txn_db.cc",
"utilities/ttl/db_ttl_impl.cc",
"utilities/write_batch_with_index/write_batch_with_index.cc",
"utilities/write_batch_with_index/write_batch_with_index_internal.cc",
],
auto_headers = AutoHeaders.RECURSIVE_GLOB,
arch_preprocessor_flags = ROCKSDB_ARCH_PREPROCESSOR_FLAGS,
compiler_flags = ROCKSDB_COMPILER_FLAGS,
preprocessor_flags = ROCKSDB_PREPROCESSOR_FLAGS,
deps = [],
external_deps = ROCKSDB_EXTERNAL_DEPS,
)
cpp_library(
name = "rocksdb_test_lib",
srcs = [
"db/db_test_util.cc",
"table/mock_table.cc",
"tools/trace_analyzer_tool.cc",
"util/fault_injection_test_env.cc",
"util/testharness.cc",
"util/testutil.cc",
"utilities/cassandra/test_utils.cc",
],
auto_headers = AutoHeaders.RECURSIVE_GLOB,
arch_preprocessor_flags = ROCKSDB_ARCH_PREPROCESSOR_FLAGS,
compiler_flags = ROCKSDB_COMPILER_FLAGS,
preprocessor_flags = ROCKSDB_PREPROCESSOR_FLAGS,
deps = [":rocksdb_lib"],
external_deps = ROCKSDB_EXTERNAL_DEPS,
)
cpp_library(
name = "rocksdb_tools_lib",
srcs = [
"tools/db_bench_tool.cc",
"tools/trace_analyzer_tool.cc",
"util/testutil.cc",
],
auto_headers = AutoHeaders.RECURSIVE_GLOB,
arch_preprocessor_flags = ROCKSDB_ARCH_PREPROCESSOR_FLAGS,
compiler_flags = ROCKSDB_COMPILER_FLAGS,
preprocessor_flags = ROCKSDB_PREPROCESSOR_FLAGS,
deps = [":rocksdb_lib"],
external_deps = ROCKSDB_EXTERNAL_DEPS,
)
cpp_library(
name = "env_basic_test_lib",
srcs = ["env/env_basic_test.cc"],
auto_headers = AutoHeaders.RECURSIVE_GLOB,
arch_preprocessor_flags = ROCKSDB_ARCH_PREPROCESSOR_FLAGS,
compiler_flags = ROCKSDB_COMPILER_FLAGS,
preprocessor_flags = ROCKSDB_PREPROCESSOR_FLAGS,
deps = [":rocksdb_test_lib"],
external_deps = ROCKSDB_EXTERNAL_DEPS,
)
# [test_name, test_src, test_type]
ROCKS_TESTS = [
[
"arena_test",
"util/arena_test.cc",
"serial",
],
[
"auto_roll_logger_test",
"util/auto_roll_logger_test.cc",
"serial",
],
[
"autovector_test",
"util/autovector_test.cc",
"serial",
],
[
"backupable_db_test",
"utilities/backupable/backupable_db_test.cc",
"parallel",
],
[
"blob_db_test",
"utilities/blob_db/blob_db_test.cc",
"serial",
],
[
"block_based_filter_block_test",
"table/block_based_filter_block_test.cc",
"serial",
],
[
"block_test",
"table/block_test.cc",
"serial",
],
[
"bloom_test",
"util/bloom_test.cc",
"serial",
],
[
"c_test",
"db/c_test.c",
"serial",
],
[
"cache_test",
"cache/cache_test.cc",
"serial",
],
[
"cassandra_format_test",
"utilities/cassandra/cassandra_format_test.cc",
"serial",
],
[
"cassandra_functional_test",
"utilities/cassandra/cassandra_functional_test.cc",
"serial",
],
[
"cassandra_row_merge_test",
"utilities/cassandra/cassandra_row_merge_test.cc",
"serial",
],
[
"cassandra_serialize_test",
"utilities/cassandra/cassandra_serialize_test.cc",
"serial",
],
[
"checkpoint_test",
"utilities/checkpoint/checkpoint_test.cc",
"serial",
],
[
"cleanable_test",
"table/cleanable_test.cc",
"serial",
],
[
"coding_test",
"util/coding_test.cc",
"serial",
],
[
"column_family_test",
"db/column_family_test.cc",
"serial",
],
[
"compact_files_test",
"db/compact_files_test.cc",
"serial",
],
[
"compact_on_deletion_collector_test",
"utilities/table_properties_collectors/compact_on_deletion_collector_test.cc",
"serial",
],
[
"compaction_iterator_test",
"db/compaction_iterator_test.cc",
"serial",
],
[
"compaction_job_stats_test",
"db/compaction_job_stats_test.cc",
"serial",
],
[
"compaction_job_test",
"db/compaction_job_test.cc",
"serial",
],
[
"compaction_picker_test",
"db/compaction_picker_test.cc",
"serial",
],
[
"comparator_db_test",
"db/comparator_db_test.cc",
"serial",
],
[
"corruption_test",
"db/corruption_test.cc",
"serial",
],
[
"crc32c_test",
"util/crc32c_test.cc",
"serial",
],
[
"cuckoo_table_builder_test",
"table/cuckoo_table_builder_test.cc",
"serial",
],
[
"cuckoo_table_db_test",
"db/cuckoo_table_db_test.cc",
"serial",
],
[
"cuckoo_table_reader_test",
"table/cuckoo_table_reader_test.cc",
"serial",
],
[
"data_block_hash_index_test",
"table/data_block_hash_index_test.cc",
"serial",
],
[
"db_basic_test",
"db/db_basic_test.cc",
"serial",
],
[
"db_blob_index_test",
"db/db_blob_index_test.cc",
"serial",
],
[
"db_block_cache_test",
"db/db_block_cache_test.cc",
"serial",
],
[
"db_bloom_filter_test",
"db/db_bloom_filter_test.cc",
"serial",
],
[
"db_compaction_filter_test",
"db/db_compaction_filter_test.cc",
"parallel",
],
[
"db_compaction_test",
"db/db_compaction_test.cc",
"parallel",
],
[
"db_dynamic_level_test",
"db/db_dynamic_level_test.cc",
"serial",
],
[
"db_encryption_test",
"db/db_encryption_test.cc",
"serial",
],
[
"db_flush_test",
"db/db_flush_test.cc",
"serial",
],
[
"db_inplace_update_test",
"db/db_inplace_update_test.cc",
"serial",
],
[
"db_io_failure_test",
"db/db_io_failure_test.cc",
"serial",
],
[
"db_iter_stress_test",
"db/db_iter_stress_test.cc",
"serial",
],
[
"db_iter_test",
"db/db_iter_test.cc",
"serial",
],
[
"db_iterator_test",
"db/db_iterator_test.cc",
"serial",
],
[
"db_log_iter_test",
"db/db_log_iter_test.cc",
"serial",
],
[
"db_memtable_test",
"db/db_memtable_test.cc",
"serial",
],
[
"db_merge_operator_test",
"db/db_merge_operator_test.cc",
"parallel",
],
[
"db_options_test",
"db/db_options_test.cc",
"serial",
],
[
"db_properties_test",
"db/db_properties_test.cc",
"serial",
],
[
"db_range_del_test",
"db/db_range_del_test.cc",
"serial",
],
[
"db_secondary_test",
"db/db_secondary_test.cc",
"serial",
],
[
"db_sst_test",
"db/db_sst_test.cc",
"parallel",
],
[
"db_statistics_test",
"db/db_statistics_test.cc",
"serial",
],
[
"db_table_properties_test",
"db/db_table_properties_test.cc",
"serial",
],
[
"db_tailing_iter_test",
"db/db_tailing_iter_test.cc",
"serial",
],
[
"db_test",
"db/db_test.cc",
"parallel",
],
[
"db_test2",
"db/db_test2.cc",
"serial",
],
[
"db_universal_compaction_test",
"db/db_universal_compaction_test.cc",
"parallel",
],
[
"db_wal_test",
"db/db_wal_test.cc",
"parallel",
],
[
"db_write_test",
"db/db_write_test.cc",
"serial",
],
[
"dbformat_test",
"db/dbformat_test.cc",
"serial",
],
[
"delete_scheduler_test",
"file/delete_scheduler_test.cc",
"serial",
],
[
"deletefile_test",
"db/deletefile_test.cc",
"serial",
],
[
"dynamic_bloom_test",
"util/dynamic_bloom_test.cc",
"serial",
],
[
"env_basic_test",
"env/env_basic_test.cc",
"serial",
],
[
"env_test",
"env/env_test.cc",
"serial",
],
[
"env_timed_test",
"utilities/env_timed_test.cc",
"serial",
],
[
"error_handler_test",
"db/error_handler_test.cc",
"serial",
],
[
"event_logger_test",
"util/event_logger_test.cc",
"serial",
],
[
"external_sst_file_basic_test",
"db/external_sst_file_basic_test.cc",
"serial",
],
[
"external_sst_file_test",
"db/external_sst_file_test.cc",
"parallel",
],
[
"fault_injection_test",
"db/fault_injection_test.cc",
"parallel",
],
[
"file_indexer_test",
"db/file_indexer_test.cc",
"serial",
],
[
"file_reader_writer_test",
"util/file_reader_writer_test.cc",
"serial",
],
[
"filelock_test",
"util/filelock_test.cc",
"serial",
],
[
"filename_test",
"db/filename_test.cc",
"serial",
],
[
"flush_job_test",
"db/flush_job_test.cc",
"serial",
],
[
"full_filter_block_test",
"table/full_filter_block_test.cc",
"serial",
],
[
"hash_table_test",
"utilities/persistent_cache/hash_table_test.cc",
"serial",
],
[
"hash_test",
"util/hash_test.cc",
"serial",
],
[
"heap_test",
"util/heap_test.cc",
"serial",
],
[
"histogram_test",
"monitoring/histogram_test.cc",
"serial",
],
[
"inlineskiplist_test",
"memtable/inlineskiplist_test.cc",
"parallel",
],
[
"iostats_context_test",
"monitoring/iostats_context_test.cc",
"serial",
],
[
"ldb_cmd_test",
"tools/ldb_cmd_test.cc",
"serial",
],
[
"listener_test",
"db/listener_test.cc",
"serial",
],
[
"log_test",
"db/log_test.cc",
"serial",
],
[
"lru_cache_test",
"cache/lru_cache_test.cc",
"serial",
],
[
"manual_compaction_test",
"db/manual_compaction_test.cc",
"parallel",
],
[
"memory_test",
"utilities/memory/memory_test.cc",
"serial",
],
[
"memtable_list_test",
"db/memtable_list_test.cc",
"serial",
],
[
"merge_helper_test",
"db/merge_helper_test.cc",
"serial",
],
[
"merge_test",
"db/merge_test.cc",
"serial",
],
[
"merger_test",
"table/merger_test.cc",
"serial",
],
[
"mock_env_test",
"env/mock_env_test.cc",
"serial",
],
[
"object_registry_test",
"utilities/object_registry_test.cc",
"serial",
],
[
"obsolete_files_test",
"db/obsolete_files_test.cc",
"serial",
],
[
"optimistic_transaction_test",
"utilities/transactions/optimistic_transaction_test.cc",
"serial",
],
[
"option_change_migration_test",
"utilities/option_change_migration/option_change_migration_test.cc",
"serial",
],
[
"options_file_test",
"db/options_file_test.cc",
"serial",
],
[
"options_settable_test",
"options/options_settable_test.cc",
"serial",
],
[
"options_test",
"options/options_test.cc",
"serial",
],
[
"options_util_test",
"utilities/options/options_util_test.cc",
"serial",
],
[
"partitioned_filter_block_test",
"table/partitioned_filter_block_test.cc",
"serial",
],
[
"perf_context_test",
"db/perf_context_test.cc",
"serial",
],
[
"persistent_cache_test",
"utilities/persistent_cache/persistent_cache_test.cc",
"parallel",
],
[
"plain_table_db_test",
"db/plain_table_db_test.cc",
"serial",
],
[
"prefix_test",
"db/prefix_test.cc",
"serial",
],
[
"range_del_aggregator_test",
"db/range_del_aggregator_test.cc",
"serial",
],
[
"range_tombstone_fragmenter_test",
"db/range_tombstone_fragmenter_test.cc",
"serial",
],
[
"rate_limiter_test",
"util/rate_limiter_test.cc",
"serial",
],
[
"reduce_levels_test",
"tools/reduce_levels_test.cc",
"serial",
],
[
"repair_test",
"db/repair_test.cc",
"serial",
],
[
"repeatable_thread_test",
"util/repeatable_thread_test.cc",
"serial",
],
[
"sim_cache_test",
"utilities/simulator_cache/sim_cache_test.cc",
"serial",
],
[
"skiplist_test",
"memtable/skiplist_test.cc",
"serial",
],
[
"slice_transform_test",
"util/slice_transform_test.cc",
"serial",
],
[
"sst_dump_test",
"tools/sst_dump_test.cc",
"serial",
],
[
"sst_file_reader_test",
"table/sst_file_reader_test.cc",
"serial",
],
[
"statistics_test",
"monitoring/statistics_test.cc",
"serial",
],
[
"stringappend_test",
"utilities/merge_operators/string_append/stringappend_test.cc",
"serial",
],
[
"table_properties_collector_test",
"db/table_properties_collector_test.cc",
"serial",
],
[
"table_test",
"table/table_test.cc",
"parallel",
],
[
"thread_list_test",
"util/thread_list_test.cc",
"serial",
],
[
"thread_local_test",
"util/thread_local_test.cc",
"serial",
],
[
"timer_queue_test",
"util/timer_queue_test.cc",
"serial",
],
[
"trace_analyzer_test",
"tools/trace_analyzer_test.cc",
"serial",
],
[
"transaction_test",
"utilities/transactions/transaction_test.cc",
"parallel",
],
[
"ttl_test",
"utilities/ttl/ttl_test.cc",
"serial",
],
[
"util_merge_operators_test",
"utilities/util_merge_operators_test.cc",
"serial",
],
[
"version_builder_test",
"db/version_builder_test.cc",
"serial",
],
[
"version_edit_test",
"db/version_edit_test.cc",
"serial",
],
[
"version_set_test",
"db/version_set_test.cc",
"serial",
],
[
"wal_manager_test",
"db/wal_manager_test.cc",
"serial",
],
[
"write_batch_test",
"db/write_batch_test.cc",
"serial",
],
[
"write_batch_with_index_test",
"utilities/write_batch_with_index/write_batch_with_index_test.cc",
"serial",
],
[
"write_buffer_manager_test",
"memtable/write_buffer_manager_test.cc",
"serial",
],
[
"write_callback_test",
"db/write_callback_test.cc",
"serial",
],
[
"write_controller_test",
"db/write_controller_test.cc",
"serial",
],
[
"write_prepared_transaction_test",
"utilities/transactions/write_prepared_transaction_test.cc",
"parallel",
],
[
"write_unprepared_transaction_test",
"utilities/transactions/write_unprepared_transaction_test.cc",
"parallel",
],
]
# Generate a test rule for each entry in ROCKS_TESTS
# Do not build the tests in opt mode, since SyncPoint and other test code
# will not be included.
[
test_binary(
parallelism = parallelism,
rocksdb_arch_preprocessor_flags = ROCKSDB_ARCH_PREPROCESSOR_FLAGS,
rocksdb_compiler_flags = ROCKSDB_COMPILER_FLAGS,
rocksdb_external_deps = ROCKSDB_EXTERNAL_DEPS,
rocksdb_preprocessor_flags = ROCKSDB_PREPROCESSOR_FLAGS,
test_cc = test_cc,
test_name = test_name,
)
for test_name, test_cc, parallelism in ROCKS_TESTS
if not is_opt_mode
]
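# For reference, a minimal sketch of the test_binary macro that the
# comprehension above calls. This is NOT the actual :defs.bzl; the real macro
# may differ. Parameter names are taken from the call site above, while the
# cpp_unittest wrapper and the use of the "serial"/"parallel" value as a tag
# are assumptions for illustration only.
#
# load("@fbcode_macros//build_defs:cpp_unittest.bzl", "cpp_unittest")
#
# def test_binary(
#         test_name,
#         test_cc,
#         parallelism,
#         rocksdb_arch_preprocessor_flags,
#         rocksdb_compiler_flags,
#         rocksdb_external_deps,
#         rocksdb_preprocessor_flags):
#     cpp_unittest(
#         name = test_name,
#         srcs = [test_cc],
#         arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
#         compiler_flags = rocksdb_compiler_flags,
#         preprocessor_flags = rocksdb_preprocessor_flags,
#         deps = [":rocksdb_test_lib"],
#         external_deps = rocksdb_external_deps,
#         tags = [parallelism],
#     )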