diff --git a/CMakeLists.txt b/CMakeLists.txt index 08b48d2eb..6401ab105 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -751,6 +751,8 @@ set(SOURCES utilities/debug.cc utilities/env_mirror.cc utilities/env_timed.cc + utilities/fault_injection_env.cc + utilities/fault_injection_fs.cc utilities/leveldb_options/leveldb_options.cc utilities/memory/memory_util.cc utilities/merge_operators/bytesxor.cc @@ -1172,8 +1174,6 @@ if(WITH_TESTS) db/db_test_util.cc monitoring/thread_status_updater_debug.cc table/mock_table.cc - test_util/fault_injection_test_env.cc - test_util/fault_injection_test_fs.cc utilities/cassandra/test_utils.cc ) enable_testing() diff --git a/Makefile b/Makefile index ccbf8e054..9a5da537b 100644 --- a/Makefile +++ b/Makefile @@ -636,18 +636,12 @@ LIBRARY=$(SHARED1) TEST_LIBRARY=$(SHARED_TEST_LIBRARY) TOOLS_LIBRARY=$(SHARED_TOOLS_LIBRARY) STRESS_LIBRARY=$(SHARED_STRESS_LIBRARY) -ifeq ($(DEBUG_LEVEL),0) -STRESS_LIBRARY_RUNTIME_DEPS=$(SHARED_TOOLS_LIBRARY) -else -STRESS_LIBRARY_RUNTIME_DEPS=$(SHARED_TEST_LIBRARY) $(SHARED_TOOLS_LIBRARY) -endif CLOUD_LIBRARY=$(SHARED_CLOUD_LIBRARY) else LIBRARY=$(STATIC_LIBRARY) TEST_LIBRARY=$(STATIC_TEST_LIBRARY) TOOLS_LIBRARY=$(STATIC_TOOLS_LIBRARY) STRESS_LIBRARY=$(STATIC_STRESS_LIBRARY) -STRESS_LIBRARY_RUNTIME_DEPS= endif ROCKSDB_MAJOR = $(shell egrep "ROCKSDB_MAJOR.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3) @@ -1166,29 +1160,23 @@ $(STATIC_TEST_LIBRARY): $(TEST_OBJECTS) $(AM_V_AR)rm -f $@ $(SHARED_TEST_LIBRARY) $(AM_V_at)$(AR) $(ARFLAGS) $@ $^ -$(STATIC_TOOLS_LIBRARY): $(BENCH_OBJECTS) $(TOOL_OBJECTS) $(TESTUTIL) +$(STATIC_TOOLS_LIBRARY): $(BENCH_OBJECTS) $(TOOL_OBJECTS) $(AM_V_AR)rm -f $@ $(SHARED_TOOLS_LIBRARY) $(AM_V_at)$(AR) $(ARFLAGS) $@ $^ -ifeq ($(DEBUG_LEVEL),0) -$(STATIC_STRESS_LIBRARY): $(TESTUTIL) $(ANALYZE_OBJECTS) $(STRESS_OBJECTS) +$(STATIC_STRESS_LIBRARY): $(ANALYZE_OBJECTS) $(STRESS_OBJECTS) $(AM_V_AR)rm -f $@ $(SHARED_STRESS_LIBRARY) $(AM_V_at)$(AR) $(ARFLAGS) $@ $^ -else -$(STATIC_STRESS_LIBRARY): $(TEST_OBJECTS) $(ANALYZE_OBJECTS) $(STRESS_OBJECTS) - $(AM_V_AR)rm -f $@ $(SHARED_STRESS_LIBRARY) - $(AM_V_at)$(AR) $(ARFLAGS) $@ $^ -endif $(SHARED_TEST_LIBRARY): $(TEST_OBJECTS) $(SHARED1) $(AM_V_AR)rm -f $@ $(STATIC_TEST_LIBRARY) $(AM_SHARE) -$(SHARED_TOOLS_LIBRARY): $(TOOL_OBJECTS) $(TESTUTIL) $(SHARED1) +$(SHARED_TOOLS_LIBRARY): $(TOOL_OBJECTS) $(SHARED1) $(AM_V_AR)rm -f $@ $(STATIC_TOOLS_LIBRARY) $(AM_SHARE) -$(SHARED_STRESS_LIBRARY): $(ANALYZE_OBJECTS) $(STRESS_OBJECTS) $(STRESS_LIBRARY_RUNTIME_DEPS) $(SHARED1) +$(SHARED_STRESS_LIBRARY): $(ANALYZE_OBJECTS) $(STRESS_OBJECTS) $(SHARED_TOOLS_LIBRARY) $(SHARED1) $(AM_V_AR)rm -f $@ $(STATIC_STRESS_LIBRARY) $(AM_SHARE) @@ -1216,13 +1204,13 @@ cache_bench: $(OBJ_DIR)/cache/cache_bench.o $(LIBRARY) persistent_cache_bench: $(OBJ_DIR)/utilities/persistent_cache/persistent_cache_bench.o $(LIBRARY) $(AM_LINK) -memtablerep_bench: $(OBJ_DIR)/memtable/memtablerep_bench.o $(TESTUTIL) $(LIBRARY) +memtablerep_bench: $(OBJ_DIR)/memtable/memtablerep_bench.o $(LIBRARY) $(AM_LINK) filter_bench: $(OBJ_DIR)/util/filter_bench.o $(LIBRARY) $(AM_LINK) -db_stress: $(OBJ_DIR)/db_stress_tool/db_stress.o $(STRESS_LIBRARY) $(STRESS_LIBRARY_RUNTIME_DEPS) $(LIBRARY) +db_stress: $(OBJ_DIR)/db_stress_tool/db_stress.o $(STRESS_LIBRARY) $(TOOLS_LIBRARY) $(LIBRARY) $(AM_LINK) write_stress: $(OBJ_DIR)/tools/write_stress.o $(LIBRARY) @@ -1231,7 +1219,7 @@ write_stress: $(OBJ_DIR)/tools/write_stress.o $(LIBRARY) db_sanity_test: $(OBJ_DIR)/tools/db_sanity_test.o $(LIBRARY) $(AM_LINK) -db_repl_stress: 
$(OBJ_DIR)/tools/db_repl_stress.o $(TESTUTIL) $(LIBRARY) +db_repl_stress: $(OBJ_DIR)/tools/db_repl_stress.o $(LIBRARY) $(AM_LINK) arena_test: $(OBJ_DIR)/memory/arena_test.o $(TEST_LIBRARY) $(LIBRARY) diff --git a/TARGETS b/TARGETS index becaff39f..c888ce5b0 100644 --- a/TARGETS +++ b/TARGETS @@ -332,6 +332,8 @@ cpp_library( "utilities/debug.cc", "utilities/env_mirror.cc", "utilities/env_timed.cc", + "utilities/fault_injection_env.cc", + "utilities/fault_injection_fs.cc", "utilities/leveldb_options/leveldb_options.cc", "utilities/memory/memory_util.cc", "utilities/merge_operators/bytesxor.cc", @@ -385,8 +387,6 @@ cpp_library( srcs = [ "db/db_test_util.cc", "table/mock_table.cc", - "test_util/fault_injection_test_env.cc", - "test_util/fault_injection_test_fs.cc", "test_util/testharness.cc", "test_util/testutil.cc", "tools/block_cache_analyzer/block_cache_trace_analyzer.cc", diff --git a/db/column_family_test.cc b/db/column_family_test.cc index c4181d9b8..04ef9a30f 100644 --- a/db/column_family_test.cc +++ b/db/column_family_test.cc @@ -22,26 +22,18 @@ #include "rocksdb/env.h" #include "rocksdb/iterator.h" #include "rocksdb/utilities/object_registry.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" #include "test_util/testharness.h" #include "test_util/testutil.h" #include "util/coding.h" #include "util/string_util.h" +#include "utilities/fault_injection_env.h" #include "utilities/merge_operators.h" namespace ROCKSDB_NAMESPACE { static const int kValueSize = 1000; -namespace { -std::string RandomString(Random* rnd, int len) { - std::string r; - test::RandomString(rnd, len, &r); - return r; -} -} // anonymous namespace - // counts how many operations were performed class EnvCounter : public EnvWrapper { public: @@ -109,11 +101,11 @@ class ColumnFamilyTestBase : public testing::Test { // preserves the implementation that was in place when all of the // magic values in this file were picked. 
*storage = std::string(kValueSize, ' '); - return Slice(*storage); } else { Random r(k); - return test::RandomString(&r, kValueSize, storage); + *storage = r.RandomString(kValueSize); } + return Slice(*storage); } void Build(int base, int n, int flush_every = 0) { @@ -329,11 +321,11 @@ class ColumnFamilyTestBase : public testing::Test { // 10 bytes for key, rest is value if (!save) { ASSERT_OK(Put(cf, test::RandomKey(&rnd_, 11), - RandomString(&rnd_, key_value_size - 10))); + rnd_.RandomString(key_value_size - 10))); } else { std::string key = test::RandomKey(&rnd_, 11); keys_[cf].insert(key); - ASSERT_OK(Put(cf, key, RandomString(&rnd_, key_value_size - 10))); + ASSERT_OK(Put(cf, key, rnd_.RandomString(key_value_size - 10))); } } db_->FlushWAL(false); diff --git a/db/comparator_db_test.cc b/db/comparator_db_test.cc index 49f287a97..4eac91e2a 100644 --- a/db/comparator_db_test.cc +++ b/db/comparator_db_test.cc @@ -13,6 +13,7 @@ #include "test_util/testutil.h" #include "util/hash.h" #include "util/kv_map.h" +#include "util/random.h" #include "util/string_util.h" #include "utilities/merge_operators.h" @@ -342,12 +343,12 @@ TEST_P(ComparatorDBTest, SimpleSuffixReverseComparator) { std::vector source_prefixes; // Randomly generate 5 prefixes for (int i = 0; i < 5; i++) { - source_prefixes.push_back(test::RandomHumanReadableString(&rnd, 8)); + source_prefixes.push_back(rnd.HumanReadableString(8)); } for (int j = 0; j < 20; j++) { int prefix_index = rnd.Uniform(static_cast(source_prefixes.size())); std::string key = source_prefixes[prefix_index] + - test::RandomHumanReadableString(&rnd, rnd.Uniform(8)); + rnd.HumanReadableString(rnd.Uniform(8)); source_strings.push_back(key); } diff --git a/db/corruption_test.cc b/db/corruption_test.cc index 4641ec7b1..cae3ad728 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -9,13 +9,13 @@ #ifndef ROCKSDB_LITE -#include "rocksdb/db.h" - #include #include #include #include + #include + #include "db/db_impl/db_impl.h" #include "db/db_test_util.h" #include "db/log_format.h" @@ -24,6 +24,7 @@ #include "file/filename.h" #include "rocksdb/cache.h" #include "rocksdb/convenience.h" +#include "rocksdb/db.h" #include "rocksdb/env.h" #include "rocksdb/table.h" #include "rocksdb/write_batch.h" @@ -31,6 +32,7 @@ #include "table/meta_blocks.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "util/random.h" #include "util/string_util.h" namespace ROCKSDB_NAMESPACE { @@ -219,11 +221,11 @@ class CorruptionTest : public testing::Test { // preserves the implementation that was in place when all of the // magic values in this file were picked. 
*storage = std::string(kValueSize, ' '); - return Slice(*storage); } else { Random r(k); - return test::RandomString(&r, kValueSize, storage); + *storage = r.RandomString(kValueSize); } + return Slice(*storage); } }; diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc index 3cf52f71b..3795a08b7 100644 --- a/db/db_basic_test.cc +++ b/db/db_basic_test.cc @@ -16,10 +16,11 @@ #include "rocksdb/utilities/debug.h" #include "table/block_based/block_based_table_reader.h" #include "table/block_based/block_builder.h" -#include "test_util/fault_injection_test_env.h" #if !defined(ROCKSDB_LITE) #include "test_util/sync_point.h" #endif +#include "util/random.h" +#include "utilities/fault_injection_env.h" #include "utilities/merge_operators.h" #include "utilities/merge_operators/string_append/stringappend.h" @@ -2040,7 +2041,7 @@ TEST_F(DBBasicTest, MultiGetIOBufferOverrun) { for (int i = 0; i < 100; ++i) { // Make the value compressible. A purely random string doesn't compress // and the resultant data block will not be compressed - std::string value(RandomString(&rnd, 128) + zero_str); + std::string value(rnd.RandomString(128) + zero_str); assert(Put(Key(i), value) == Status::OK()); } Flush(); @@ -2430,7 +2431,7 @@ class DBBasicTestMultiGet : public DBTestBase { for (int i = 0; i < 100; ++i) { // Make the value compressible. A purely random string doesn't compress // and the resultant data block will not be compressed - values_.emplace_back(RandomString(&rnd, 128) + zero_str); + values_.emplace_back(rnd.RandomString(128) + zero_str); assert(((num_cfs == 1) ? Put(Key(i), values_[i]) : Put(cf, Key(i), values_[i])) == Status::OK()); } @@ -2442,7 +2443,7 @@ class DBBasicTestMultiGet : public DBTestBase { for (int i = 0; i < 100; ++i) { // block cannot gain space by compression - uncompressable_values_.emplace_back(RandomString(&rnd, 256) + '\0'); + uncompressable_values_.emplace_back(rnd.RandomString(256) + '\0'); std::string tmp_key = "a" + Key(i); assert(((num_cfs == 1) ? Put(tmp_key, uncompressable_values_[i]) : Put(cf, tmp_key, uncompressable_values_[i])) == @@ -3210,7 +3211,7 @@ TEST_F(DBBasicTest, PointLookupDeadline) { Random rnd(301); for (int i = 0; i < 400; ++i) { std::string key = "k" + ToString(i); - Put(key, RandomString(&rnd, 100)); + Put(key, rnd.RandomString(100)); } Flush(); diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index 0841a655e..045967de3 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -7,10 +7,12 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
#include + #include "cache/lru_cache.h" #include "db/db_test_util.h" #include "port/stack_trace.h" #include "util/compression.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -764,7 +766,7 @@ TEST_F(DBBlockCacheTest, CompressedCache) { std::string str; for (int i = 0; i < num_iter; i++) { if (i % 4 == 0) { // high compression ratio - str = RandomString(&rnd, 1000); + str = rnd.RandomString(1000); } values.push_back(str); ASSERT_OK(Put(1, Key(i), values[i])); @@ -851,7 +853,7 @@ TEST_F(DBBlockCacheTest, CacheCompressionDict) { for (int i = 0; i < kNumFiles; ++i) { ASSERT_EQ(i, NumTableFilesAtLevel(0, 0)); for (int j = 0; j < kNumEntriesPerFile; ++j) { - std::string value = RandomString(&rnd, kNumBytesPerEntry); + std::string value = rnd.RandomString(kNumBytesPerEntry); ASSERT_OK(Put(Key(j * kNumFiles + i), value.c_str())); } ASSERT_OK(Flush()); diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc index eb86ec2dc..aae1c1100 100644 --- a/db/db_compaction_test.cc +++ b/db/db_compaction_test.cc @@ -14,9 +14,10 @@ #include "rocksdb/experimental.h" #include "rocksdb/sst_file_writer.h" #include "rocksdb/utilities/convenience.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" #include "util/concurrent_task_limiter_impl.h" +#include "util/random.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { @@ -295,7 +296,7 @@ TEST_P(DBCompactionTestWithParam, CompactionDeletionTrigger) { const int kTestSize = kCDTKeysPerBuffer * 1024; std::vector values; for (int k = 0; k < kTestSize; ++k) { - values.push_back(RandomString(&rnd, kCDTValueSize)); + values.push_back(rnd.RandomString(kCDTValueSize)); ASSERT_OK(Put(Key(k), values[k])); } dbfull()->TEST_WaitForFlushMemTable(); @@ -343,7 +344,7 @@ TEST_P(DBCompactionTestWithParam, CompactionsPreserveDeletes) { const int kTestSize = kCDTKeysPerBuffer; std::vector values; for (int k = 0; k < kTestSize; ++k) { - values.push_back(RandomString(&rnd, kCDTValueSize)); + values.push_back(rnd.RandomString(kCDTValueSize)); ASSERT_OK(Put(Key(k), values[k])); } @@ -408,7 +409,7 @@ TEST_F(DBCompactionTest, SkipStatsUpdateTest) { const int kTestSize = kCDTKeysPerBuffer * 512; std::vector values; for (int k = 0; k < kTestSize; ++k) { - values.push_back(RandomString(&rnd, kCDTValueSize)); + values.push_back(rnd.RandomString(kCDTValueSize)); ASSERT_OK(Put(Key(k), values[k])); } @@ -555,7 +556,7 @@ TEST_P(DBCompactionTestWithParam, CompactionDeletionTriggerReopen) { const int kTestSize = kCDTKeysPerBuffer * 512; std::vector values; for (int k = 0; k < kTestSize; ++k) { - values.push_back(RandomString(&rnd, kCDTValueSize)); + values.push_back(rnd.RandomString(kCDTValueSize)); ASSERT_OK(Put(Key(k), values[k])); } dbfull()->TEST_WaitForFlushMemTable(); @@ -673,7 +674,7 @@ TEST_F(DBCompactionTest, DisableStatsUpdateReopen) { const int kTestSize = kCDTKeysPerBuffer * 512; std::vector values; for (int k = 0; k < kTestSize; ++k) { - values.push_back(RandomString(&rnd, kCDTValueSize)); + values.push_back(rnd.RandomString(kCDTValueSize)); ASSERT_OK(Put(Key(k), values[k])); } dbfull()->TEST_WaitForFlushMemTable(); @@ -736,7 +737,7 @@ TEST_P(DBCompactionTestWithParam, CompactionTrigger) { std::vector values; // Write 100KB (100 values, each 1K) for (int i = 0; i < kNumKeysPerFile; i++) { - values.push_back(RandomString(&rnd, 990)); + values.push_back(rnd.RandomString(990)); ASSERT_OK(Put(1, Key(i), values[i])); } // put extra key to trigger flush @@ -748,7 +749,7 @@ TEST_P(DBCompactionTestWithParam, 
CompactionTrigger) { // generate one more file in level-0, and should trigger level-0 compaction std::vector values; for (int i = 0; i < kNumKeysPerFile; i++) { - values.push_back(RandomString(&rnd, 990)); + values.push_back(rnd.RandomString(990)); ASSERT_OK(Put(1, Key(i), values[i])); } // put extra key to trigger flush @@ -867,7 +868,7 @@ TEST_P(DBCompactionTestWithParam, CompactionsGenerateMultipleFiles) { ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); std::vector values; for (int i = 0; i < 80; i++) { - values.push_back(RandomString(&rnd, 100000)); + values.push_back(rnd.RandomString(100000)); ASSERT_OK(Put(1, Key(i), values[i])); } @@ -1105,7 +1106,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) { Random rnd(301); std::vector values; for (int i = 0; i < num_keys; i++) { - values.push_back(RandomString(&rnd, value_size)); + values.push_back(rnd.RandomString(value_size)); ASSERT_OK(Put(Key(i), values[i])); } @@ -1177,7 +1178,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) { std::map values; for (size_t i = 0; i < ranges.size(); i++) { for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) { - values[j] = RandomString(&rnd, value_size); + values[j] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(j), values[j])); } ASSERT_OK(Flush()); @@ -1223,7 +1224,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) { }; for (size_t i = 0; i < ranges.size(); i++) { for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) { - values[j] = RandomString(&rnd, value_size); + values[j] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(j), values[j])); } ASSERT_OK(Flush()); @@ -1268,14 +1269,14 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveTargetLevel) { // file 1 [0 => 300] for (int32_t i = 0; i <= 300; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); // file 2 [600 => 700] for (int32_t i = 600; i <= 700; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); @@ -1349,14 +1350,14 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) { // file 1 [0 => 100] for (int32_t i = 0; i < 100; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); // file 2 [100 => 300] for (int32_t i = 100; i < 300; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); @@ -1377,7 +1378,7 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) { // file 3 [ 0 => 200] for (int32_t i = 0; i < 200; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); @@ -1409,21 +1410,21 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) { TEST_SYNC_POINT("DBCompaction::ManualPartial:1"); // file 4 [300 => 400) for (int32_t i = 300; i <= 400; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); // file 5 [400 => 500) for (int32_t i = 400; i <= 500; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); // file 6 [500 => 600) for (int32_t i = 500; i <= 600; i++) { - values[i] = 
RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } // Second non-trivial compaction is triggered @@ -1491,14 +1492,14 @@ TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) { // file 1 [0 => 100] for (int32_t i = 0; i < 100; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); // file 2 [100 => 300] for (int32_t i = 100; i < 300; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); @@ -1517,7 +1518,7 @@ TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) { // file 3 [ 0 => 200] for (int32_t i = 0; i < 200; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); @@ -1549,7 +1550,7 @@ TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) { ASSERT_OK(Flush()); dbfull()->TEST_WaitForFlushMemTable(); } - values[j] = RandomString(&rnd, value_size); + values[j] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(j), values[j])); } } @@ -1620,14 +1621,14 @@ TEST_F(DBCompactionTest, DeleteFileRange) { // file 1 [0 => 100] for (int32_t i = 0; i < 100; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); // file 2 [100 => 300] for (int32_t i = 100; i < 300; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); @@ -1643,7 +1644,7 @@ TEST_F(DBCompactionTest, DeleteFileRange) { // file 3 [ 0 => 200] for (int32_t i = 0; i < 200; i++) { - values[i] = RandomString(&rnd, value_size); + values[i] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); @@ -1655,7 +1656,7 @@ TEST_F(DBCompactionTest, DeleteFileRange) { ASSERT_OK(Flush()); dbfull()->TEST_WaitForFlushMemTable(); } - values[j] = RandomString(&rnd, value_size); + values[j] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(j), values[j])); } } @@ -1742,7 +1743,7 @@ TEST_F(DBCompactionTest, DeleteFilesInRanges) { for (auto i = 0; i < 10; i++) { for (auto j = 0; j < 100; j++) { auto k = i * 100 + j; - values[k] = RandomString(&rnd, value_size); + values[k] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(k), values[k])); } ASSERT_OK(Flush()); @@ -1874,7 +1875,7 @@ TEST_F(DBCompactionTest, DeleteFileRangeFileEndpointsOverlapBug) { // would cause `1 -> vals[0]` (an older key) to reappear. 
std::string vals[kNumL0Files]; for (int i = 0; i < kNumL0Files; ++i) { - vals[i] = RandomString(&rnd, kValSize); + vals[i] = rnd.RandomString(kValSize); Put(Key(i), vals[i]); Put(Key(i + 1), vals[i]); Flush(); @@ -1916,7 +1917,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveToLastLevelWithFiles) { std::vector values; // File with keys [ 0 => 99 ] for (int i = 0; i < 100; i++) { - values.push_back(RandomString(&rnd, value_size)); + values.push_back(rnd.RandomString(value_size)); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); @@ -1934,7 +1935,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveToLastLevelWithFiles) { // File with keys [ 100 => 199 ] for (int i = 100; i < 200; i++) { - values.push_back(RandomString(&rnd, value_size)); + values.push_back(rnd.RandomString(value_size)); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Flush()); @@ -2329,7 +2330,7 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) { for (int i = 0; i <= max_key_level_insert; i++) { // each value is 10K - ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000))); } ASSERT_OK(Flush(1)); dbfull()->TEST_WaitForCompact(); @@ -2387,7 +2388,7 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) { ReopenWithColumnFamilies({"default", "pikachu"}, options); for (int i = max_key_level_insert / 2; i <= max_key_universal_insert; i++) { - ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000))); } dbfull()->Flush(FlushOptions()); ASSERT_OK(Flush(1)); @@ -2682,7 +2683,7 @@ TEST_P(DBCompactionTestWithParam, DISABLED_CompactFilesOnLevelCompaction) { Random rnd(301); for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) { - ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize))); + ASSERT_OK(Put(1, ToString(key), rnd.RandomString(kTestValueSize))); } dbfull()->TEST_WaitForFlushMemTable(handles_[1]); dbfull()->TEST_WaitForCompact(); @@ -2758,8 +2759,8 @@ TEST_P(DBCompactionTestWithParam, PartialCompactionFailure) { std::vector keys; std::vector values; for (int k = 0; k < kNumInsertedKeys; ++k) { - keys.emplace_back(RandomString(&rnd, kKeySize)); - values.emplace_back(RandomString(&rnd, kKvSize - kKeySize)); + keys.emplace_back(rnd.RandomString(kKeySize)); + values.emplace_back(rnd.RandomString(kKvSize - kKeySize)); ASSERT_OK(Put(Slice(keys[k]), Slice(values[k]))); dbfull()->TEST_WaitForFlushMemTable(); } @@ -2825,7 +2826,7 @@ TEST_P(DBCompactionTestWithParam, DeleteMovedFileAfterCompaction) { for (int i = 0; i < 2; ++i) { // Create 1MB sst file for (int j = 0; j < 100; ++j) { - ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024))); + ASSERT_OK(Put(Key(i * 50 + j), rnd.RandomString(10 * 1024))); } ASSERT_OK(Flush()); } @@ -2860,7 +2861,7 @@ TEST_P(DBCompactionTestWithParam, DeleteMovedFileAfterCompaction) { for (int i = 0; i < 2; ++i) { // Create 1MB sst file for (int j = 0; j < 100; ++j) { - ASSERT_OK(Put(Key(i * 50 + j + 100), RandomString(&rnd, 10 * 1024))); + ASSERT_OK(Put(Key(i * 50 + j + 100), rnd.RandomString(10 * 1024))); } ASSERT_OK(Flush()); } @@ -3118,7 +3119,7 @@ TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) { std::vector values; // File with keys [ 0 => 99 ] for (int i = 0; i < 100; i++) { - values.push_back(RandomString(&rnd, value_size)); + values.push_back(rnd.RandomString(value_size)); ASSERT_OK(Put(ShortKey(i), values[i])); } ASSERT_OK(Flush()); @@ -3135,7 +3136,7 @@ TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) { // File with 
keys [ 100 => 199 ] for (int i = 100; i < 200; i++) { - values.push_back(RandomString(&rnd, value_size)); + values.push_back(rnd.RandomString(value_size)); ASSERT_OK(Put(ShortKey(i), values[i])); } ASSERT_OK(Flush()); @@ -3153,7 +3154,7 @@ TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) { // File with keys [ 200 => 299 ] for (int i = 200; i < 300; i++) { - values.push_back(RandomString(&rnd, value_size)); + values.push_back(rnd.RandomString(value_size)); ASSERT_OK(Put(ShortKey(i), values[i])); } ASSERT_OK(Flush()); @@ -3197,7 +3198,7 @@ TEST_P(DBCompactionTestWithParam, IntraL0Compaction) { const size_t kValueSize = 1 << 20; Random rnd(301); - std::string value(RandomString(&rnd, kValueSize)); + std::string value(rnd.RandomString(kValueSize)); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency( {{"LevelCompactionPicker::PickCompactionBySize:0", @@ -3261,7 +3262,7 @@ TEST_P(DBCompactionTestWithParam, IntraL0CompactionDoesNotObsoleteDeletions) { const size_t kValueSize = 1 << 20; Random rnd(301); - std::string value(RandomString(&rnd, kValueSize)); + std::string value(rnd.RandomString(kValueSize)); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency( {{"LevelCompactionPicker::PickCompactionBySize:0", @@ -3480,7 +3481,7 @@ TEST_F(DBCompactionTest, CompactBottomLevelFilesWithDeletions) { for (int i = 0; i < kNumLevelFiles; ++i) { for (int j = 0; j < kNumKeysPerFile; ++j) { ASSERT_OK( - Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize))); + Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } if (i == kNumLevelFiles - 1) { snapshot = db_->GetSnapshot(); @@ -3552,7 +3553,7 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) { for (int i = 0; i < kNumLevelFiles; ++i) { for (int j = 0; j < kNumKeysPerFile; ++j) { ASSERT_OK( - Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize))); + Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } Flush(); } @@ -3598,7 +3599,7 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) { for (int i = 0; i < kNumLevelFiles; ++i) { for (int j = 0; j < kNumKeysPerFile; ++j) { ASSERT_OK( - Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize))); + Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } Flush(); } @@ -3693,7 +3694,7 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) { // Add two L6 files with key ranges: [1 .. 100], [101 .. 200]. Random rnd(301); for (int i = 1; i <= 100; ++i) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize))); + ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize))); } Flush(); // Get the first file's creation time. This will be the oldest file in the @@ -3706,7 +3707,7 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) { // Add 1 hour and do another flush. env_->addon_time_.fetch_add(1 * 60 * 60); for (int i = 101; i <= 200; ++i) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize))); + ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize))); } Flush(); MoveFilesToLevel(6); @@ -3715,12 +3716,12 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) { env_->addon_time_.fetch_add(1 * 60 * 60); // Add two L4 files with key ranges: [1 .. 50], [51 .. 150]. 
for (int i = 1; i <= 50; ++i) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize))); + ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize))); } Flush(); env_->addon_time_.fetch_add(1 * 60 * 60); for (int i = 51; i <= 150; ++i) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize))); + ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize))); } Flush(); MoveFilesToLevel(4); @@ -3729,7 +3730,7 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) { env_->addon_time_.fetch_add(1 * 60 * 60); // Add one L1 file with key range: [26, 75]. for (int i = 26; i <= 75; ++i) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize))); + ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize))); } Flush(); dbfull()->TEST_WaitForCompact(); @@ -3840,8 +3841,8 @@ TEST_F(DBCompactionTest, LevelPeriodicCompaction) { Random rnd(301); for (int i = 0; i < kNumLevelFiles; ++i) { for (int j = 0; j < kNumKeysPerFile; ++j) { - ASSERT_OK(Put(Key(i * kNumKeysPerFile + j), - RandomString(&rnd, kValueSize))); + ASSERT_OK( + Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } Flush(); } @@ -3935,7 +3936,7 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithOldDB) { for (int i = 0; i < kNumFiles; ++i) { for (int j = 0; j < kNumKeysPerFile; ++j) { ASSERT_OK( - Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize))); + Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } Flush(); // Move the first two files to L2. @@ -3998,7 +3999,7 @@ TEST_F(DBCompactionTest, LevelPeriodicAndTtlCompaction) { for (int i = 0; i < kNumLevelFiles; ++i) { for (int j = 0; j < kNumKeysPerFile; ++j) { ASSERT_OK( - Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize))); + Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } Flush(); } @@ -4109,7 +4110,7 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithCompactionFilters) { for (int i = 0; i < kNumLevelFiles; ++i) { for (int j = 0; j < kNumKeysPerFile; ++j) { ASSERT_OK( - Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize))); + Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } Flush(); } @@ -4169,7 +4170,7 @@ TEST_F(DBCompactionTest, CompactRangeDelayedByL0FileCount) { Random rnd(301); for (int j = 0; j < kNumL0FilesLimit - 1; ++j) { for (int k = 0; k < 2; ++k) { - ASSERT_OK(Put(Key(k), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(k), rnd.RandomString(1024))); } Flush(); } @@ -4223,7 +4224,7 @@ TEST_F(DBCompactionTest, CompactRangeDelayedByImmMemTableCount) { Random rnd(301); for (int j = 0; j < kNumImmMemTableLimit - 1; ++j) { - ASSERT_OK(Put(Key(0), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(0), rnd.RandomString(1024))); FlushOptions flush_opts; flush_opts.wait = false; flush_opts.allow_write_stall = true; @@ -4271,7 +4272,7 @@ TEST_F(DBCompactionTest, CompactRangeShutdownWhileDelayed) { Random rnd(301); for (int j = 0; j < kNumL0FilesLimit - 1; ++j) { for (int k = 0; k < 2; ++k) { - ASSERT_OK(Put(1, Key(k), RandomString(&rnd, 1024))); + ASSERT_OK(Put(1, Key(k), rnd.RandomString(1024))); } Flush(1); } @@ -4331,7 +4332,7 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) { flush_opts.allow_write_stall = true; for (int i = 0; i < kNumL0FilesLimit - 1; ++i) { for (int j = 0; j < 2; ++j) { - ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(j), rnd.RandomString(1024))); } dbfull()->Flush(flush_opts); } @@ -4342,9 +4343,9 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) { }); 
TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PreFlush"); - Put(ToString(0), RandomString(&rnd, 1024)); + Put(ToString(0), rnd.RandomString(1024)); dbfull()->Flush(flush_opts); - Put(ToString(0), RandomString(&rnd, 1024)); + Put(ToString(0), rnd.RandomString(1024)); TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PostFlush"); manual_compaction_thread.join(); @@ -4784,7 +4785,7 @@ TEST_P(CompactionPriTest, Test) { RandomShuffle(std::begin(keys), std::end(keys), rnd.Next()); for (int i = 0; i < kNKeys; i++) { - ASSERT_OK(Put(Key(keys[i]), RandomString(&rnd, 102))); + ASSERT_OK(Put(Key(keys[i]), rnd.RandomString(102))); } dbfull()->TEST_WaitForCompact(); @@ -4826,7 +4827,7 @@ TEST_F(DBCompactionTest, PartialManualCompaction) { Random rnd(301); for (auto i = 0; i < 8; ++i) { for (auto j = 0; j < 10; ++j) { - Merge("foo", RandomString(&rnd, 1024)); + Merge("foo", rnd.RandomString(1024)); } Flush(); } @@ -4858,8 +4859,8 @@ TEST_F(DBCompactionTest, ManualCompactionFailsInReadOnlyMode) { Random rnd(301); for (int i = 0; i < kNumL0Files; ++i) { // Make sure files are overlapping in key-range to prevent trivial move. - Put("key1", RandomString(&rnd, 1024)); - Put("key2", RandomString(&rnd, 1024)); + Put("key1", rnd.RandomString(1024)); + Put("key2", rnd.RandomString(1024)); Flush(); } ASSERT_EQ(kNumL0Files, NumTableFilesAtLevel(0)); @@ -4868,7 +4869,7 @@ TEST_F(DBCompactionTest, ManualCompactionFailsInReadOnlyMode) { mock_env->SetFilesystemActive(false); // Make sure this is outside `CompactRange`'s range so that it doesn't fail // early trying to flush memtable. - ASSERT_NOK(Put("key3", RandomString(&rnd, 1024))); + ASSERT_NOK(Put("key3", rnd.RandomString(1024))); // In the bug scenario, the first manual compaction would fail and forget to // unregister itself, causing the second one to hang forever due to conflict @@ -4907,7 +4908,7 @@ TEST_F(DBCompactionTest, ManualCompactionBottomLevelOptimized) { for (auto i = 0; i < 8; ++i) { for (auto j = 0; j < 10; ++j) { ASSERT_OK( - Put("foo" + std::to_string(i * 10 + j), RandomString(&rnd, 1024))); + Put("foo" + std::to_string(i * 10 + j), rnd.RandomString(1024))); } Flush(); } @@ -4917,7 +4918,7 @@ TEST_F(DBCompactionTest, ManualCompactionBottomLevelOptimized) { for (auto i = 0; i < 8; ++i) { for (auto j = 0; j < 10; ++j) { ASSERT_OK( - Put("bar" + std::to_string(i * 10 + j), RandomString(&rnd, 1024))); + Put("bar" + std::to_string(i * 10 + j), rnd.RandomString(1024))); } Flush(); } @@ -4951,7 +4952,7 @@ TEST_F(DBCompactionTest, CompactionDuringShutdown) { for (auto i = 0; i < 2; ++i) { for (auto j = 0; j < 10; ++j) { ASSERT_OK( - Put("foo" + std::to_string(i * 10 + j), RandomString(&rnd, 1024))); + Put("foo" + std::to_string(i * 10 + j), rnd.RandomString(1024))); } Flush(); } @@ -4974,7 +4975,7 @@ TEST_P(DBCompactionTestWithParam, FixFileIngestionCompactionDeadlock) { // Generate an external SST file containing a single key, i.e. 
99 std::string sst_files_dir = dbname_ + "/sst_files/"; - test::DestroyDir(env_, sst_files_dir); + DestroyDir(env_, sst_files_dir); ASSERT_OK(env_->CreateDir(sst_files_dir)); SstFileWriter sst_writer(EnvOptions(), options); const std::string sst_file_path = sst_files_dir + "test.sst"; @@ -5001,7 +5002,7 @@ TEST_P(DBCompactionTestWithParam, FixFileIngestionCompactionDeadlock) { // Generate level0_stop_writes_trigger L0 files to trigger write stop for (int i = 0; i != options.level0_file_num_compaction_trigger; ++i) { for (int j = 0; j != kNumKeysPerFile; ++j) { - ASSERT_OK(Put(Key(j), RandomString(&rnd, 990))); + ASSERT_OK(Put(Key(j), rnd.RandomString(990))); } if (0 == i) { // When we reach here, the memtables have kNumKeysPerFile keys. Note that @@ -5093,7 +5094,7 @@ TEST_F(DBCompactionTest, ConsistencyFailTest2) { ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); Random rnd(301); - std::string value = RandomString(&rnd, 1000); + std::string value = rnd.RandomString(1000); ASSERT_OK(Put("foo1", value)); ASSERT_OK(Put("z", "")); @@ -5140,7 +5141,7 @@ TEST_P(DBCompactionTestWithParam, const size_t kValueSize = 1 << 20; Random rnd(301); std::atomic pick_intra_l0_count(0); - std::string value(RandomString(&rnd, kValueSize)); + std::string value(rnd.RandomString(kValueSize)); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency( {{"DBCompactionTestWithParam::FlushAfterIntraL0:1", @@ -5207,8 +5208,8 @@ TEST_P(DBCompactionTestWithParam, const size_t kValueSize = 1 << 20; Random rnd(301); - std::string value(RandomString(&rnd, kValueSize)); - std::string value2(RandomString(&rnd, kValueSize)); + std::string value(rnd.RandomString(kValueSize)); + std::string value2(rnd.RandomString(kValueSize)); std::string bigvalue = value + value; // prevents trivial move diff --git a/db/db_dynamic_level_test.cc b/db/db_dynamic_level_test.cc index ec059feb9..4273354ae 100644 --- a/db/db_dynamic_level_test.cc +++ b/db/db_dynamic_level_test.cc @@ -15,6 +15,7 @@ #include "db/db_test_util.h" #include "port/port.h" #include "port/stack_trace.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { class DBTestDynamicLevel : public DBTestBase { @@ -80,9 +81,9 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase) { for (int i = 0; i < kNKeys; i++) { int key = keys[i]; - ASSERT_OK(Put(Key(kNKeys + key), RandomString(&rnd, 102))); - ASSERT_OK(Put(Key(key), RandomString(&rnd, 102))); - ASSERT_OK(Put(Key(kNKeys * 2 + key), RandomString(&rnd, 102))); + ASSERT_OK(Put(Key(kNKeys + key), rnd.RandomString(102))); + ASSERT_OK(Put(Key(key), rnd.RandomString(102))); + ASSERT_OK(Put(Key(kNKeys * 2 + key), rnd.RandomString(102))); ASSERT_OK(Delete(Key(kNKeys + keys[i / 10]))); env_->SleepForMicroseconds(5000); } @@ -158,7 +159,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { // Put about 28K to L0 for (int i = 0; i < 70; i++) { ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), - RandomString(&rnd, 380))); + rnd.RandomString(380))); } ASSERT_OK(dbfull()->SetOptions({ {"disable_auto_compactions", "false"}, @@ -175,7 +176,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { })); for (int i = 0; i < 70; i++) { ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), - RandomString(&rnd, 380))); + rnd.RandomString(380))); } ASSERT_OK(dbfull()->SetOptions({ @@ -197,7 +198,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { // Write about 40K more for (int i = 0; i < 100; i++) { ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), - RandomString(&rnd, 380))); + 
rnd.RandomString(380))); } ASSERT_OK(dbfull()->SetOptions({ {"disable_auto_compactions", "false"}, @@ -216,7 +217,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { // Each file is about 11KB, with 9KB of data. for (int i = 0; i < 1300; i++) { ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), - RandomString(&rnd, 380))); + rnd.RandomString(380))); } // Make sure that the compaction starts before the last bit of data is @@ -257,7 +258,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:1"); for (int i = 0; i < 2; i++) { ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), - RandomString(&rnd, 380))); + rnd.RandomString(380))); } TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:2"); @@ -310,15 +311,15 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) { // Put about 7K to L0 for (int i = 0; i < 140; i++) { - ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), - RandomString(&rnd, 80))); + ASSERT_OK( + Put(Key(static_cast(rnd.Uniform(kMaxKey))), rnd.RandomString(80))); } Flush(); dbfull()->TEST_WaitForCompact(); if (NumTableFilesAtLevel(0) == 0) { // Make sure level 0 is not empty - ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), - RandomString(&rnd, 80))); + ASSERT_OK( + Put(Key(static_cast(rnd.Uniform(kMaxKey))), rnd.RandomString(80))); Flush(); } @@ -382,7 +383,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) { const int total_keys = 3000; const int random_part_size = 100; for (int i = 0; i < total_keys; i++) { - std::string value = RandomString(&rnd, random_part_size); + std::string value = rnd.RandomString(random_part_size); PutFixed32(&value, static_cast(i)); ASSERT_OK(Put(Key(i), value)); } @@ -441,8 +442,8 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) { int total_keys = 1000; for (int i = 0; i < total_keys; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 102))); - ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102))); + ASSERT_OK(Put(Key(i), rnd.RandomString(102))); + ASSERT_OK(Put(Key(kMaxKey + i), rnd.RandomString(102))); ASSERT_OK(Delete(Key(i / 10))); } verify_func(total_keys, false); @@ -475,8 +476,8 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) { int total_keys2 = 2000; for (int i = total_keys; i < total_keys2; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 102))); - ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102))); + ASSERT_OK(Put(Key(i), rnd.RandomString(102))); + ASSERT_OK(Put(Key(kMaxKey + i), rnd.RandomString(102))); ASSERT_OK(Delete(Key(i / 10))); } diff --git a/db/db_flush_test.cc b/db/db_flush_test.cc index 6255bbd51..0a0914484 100644 --- a/db/db_flush_test.cc +++ b/db/db_flush_test.cc @@ -13,10 +13,10 @@ #include "db/db_test_util.h" #include "port/port.h" #include "port/stack_trace.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" #include "util/cast_util.h" #include "util/mutexlock.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { diff --git a/db/db_impl/db_secondary_test.cc b/db/db_impl/db_secondary_test.cc index d54a52493..00284b1a4 100644 --- a/db/db_impl/db_secondary_test.cc +++ b/db/db_impl/db_secondary_test.cc @@ -10,8 +10,8 @@ #include "db/db_impl/db_impl_secondary.h" #include "db/db_test_util.h" #include "port/stack_trace.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { diff --git a/db/db_io_failure_test.cc 
b/db/db_io_failure_test.cc index f8d562447..42b4c3476 100644 --- a/db/db_io_failure_test.cc +++ b/db/db_io_failure_test.cc @@ -9,6 +9,7 @@ #include "db/db_test_util.h" #include "port/stack_trace.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -281,8 +282,8 @@ TEST_F(DBIOFailureTest, FlushSstRangeSyncError) { Random rnd(301); std::string rnd_str = - RandomString(&rnd, static_cast(options.bytes_per_sync / 2)); - std::string rnd_str_512kb = RandomString(&rnd, 512 * 1024); + rnd.RandomString(static_cast(options.bytes_per_sync / 2)); + std::string rnd_str_512kb = rnd.RandomString(512 * 1024); ASSERT_OK(Put(1, "foo", "bar")); // First 1MB doesn't get range synced @@ -330,8 +331,8 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) { Random rnd(301); std::string rnd_str = - RandomString(&rnd, static_cast(options.bytes_per_sync / 2)); - std::string rnd_str_512kb = RandomString(&rnd, 512 * 1024); + rnd.RandomString(static_cast(options.bytes_per_sync / 2)); + std::string rnd_str_512kb = rnd.RandomString(512 * 1024); ASSERT_OK(Put(1, "foo", "bar")); // First 1MB doesn't get range synced diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc index 6d06fa6ea..968511b1f 100644 --- a/db/db_iterator_test.cc +++ b/db/db_iterator_test.cc @@ -17,6 +17,7 @@ #include "rocksdb/iostats_context.h" #include "rocksdb/perf_context.h" #include "table/block_based/flush_block_policy.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -194,10 +195,10 @@ TEST_P(DBIteratorTest, IterReseekNewUpperBound) { options.compression = kNoCompression; Reopen(options); - ASSERT_OK(Put("a", RandomString(&rnd, 400))); - ASSERT_OK(Put("aabb", RandomString(&rnd, 400))); - ASSERT_OK(Put("aaef", RandomString(&rnd, 400))); - ASSERT_OK(Put("b", RandomString(&rnd, 400))); + ASSERT_OK(Put("a", rnd.RandomString(400))); + ASSERT_OK(Put("aabb", rnd.RandomString(400))); + ASSERT_OK(Put("aaef", rnd.RandomString(400))); + ASSERT_OK(Put("b", rnd.RandomString(400))); dbfull()->Flush(FlushOptions()); ReadOptions opts; Slice ub = Slice("aa"); @@ -1360,7 +1361,7 @@ class DBIteratorTestForPinnedData : public DBIteratorTest { std::vector generated_keys(key_pool); for (int i = 0; i < key_pool; i++) { - generated_keys[i] = RandomString(&rnd, key_size); + generated_keys[i] = rnd.RandomString(key_size); } std::map true_data; @@ -1368,7 +1369,7 @@ class DBIteratorTestForPinnedData : public DBIteratorTest { std::vector deleted_keys; for (int i = 0; i < puts; i++) { auto& k = generated_keys[rnd.Next() % key_pool]; - auto v = RandomString(&rnd, val_size); + auto v = rnd.RandomString(val_size); // Insert data to true_data map and to DB true_data[k] = v; @@ -1531,7 +1532,7 @@ TEST_P(DBIteratorTest, PinnedDataIteratorMultipleFiles) { Random rnd(301); for (int i = 1; i <= 1000; i++) { std::string k = Key(i * 3); - std::string v = RandomString(&rnd, 100); + std::string v = rnd.RandomString(100); ASSERT_OK(Put(k, v)); true_data[k] = v; if (i % 250 == 0) { @@ -1545,7 +1546,7 @@ TEST_P(DBIteratorTest, PinnedDataIteratorMultipleFiles) { // Generate 4 sst files in L0 for (int i = 1; i <= 1000; i++) { std::string k = Key(i * 2); - std::string v = RandomString(&rnd, 100); + std::string v = rnd.RandomString(100); ASSERT_OK(Put(k, v)); true_data[k] = v; if (i % 250 == 0) { @@ -1557,7 +1558,7 @@ TEST_P(DBIteratorTest, PinnedDataIteratorMultipleFiles) { // Add some keys/values in memtables for (int i = 1; i <= 1000; i++) { std::string k = Key(i); - std::string v = RandomString(&rnd, 100); + std::string v = rnd.RandomString(100); ASSERT_OK(Put(k, 
v)); true_data[k] = v; } @@ -1659,8 +1660,8 @@ TEST_P(DBIteratorTest, PinnedDataIteratorReadAfterUpdate) { std::map true_data; for (int i = 0; i < 1000; i++) { - std::string k = RandomString(&rnd, 10); - std::string v = RandomString(&rnd, 1000); + std::string k = rnd.RandomString(10); + std::string v = rnd.RandomString(1000); ASSERT_OK(Put(k, v)); true_data[k] = v; } @@ -1674,7 +1675,7 @@ TEST_P(DBIteratorTest, PinnedDataIteratorReadAfterUpdate) { if (rnd.OneIn(2)) { ASSERT_OK(Delete(kv.first)); } else { - std::string new_val = RandomString(&rnd, 1000); + std::string new_val = rnd.RandomString(1000); ASSERT_OK(Put(kv.first, new_val)); } } @@ -1931,7 +1932,7 @@ TEST_P(DBIteratorTest, IterPrevKeyCrossingBlocksRandomized) { for (int i = 0; i < kNumKeys; i++) { gen_key = Key(i); - gen_val = RandomString(&rnd, kValSize); + gen_val = rnd.RandomString(kValSize); ASSERT_OK(Put(gen_key, gen_val)); true_data[gen_key] = gen_val; @@ -1949,7 +1950,7 @@ TEST_P(DBIteratorTest, IterPrevKeyCrossingBlocksRandomized) { for (int j = 0; j < kNumMergeOperands; j++) { gen_key = Key(i); - gen_val = RandomString(&rnd, kValSize); + gen_val = rnd.RandomString(kValSize); ASSERT_OK(db_->Merge(WriteOptions(), gen_key, gen_val)); true_data[gen_key] += "," + gen_val; @@ -2049,7 +2050,7 @@ TEST_P(DBIteratorTest, IteratorWithLocalStatistics) { Random rnd(301); for (int i = 0; i < 1000; i++) { // Key 10 bytes / Value 10 bytes - ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10))); + ASSERT_OK(Put(rnd.RandomString(10), rnd.RandomString(10))); } std::atomic total_next(0); @@ -2705,7 +2706,7 @@ TEST_P(DBIteratorTest, AvoidReseekLevelIterator) { Reopen(options); Random rnd(301); - std::string random_str = RandomString(&rnd, 180); + std::string random_str = rnd.RandomString(180); ASSERT_OK(Put("1", random_str)); ASSERT_OK(Put("2", random_str)); diff --git a/db/db_merge_operand_test.cc b/db/db_merge_operand_test.cc index a0ab34e01..555bec090 100644 --- a/db/db_merge_operand_test.cc +++ b/db/db_merge_operand_test.cc @@ -8,11 +8,11 @@ #include "rocksdb/perf_context.h" #include "rocksdb/utilities/debug.h" #include "table/block_based/block_builder.h" -#include "test_util/fault_injection_test_env.h" #if !defined(ROCKSDB_LITE) #include "test_util/sync_point.h" #endif #include "rocksdb/merge_operator.h" +#include "utilities/fault_injection_env.h" #include "utilities/merge_operators.h" #include "utilities/merge_operators/sortlist.h" #include "utilities/merge_operators/string_append/stringappend2.h" diff --git a/db/db_merge_operator_test.cc b/db/db_merge_operator_test.cc index 4f762468d..616d27671 100644 --- a/db/db_merge_operator_test.cc +++ b/db/db_merge_operator_test.cc @@ -9,6 +9,7 @@ #include "db/forward_iterator.h" #include "port/stack_trace.h" #include "rocksdb/merge_operator.h" +#include "util/random.h" #include "utilities/merge_operators.h" #include "utilities/merge_operators/string_append/stringappend2.h" @@ -242,7 +243,7 @@ TEST_P(MergeOperatorPinningTest, OperandsMultiBlocks) { std::string key = Key(key_id % 35); key_id++; for (int k = 0; k < kOperandsPerKeyPerFile; k++) { - std::string val = RandomString(&rnd, kOperandSize); + std::string val = rnd.RandomString(kOperandSize); ASSERT_OK(db_->Merge(WriteOptions(), key, val)); if (true_data[key].size() == 0) { true_data[key] = val; @@ -327,7 +328,7 @@ TEST_P(MergeOperatorPinningTest, EvictCacheBeforeMerge) { for (int i = 0; i < kNumOperands; i++) { for (int j = 0; j < kNumKeys; j++) { std::string k = Key(j); - std::string v = RandomString(&rnd, kOperandSize); + 
std::string v = rnd.RandomString(kOperandSize); ASSERT_OK(db_->Merge(WriteOptions(), k, v)); true_data[k] = std::max(true_data[k], v); @@ -620,7 +621,7 @@ TEST_P(PerConfigMergeOperatorPinningTest, Randomized) { // kNumPutBefore keys will have base values for (int i = 0; i < kNumPutBefore; i++) { std::string key = Key(rnd.Next() % kKeyRange); - std::string value = RandomString(&rnd, kOperandSize); + std::string value = rnd.RandomString(kOperandSize); ASSERT_OK(db_->Put(WriteOptions(), key, value)); true_data[key] = value; @@ -629,7 +630,7 @@ TEST_P(PerConfigMergeOperatorPinningTest, Randomized) { // Do kTotalMerges merges for (int i = 0; i < kTotalMerges; i++) { std::string key = Key(rnd.Next() % kKeyRange); - std::string value = RandomString(&rnd, kOperandSize); + std::string value = rnd.RandomString(kOperandSize); ASSERT_OK(db_->Merge(WriteOptions(), key, value)); if (true_data[key] < value) { @@ -640,7 +641,7 @@ TEST_P(PerConfigMergeOperatorPinningTest, Randomized) { // Overwrite random kNumPutAfter keys for (int i = 0; i < kNumPutAfter; i++) { std::string key = Key(rnd.Next() % kKeyRange); - std::string value = RandomString(&rnd, kOperandSize); + std::string value = rnd.RandomString(kOperandSize); ASSERT_OK(db_->Put(WriteOptions(), key, value)); true_data[key] = value; diff --git a/db/db_options_test.cc b/db/db_options_test.cc index 8cc09ec54..8d61ee883 100644 --- a/db/db_options_test.cc +++ b/db/db_options_test.cc @@ -493,8 +493,7 @@ TEST_F(DBOptionsTest, SetDelayedWriteRateOption) { TEST_F(DBOptionsTest, MaxTotalWalSizeChange) { Random rnd(1044); const auto value_size = size_t(1024); - std::string value; - test::RandomString(&rnd, value_size, &value); + std::string value = rnd.RandomString(value_size); Options options; options.create_if_missing = true; @@ -715,7 +714,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); } @@ -746,7 +745,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); } @@ -778,7 +777,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); } @@ -842,7 +841,7 @@ TEST_F(DBOptionsTest, FIFOTtlBackwardCompatible) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. 
for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); } diff --git a/db/db_properties_test.cc b/db/db_properties_test.cc index 50dc3efef..c7baed920 100644 --- a/db/db_properties_test.cc +++ b/db/db_properties_test.cc @@ -126,8 +126,8 @@ TEST_F(DBPropertiesTest, GetAggregatedIntPropertyTest) { Random rnd(301); for (auto* handle : handles_) { for (int i = 0; i < kKeyNum; ++i) { - db_->Put(WriteOptions(), handle, RandomString(&rnd, kKeySize), - RandomString(&rnd, kValueSize)); + db_->Put(WriteOptions(), handle, rnd.RandomString(kKeySize), + rnd.RandomString(kValueSize)); } } @@ -346,18 +346,18 @@ TEST_F(DBPropertiesTest, AggregatedTableProperties) { Random rnd(5632); for (int table = 1; table <= kTableCount; ++table) { for (int i = 0; i < kPutsPerTable; ++i) { - db_->Put(WriteOptions(), RandomString(&rnd, kKeySize), - RandomString(&rnd, kValueSize)); + db_->Put(WriteOptions(), rnd.RandomString(kKeySize), + rnd.RandomString(kValueSize)); } for (int i = 0; i < kDeletionsPerTable; i++) { - db_->Delete(WriteOptions(), RandomString(&rnd, kKeySize)); + db_->Delete(WriteOptions(), rnd.RandomString(kKeySize)); } for (int i = 0; i < kMergeOperandsPerTable; i++) { - db_->Merge(WriteOptions(), RandomString(&rnd, kKeySize), - RandomString(&rnd, kValueSize)); + db_->Merge(WriteOptions(), rnd.RandomString(kKeySize), + rnd.RandomString(kValueSize)); } for (int i = 0; i < kRangeDeletionsPerTable; i++) { - std::string start = RandomString(&rnd, kKeySize); + std::string start = rnd.RandomString(kKeySize); std::string end = start; end.resize(kValueSize); db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end); @@ -546,18 +546,18 @@ TEST_F(DBPropertiesTest, AggregatedTablePropertiesAtLevel) { TableProperties tp, sum_tp, expected_tp; for (int table = 1; table <= kTableCount; ++table) { for (int i = 0; i < kPutsPerTable; ++i) { - db_->Put(WriteOptions(), RandomString(&rnd, kKeySize), - RandomString(&rnd, kValueSize)); + db_->Put(WriteOptions(), rnd.RandomString(kKeySize), + rnd.RandomString(kValueSize)); } for (int i = 0; i < kDeletionsPerTable; i++) { - db_->Delete(WriteOptions(), RandomString(&rnd, kKeySize)); + db_->Delete(WriteOptions(), rnd.RandomString(kKeySize)); } for (int i = 0; i < kMergeOperandsPerTable; i++) { - db_->Merge(WriteOptions(), RandomString(&rnd, kKeySize), - RandomString(&rnd, kValueSize)); + db_->Merge(WriteOptions(), rnd.RandomString(kKeySize), + rnd.RandomString(kValueSize)); } for (int i = 0; i < kRangeDeletionsPerTable; i++) { - std::string start = RandomString(&rnd, kKeySize); + std::string start = rnd.RandomString(kKeySize); std::string end = start; end.resize(kValueSize); db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end); @@ -920,7 +920,7 @@ TEST_F(DBPropertiesTest, ApproximateMemoryUsage) { for (int r = 0; r < kNumRounds; ++r) { for (int f = 0; f < kFlushesPerRound; ++f) { for (int w = 0; w < kWritesPerFlush; ++w) { - Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValueSize)); + Put(rnd.RandomString(kKeySize), rnd.RandomString(kValueSize)); } } // Make sure that there is no flush between getting the two properties. 
@@ -938,7 +938,7 @@ TEST_F(DBPropertiesTest, ApproximateMemoryUsage) { iters.push_back(db_->NewIterator(ReadOptions())); for (int f = 0; f < kFlushesPerRound; ++f) { for (int w = 0; w < kWritesPerFlush; ++w) { - Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValueSize)); + Put(rnd.RandomString(kKeySize), rnd.RandomString(kValueSize)); } } // Force flush to prevent flush from happening between getting the @@ -1296,8 +1296,8 @@ TEST_F(DBPropertiesTest, TablePropertiesNeedCompactTest) { const int kMaxKey = 1000; for (int i = 0; i < kMaxKey; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 102))); - ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102))); + ASSERT_OK(Put(Key(i), rnd.RandomString(102))); + ASSERT_OK(Put(Key(kMaxKey + i), rnd.RandomString(102))); } Flush(); dbfull()->TEST_WaitForCompact(); diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc index b6f721b71..ca4a6f9fc 100644 --- a/db/db_range_del_test.cc +++ b/db/db_range_del_test.cc @@ -7,6 +7,7 @@ #include "port/stack_trace.h" #include "rocksdb/utilities/write_batch_with_index.h" #include "test_util/testutil.h" +#include "util/random.h" #include "utilities/merge_operators.h" namespace ROCKSDB_NAMESPACE { @@ -124,7 +125,7 @@ TEST_F(DBRangeDelTest, CompactionOutputFilesExactlyFilled) { std::vector values; // Write 12K (4 values, each 3K) for (int j = 0; j < kNumPerFile; j++) { - values.push_back(RandomString(&rnd, 3 << 10)); + values.push_back(rnd.RandomString(3 << 10)); ASSERT_OK(Put(Key(i * kNumPerFile + j), values[j])); if (j == 0 && i > 0) { dbfull()->TEST_WaitForFlushMemTable(); @@ -172,7 +173,7 @@ TEST_F(DBRangeDelTest, MaxCompactionBytesCutsOutputFiles) { std::vector values; // Write 1MB (256 values, each 4K) for (int j = 0; j < kNumPerFile; j++) { - values.push_back(RandomString(&rnd, kBytesPerVal)); + values.push_back(rnd.RandomString(kBytesPerVal)); ASSERT_OK(Put(GetNumericStr(kNumPerFile * i + j), values[j])); } // extra entry to trigger SpecialSkipListFactory's flush @@ -378,7 +379,7 @@ TEST_F(DBRangeDelTest, ValidLevelSubcompactionBoundaries) { std::vector values; // Write 100KB (100 values, each 1K) for (int k = 0; k < kNumPerFile; k++) { - values.push_back(RandomString(&rnd, 990)); + values.push_back(rnd.RandomString(990)); ASSERT_OK(Put(Key(j * kNumPerFile + k), values[k])); } // put extra key to trigger flush @@ -438,7 +439,7 @@ TEST_F(DBRangeDelTest, ValidUniversalSubcompactionBoundaries) { std::vector values; // Write 100KB (100 values, each 1K) for (int k = 0; k < kNumPerFile; k++) { - values.push_back(RandomString(&rnd, 990)); + values.push_back(rnd.RandomString(990)); ASSERT_OK(Put(Key(j * kNumPerFile + k), values[k])); } // put extra key to trigger flush @@ -990,7 +991,7 @@ TEST_F(DBRangeDelTest, CompactionTreatsSplitInputLevelDeletionAtomically) { Key(2 * kNumFilesPerLevel)); Random rnd(301); - std::string value = RandomString(&rnd, kValueBytes); + std::string value = rnd.RandomString(kValueBytes); for (int j = 0; j < kNumFilesPerLevel; ++j) { // give files overlapping key-ranges to prevent trivial move ASSERT_OK(Put(Key(j), value)); @@ -1063,7 +1064,7 @@ TEST_F(DBRangeDelTest, RangeTombstoneEndKeyAsSstableUpperBound) { // [key000000#3,1, key000004#72057594037927935,15] // [key000001#5,1, key000002#6,1] Random rnd(301); - std::string value = RandomString(&rnd, kValueBytes); + std::string value = rnd.RandomString(kValueBytes); for (int j = 0; j < kNumFilesPerLevel; ++j) { // Give files overlapping key-ranges to prevent a trivial move when we // compact from L0 to L1. 
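Alongside the RandomString migration in these test files, the earlier hunks in this diff relocate the fault-injection Env from test_util/fault_injection_test_env.h to utilities/fault_injection_env.h and add utilities/fault_injection_env.cc and utilities/fault_injection_fs.cc to the main library sources (CMakeLists.txt, TARGETS), so non-test binaries can link it without the test library. Below is a minimal usage sketch with the new include path, assuming the class keeps the name, the base-Env-wrapping constructor, and the SetFilesystemActive() interface exercised by the tests in this diff:

```cpp
// Sketch only: the include path is taken from this diff; the constructor and
// SetFilesystemActive() are assumed from the call sites in the tests above
// (e.g. ManualCompactionFailsInReadOnlyMode).
#include <cassert>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "utilities/fault_injection_env.h"  // was test_util/fault_injection_test_env.h

namespace rdb = ROCKSDB_NAMESPACE;

void WriteUntilDiskFails(const std::string& path) {
  // Wrap the real Env so that I/O failures can be injected on demand.
  rdb::FaultInjectionTestEnv fault_env(rdb::Env::Default());

  rdb::Options options;
  options.create_if_missing = true;
  options.env = &fault_env;

  rdb::DB* db = nullptr;
  if (!rdb::DB::Open(options, path, &db).ok()) {
    return;
  }

  // Writes succeed while the simulated filesystem is active...
  rdb::Status s = db->Put(rdb::WriteOptions(), "key1", "value1");
  assert(s.ok());

  // ...and fail once it is switched off, which is what the read-only-mode
  // compaction test above relies on.
  fault_env.SetFilesystemActive(false);
  s = db->Put(rdb::WriteOptions(), "key2", "value2");
  assert(!s.ok());
  (void)s;

  fault_env.SetFilesystemActive(true);
  delete db;
}
```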
@@ -1198,7 +1199,7 @@ TEST_F(DBRangeDelTest, KeyAtOverlappingEndpointReappears) { const Snapshot* snapshot = nullptr; for (int i = 0; i < kNumFiles; ++i) { for (int j = 0; j < kFileBytes / kValueBytes; ++j) { - auto value = RandomString(&rnd, kValueBytes); + auto value = rnd.RandomString(kValueBytes); ASSERT_OK(db_->Merge(WriteOptions(), "key", value)); } if (i == kNumFiles - 1) { @@ -1282,7 +1283,7 @@ TEST_F(DBRangeDelTest, UntruncatedTombstoneDoesNotDeleteNewerKey) { const Snapshot* snapshots[] = {nullptr, nullptr}; for (int i = 0; i < kNumFiles; ++i) { for (int j = 0; j < kFileBytes / kValueBytes; ++j) { - auto value = RandomString(&rnd, kValueBytes); + auto value = rnd.RandomString(kValueBytes); std::string key; if (i < kNumFiles / 2) { key = Key(0); @@ -1328,7 +1329,7 @@ TEST_F(DBRangeDelTest, UntruncatedTombstoneDoesNotDeleteNewerKey) { // Now overwrite a few keys that are in L1 files that definitely don't have // overlapping boundary keys. for (int i = kMaxKey; i > kMaxKey - kKeysOverwritten; --i) { - auto value = RandomString(&rnd, kValueBytes); + auto value = rnd.RandomString(kValueBytes); ASSERT_OK(db_->Merge(WriteOptions(), Key(i), value)); } ASSERT_OK(db_->Flush(FlushOptions())); @@ -1375,7 +1376,7 @@ TEST_F(DBRangeDelTest, DeletedMergeOperandReappearsIterPrev) { const Snapshot* snapshot = nullptr; for (int i = 0; i < kNumFiles; ++i) { for (int j = 0; j < kFileBytes / kValueBytes; ++j) { - auto value = RandomString(&rnd, kValueBytes); + auto value = rnd.RandomString(kValueBytes); ASSERT_OK(db_->Merge(WriteOptions(), Key(j % kNumKeys), value)); if (i == 0 && j == kNumKeys) { // Take snapshot to prevent covered merge operands from being dropped or @@ -1515,7 +1516,7 @@ TEST_F(DBRangeDelTest, RangeTombstoneWrittenToMinimalSsts) { for (int i = 0; i < kFileBytes / kValueBytes; ++i) { std::string key(1, first_char); key.append(Key(i)); - std::string value = RandomString(&rnd, kValueBytes); + std::string value = rnd.RandomString(kValueBytes); ASSERT_OK(Put(key, value)); } db_->Flush(FlushOptions()); @@ -1597,7 +1598,7 @@ TEST_F(DBRangeDelTest, OverlappedTombstones) { std::vector values; // Write 12K (4 values, each 3K) for (int j = 0; j < kNumPerFile; j++) { - values.push_back(RandomString(&rnd, 3 << 10)); + values.push_back(rnd.RandomString(3 << 10)); ASSERT_OK(Put(Key(i * kNumPerFile + j), values[j])); } } @@ -1636,7 +1637,7 @@ TEST_F(DBRangeDelTest, OverlappedKeys) { std::vector values; // Write 12K (4 values, each 3K) for (int j = 0; j < kNumPerFile; j++) { - values.push_back(RandomString(&rnd, 3 << 10)); + values.push_back(rnd.RandomString(3 << 10)); ASSERT_OK(Put(Key(i * kNumPerFile + j), values[j])); } } diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc index 687d3b852..44e2380dc 100644 --- a/db/db_sst_test.cc +++ b/db/db_sst_test.cc @@ -12,6 +12,7 @@ #include "port/port.h" #include "port/stack_trace.h" #include "rocksdb/sst_file_manager.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -163,7 +164,7 @@ TEST_F(DBSSTTest, DontDeleteMovedFile) { for (int i = 0; i < 2; ++i) { // Create 1MB sst file for (int j = 0; j < 100; ++j) { - ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024))); + ASSERT_OK(Put(Key(i * 50 + j), rnd.RandomString(10 * 1024))); } ASSERT_OK(Flush()); } @@ -211,7 +212,7 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) { for (int i = 0; i < 2; ++i) { // Create 1MB sst file for (int j = 0; j < 100; ++j) { - ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024))); + ASSERT_OK(Put(Key(i * 50 + j), rnd.RandomString(10 * 1024))); } 
ASSERT_OK(Flush()); } @@ -242,7 +243,7 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) { // write_buffer_size. The flush will be blocked with block_first_time // pending_file is protecting all the files created after for (int j = 0; j < 256; ++j) { - ASSERT_OK(Put(Key(j), RandomString(&rnd, 10 * 1024))); + ASSERT_OK(Put(Key(j), rnd.RandomString(10 * 1024))); } blocking_thread.WaitUntilSleeping(); @@ -758,7 +759,7 @@ TEST_F(DBSSTTest, DBWithMaxSpaceAllowed) { // Generate a file containing 100 keys. for (int i = 0; i < 100; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 50))); + ASSERT_OK(Put(Key(i), rnd.RandomString(50))); } ASSERT_OK(Flush()); @@ -799,7 +800,7 @@ TEST_F(DBSSTTest, CancellingCompactionsWorks) { // Generate a file containing 10 keys. for (int i = 0; i < 10; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 50))); + ASSERT_OK(Put(Key(i), rnd.RandomString(50))); } ASSERT_OK(Flush()); uint64_t total_file_size = 0; @@ -809,7 +810,7 @@ TEST_F(DBSSTTest, CancellingCompactionsWorks) { // Generate another file to trigger compaction. for (int i = 0; i < 10; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 50))); + ASSERT_OK(Put(Key(i), rnd.RandomString(50))); } ASSERT_OK(Flush()); dbfull()->TEST_WaitForCompact(true); @@ -846,7 +847,7 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) { // Generate a file containing 10 keys. for (int i = 0; i < 10; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 50))); + ASSERT_OK(Put(Key(i), rnd.RandomString(50))); } ASSERT_OK(Flush()); uint64_t total_file_size = 0; @@ -856,7 +857,7 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) { // Generate another file to trigger compaction. for (int i = 0; i < 10; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 50))); + ASSERT_OK(Put(Key(i), rnd.RandomString(50))); } ASSERT_OK(Flush()); @@ -953,7 +954,7 @@ TEST_F(DBSSTTest, DBWithMaxSpaceAllowedRandomized) { // It is easy to detect if the test is stuck in a loop. No need for // complex termination logic. 
while (true) { - auto s = Put(RandomString(&rnd, 10), RandomString(&rnd, 50)); + auto s = Put(rnd.RandomString(10), rnd.RandomString(50)); if (!s.ok()) { break; } diff --git a/db/db_statistics_test.cc b/db/db_statistics_test.cc index 8fbbb96d5..2450b1a53 100644 --- a/db/db_statistics_test.cc +++ b/db/db_statistics_test.cc @@ -9,6 +9,7 @@ #include "monitoring/thread_status_util.h" #include "port/stack_trace.h" #include "rocksdb/statistics.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -55,7 +56,7 @@ TEST_F(DBStatisticsTest, CompressionStatsTest) { Random rnd(301); for (int i = 0; i < kNumKeysWritten; ++i) { // compressible string - ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a'))); + ASSERT_OK(Put(Key(i), rnd.RandomString(128) + std::string(128, 'a'))); } ASSERT_OK(Flush()); ASSERT_GT(options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED), 0); @@ -75,7 +76,7 @@ TEST_F(DBStatisticsTest, CompressionStatsTest) { // Check that compressions do not occur when turned off for (int i = 0; i < kNumKeysWritten; ++i) { // compressible string - ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a'))); + ASSERT_OK(Put(Key(i), rnd.RandomString(128) + std::string(128, 'a'))); } ASSERT_OK(Flush()); ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED) diff --git a/db/db_table_properties_test.cc b/db/db_table_properties_test.cc index 98512e22f..cf71cc94a 100644 --- a/db/db_table_properties_test.cc +++ b/db/db_table_properties_test.cc @@ -16,6 +16,7 @@ #include "rocksdb/utilities/table_properties_collectors.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "util/random.h" #ifndef ROCKSDB_LITE @@ -155,12 +156,12 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfTablesInRange) { // build a decent LSM for (int i = 0; i < 10000; i++) { - ASSERT_OK(Put(test::RandomKey(&rnd, 5), RandomString(&rnd, 102))); + ASSERT_OK(Put(test::RandomKey(&rnd, 5), rnd.RandomString(102))); } Flush(); dbfull()->TEST_WaitForCompact(); if (NumTableFilesAtLevel(0) == 0) { - ASSERT_OK(Put(test::RandomKey(&rnd, 5), RandomString(&rnd, 102))); + ASSERT_OK(Put(test::RandomKey(&rnd, 5), rnd.RandomString(102))); Flush(); } diff --git a/db/db_test.cc b/db/db_test.cc index 4fecae582..00668cc06 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -64,6 +64,7 @@ #include "test_util/testutil.h" #include "util/compression.h" #include "util/mutexlock.h" +#include "util/random.h" #include "util/rate_limiter.h" #include "util/string_util.h" #include "utilities/merge_operators.h" @@ -914,7 +915,7 @@ TEST_F(DBTest, FlushSchedule) { WriteOptions wo; // this should fill up 2 memtables for (int k = 0; k < 5000; ++k) { - ASSERT_OK(db_->Put(wo, handles_[a & 1], RandomString(&rnd, 13), "")); + ASSERT_OK(db_->Put(wo, handles_[a & 1], rnd.RandomString(13), "")); } }; @@ -1171,7 +1172,7 @@ void MinLevelHelper(DBTest* self, Options& options) { std::vector values; // Write 120KB (12 values, each 10K) for (int i = 0; i < 12; i++) { - values.push_back(DBTestBase::RandomString(&rnd, 10000)); + values.push_back(rnd.RandomString(10000)); ASSERT_OK(self->Put(DBTestBase::Key(i), values[i])); } self->dbfull()->TEST_WaitForFlushMemTable(); @@ -1181,7 +1182,7 @@ void MinLevelHelper(DBTest* self, Options& options) { // generate one more file in level-0, and should trigger level-0 compaction std::vector values; for (int i = 0; i < 12; i++) { - values.push_back(DBTestBase::RandomString(&rnd, 10000)); + values.push_back(rnd.RandomString(10000)); 
ASSERT_OK(self->Put(DBTestBase::Key(i), values[i])); } self->dbfull()->TEST_WaitForCompact(); @@ -1294,7 +1295,7 @@ TEST_F(DBTest, DISABLED_RepeatedWritesToSameKey) { Random rnd(301); std::string value = - RandomString(&rnd, static_cast(2 * options.write_buffer_size)); + rnd.RandomString(static_cast(2 * options.write_buffer_size)); for (int i = 0; i < 5 * kMaxFiles; i++) { ASSERT_OK(Put(1, "key", value)); ASSERT_LE(TotalTableFiles(1), kMaxFiles); @@ -1370,7 +1371,7 @@ TEST_F(DBTest, ApproximateSizesMemTable) { const int N = 128; Random rnd(301); for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(i), rnd.RandomString(1024))); } uint64_t size; @@ -1394,7 +1395,7 @@ TEST_F(DBTest, ApproximateSizesMemTable) { ASSERT_EQ(size, 0); for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(1000 + i), rnd.RandomString(1024))); } start = Key(500); @@ -1426,7 +1427,7 @@ TEST_F(DBTest, ApproximateSizesMemTable) { RandomShuffle(std::begin(keys), std::end(keys), rnd.Next()); for (int i = 0; i < N * 3; i++) { - ASSERT_OK(Put(Key(keys[i] + 1000), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(keys[i] + 1000), rnd.RandomString(1024))); } start = Key(100); @@ -1460,7 +1461,7 @@ TEST_F(DBTest, ApproximateSizesMemTable) { Flush(); for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(i + 1000), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(i + 1000), rnd.RandomString(1024))); } start = Key(1050); @@ -1508,7 +1509,7 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) { const int N = 64000; Random rnd(301); for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 24))); + ASSERT_OK(Put(Key(i), rnd.RandomString(24))); } // Flush everything to files Flush(); @@ -1517,7 +1518,7 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) { // Write more keys for (int i = N; i < (N + N / 4); i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 24))); + ASSERT_OK(Put(Key(i), rnd.RandomString(24))); } // Flush everything to files again Flush(); @@ -1576,7 +1577,7 @@ TEST_F(DBTest, GetApproximateMemTableStats) { const int N = 128; Random rnd(301); for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(i), rnd.RandomString(1024))); } uint64_t count; @@ -1608,7 +1609,7 @@ TEST_F(DBTest, GetApproximateMemTableStats) { ASSERT_EQ(size, 0); for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(1000 + i), rnd.RandomString(1024))); } start = Key(100); @@ -1639,7 +1640,7 @@ TEST_F(DBTest, ApproximateSizes) { static const int S2 = 105000; // Allow some expansion from metadata Random rnd(301); for (int i = 0; i < N; i++) { - ASSERT_OK(Put(1, Key(i), RandomString(&rnd, S1))); + ASSERT_OK(Put(1, Key(i), rnd.RandomString(S1))); } // 0 because GetApproximateSizes() does not account for memtable space @@ -1682,15 +1683,15 @@ TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) { CreateAndReopenWithCF({"pikachu"}, options); Random rnd(301); - std::string big1 = RandomString(&rnd, 100000); - ASSERT_OK(Put(1, Key(0), RandomString(&rnd, 10000))); - ASSERT_OK(Put(1, Key(1), RandomString(&rnd, 10000))); + std::string big1 = rnd.RandomString(100000); + ASSERT_OK(Put(1, Key(0), rnd.RandomString(10000))); + ASSERT_OK(Put(1, Key(1), rnd.RandomString(10000))); ASSERT_OK(Put(1, Key(2), big1)); - ASSERT_OK(Put(1, Key(3), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(3), rnd.RandomString(10000))); ASSERT_OK(Put(1, Key(4), big1)); - 
ASSERT_OK(Put(1, Key(5), RandomString(&rnd, 10000))); - ASSERT_OK(Put(1, Key(6), RandomString(&rnd, 300000))); - ASSERT_OK(Put(1, Key(7), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(5), rnd.RandomString(10000))); + ASSERT_OK(Put(1, Key(6), rnd.RandomString(300000))); + ASSERT_OK(Put(1, Key(7), rnd.RandomString(10000))); // Check sizes across recovery by reopening a few times for (int run = 0; run < 3; run++) { @@ -1797,7 +1798,7 @@ TEST_F(DBTest, HiddenValuesAreRemoved) { Random rnd(301); FillLevels("a", "z", 1); - std::string big = RandomString(&rnd, 50000); + std::string big = rnd.RandomString(50000); Put(1, "foo", big); Put(1, "pastfoo", "v"); const Snapshot* snapshot = db_->GetSnapshot(); @@ -2187,7 +2188,7 @@ TEST_F(DBTest, SnapshotFiles) { ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); std::vector values; for (int i = 0; i < 80; i++) { - values.push_back(RandomString(&rnd, 100000)); + values.push_back(rnd.RandomString(100000)); ASSERT_OK(Put((i < 40), Key(i), values[i])); } @@ -2240,7 +2241,7 @@ TEST_F(DBTest, SnapshotFiles) { // overwrite one key, this key should not appear in the snapshot std::vector extras; for (unsigned int i = 0; i < 1; i++) { - extras.push_back(RandomString(&rnd, 100000)); + extras.push_back(rnd.RandomString(100000)); ASSERT_OK(Put(0, Key(i), extras[i])); } @@ -3206,8 +3207,8 @@ TEST_P(DBTestRandomized, Randomized) { } if (p < 45) { // Put k = RandomKey(&rnd, minimum); - v = RandomString(&rnd, - rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8)); + v = rnd.RandomString(rnd.OneIn(20) ? 100 + rnd.Uniform(100) + : rnd.Uniform(8)); ASSERT_OK(model.Put(WriteOptions(), k, v)); ASSERT_OK(db_->Put(WriteOptions(), k, v)); } else if (p < 90) { // Delete @@ -3225,7 +3226,7 @@ TEST_P(DBTestRandomized, Randomized) { // we have multiple entries in the write batch for the same key } if (rnd.OneIn(2)) { - v = RandomString(&rnd, rnd.Uniform(10)); + v = rnd.RandomString(rnd.Uniform(10)); b.Put(k, v); } else { b.Delete(k); @@ -3395,7 +3396,7 @@ TEST_P(DBTestWithParam, FIFOCompactionTest) { Random rnd(301); for (int i = 0; i < 6; ++i) { for (int j = 0; j < 110; ++j) { - ASSERT_OK(Put(ToString(i * 100 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 100 + j), rnd.RandomString(980))); } // flush should happen here ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); @@ -3433,7 +3434,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) { for (int i = 0; i < 60; i++) { // Generate and flush a file about 20KB. for (int j = 0; j < 20; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3444,7 +3445,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) { for (int i = 0; i < 60; i++) { // Generate and flush a file about 20KB. for (int j = 0; j < 20; j++) { - ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980))); } Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3474,9 +3475,9 @@ TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) { Random rnd(301); for (int i = 0; i < 3; i++) { // Each file contains a different key which will be dropped later. 
- ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500))); + ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500))); ASSERT_OK(Put("key" + ToString(i), "")); - ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500))); + ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500))); Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); } @@ -3486,9 +3487,9 @@ TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) { } for (int i = 0; i < 3; i++) { // Each file contains a different key which will be dropped later. - ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500))); + ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500))); ASSERT_OK(Delete("key" + ToString(i))); - ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500))); + ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500))); Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); } @@ -3558,7 +3559,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3593,7 +3594,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3609,7 +3610,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { // Create 1 more file to trigger TTL compaction. The old files are dropped. for (int i = 0; i < 1; i++) { for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); } @@ -3635,7 +3636,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 3; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3650,7 +3651,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 5; i++) { for (int j = 0; j < 140; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3673,7 +3674,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3692,7 +3693,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { // Create 10 more files. The old 5 files are dropped as their ttl expired. for (int i = 0; i < 10; i++) { for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3717,7 +3718,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 60; i++) { // Generate and flush a file about 20KB. 
for (int j = 0; j < 20; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); } Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3728,7 +3729,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 60; i++) { // Generate and flush a file about 20KB. for (int j = 0; j < 20; j++) { - ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980))); + ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980))); } Flush(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3771,8 +3772,7 @@ TEST_F(DBTest, DISABLED_RateLimitingTest) { uint64_t start = env_->NowMicros(); // Write ~96M data for (int64_t i = 0; i < (96 << 10); ++i) { - ASSERT_OK( - Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo)); + ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo)); } uint64_t elapsed = env_->NowMicros() - start; double raw_rate = env_->bytes_written_ * 1000000.0 / elapsed; @@ -3790,8 +3790,7 @@ TEST_F(DBTest, DISABLED_RateLimitingTest) { start = env_->NowMicros(); // Write ~96M data for (int64_t i = 0; i < (96 << 10); ++i) { - ASSERT_OK( - Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo)); + ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo)); } rate_limiter_drains = TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) - @@ -3816,8 +3815,7 @@ TEST_F(DBTest, DISABLED_RateLimitingTest) { start = env_->NowMicros(); // Write ~96M data for (int64_t i = 0; i < (96 << 10); ++i) { - ASSERT_OK( - Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo)); + ASSERT_OK(Put(rnd.RandomString(32), rnd.RandomString((1 << 10) + 1), wo)); } elapsed = env_->NowMicros() - start; rate_limiter_drains = @@ -4018,7 +4016,7 @@ TEST_F(DBTest, DynamicMemtableOptions) { const int kNumPutsBeforeWaitForFlush = 64; Random rnd(301); for (int i = 0; i < size; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(i), rnd.RandomString(1024))); // The following condition prevents a race condition between flush jobs // acquiring work and this thread filling up multiple memtables. Without @@ -4092,7 +4090,7 @@ TEST_F(DBTest, DynamicMemtableOptions) { ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); while (!sleeping_task_low.WokenUp() && count < 256) { - ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions())); + ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions())); count++; } ASSERT_GT(static_cast(count), 128 * 0.8); @@ -4112,7 +4110,7 @@ TEST_F(DBTest, DynamicMemtableOptions) { Env::Priority::LOW); count = 0; while (!sleeping_task_low.WokenUp() && count < 1024) { - ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions())); + ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions())); count++; } // Windows fails this test. Will tune in the future and figure out @@ -4136,7 +4134,7 @@ TEST_F(DBTest, DynamicMemtableOptions) { count = 0; while (!sleeping_task_low.WokenUp() && count < 1024) { - ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions())); + ASSERT_OK(Put(Key(count), rnd.RandomString(1024), WriteOptions())); count++; } // Windows fails this test. 
Will tune in the future and figure out @@ -4323,7 +4321,7 @@ TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) { for (int file = 0; file < kNumL0Files; ++file) { for (int key = 0; key < kEntriesPerBuffer; ++key) { ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer), - RandomString(&rnd, kTestValueSize))); + rnd.RandomString(kTestValueSize))); } Flush(); } @@ -4471,7 +4469,7 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) { int operation_count[ThreadStatus::NUM_OP_TYPES] = {0}; for (int file = 0; file < 16 * kNumL0Files; ++file) { for (int k = 0; k < kEntriesPerBuffer; ++k) { - ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize))); + ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize))); } Status s = env_->GetThreadList(&thread_list); @@ -4558,7 +4556,7 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) { int operation_count[ThreadStatus::NUM_OP_TYPES] = {0}; for (int file = 0; file < 16 * kNumL0Files; ++file) { for (int k = 0; k < kEntriesPerBuffer; ++k) { - ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize))); + ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize))); } Status s = env_->GetThreadList(&thread_list); @@ -4744,7 +4742,7 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel2) { ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); for (int i = 0; i < 100; i++) { - std::string value = RandomString(&rnd, 200); + std::string value = rnd.RandomString(200); ASSERT_OK(Put(Key(keys[i]), value)); if (i % 25 == 24) { Flush(); @@ -4789,7 +4787,7 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel2) { ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); for (int i = 101; i < 500; i++) { - std::string value = RandomString(&rnd, 200); + std::string value = rnd.RandomString(200); ASSERT_OK(Put(Key(keys[i]), value)); if (i % 100 == 99) { Flush(); @@ -4841,7 +4839,7 @@ TEST_F(DBTest, DynamicCompactionOptions) { auto gen_l0_kb = [this](int start, int size, int stride) { Random rnd(301); for (int i = 0; i < size; i++) { - ASSERT_OK(Put(Key(start + stride * i), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(start + stride * i), rnd.RandomString(1024))); } dbfull()->TEST_WaitForFlushMemTable(); }; @@ -4936,7 +4934,7 @@ TEST_F(DBTest, DynamicCompactionOptions) { Random rnd(301); WriteOptions wo; while (count < 64) { - ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo)); + ASSERT_OK(Put(Key(count), rnd.RandomString(1024), wo)); dbfull()->TEST_FlushMemTable(true, true); count++; if (dbfull()->TEST_write_controler().IsStopped()) { @@ -4964,7 +4962,7 @@ TEST_F(DBTest, DynamicCompactionOptions) { sleeping_task_low.WaitUntilSleeping(); count = 0; while (count < 64) { - ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo)); + ASSERT_OK(Put(Key(count), rnd.RandomString(1024), wo)); dbfull()->TEST_FlushMemTable(true, true); count++; if (dbfull()->TEST_write_controler().IsStopped()) { @@ -4986,7 +4984,7 @@ TEST_F(DBTest, DynamicCompactionOptions) { ASSERT_EQ(NumTableFilesAtLevel(0), 0); for (int i = 0; i < 4; ++i) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(i), rnd.RandomString(1024))); // Wait for compaction so that put won't stop dbfull()->TEST_FlushMemTable(true); } @@ -5000,7 +4998,7 @@ TEST_F(DBTest, DynamicCompactionOptions) { ASSERT_EQ(NumTableFilesAtLevel(0), 0); for (int i = 0; i < 4; ++i) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(i), rnd.RandomString(1024))); // Wait for compaction so that put won't stop 
dbfull()->TEST_FlushMemTable(true); } @@ -5174,7 +5172,7 @@ TEST_F(DBTest, FileCreationRandomFailure) { } for (int k = 0; k < kTestSize; ++k) { // here we expect some of the Put fails. - std::string value = RandomString(&rnd, 100); + std::string value = rnd.RandomString(100); Status s = Put(Key(k), Slice(value)); if (s.ok()) { // update the latest successful put @@ -5223,11 +5221,11 @@ TEST_F(DBTest, DynamicMiscOptions) { int key1 = key_start + 1; int key2 = key_start + 2; Random rnd(301); - ASSERT_OK(Put(Key(key0), RandomString(&rnd, 8))); + ASSERT_OK(Put(Key(key0), rnd.RandomString(8))); for (int i = 0; i < 10; ++i) { - ASSERT_OK(Put(Key(key1), RandomString(&rnd, 8))); + ASSERT_OK(Put(Key(key1), rnd.RandomString(8))); } - ASSERT_OK(Put(Key(key2), RandomString(&rnd, 8))); + ASSERT_OK(Put(Key(key2), rnd.RandomString(8))); std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions())); iter->Seek(Key(key1)); ASSERT_TRUE(iter->Valid()); @@ -5367,7 +5365,7 @@ TEST_F(DBTest, EncodeDecompressedBlockSizeTest) { Random rnd(301); for (int i = 0; i < kNumKeysWritten; ++i) { // compressible string - ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a'))); + ASSERT_OK(Put(Key(i), rnd.RandomString(128) + std::string(128, 'a'))); } table_options.format_version = first_table_version == 1 ? 2 : 1; @@ -5712,7 +5710,7 @@ TEST_F(DBTest, PromoteL0) { std::map<int32_t, std::string> values; for (const auto& range : ranges) { for (int32_t j = range.first; j < range.second; j++) { - values[j] = RandomString(&rnd, value_size); + values[j] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(j), values[j])); } ASSERT_OK(Flush()); @@ -5773,7 +5771,7 @@ TEST_F(DBTest, CompactRangeWithEmptyBottomLevel) { Random rnd(301); for (int i = 0; i < kNumL0Files; ++i) { - ASSERT_OK(Put(Key(0), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(0), rnd.RandomString(1024))); Flush(); } ASSERT_EQ(NumTableFilesAtLevel(0), kNumL0Files); @@ -5812,7 +5810,7 @@ TEST_F(DBTest, AutomaticConflictsWithManualCompaction) { for (int i = 0; i < 2; ++i) { // put two keys to ensure no trivial move for (int j = 0; j < 2; ++j) { - ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(j), rnd.RandomString(1024))); } ASSERT_OK(Flush()); } @@ -5826,7 +5824,7 @@ for (int i = 0; i < kNumL0Files; ++i) { // put two keys to ensure no trivial move for (int j = 0; j < 2; ++j) { - ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(j), rnd.RandomString(1024))); } ASSERT_OK(Flush()); } @@ -5855,7 +5853,7 @@ TEST_F(DBTest, CompactFilesShouldTriggerAutoCompaction) { for (int i = 0; i < 2; ++i) { // put two keys to ensure no trivial move for (int j = 0; j < 2; ++j) { - ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(j), rnd.RandomString(1024))); } ASSERT_OK(Flush()); } @@ -5885,7 +5883,7 @@ TEST_F(DBTest, CompactFilesShouldTriggerAutoCompaction) { // generate enough files to trigger compaction for (int i = 0; i < 20; ++i) { for (int j = 0; j < 2; ++j) { - ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024))); + ASSERT_OK(Put(Key(j), rnd.RandomString(1024))); } ASSERT_OK(Flush()); } @@ -6496,7 +6494,7 @@ TEST_F(DBTest, PauseBackgroundWorkTest) { threads.emplace_back([&]() { Random rnd(301); for (int i = 0; i < 10000; ++i) { - Put(RandomString(&rnd, 10), RandomString(&rnd, 10)); + Put(rnd.RandomString(10), rnd.RandomString(10)); } done.store(true); }); @@ -6626,7 +6624,7 @@ TEST_F(DBTest, CreationTimeOfOldestFile) { for (int i = 0; i < kNumLevelFiles; ++i) { for (int j = 0; j <
kNumKeysPerFile; ++j) { ASSERT_OK( - Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize))); + Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } Flush(); } @@ -6651,7 +6649,7 @@ TEST_F(DBTest, CreationTimeOfOldestFile) { for (int i = 0; i < kNumLevelFiles; ++i) { for (int j = 0; j < kNumKeysPerFile; ++j) { ASSERT_OK( - Put(Key(i * kNumKeysPerFile + j), RandomString(&rnd, kValueSize))); + Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } Flush(); } diff --git a/db/db_test2.cc b/db/db_test2.cc index 95a7f557a..b01d15dba 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -16,7 +16,8 @@ #include "port/stack_trace.h" #include "rocksdb/persistent_cache.h" #include "rocksdb/wal_filter.h" -#include "test_util/fault_injection_test_env.h" +#include "util/random.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { @@ -166,7 +167,7 @@ TEST_F(DBTest2, PartitionedIndexUserToInternalKey) { for (int i = 0; i < 3000; i++) { int j = i % 30; - std::string value = RandomString(&rnd, 10500); + std::string value = rnd.RandomString(10500); ASSERT_OK(Put("keykey_" + std::to_string(j), value)); snapshots.push_back(db_->GetSnapshot()); } @@ -1274,7 +1275,7 @@ TEST_F(DBTest2, PresetCompressionDict) { std::string seq_datas[10]; for (int j = 0; j < 10; ++j) { seq_datas[j] = - RandomString(&rnd, kBlockSizeBytes - kApproxPerBlockOverheadBytes); + rnd.RandomString(kBlockSizeBytes - kApproxPerBlockOverheadBytes); } ASSERT_EQ(0, NumTableFilesAtLevel(0, 1)); @@ -1349,7 +1350,7 @@ TEST_F(DBTest2, PresetCompressionDictLocality) { for (int i = 0; i < kNumFiles; ++i) { for (int j = 0; j < kNumEntriesPerFile; ++j) { ASSERT_OK(Put(Key(i * kNumEntriesPerFile + j), - RandomString(&rnd, kNumBytesPerEntry))); + rnd.RandomString(kNumBytesPerEntry))); } ASSERT_OK(Flush()); MoveFilesToLevel(1); @@ -1519,9 +1520,9 @@ TEST_P(CompressionFailuresTest, CompressionFailures) { // Write 10 random files for (int i = 0; i < 10; i++) { for (int j = 0; j < 5; j++) { - std::string key = RandomString(&rnd, kKeySize); + std::string key = rnd.RandomString(kKeySize); // Ensure good compression ratio - std::string valueUnit = RandomString(&rnd, kValUnitSize); + std::string valueUnit = rnd.RandomString(kValUnitSize); std::string value; for (int k = 0; k < kValSize; k += kValUnitSize) { value += valueUnit; @@ -1623,8 +1624,8 @@ TEST_F(DBTest2, CompressionOptions) { // Write 10 random files for (int i = 0; i < 10; i++) { for (int j = 0; j < 5; j++) { - std::string key = RandomString(&rnd, kKeySize); - std::string value = RandomString(&rnd, kValSize); + std::string key = rnd.RandomString(kKeySize); + std::string value = rnd.RandomString(kValSize); key_value_written[key] = value; ASSERT_OK(Put(key, value)); } @@ -1696,7 +1697,7 @@ TEST_F(DBTest2, CompactionStall) { // 4 Files in L0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10))); + ASSERT_OK(Put(rnd.RandomString(10), rnd.RandomString(10))); } ASSERT_OK(Flush()); } @@ -1711,7 +1712,7 @@ TEST_F(DBTest2, CompactionStall) { // Another 6 L0 files to trigger compaction again for (int i = 0; i < 6; i++) { for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10))); + ASSERT_OK(Put(rnd.RandomString(10), rnd.RandomString(10))); } ASSERT_OK(Flush()); } @@ -2311,7 +2312,7 @@ TEST_F(DBTest2, PersistentCache) { std::string str; for (int i = 0; i < num_iter; i++) { if (i % 4 == 0) { // high compression ratio - str = RandomString(&rnd, 
1000); + str = rnd.RandomString(1000); } values.push_back(str); ASSERT_OK(Put(1, Key(i), values[i])); } @@ -2409,7 +2410,7 @@ TEST_F(DBTest2, ReadAmpBitmap) { Random rnd(301); for (size_t i = 0; i < kNumEntries; i++) { - ASSERT_OK(Put(Key(static_cast<int>(i)), RandomString(&rnd, 100))); + ASSERT_OK(Put(Key(static_cast<int>(i)), rnd.RandomString(100))); } ASSERT_OK(Flush()); @@ -2516,7 +2517,7 @@ TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) { Random rnd(301); for (int i = 0; i < kNumEntries; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 100))); + ASSERT_OK(Put(Key(i), rnd.RandomString(100))); } ASSERT_OK(Flush()); @@ -2739,13 +2740,13 @@ TEST_F(DBTest2, PausingManualCompaction1) { Random rnd(301); // Generate a file containing 10 keys. for (int i = 0; i < 10; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 50))); + ASSERT_OK(Put(Key(i), rnd.RandomString(50))); } ASSERT_OK(Flush()); // Generate another file containing same keys for (int i = 0; i < 10; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 50))); + ASSERT_OK(Put(Key(i), rnd.RandomString(50))); } ASSERT_OK(Flush()); @@ -2818,7 +2819,7 @@ TEST_F(DBTest2, PausingManualCompaction2) { for (int i = 0; i < 2; i++) { // Generate a file containing 10 keys. for (int j = 0; j < 100; j++) { - ASSERT_OK(Put(Key(j), RandomString(&rnd, 50))); + ASSERT_OK(Put(Key(j), rnd.RandomString(50))); } ASSERT_OK(Flush()); } @@ -2840,7 +2841,7 @@ TEST_F(DBTest2, PausingManualCompaction3) { for (int i = 0; i < options.num_levels; i++) { for (int j = 0; j < options.num_levels - i + 1; j++) { for (int k = 0; k < 1000; k++) { - ASSERT_OK(Put(Key(k + j * 1000), RandomString(&rnd, 50))); + ASSERT_OK(Put(Key(k + j * 1000), rnd.RandomString(50))); } Flush(); } @@ -2894,7 +2895,7 @@ TEST_F(DBTest2, PausingManualCompaction4) { for (int i = 0; i < options.num_levels; i++) { for (int j = 0; j < options.num_levels - i + 1; j++) { for (int k = 0; k < 1000; k++) { - ASSERT_OK(Put(Key(k + j * 1000), RandomString(&rnd, 50))); + ASSERT_OK(Put(Key(k + j * 1000), rnd.RandomString(50))); } Flush(); } @@ -4021,7 +4022,7 @@ TEST_F(DBTest2, DISABLED_IteratorPinnedMemory) { Reopen(options); Random rnd(301); - std::string v = RandomString(&rnd, 400); + std::string v = rnd.RandomString(400); // Since v is the size of a block, each key should take a block // of 400+ bytes. @@ -4749,7 +4750,7 @@ TEST_F(DBTest2, BlockBasedTablePrefixIndexSeekForPrev) { Reopen(options); Random rnd(301); - std::string large_value = RandomString(&rnd, 500); + std::string large_value = rnd.RandomString(500); ASSERT_OK(Put("a1", large_value)); ASSERT_OK(Put("x1", large_value)); @@ -5011,7 +5012,7 @@ TEST_F(DBTest2, AutoPrefixMode1) { Reopen(options); Random rnd(301); - std::string large_value = RandomString(&rnd, 500); + std::string large_value = rnd.RandomString(500); ASSERT_OK(Put("a1", large_value)); ASSERT_OK(Put("x1", large_value)); diff --git a/db/db_test_util.cc b/db/db_test_util.cc index c2c1d44ea..b40eac672 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -8,9 +8,11 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/db_test_util.h" + #include "db/forward_iterator.h" #include "rocksdb/env_encryption.h" #include "rocksdb/utilities/object_registry.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -408,7 +410,7 @@ Options DBTestBase::GetOptions( options.use_direct_reads = true; options.use_direct_io_for_flush_and_compaction = true; options.compaction_readahead_size = 2 * 1024 * 1024; - test::SetupSyncPointsToMockDirectIO(); + SetupSyncPointsToMockDirectIO(); break; } #endif // ROCKSDB_LITE @@ -1192,7 +1194,7 @@ int DBTestBase::GetSstFileCount(std::string path) { void DBTestBase::GenerateNewFile(int cf, Random* rnd, int* key_idx, bool nowait) { for (int i = 0; i < KNumKeysByGenerateNewFile; i++) { - ASSERT_OK(Put(cf, Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990))); + ASSERT_OK(Put(cf, Key(*key_idx), rnd->RandomString((i == 99) ? 1 : 990))); (*key_idx)++; } if (!nowait) { @@ -1204,7 +1206,7 @@ void DBTestBase::GenerateNewFile(int cf, Random* rnd, int* key_idx, // this will generate non-overlapping files since it keeps increasing key_idx void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) { for (int i = 0; i < KNumKeysByGenerateNewFile; i++) { - ASSERT_OK(Put(Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990))); + ASSERT_OK(Put(Key(*key_idx), rnd->RandomString((i == 99) ? 1 : 990))); (*key_idx)++; } if (!nowait) { @@ -1217,9 +1219,9 @@ const int DBTestBase::kNumKeysByGenerateNewRandomFile = 51; void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) { for (int i = 0; i < kNumKeysByGenerateNewRandomFile; i++) { - ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 2000))); + ASSERT_OK(Put("key" + rnd->RandomString(7), rnd->RandomString(2000))); } - ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 200))); + ASSERT_OK(Put("key" + rnd->RandomString(7), rnd->RandomString(200))); if (!nowait) { dbfull()->TEST_WaitForFlushMemTable(); dbfull()->TEST_WaitForCompact(); diff --git a/db/db_test_util.h b/db/db_test_util.h index 2dbf0fd6b..9a635139c 100644 --- a/db/db_test_util.h +++ b/db/db_test_util.h @@ -45,7 +45,6 @@ #include "test_util/mock_time_env.h" #include "test_util/sync_point.h" #include "test_util/testharness.h" -#include "test_util/testutil.h" #include "util/cast_util.h" #include "util/compression.h" #include "util/mutexlock.h" @@ -876,12 +875,6 @@ class DBTestBase : public testing::Test { ~DBTestBase(); - static std::string RandomString(Random* rnd, int len) { - std::string r; - test::RandomString(rnd, len, &r); - return r; - } - static std::string Key(int i) { char buf[100]; snprintf(buf, sizeof(buf), "key%06d", i); diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc index 889e380dd..056af711d 100644 --- a/db/db_universal_compaction_test.cc +++ b/db/db_universal_compaction_test.cc @@ -12,6 +12,7 @@ #if !defined(ROCKSDB_LITE) #include "rocksdb/utilities/table_properties_collectors.h" #include "test_util/sync_point.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -361,7 +362,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionSizeAmplification) { num++) { // Write 110KB (11 values, each 10K) for (int i = 0; i < 11; i++) { - ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(10000))); key_idx++; } dbfull()->TEST_WaitForFlushMemTable(handles_[1]); @@ -419,7 +420,7 @@ TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionSizeAmplification) { num++) { // Write 110KB (11 values, each 10K) for (int i = 0; i < 
11; i++) { - ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(10000))); key_idx++; } dbfull()->TEST_WaitForFlushMemTable(handles_[1]); @@ -498,7 +499,7 @@ TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionReadAmplification) { for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) { // Write 110KB (11 values, each 10K) for (int i = 0; i < 11; i++) { - ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(10000))); key_idx++; } dbfull()->TEST_WaitForFlushMemTable(handles_[1]); @@ -576,7 +577,7 @@ TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) { ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal); Random rnd(301); for (int key = 1024 * kEntriesPerBuffer; key >= 0; --key) { - ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize))); + ASSERT_OK(Put(1, ToString(key), rnd.RandomString(kTestValueSize))); } dbfull()->TEST_WaitForFlushMemTable(handles_[1]); dbfull()->TEST_WaitForCompact(); @@ -639,17 +640,17 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTargetLevel) { // Generate 3 overlapping files Random rnd(301); for (int i = 0; i < 210; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 100))); + ASSERT_OK(Put(Key(i), rnd.RandomString(100))); } ASSERT_OK(Flush()); for (int i = 200; i < 300; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 100))); + ASSERT_OK(Put(Key(i), rnd.RandomString(100))); } ASSERT_OK(Flush()); for (int i = 250; i < 260; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, 100))); + ASSERT_OK(Put(Key(i), rnd.RandomString(100))); } ASSERT_OK(Flush()); @@ -960,7 +961,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) { for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) { // Write 100KB (100 values, each 1K) for (int i = 0; i < 100; i++) { - ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 990))); + ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(990))); key_idx++; } dbfull()->TEST_WaitForFlushMemTable(handles_[1]); @@ -998,7 +999,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) { num++) { // Write 100KB (100 values, each 1K) for (int i = 0; i < 100; i++) { - ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990))); + ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990))); key_idx++; } dbfull()->TEST_WaitForFlushMemTable(); @@ -1008,7 +1009,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) { // Generate one more file at level-0, which should trigger level-0 // compaction. for (int i = 0; i < 100; i++) { - ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990))); + ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990))); key_idx++; } dbfull()->TEST_WaitForCompact(); @@ -1029,7 +1030,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) { num++) { // Write 110KB (11 values, each 10K) for (int i = 0; i < 100; i++) { - ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990))); + ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990))); key_idx++; } dbfull()->TEST_WaitForFlushMemTable(); @@ -1039,7 +1040,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) { // Generate one more file at level-0, which should trigger level-0 // compaction. 
for (int i = 0; i < 100; i++) { - ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990))); + ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990))); key_idx++; } dbfull()->TEST_WaitForCompact(); @@ -1050,7 +1051,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) { // Now we have 3 files at level 0, with size 4, 0.4, 2. Generate one // more file at level-0, which should trigger level-0 compaction. for (int i = 0; i < 100; i++) { - ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990))); + ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990))); key_idx++; } dbfull()->TEST_WaitForCompact(); @@ -1530,7 +1531,7 @@ TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) { for (int i = 0; i <= max_key1; i++) { // each value is 10K - ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000))); dbfull()->TEST_WaitForFlushMemTable(handles_[1]); dbfull()->TEST_WaitForCompact(); } @@ -1548,7 +1549,7 @@ TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) { // Insert more keys for (int i = max_key1 + 1; i <= max_key2; i++) { // each value is 10K - ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000))); dbfull()->TEST_WaitForFlushMemTable(handles_[1]); dbfull()->TEST_WaitForCompact(); } @@ -1580,7 +1581,7 @@ TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) { // Insert more keys for (int i = max_key2 + 1; i <= max_key3; i++) { // each value is 10K - ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000))); dbfull()->TEST_WaitForFlushMemTable(handles_[1]); dbfull()->TEST_WaitForCompact(); } diff --git a/db/db_wal_test.cc b/db/db_wal_test.cc index 5412cd4f4..537ee04a0 100644 --- a/db/db_wal_test.cc +++ b/db/db_wal_test.cc @@ -12,8 +12,8 @@ #include "options/options_helper.h" #include "port/port.h" #include "port/stack_trace.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { class DBWALTestBase : public DBTestBase { diff --git a/db/db_with_timestamp_basic_test.cc b/db/db_with_timestamp_basic_test.cc index a2a7313af..2a62fc533 100644 --- a/db/db_with_timestamp_basic_test.cc +++ b/db/db_with_timestamp_basic_test.cc @@ -13,10 +13,10 @@ #include "rocksdb/utilities/debug.h" #include "table/block_based/block_based_table_reader.h" #include "table/block_based/block_builder.h" -#include "test_util/fault_injection_test_env.h" #if !defined(ROCKSDB_LITE) #include "test_util/sync_point.h" #endif +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { class DBBasicTestWithTimestampBase : public DBTestBase { diff --git a/db/db_write_test.cc b/db/db_write_test.cc index cc1aaac08..7910db70f 100644 --- a/db/db_write_test.cc +++ b/db/db_write_test.cc @@ -4,18 +4,20 @@ // (found in the LICENSE.Apache file in the root directory). 
#include +#include #include #include #include -#include + #include "db/db_test_util.h" #include "db/write_batch_internal.h" #include "db/write_thread.h" #include "port/port.h" #include "port/stack_trace.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" +#include "util/random.h" #include "util/string_util.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { @@ -246,7 +248,7 @@ TEST_P(DBWriteTest, IOErrorOnSwitchMemtable) { mock_env->SetFilesystemActive(false, Status::IOError("Not active")); Status s; for (int i = 0; i < 4 * 512; ++i) { - s = Put(Key(i), RandomString(&rnd, 1024)); + s = Put(Key(i), rnd.RandomString(1024)); if (!s.ok()) { break; } diff --git a/db/error_handler_fs_test.cc b/db/error_handler_fs_test.cc index 912baa1ba..a944c6721 100644 --- a/db/error_handler_fs_test.cc +++ b/db/error_handler_fs_test.cc @@ -13,11 +13,12 @@ #include "rocksdb/io_status.h" #include "rocksdb/perf_context.h" #include "rocksdb/sst_file_manager.h" -#include "test_util/fault_injection_test_env.h" -#include "test_util/fault_injection_test_fs.h" #if !defined(ROCKSDB_LITE) #include "test_util/sync_point.h" #endif +#include "util/random.h" +#include "utilities/fault_injection_env.h" +#include "utilities/fault_injection_fs.h" namespace ROCKSDB_NAMESPACE { @@ -744,7 +745,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteError) { WriteBatch batch; for (auto i = 0; i < 100; ++i) { - batch.Put(Key(i), RandomString(&rnd, 1024)); + batch.Put(Key(i), rnd.RandomString(1024)); } WriteOptions wopts; @@ -757,7 +758,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteError) { int write_error = 0; for (auto i = 100; i < 199; ++i) { - batch.Put(Key(i), RandomString(&rnd, 1024)); + batch.Put(Key(i), rnd.RandomString(1024)); } SyncPoint::GetInstance()->SetCallBack( @@ -820,7 +821,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) { WriteBatch batch; for (auto i = 0; i < 100; ++i) { - batch.Put(Key(i), RandomString(&rnd, 1024)); + batch.Put(Key(i), rnd.RandomString(1024)); } WriteOptions wopts; @@ -835,7 +836,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) { int write_error = 0; for (auto i = 100; i < 200; ++i) { - batch.Put(Key(i), RandomString(&rnd, 1024)); + batch.Put(Key(i), rnd.RandomString(1024)); } SyncPoint::GetInstance()->SetCallBack( @@ -871,7 +872,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) { WriteBatch batch; for (auto i = 200; i < 300; ++i) { - batch.Put(Key(i), RandomString(&rnd, 1024)); + batch.Put(Key(i), rnd.RandomString(1024)); } WriteOptions wopts; @@ -912,7 +913,7 @@ TEST_F(DBErrorHandlingFSTest, MultiCFWALWriteError) { for (auto i = 1; i < 4; ++i) { for (auto j = 0; j < 100; ++j) { - batch.Put(handles_[i], Key(j), RandomString(&rnd, 1024)); + batch.Put(handles_[i], Key(j), rnd.RandomString(1024)); } } @@ -927,7 +928,7 @@ TEST_F(DBErrorHandlingFSTest, MultiCFWALWriteError) { // Write to one CF for (auto i = 100; i < 199; ++i) { - batch.Put(handles_[2], Key(i), RandomString(&rnd, 1024)); + batch.Put(handles_[2], Key(i), rnd.RandomString(1024)); } SyncPoint::GetInstance()->SetCallBack( @@ -1016,7 +1017,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) { WriteBatch batch; for (auto j = 0; j <= 100; ++j) { - batch.Put(Key(j), RandomString(&rnd, 1024)); + batch.Put(Key(j), rnd.RandomString(1024)); } WriteOptions wopts; @@ -1031,7 +1032,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) { // Write to one CF for (auto j = 100; j < 199; ++j) { - batch.Put(Key(j), RandomString(&rnd, 1024)); + batch.Put(Key(j), 
rnd.RandomString(1024)); } WriteOptions wopts; @@ -1129,7 +1130,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) { WriteBatch batch; for (auto j = 0; j <= 100; ++j) { - batch.Put(Key(j), RandomString(&rnd, 1024)); + batch.Put(Key(j), rnd.RandomString(1024)); } WriteOptions wopts; @@ -1144,7 +1145,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) { // Write to one CF for (auto j = 100; j < 199; ++j) { - batch.Put(Key(j), RandomString(&rnd, 1024)); + batch.Put(Key(j), rnd.RandomString(1024)); } WriteOptions wopts; diff --git a/db/external_sst_file_basic_test.cc b/db/external_sst_file_basic_test.cc index 7e62963ec..26301c5ce 100644 --- a/db/external_sst_file_basic_test.cc +++ b/db/external_sst_file_basic_test.cc @@ -10,8 +10,9 @@ #include "port/port.h" #include "port/stack_trace.h" #include "rocksdb/sst_file_writer.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/testutil.h" +#include "util/random.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { @@ -27,7 +28,7 @@ class ExternalSSTFileBasicTest } void DestroyAndRecreateExternalSSTFilesDir() { - test::DestroyDir(env_, sst_files_dir_); + DestroyDir(env_, sst_files_dir_); env_->CreateDir(sst_files_dir_); } @@ -160,9 +161,7 @@ class ExternalSSTFileBasicTest write_global_seqno, verify_checksums_before_ingest, true_data); } - ~ExternalSSTFileBasicTest() override { - test::DestroyDir(env_, sst_files_dir_); - } + ~ExternalSSTFileBasicTest() override { DestroyDir(env_, sst_files_dir_); } protected: std::string sst_files_dir_; @@ -1147,7 +1146,7 @@ TEST_F(ExternalSSTFileBasicTest, VerifyChecksumReadahead) { std::string file_name = sst_files_dir_ + "verify_checksum_readahead_test.sst"; ASSERT_OK(sst_file_writer->Open(file_name)); Random rnd(301); - std::string value = DBTestBase::RandomString(&rnd, 4000); + std::string value = rnd.RandomString(4000); for (int i = 0; i < 5000; i++) { ASSERT_OK(sst_file_writer->Put(DBTestBase::Key(i), value)); } diff --git a/db/external_sst_file_test.cc b/db/external_sst_file_test.cc index 649bf7ac2..cd7757443 100644 --- a/db/external_sst_file_test.cc +++ b/db/external_sst_file_test.cc @@ -14,8 +14,9 @@ #include "port/stack_trace.h" #include "rocksdb/sst_file_reader.h" #include "rocksdb/sst_file_writer.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/testutil.h" +#include "util/random.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { @@ -46,7 +47,7 @@ class ExternSSTFileLinkFailFallbackTest : DBTestBase("/external_sst_file_test"), test_env_(new ExternalSSTTestEnv(env_, true)) { sst_files_dir_ = dbname_ + "/sst_files/"; - test::DestroyDir(env_, sst_files_dir_); + DestroyDir(env_, sst_files_dir_); env_->CreateDir(sst_files_dir_); options_ = CurrentOptions(); options_.disable_auto_compactions = true; @@ -77,7 +78,7 @@ class ExternalSSTFileTest } void DestroyAndRecreateExternalSSTFilesDir() { - test::DestroyDir(env_, sst_files_dir_); + DestroyDir(env_, sst_files_dir_); env_->CreateDir(sst_files_dir_); } @@ -280,7 +281,7 @@ class ExternalSSTFileTest return db_->IngestExternalFile(files, opts); } - ~ExternalSSTFileTest() override { test::DestroyDir(env_, sst_files_dir_); } + ~ExternalSSTFileTest() override { DestroyDir(env_, sst_files_dir_); } protected: int last_file_id_ = 0; @@ -1751,10 +1752,8 @@ TEST_P(ExternalSSTFileTest, IngestFileWithGlobalSeqnoRandomized) { for (int i = 0; i < 500; i++) { std::vector> random_data; for (int j = 0; j < 100; j++) { - std::string k; - std::string v; - test::RandomString(&rnd, 
rnd.Next() % 20, &k); - test::RandomString(&rnd, rnd.Next() % 50, &v); + std::string k = rnd.RandomString(rnd.Next() % 20); + std::string v = rnd.RandomString(rnd.Next() % 50); random_data.emplace_back(k, v); } @@ -2388,8 +2387,7 @@ TEST_F(ExternalSSTFileTest, IngestFileWrittenWithCompressionDictionary) { Random rnd(301); std::vector> random_data; for (int i = 0; i < kNumEntries; i++) { - std::string val; - test::RandomString(&rnd, kNumBytesPerEntry, &val); + std::string val = rnd.RandomString(kNumBytesPerEntry); random_data.emplace_back(Key(i), std::move(val)); } ASSERT_OK(GenerateAndAddExternalFile(options, std::move(random_data))); @@ -2844,7 +2842,7 @@ TEST_P(ExternalSSTFileTest, DeltaEncodingWhileGlobalSeqnoPresent) { DestroyAndReopen(options); constexpr size_t kValueSize = 8; Random rnd(301); - std::string value(RandomString(&rnd, kValueSize)); + std::string value = rnd.RandomString(kValueSize); // Write some key to make global seqno larger than zero for (int i = 0; i < 10; i++) { @@ -2888,7 +2886,7 @@ TEST_P(ExternalSSTFileTest, Options options = CurrentOptions(); Random rnd(301); - std::string value(RandomString(&rnd, kValueSize)); + std::string value = rnd.RandomString(kValueSize); std::string key0 = "aa"; std::string key1 = "ab"; diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index f4ca3458a..7e208bbf9 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -22,11 +22,12 @@ #include "rocksdb/env.h" #include "rocksdb/table.h" #include "rocksdb/write_batch.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" #include "test_util/testharness.h" #include "test_util/testutil.h" #include "util/mutexlock.h" +#include "util/random.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { @@ -249,7 +250,8 @@ class FaultInjectionTest // Return the value to associate with the specified key Slice Value(int k, std::string* storage) const { Random r(k); - return test::RandomString(&r, kValueSize, storage); + *storage = r.RandomString(kValueSize); + return Slice(*storage); } void CloseDB() { diff --git a/db/flush_job_test.cc b/db/flush_job_test.cc index 72cceb522..1f0fdef57 100644 --- a/db/flush_job_test.cc +++ b/db/flush_job_test.cc @@ -3,6 +3,8 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
+#include "db/flush_job.h" + #include #include #include @@ -11,7 +13,6 @@ #include "db/blob/blob_index.h" #include "db/column_family.h" #include "db/db_impl/db_impl.h" -#include "db/flush_job.h" #include "db/version_set.h" #include "file/writable_file_writer.h" #include "rocksdb/cache.h" @@ -19,6 +20,7 @@ #include "table/mock_table.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "util/random.h" #include "util/string_util.h" namespace ROCKSDB_NAMESPACE { @@ -447,7 +449,7 @@ TEST_F(FlushJobTest, Snapshots) { std::string key(ToString(i)); int insertions = rnd.Uniform(max_inserts_per_keys); for (int j = 0; j < insertions; ++j) { - std::string value(test::RandomHumanReadableString(&rnd, 10)); + std::string value(rnd.HumanReadableString(10)); auto seqno = ++current_seqno; new_mem->Add(SequenceNumber(seqno), kTypeValue, key, value); // a key is visible only if: diff --git a/db/import_column_family_test.cc b/db/import_column_family_test.cc index e17895b46..6961b3677 100644 --- a/db/import_column_family_test.cc +++ b/db/import_column_family_test.cc @@ -1,11 +1,13 @@ #ifndef ROCKSDB_LITE #include + #include "db/db_test_util.h" #include "port/port.h" #include "port/stack_trace.h" #include "rocksdb/sst_file_writer.h" #include "test_util/testutil.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -35,14 +37,14 @@ class ImportColumnFamilyTest : public DBTestBase { delete metadata_ptr_; metadata_ptr_ = nullptr; } - test::DestroyDir(env_, sst_files_dir_); - test::DestroyDir(env_, export_files_dir_); + DestroyDir(env_, sst_files_dir_); + DestroyDir(env_, export_files_dir_); } void DestroyAndRecreateExternalSSTFilesDir() { - test::DestroyDir(env_, sst_files_dir_); + DestroyDir(env_, sst_files_dir_); env_->CreateDir(sst_files_dir_); - test::DestroyDir(env_, export_files_dir_); + DestroyDir(env_, export_files_dir_); } LiveFileMetaData LiveFileMetaDataInit(std::string name, std::string path, @@ -411,7 +413,7 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherDB) { // Create a new db and import the files. DB* db_copy; - test::DestroyDir(env_, dbname_ + "/db_copy"); + DestroyDir(env_, dbname_ + "/db_copy"); ASSERT_OK(DB::Open(options, dbname_ + "/db_copy", &db_copy)); ColumnFamilyHandle* cfh = nullptr; ASSERT_OK(db_copy->CreateColumnFamilyWithImport(ColumnFamilyOptions(), "yoyo", @@ -427,7 +429,7 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherDB) { db_copy->DropColumnFamily(cfh); db_copy->DestroyColumnFamilyHandle(cfh); delete db_copy; - test::DestroyDir(env_, dbname_ + "/db_copy"); + DestroyDir(env_, dbname_ + "/db_copy"); } TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) { @@ -450,7 +452,7 @@ TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) { snapshots.reserve(kFileBytes / kValueBytes * kNumFiles); for (int i = 0; i < kNumFiles; ++i) { for (int j = 0; j < kFileBytes / kValueBytes; ++j) { - auto value = RandomString(&rnd, kValueBytes); + auto value = rnd.RandomString(kValueBytes); ASSERT_OK(Put(1, "key", value)); snapshots.push_back(db_->GetSnapshot()); } @@ -471,7 +473,7 @@ TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) { // Create a new db and import the files. 
DB* db_copy; - test::DestroyDir(env_, dbname_ + "/db_copy"); + DestroyDir(env_, dbname_ + "/db_copy"); ASSERT_OK(DB::Open(options, dbname_ + "/db_copy", &db_copy)); ColumnFamilyHandle* cfh = nullptr; ASSERT_OK(db_copy->CreateColumnFamilyWithImport(ColumnFamilyOptions(), "yoyo", @@ -486,7 +488,7 @@ TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) { db_copy->DropColumnFamily(cfh); db_copy->DestroyColumnFamilyHandle(cfh); delete db_copy; - test::DestroyDir(env_, dbname_ + "/db_copy"); + DestroyDir(env_, dbname_ + "/db_copy"); for (const Snapshot* snapshot : snapshots) { db_->ReleaseSnapshot(snapshot); } diff --git a/db/plain_table_db_test.cc b/db/plain_table_db_test.cc index ea14f9a2a..dd428da9b 100644 --- a/db/plain_table_db_test.cc +++ b/db/plain_table_db_test.cc @@ -35,6 +35,7 @@ #include "util/cast_util.h" #include "util/hash.h" #include "util/mutexlock.h" +#include "util/random.h" #include "util/string_util.h" #include "utilities/merge_operators.h" @@ -44,10 +45,10 @@ namespace ROCKSDB_NAMESPACE { class PlainTableKeyDecoderTest : public testing::Test {}; TEST_F(PlainTableKeyDecoderTest, ReadNonMmap) { - std::string tmp; Random rnd(301); const uint32_t kLength = 2222; - Slice contents = test::RandomString(&rnd, kLength, &tmp); + std::string tmp = rnd.RandomString(kLength); + Slice contents(tmp); test::StringSource* string_source = new test::StringSource(contents, 0, false); @@ -1267,12 +1268,6 @@ static std::string Key(int i) { return std::string(buf); } -static std::string RandomString(Random* rnd, int len) { - std::string r; - test::RandomString(rnd, len, &r); - return r; -} - TEST_P(PlainTableDBTest, CompactionTrigger) { Options options = CurrentOptions(); options.write_buffer_size = 120 << 10; // 120KB @@ -1287,7 +1282,7 @@ TEST_P(PlainTableDBTest, CompactionTrigger) { std::vector values; // Write 120KB (10 values, each 12K) for (int i = 0; i < 10; i++) { - values.push_back(RandomString(&rnd, 12 << 10)); + values.push_back(rnd.RandomString(12 << 10)); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Put(Key(999), "")); @@ -1298,7 +1293,7 @@ TEST_P(PlainTableDBTest, CompactionTrigger) { //generate one more file in level-0, and should trigger level-0 compaction std::vector values; for (int i = 0; i < 12; i++) { - values.push_back(RandomString(&rnd, 10000)); + values.push_back(rnd.RandomString(10000)); ASSERT_OK(Put(Key(i), values[i])); } ASSERT_OK(Put(Key(999), "")); diff --git a/db_stress_tool/cf_consistency_stress.cc b/db_stress_tool/cf_consistency_stress.cc index c6ba5d699..acddae159 100644 --- a/db_stress_tool/cf_consistency_stress.cc +++ b/db_stress_tool/cf_consistency_stress.cc @@ -9,6 +9,7 @@ #ifdef GFLAGS #include "db_stress_tool/db_stress_common.h" +#include "file/file_util.h" namespace ROCKSDB_NAMESPACE { class CfConsistencyStressTest : public StressTest { @@ -307,7 +308,7 @@ class CfConsistencyStressTest : public StressTest { if (db_stress_env->FileExists(checkpoint_dir).ok()) { // If the directory might still exist, try to delete the files one by one. // Likely a trash file is still there. 
- Status my_s = test::DestroyDir(db_stress_env, checkpoint_dir); + Status my_s = DestroyDir(db_stress_env, checkpoint_dir); if (!my_s.ok()) { fprintf(stderr, "Fail to destory directory before checkpoint: %s", my_s.ToString().c_str()); diff --git a/db_stress_tool/db_stress_common.h b/db_stress_tool/db_stress_common.h index 8c9c9bfa6..f4654cbb5 100644 --- a/db_stress_tool/db_stress_common.h +++ b/db_stress_tool/db_stress_common.h @@ -26,6 +26,7 @@ #include #include #include + #include #include #include @@ -58,9 +59,7 @@ #include "rocksdb/utilities/transaction.h" #include "rocksdb/utilities/transaction_db.h" #include "rocksdb/write_batch.h" -#ifndef NDEBUG -#include "test_util/fault_injection_test_fs.h" -#endif +#include "test_util/testutil.h" #include "util/coding.h" #include "util/compression.h" #include "util/crc32c.h" @@ -69,9 +68,6 @@ #include "util/random.h" #include "util/string_util.h" #include "utilities/blob_db/blob_db.h" -#include "test_util/testutil.h" -#include "test_util/fault_injection_test_env.h" - #include "utilities/merge_operators.h" using GFLAGS_NAMESPACE::ParseCommandLineFlags; @@ -248,6 +244,9 @@ const int kValueMaxLen = 100; // wrapped posix or hdfs environment extern ROCKSDB_NAMESPACE::DbStressEnvWrapper* db_stress_env; #ifndef NDEBUG +namespace ROCKSDB_NAMESPACE { +class FaultInjectionTestFS; +} // namespace ROCKSDB_NAMESPACE extern std::shared_ptr fault_fs_guard; #endif diff --git a/db_stress_tool/db_stress_driver.cc b/db_stress_tool/db_stress_driver.cc index 0ae848049..69411aa29 100644 --- a/db_stress_tool/db_stress_driver.cc +++ b/db_stress_tool/db_stress_driver.cc @@ -10,6 +10,7 @@ #ifdef GFLAGS #include "db_stress_tool/db_stress_common.h" +#include "utilities/fault_injection_fs.h" namespace ROCKSDB_NAMESPACE { void ThreadBody(void* v) { diff --git a/db_stress_tool/db_stress_test_base.cc b/db_stress_tool/db_stress_test_base.cc index 481a7bfd7..723517040 100644 --- a/db_stress_tool/db_stress_test_base.cc +++ b/db_stress_tool/db_stress_test_base.cc @@ -15,6 +15,7 @@ #include "rocksdb/convenience.h" #include "rocksdb/sst_file_manager.h" #include "util/cast_util.h" +#include "utilities/fault_injection_fs.h" namespace ROCKSDB_NAMESPACE { StressTest::StressTest() @@ -1341,7 +1342,7 @@ Status StressTest::TestCheckpoint(ThreadState* thread, if (db_stress_env->FileExists(checkpoint_dir).ok()) { // If the directory might still exist, try to delete the files one by one. // Likely a trash file is still there. 
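The db_stress_common.h hunk above keeps the widely included stress header light by forward-declaring FaultInjectionTestFS and leaving the full utilities/fault_injection_fs.h include to the .cc files that actually dereference the pointer. A minimal sketch of that pattern in a hypothetical header (only FaultInjectionTestFS and fault_fs_guard are real names from this change):

// hypothetical_stress_common.h -- illustrative, not a file in this change.
#pragma once

#include <memory>

#include "rocksdb/rocksdb_namespace.h"

namespace ROCKSDB_NAMESPACE {
class FaultInjectionTestFS;  // forward declaration only; no heavy include
}  // namespace ROCKSDB_NAMESPACE

// Declaring a shared_ptr to an incomplete type is legal; translation units
// that call methods through it include utilities/fault_injection_fs.h
// themselves, as db_stress_driver.cc and db_stress_test_base.cc do above.
extern std::shared_ptr<ROCKSDB_NAMESPACE::FaultInjectionTestFS> fault_fs_guard;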
- Status my_s = test::DestroyDir(db_stress_env, checkpoint_dir); + Status my_s = DestroyDir(db_stress_env, checkpoint_dir); if (!my_s.ok()) { fprintf(stderr, "Fail to destory directory before checkpoint: %s", my_s.ToString().c_str()); diff --git a/db_stress_tool/db_stress_tool.cc b/db_stress_tool/db_stress_tool.cc index 498462e56..3e950726b 100644 --- a/db_stress_tool/db_stress_tool.cc +++ b/db_stress_tool/db_stress_tool.cc @@ -24,7 +24,7 @@ #include "db_stress_tool/db_stress_common.h" #include "db_stress_tool/db_stress_driver.h" #ifndef NDEBUG -#include "test_util/fault_injection_test_fs.h" +#include "utilities/fault_injection_fs.h" #endif namespace ROCKSDB_NAMESPACE { @@ -47,7 +47,7 @@ int db_stress_tool(int argc, char** argv) { #ifndef NDEBUG if (FLAGS_mock_direct_io) { - test::SetupSyncPointsToMockDirectIO(); + SetupSyncPointsToMockDirectIO(); } #endif if (FLAGS_statistics) { diff --git a/db_stress_tool/no_batched_ops_stress.cc b/db_stress_tool/no_batched_ops_stress.cc index 654f0c974..269d0886d 100644 --- a/db_stress_tool/no_batched_ops_stress.cc +++ b/db_stress_tool/no_batched_ops_stress.cc @@ -10,7 +10,7 @@ #ifdef GFLAGS #include "db_stress_tool/db_stress_common.h" #ifndef NDEBUG -#include "test_util/fault_injection_test_fs.h" +#include "utilities/fault_injection_fs.h" #endif // NDEBUG namespace ROCKSDB_NAMESPACE { diff --git a/env/env_test.cc b/env/env_test.cc index 714422472..73b5c95a6 100644 --- a/env/env_test.cc +++ b/env/env_test.cc @@ -35,14 +35,15 @@ #include "port/malloc.h" #include "port/port.h" #include "rocksdb/env.h" -#include "test_util/fault_injection_test_env.h" -#include "test_util/fault_injection_test_fs.h" #include "test_util/sync_point.h" #include "test_util/testharness.h" #include "test_util/testutil.h" #include "util/coding.h" #include "util/mutexlock.h" +#include "util/random.h" #include "util/string_util.h" +#include "utilities/fault_injection_env.h" +#include "utilities/fault_injection_fs.h" namespace ROCKSDB_NAMESPACE { @@ -287,7 +288,7 @@ TEST_F(EnvPosixTest, MemoryMappedFileBuffer) { ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions)); Random rnd(301); - test::RandomString(&rnd, kFileBytes, &expected_data); + expected_data = rnd.RandomString(kFileBytes); ASSERT_OK(wfile->Append(expected_data)); } @@ -1264,9 +1265,8 @@ TEST_F(EnvPosixTest, MultiReadNonAlignedLargeNum) { std::string fname = test::PerThreadDBPath(env_, "testfile"); const size_t kTotalSize = 81920; - std::string expected_data; Random rnd(301); - test::RandomString(&rnd, kTotalSize, &expected_data); + std::string expected_data = rnd.RandomString(kTotalSize); // Create file. { @@ -1949,7 +1949,7 @@ TEST_P(EnvPosixTestWithParam, PosixRandomRWFileRandomized) { std::string buf; for (int i = 0; i < 10000; i++) { // Genrate random data - test::RandomString(&rnd, 10, &buf); + buf = rnd.RandomString(10); // Pick random offset for write size_t write_off = rnd.Next() % 1000; diff --git a/file/delete_scheduler_test.cc b/file/delete_scheduler_test.cc index 993fba12a..67eaa50e6 100644 --- a/file/delete_scheduler_test.cc +++ b/file/delete_scheduler_test.cc @@ -3,18 +3,20 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
+#include "file/delete_scheduler.h" + #include #include #include #include -#include "file/delete_scheduler.h" +#include "env/composite_env_wrapper.h" +#include "file/file_util.h" #include "file/sst_file_manager_impl.h" #include "rocksdb/env.h" #include "rocksdb/options.h" #include "test_util/sync_point.h" #include "test_util/testharness.h" -#include "test_util/testutil.h" #include "util/string_util.h" #ifndef ROCKSDB_LITE @@ -40,12 +42,12 @@ class DeleteSchedulerTest : public testing::Test { ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({}); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks(); for (const auto& dummy_files_dir : dummy_files_dirs_) { - test::DestroyDir(env_, dummy_files_dir); + DestroyDir(env_, dummy_files_dir); } } void DestroyAndCreateDir(const std::string& dir) { - ASSERT_OK(test::DestroyDir(env_, dir)); + ASSERT_OK(DestroyDir(env_, dir)); EXPECT_OK(env_->CreateDir(dir)); } diff --git a/file/file_util.cc b/file/file_util.cc index 70178a0bd..0fda7aa83 100644 --- a/file/file_util.cc +++ b/file/file_util.cc @@ -187,4 +187,49 @@ IOStatus GenerateOneFileChecksum(FileSystem* fs, const std::string& file_path, return IOStatus::OK(); } +Status DestroyDir(Env* env, const std::string& dir) { + Status s; + if (env->FileExists(dir).IsNotFound()) { + return s; + } + std::vector files_in_dir; + s = env->GetChildren(dir, &files_in_dir); + if (s.ok()) { + for (auto& file_in_dir : files_in_dir) { + if (file_in_dir == "." || file_in_dir == "..") { + continue; + } + std::string path = dir + "/" + file_in_dir; + bool is_dir = false; + s = env->IsDirectory(path, &is_dir); + if (s.ok()) { + if (is_dir) { + s = DestroyDir(env, path); + } else { + s = env->DeleteFile(path); + } + } + if (!s.ok()) { + // IsDirectory, etc. might not report NotFound + if (s.IsNotFound() || env->FileExists(path).IsNotFound()) { + // Allow files to be deleted externally + s = Status::OK(); + } else { + break; + } + } + } + } + + if (s.ok()) { + s = env->DeleteDir(dir); + // DeleteDir might or might not report NotFound + if (!s.ok() && (s.IsNotFound() || env->FileExists(dir).IsNotFound())) { + // Allow to be deleted externally + s = Status::OK(); + } + } + return s; +} + } // namespace ROCKSDB_NAMESPACE diff --git a/file/file_util.h b/file/file_util.h index ac7257e05..1f77b760a 100644 --- a/file/file_util.h +++ b/file/file_util.h @@ -53,4 +53,7 @@ inline IOStatus PrepareIOFromReadOptions(const ReadOptions& ro, Env* env, return IOStatus::OK(); } +// Test method to delete the input directory and all of its contents. +// This method is destructive and is meant for use only in tests!!! +Status DestroyDir(Env* env, const std::string& dir); } // namespace ROCKSDB_NAMESPACE diff --git a/file/random_access_file_reader_test.cc b/file/random_access_file_reader_test.cc index 7580cdd72..a8be671a5 100644 --- a/file/random_access_file_reader_test.cc +++ b/file/random_access_file_reader_test.cc @@ -3,19 +3,22 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
+#include "file/random_access_file_reader.h" + +#include "file/file_util.h" #include "port/port.h" #include "port/stack_trace.h" #include "rocksdb/file_system.h" -#include "file/random_access_file_reader.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { class RandomAccessFileReaderTest : public testing::Test { public: void SetUp() override { - test::SetupSyncPointsToMockDirectIO(); + SetupSyncPointsToMockDirectIO(); env_ = Env::Default(); fs_ = FileSystem::Default(); test_dir_ = test::PerThreadDBPath("random_access_file_reader_test"); @@ -23,9 +26,7 @@ class RandomAccessFileReaderTest : public testing::Test { ComputeAndSetAlignment(); } - void TearDown() override { - EXPECT_OK(test::DestroyDir(env_, test_dir_)); - } + void TearDown() override { EXPECT_OK(DestroyDir(env_, test_dir_)); } void Write(const std::string& fname, const std::string& content) { std::unique_ptr f; @@ -79,8 +80,7 @@ class RandomAccessFileReaderTest : public testing::Test { TEST_F(RandomAccessFileReaderTest, ReadDirectIO) { std::string fname = "read-direct-io"; Random rand(0); - std::string content; - test::RandomString(&rand, static_cast(alignment()), &content); + std::string content = rand.RandomString(static_cast(alignment())); Write(fname, content); FileOptions opts; @@ -104,8 +104,7 @@ TEST_F(RandomAccessFileReaderTest, MultiReadDirectIO) { // Creates a file with 3 pages. std::string fname = "multi-read-direct-io"; Random rand(0); - std::string content; - test::RandomString(&rand, 3 * static_cast(alignment()), &content); + std::string content = rand.RandomString(3 * static_cast(alignment())); Write(fname, content); FileOptions opts; diff --git a/memtable/memtablerep_bench.cc b/memtable/memtablerep_bench.cc index cbe62cc85..0f6203042 100644 --- a/memtable/memtablerep_bench.cc +++ b/memtable/memtablerep_bench.cc @@ -141,7 +141,7 @@ class RandomGenerator { RandomGenerator() { Random rnd(301); auto size = (unsigned)std::max(1048576, FLAGS_item_size); - test::RandomString(&rnd, size, &data_); + data_ = rnd.RandomString(size); pos_ = 0; } diff --git a/src.mk b/src.mk index c4fec346e..2e438f9df 100644 --- a/src.mk +++ b/src.mk @@ -212,6 +212,8 @@ LIB_SOURCES = \ utilities/debug.cc \ utilities/env_mirror.cc \ utilities/env_timed.cc \ + utilities/fault_injection_env.cc \ + utilities/fault_injection_fs.cc \ utilities/leveldb_options/leveldb_options.cc \ utilities/memory/memory_util.cc \ utilities/merge_operators/max.cc \ @@ -277,8 +279,6 @@ ANALYZER_LIB_SOURCES = \ MOCK_LIB_SOURCES = \ table/mock_table.cc \ - test_util/fault_injection_test_fs.cc \ - test_util/fault_injection_test_env.cc BENCH_LIB_SOURCES = \ tools/db_bench_tool.cc \ diff --git a/table/block_based/block_based_table_reader_test.cc b/table/block_based/block_based_table_reader_test.cc index 6062b3d4e..e46e3de11 100644 --- a/table/block_based/block_based_table_reader_test.cc +++ b/table/block_based/block_based_table_reader_test.cc @@ -4,18 +4,20 @@ // (found in the LICENSE.Apache file in the root directory). 
#include "table/block_based/block_based_table_reader.h" -#include "rocksdb/file_system.h" -#include "table/block_based/partitioned_index_iterator.h" #include "db/table_properties_collector.h" +#include "file/file_util.h" #include "options/options_helper.h" #include "port/port.h" #include "port/stack_trace.h" +#include "rocksdb/file_system.h" #include "table/block_based/block_based_table_builder.h" #include "table/block_based/block_based_table_factory.h" +#include "table/block_based/partitioned_index_iterator.h" #include "table/format.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -33,7 +35,7 @@ class BlockBasedTableReaderTest std::tie(compression_type_, use_direct_reads_, index_type, no_block_cache) = GetParam(); - test::SetupSyncPointsToMockDirectIO(); + SetupSyncPointsToMockDirectIO(); test_dir_ = test::PerThreadDBPath("block_based_table_reader_test"); env_ = Env::Default(); fs_ = FileSystem::Default(); @@ -46,7 +48,7 @@ class BlockBasedTableReaderTest static_cast(NewBlockBasedTableFactory(opts))); } - void TearDown() override { EXPECT_OK(test::DestroyDir(env_, test_dir_)); } + void TearDown() override { EXPECT_OK(DestroyDir(env_, test_dir_)); } // Creates a table with the specificied key value pairs (kv). void CreateTable(const std::string& table_name, @@ -159,9 +161,9 @@ TEST_P(BlockBasedTableReaderTest, MultiGet) { sprintf(k, "%08u", key); std::string v; if (block % 2) { - v = test::RandomHumanReadableString(&rnd, 256); + v = rnd.HumanReadableString(256); } else { - test::RandomString(&rnd, 256, &v); + v = rnd.RandomString(256); } kv[std::string(k)] = v; key++; @@ -256,8 +258,7 @@ TEST_P(BlockBasedTableReaderTestVerifyChecksum, ChecksumMismatch) { // and internal key size is required to be >= 8 bytes, // so use %08u as the format string. sprintf(k, "%08u", key); - std::string v; - test::RandomString(&rnd, 256, &v); + std::string v = rnd.RandomString(256); kv[std::string(k)] = v; key++; } diff --git a/table/block_based/block_test.cc b/table/block_based/block_test.cc index 7521c1515..c9f12d54e 100644 --- a/table/block_based/block_test.cc +++ b/table/block_based/block_test.cc @@ -29,12 +29,6 @@ namespace ROCKSDB_NAMESPACE { -static std::string RandomString(Random *rnd, int len) { - std::string r; - test::RandomString(rnd, len, &r); - return r; -} - std::string GenerateInternalKey(int primary_key, int secondary_key, int padding_size, Random *rnd) { char buf[50]; @@ -42,7 +36,7 @@ std::string GenerateInternalKey(int primary_key, int secondary_key, snprintf(buf, sizeof(buf), "%6d%4d", primary_key, secondary_key); std::string k(p); if (padding_size) { - k += RandomString(rnd, padding_size); + k += rnd->RandomString(padding_size); } AppendInternalKeyFooter(&k, 0 /* seqno */, kTypeValue); @@ -67,7 +61,7 @@ void GenerateRandomKVs(std::vector *keys, keys->emplace_back(GenerateInternalKey(i, j, padding_size, &rnd)); // 100 bytes values - values->emplace_back(RandomString(&rnd, 100)); + values->emplace_back(rnd.RandomString(100)); } } } diff --git a/table/block_based/data_block_hash_index_test.cc b/table/block_based/data_block_hash_index_test.cc index 409a5bdc2..94fa7e94f 100644 --- a/table/block_based/data_block_hash_index_test.cc +++ b/table/block_based/data_block_hash_index_test.cc @@ -3,6 +3,8 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
+#include "table/block_based/data_block_hash_index.h" + #include #include #include @@ -12,11 +14,11 @@ #include "table/block_based/block.h" #include "table/block_based/block_based_table_reader.h" #include "table/block_based/block_builder.h" -#include "table/block_based/data_block_hash_index.h" #include "table/get_context.h" #include "table/table_builder.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -35,12 +37,6 @@ bool SearchForOffset(DataBlockHashIndex& index, const char* data, return entry == restart_point; } -// Random KV generator similer to block_test -static std::string RandomString(Random* rnd, int len) { - std::string r; - test::RandomString(rnd, len, &r); - return r; -} std::string GenerateKey(int primary_key, int secondary_key, int padding_size, Random* rnd) { char buf[50]; @@ -48,7 +44,7 @@ std::string GenerateKey(int primary_key, int secondary_key, int padding_size, snprintf(buf, sizeof(buf), "%6d%4d", primary_key, secondary_key); std::string k(p); if (padding_size) { - k += RandomString(rnd, padding_size); + k += rnd->RandomString(padding_size); } return k; @@ -71,7 +67,7 @@ void GenerateRandomKVs(std::vector* keys, keys->emplace_back(GenerateKey(i, j, padding_size, &rnd)); // 100 bytes values - values->emplace_back(RandomString(&rnd, 100)); + values->emplace_back(rnd.RandomString(100)); } } } diff --git a/table/block_fetcher_test.cc b/table/block_fetcher_test.cc index 6d1cd5ddd..e3c17b292 100644 --- a/table/block_fetcher_test.cc +++ b/table/block_fetcher_test.cc @@ -4,7 +4,10 @@ // (found in the LICENSE.Apache file in the root directory). #include "table/block_fetcher.h" + #include "db/table_properties_collector.h" +#include "env/composite_env_wrapper.h" +#include "file/file_util.h" #include "options/options_helper.h" #include "port/port.h" #include "port/stack_trace.h" @@ -14,7 +17,6 @@ #include "table/block_based/block_based_table_reader.h" #include "table/format.h" #include "test_util/testharness.h" -#include "test_util/testutil.h" namespace ROCKSDB_NAMESPACE { namespace { @@ -71,14 +73,14 @@ class BlockFetcherTest : public testing::Test { protected: void SetUp() override { - test::SetupSyncPointsToMockDirectIO(); + SetupSyncPointsToMockDirectIO(); test_dir_ = test::PerThreadDBPath("block_fetcher_test"); env_ = Env::Default(); fs_ = FileSystem::Default(); ASSERT_OK(fs_->CreateDir(test_dir_, IOOptions(), nullptr)); } - void TearDown() override { EXPECT_OK(test::DestroyDir(env_, test_dir_)); } + void TearDown() override { EXPECT_OK(DestroyDir(env_, test_dir_)); } void AssertSameBlock(const std::string& block1, const std::string& block2) { ASSERT_EQ(block1, block2); diff --git a/table/merger_test.cc b/table/merger_test.cc index 466e0eb42..13f225731 100644 --- a/table/merger_test.cc +++ b/table/merger_test.cc @@ -3,12 +3,13 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
-#include #include +#include #include "table/merging_iterator.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -24,7 +25,7 @@ class MergerTest : public testing::Test { std::vector ret; for (size_t i = 0; i < len; ++i) { - InternalKey ik(test::RandomHumanReadableString(&rnd_, string_len), 0, + InternalKey ik(rnd_.HumanReadableString(string_len), 0, ValueType::kTypeValue); ret.push_back(ik.Encode().ToString(false)); } @@ -44,8 +45,7 @@ class MergerTest : public testing::Test { } void SeekToRandom() { - InternalKey ik(test::RandomHumanReadableString(&rnd_, 5), 0, - ValueType::kTypeValue); + InternalKey ik(rnd_.HumanReadableString(5), 0, ValueType::kTypeValue); Seek(ik.Encode().ToString(false)); } diff --git a/table/table_test.cc b/table/table_test.cc index 7bc53a584..3554f3463 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -1227,8 +1227,7 @@ class FileChecksumTestHelper { void AddKVtoKVMap(int num_entries) { Random rnd(test::RandomSeed()); for (int i = 0; i < num_entries; i++) { - std::string v; - test::RandomString(&rnd, 100, &v); + std::string v = rnd.RandomString(100); kv_map_[test::RandomKey(&rnd, 20)] = v; } } @@ -1899,16 +1898,10 @@ TEST_P(BlockBasedTableTest, SkipPrefixBloomFilter) { } } -static std::string RandomString(Random* rnd, int len) { - std::string r; - test::RandomString(rnd, len, &r); - return r; -} - void AddInternalKey(TableConstructor* c, const std::string& prefix, std::string value = "v", int /*suffix_len*/ = 800) { static Random rnd(1023); - InternalKey k(prefix + RandomString(&rnd, 800), 0, kTypeValue); + InternalKey k(prefix + rnd.RandomString(800), 0, kTypeValue); c->Add(k.Encode().ToString(), value); } @@ -2481,7 +2474,7 @@ TEST_P(BlockBasedTableTest, IndexSizeStat) { std::vector keys; for (int i = 0; i < 100; ++i) { - keys.push_back(RandomString(&rnd, 10000)); + keys.push_back(rnd.RandomString(10000)); } // Each time we load one more key to the table. the table index block @@ -2525,7 +2518,7 @@ TEST_P(BlockBasedTableTest, NumBlockStat) { for (int i = 0; i < 10; ++i) { // the key/val are slightly smaller than block size, so that each block // holds roughly one key/value pair. - c.Add(RandomString(&rnd, 900), "val"); + c.Add(rnd.RandomString(900), "val"); } std::vector ks; @@ -3607,9 +3600,8 @@ TEST_P(ParameterizedHarnessTest, RandomizedHarnessTest) { for (int num_entries = 0; num_entries < 2000; num_entries += (num_entries < 50 ? 
1 : 200)) { for (int e = 0; e < num_entries; e++) { - std::string v; Add(test::RandomKey(&rnd, rnd.Skewed(4)), - test::RandomString(&rnd, rnd.Skewed(5), &v).ToString()); + rnd.RandomString(rnd.Skewed(5))); } Test(&rnd); } @@ -3621,8 +3613,7 @@ TEST_F(DBHarnessTest, RandomizedLongDB) { int num_entries = 100000; for (int e = 0; e < num_entries; e++) { std::string v; - Add(test::RandomKey(&rnd, rnd.Skewed(4)), - test::RandomString(&rnd, rnd.Skewed(5), &v).ToString()); + Add(test::RandomKey(&rnd, rnd.Skewed(4)), rnd.RandomString(rnd.Skewed(5))); } Test(&rnd); @@ -3878,8 +3869,8 @@ TEST_P(IndexBlockRestartIntervalTest, IndexBlockRestartInterval) { TableConstructor c(BytewiseComparator()); static Random rnd(301); for (int i = 0; i < kKeysInTable; i++) { - InternalKey k(RandomString(&rnd, kKeySize), 0, kTypeValue); - c.Add(k.Encode().ToString(), RandomString(&rnd, kValSize)); + InternalKey k(rnd.RandomString(kKeySize), 0, kTypeValue); + c.Add(k.Encode().ToString(), rnd.RandomString(kValSize)); } std::vector keys; @@ -4541,9 +4532,9 @@ TEST_P(BlockBasedTableTest, DataBlockHashIndex) { static Random rnd(1048); for (int i = 0; i < kNumKeys; i++) { // padding one "0" to mark existent keys. - std::string random_key(RandomString(&rnd, kKeySize - 1) + "1"); + std::string random_key(rnd.RandomString(kKeySize - 1) + "1"); InternalKey k(random_key, 0, kTypeValue); - c.Add(k.Encode().ToString(), RandomString(&rnd, kValSize)); + c.Add(k.Encode().ToString(), rnd.RandomString(kValSize)); } std::vector keys; diff --git a/test_util/sync_point.cc b/test_util/sync_point.cc index 345e41d64..afdda872b 100644 --- a/test_util/sync_point.cc +++ b/test_util/sync_point.cc @@ -4,6 +4,10 @@ // (found in the LICENSE.Apache file in the root directory). #include "test_util/sync_point.h" + +#include +#include + #include "test_util/sync_point_impl.h" int rocksdb_kill_odds = 0; @@ -64,3 +68,22 @@ void SyncPoint::Process(const std::string& point, void* cb_arg) { } // namespace ROCKSDB_NAMESPACE #endif // NDEBUG + +namespace ROCKSDB_NAMESPACE { +void SetupSyncPointsToMockDirectIO() { +#if !defined(NDEBUG) && !defined(OS_MACOSX) && !defined(OS_WIN) && \ + !defined(OS_SOLARIS) && !defined(OS_AIX) && !defined(OS_OPENBSD) + ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( + "NewWritableFile:O_DIRECT", [&](void* arg) { + int* val = static_cast(arg); + *val &= ~O_DIRECT; + }); + ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( + "NewRandomAccessFile:O_DIRECT", [&](void* arg) { + int* val = static_cast(arg); + *val &= ~O_DIRECT; + }); + ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); +#endif +} +} // namespace ROCKSDB_NAMESPACE diff --git a/test_util/sync_point.h b/test_util/sync_point.h index 007510a4f..08d6c037a 100644 --- a/test_util/sync_point.h +++ b/test_util/sync_point.h @@ -124,6 +124,9 @@ class SyncPoint { Data* impl_; }; +// Sets up sync points to mock direct IO instead of actually issuing direct IO +// to the file system. +void SetupSyncPointsToMockDirectIO(); } // namespace ROCKSDB_NAMESPACE // Use TEST_SYNC_POINT to specify sync points inside code base. 
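SetupSyncPointsToMockDirectIO() now lives with the sync-point machinery itself, so callers only need test_util/sync_point.h and no test:: qualifier. A minimal SetUp sketch following the reader and block-fetcher tests in this diff (the fixture and directory names are illustrative):

#include <memory>
#include <string>

#include "rocksdb/env.h"
#include "rocksdb/file_system.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"

namespace ROCKSDB_NAMESPACE {

class DirectIOExampleTest : public testing::Test {  // illustrative name
 public:
  void SetUp() override {
    // Installs sync-point callbacks that clear O_DIRECT from open flags on
    // platforms without real direct-IO support; compiled out under NDEBUG.
    SetupSyncPointsToMockDirectIO();

    env_ = Env::Default();
    fs_ = FileSystem::Default();
    test_dir_ = test::PerThreadDBPath("direct_io_example_test");
    ASSERT_OK(fs_->CreateDir(test_dir_, IOOptions(), nullptr));
  }

 protected:
  Env* env_ = nullptr;
  std::shared_ptr<FileSystem> fs_;
  std::string test_dir_;
};

}  // namespace ROCKSDB_NAMESPACE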
diff --git a/test_util/testutil.cc b/test_util/testutil.cc index 0d95e3feb..234c76d81 100644 --- a/test_util/testutil.cc +++ b/test_util/testutil.cc @@ -11,6 +11,7 @@ #include #include + #include #include #include @@ -23,6 +24,7 @@ #include "file/writable_file_writer.h" #include "port/port.h" #include "test_util/sync_point.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { namespace test { @@ -30,23 +32,6 @@ namespace test { const uint32_t kDefaultFormatVersion = BlockBasedTableOptions().format_version; const uint32_t kLatestFormatVersion = 5u; -Slice RandomString(Random* rnd, int len, std::string* dst) { - dst->resize(len); - for (int i = 0; i < len; i++) { - (*dst)[i] = static_cast(' ' + rnd->Uniform(95)); // ' ' .. '~' - } - return Slice(*dst); -} - -extern std::string RandomHumanReadableString(Random* rnd, int len) { - std::string ret; - ret.resize(len); - for (int i = 0; i < len; ++i) { - ret[i] = static_cast('a' + rnd->Uniform(26)); - } - return ret; -} - std::string RandomKey(Random* rnd, int len, RandomKeyType type) { // Make sure to generate a wide variety of characters so we // test the boundary conditions for short-key optimizations. @@ -78,8 +63,7 @@ extern Slice CompressibleString(Random* rnd, double compressed_fraction, int len, std::string* dst) { int raw = static_cast(len * compressed_fraction); if (raw < 1) raw = 1; - std::string raw_data; - RandomString(rnd, raw, &raw_data); + std::string raw_data = rnd->RandomString(raw); // Duplicate the random data until we have filled "len" bytes dst->clear(); @@ -453,51 +437,6 @@ void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, DBOptions& db_options, &cf_opt->compression_per_level, rnd); } -Status DestroyDir(Env* env, const std::string& dir) { - Status s; - if (env->FileExists(dir).IsNotFound()) { - return s; - } - std::vector files_in_dir; - s = env->GetChildren(dir, &files_in_dir); - if (s.ok()) { - for (auto& file_in_dir : files_in_dir) { - if (file_in_dir == "." || file_in_dir == "..") { - continue; - } - std::string path = dir + "/" + file_in_dir; - bool is_dir = false; - s = env->IsDirectory(path, &is_dir); - if (s.ok()) { - if (is_dir) { - s = DestroyDir(env, path); - } else { - s = env->DeleteFile(path); - } - } - if (!s.ok()) { - // IsDirectory, etc. 
might not report NotFound - if (s.IsNotFound() || env->FileExists(path).IsNotFound()) { - // Allow files to be deleted externally - s = Status::OK(); - } else { - break; - } - } - } - } - - if (s.ok()) { - s = env->DeleteDir(dir); - // DeleteDir might or might not report NotFound - if (!s.ok() && (s.IsNotFound() || env->FileExists(dir).IsNotFound())) { - // Allow to be deleted externally - s = Status::OK(); - } - } - return s; -} - bool IsDirectIOSupported(Env* env, const std::string& dir) { EnvOptions env_options; env_options.use_mmap_writes = false; @@ -531,22 +470,6 @@ size_t GetLinesCount(const std::string& fname, const std::string& pattern) { return count; } -void SetupSyncPointsToMockDirectIO() { -#if !defined(NDEBUG) && !defined(OS_MACOSX) && !defined(OS_WIN) && \ - !defined(OS_SOLARIS) && !defined(OS_AIX) && !defined(OS_OPENBSD) - ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( - "NewWritableFile:O_DIRECT", [&](void* arg) { - int* val = static_cast(arg); - *val &= ~O_DIRECT; - }); - ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( - "NewRandomAccessFile:O_DIRECT", [&](void* arg) { - int* val = static_cast(arg); - *val &= ~O_DIRECT; - }); - ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); -#endif -} void CorruptFile(const std::string& fname, int offset, int bytes_to_corrupt) { struct stat sbuf; diff --git a/test_util/testutil.h b/test_util/testutil.h index f41a1dec8..4255a48f2 100644 --- a/test_util/testutil.h +++ b/test_util/testutil.h @@ -26,9 +26,9 @@ #include "table/internal_iterator.h" #include "table/plain/plain_table_factory.h" #include "util/mutexlock.h" -#include "util/random.h" namespace ROCKSDB_NAMESPACE { +class Random; class SequentialFile; class SequentialFileReader; @@ -37,12 +37,6 @@ namespace test { extern const uint32_t kDefaultFormatVersion; extern const uint32_t kLatestFormatVersion; -// Store in *dst a random string of length "len" and return a Slice that -// references the generated data. -extern Slice RandomString(Random* rnd, int len, std::string* dst); - -extern std::string RandomHumanReadableString(Random* rnd, int len); - // Return a random key with the specified length that may contain interesting // characters (e.g. \x00, \xff, etc.). enum RandomKeyType : char { RANDOM, LARGEST, SMALLEST, MIDDLE }; @@ -796,8 +790,6 @@ TableFactory* RandomTableFactory(Random* rnd, int pre_defined = -1); std::string RandomName(Random* rnd, const size_t len); -Status DestroyDir(Env* env, const std::string& dir); - bool IsDirectIOSupported(Env* env, const std::string& dir); // Return the number of lines where a given pattern was found in a file. @@ -808,9 +800,6 @@ size_t GetLinesCount(const std::string& fname, const std::string& pattern); // Tries to set TEST_TMPDIR to a directory supporting direct IO. void ResetTmpDirForDirectIO(); -// Sets up sync points to mock direct IO instead of actually issuing direct IO -// to the file system. 
-void SetupSyncPointsToMockDirectIO(); void CorruptFile(const std::string& fname, int offset, int bytes_to_corrupt); diff --git a/test_util/testutil_test.cc b/test_util/testutil_test.cc index d055af667..41f26e389 100644 --- a/test_util/testutil_test.cc +++ b/test_util/testutil_test.cc @@ -5,6 +5,7 @@ #include "test_util/testutil.h" +#include "file/file_util.h" #include "port/port.h" #include "port/stack_trace.h" #include "test_util/testharness.h" @@ -28,7 +29,7 @@ TEST(TestUtil, DestroyDirRecursively) { ASSERT_OK(env->CreateDir(test_dir + "/dir")); CreateFile(env, test_dir + "/dir/file"); - ASSERT_OK(test::DestroyDir(env, test_dir)); + ASSERT_OK(DestroyDir(env, test_dir)); auto s = env->FileExists(test_dir); ASSERT_TRUE(s.IsNotFound()); } diff --git a/tools/db_repl_stress.cc b/tools/db_repl_stress.cc index 717f5d3d8..794ac1530 100644 --- a/tools/db_repl_stress.cc +++ b/tools/db_repl_stress.cc @@ -37,20 +37,14 @@ struct DataPumpThread { DB* db; // Assumption DB is Open'ed already. }; -static std::string RandomString(Random* rnd, int len) { - std::string r; - test::RandomString(rnd, len, &r); - return r; -} - static void DataPumpThreadBody(void* arg) { DataPumpThread* t = reinterpret_cast(arg); DB* db = t->db; Random rnd(301); size_t i = 0; while (i++ < t->no_records) { - if (!db->Put(WriteOptions(), Slice(RandomString(&rnd, 500)), - Slice(RandomString(&rnd, 500))) + if (!db->Put(WriteOptions(), Slice(rnd.RandomString(500)), + Slice(rnd.RandomString(500))) .ok()) { fprintf(stderr, "Error in put\n"); exit(1); diff --git a/tools/ldb_cmd_test.cc b/tools/ldb_cmd_test.cc index e8aa387a2..fce26aabb 100644 --- a/tools/ldb_cmd_test.cc +++ b/tools/ldb_cmd_test.cc @@ -6,6 +6,7 @@ #ifndef ROCKSDB_LITE #include "rocksdb/utilities/ldb_cmd.h" + #include "db/version_edit.h" #include "db/version_set.h" #include "env/composite_env_wrapper.h" @@ -16,6 +17,7 @@ #include "test_util/testharness.h" #include "test_util/testutil.h" #include "util/file_checksum_helper.h" +#include "util/random.h" using std::string; using std::vector; @@ -284,32 +286,28 @@ TEST_F(LdbCmdTest, DumpFileChecksumNoChecksum) { for (int i = 0; i < 200; i++) { char buf[16]; snprintf(buf, sizeof(buf), "%08d", i); - std::string v; - test::RandomString(&rnd, 100, &v); + std::string v = rnd.RandomString(100); ASSERT_OK(db->Put(wopts, buf, v)); } ASSERT_OK(db->Flush(fopts)); for (int i = 100; i < 300; i++) { char buf[16]; snprintf(buf, sizeof(buf), "%08d", i); - std::string v; - test::RandomString(&rnd, 100, &v); + std::string v = rnd.RandomString(100); ASSERT_OK(db->Put(wopts, buf, v)); } ASSERT_OK(db->Flush(fopts)); for (int i = 200; i < 400; i++) { char buf[16]; snprintf(buf, sizeof(buf), "%08d", i); - std::string v; - test::RandomString(&rnd, 100, &v); + std::string v = rnd.RandomString(100); ASSERT_OK(db->Put(wopts, buf, v)); } ASSERT_OK(db->Flush(fopts)); for (int i = 300; i < 400; i++) { char buf[16]; snprintf(buf, sizeof(buf), "%08d", i); - std::string v; - test::RandomString(&rnd, 100, &v); + std::string v = rnd.RandomString(100); ASSERT_OK(db->Put(wopts, buf, v)); } ASSERT_OK(db->Flush(fopts)); @@ -369,32 +367,28 @@ TEST_F(LdbCmdTest, DumpFileChecksumCRC32) { for (int i = 0; i < 100; i++) { char buf[16]; snprintf(buf, sizeof(buf), "%08d", i); - std::string v; - test::RandomString(&rnd, 100, &v); + std::string v = rnd.RandomString(100); ASSERT_OK(db->Put(wopts, buf, v)); } ASSERT_OK(db->Flush(fopts)); for (int i = 50; i < 150; i++) { char buf[16]; snprintf(buf, sizeof(buf), "%08d", i); - std::string v; - test::RandomString(&rnd, 100, 
&v); + std::string v = rnd.RandomString(100); ASSERT_OK(db->Put(wopts, buf, v)); } ASSERT_OK(db->Flush(fopts)); for (int i = 100; i < 200; i++) { char buf[16]; snprintf(buf, sizeof(buf), "%08d", i); - std::string v; - test::RandomString(&rnd, 100, &v); + std::string v = rnd.RandomString(100); ASSERT_OK(db->Put(wopts, buf, v)); } ASSERT_OK(db->Flush(fopts)); for (int i = 150; i < 250; i++) { char buf[16]; snprintf(buf, sizeof(buf), "%08d", i); - std::string v; - test::RandomString(&rnd, 100, &v); + std::string v = rnd.RandomString(100); ASSERT_OK(db->Put(wopts, buf, v)); } ASSERT_OK(db->Flush(fopts)); diff --git a/util/file_reader_writer_test.cc b/util/file_reader_writer_test.cc index f37bd5931..0f1cc499b 100644 --- a/util/file_reader_writer_test.cc +++ b/util/file_reader_writer_test.cc @@ -166,8 +166,7 @@ TEST_F(WritableFileWriterTest, IncrementalBuffer) { std::string target; for (int i = 0; i < 20; i++) { uint32_t num = r.Skewed(16) * 100 + r.Uniform(100); - std::string random_string; - test::RandomString(&r, num, &random_string); + std::string random_string = r.RandomString(num); writer->Append(Slice(random_string.c_str(), num)); target.append(random_string.c_str(), num); @@ -288,8 +287,7 @@ TEST_P(ReadaheadRandomAccessFileTest, SourceStrLenGreaterThanReadaheadSize) { for (int k = 0; k < 100; ++k) { size_t strLen = k * GetReadaheadSize() + rng.Uniform(static_cast(GetReadaheadSize())); - std::string str = - test::RandomHumanReadableString(&rng, static_cast(strLen)); + std::string str = rng.HumanReadableString(static_cast(strLen)); ResetSourceStr(str); for (int test = 1; test <= 100; ++test) { size_t offset = rng.Uniform(static_cast(strLen)); @@ -304,8 +302,7 @@ TEST_P(ReadaheadRandomAccessFileTest, ReadExceedsReadaheadSize) { Random rng(7); size_t strLen = 4 * GetReadaheadSize() + rng.Uniform(static_cast(GetReadaheadSize())); - std::string str = - test::RandomHumanReadableString(&rng, static_cast(strLen)); + std::string str = rng.HumanReadableString(static_cast(strLen)); ResetSourceStr(str); for (int test = 1; test <= 100; ++test) { size_t offset = rng.Uniform(static_cast(strLen)); @@ -383,8 +380,7 @@ TEST_P(ReadaheadSequentialFileTest, SourceStrLenGreaterThanReadaheadSize) { for (int k = 0; k < 100; ++k) { size_t strLen = k * GetReadaheadSize() + rng.Uniform(static_cast(GetReadaheadSize())); - std::string str = - test::RandomHumanReadableString(&rng, static_cast(strLen)); + std::string str = rng.HumanReadableString(static_cast(strLen)); ResetSourceStr(str); size_t offset = 0; for (int test = 1; test <= 100; ++test) { @@ -406,8 +402,7 @@ TEST_P(ReadaheadSequentialFileTest, ReadExceedsReadaheadSize) { for (int k = 0; k < 100; ++k) { size_t strLen = k * GetReadaheadSize() + rng.Uniform(static_cast(GetReadaheadSize())); - std::string str = - test::RandomHumanReadableString(&rng, static_cast(strLen)); + std::string str = rng.HumanReadableString(static_cast(strLen)); ResetSourceStr(str); size_t offset = 0; for (int test = 1; test <= 100; ++test) { diff --git a/util/random.cc b/util/random.cc index 38c36defd..68624ad43 100644 --- a/util/random.cc +++ b/util/random.cc @@ -35,4 +35,22 @@ Random* Random::GetTLSInstance() { return rv; } +std::string Random::HumanReadableString(int len) { + std::string ret; + ret.resize(len); + for (int i = 0; i < len; ++i) { + ret[i] = static_cast('a' + Uniform(26)); + } + return ret; +} + +std::string Random::RandomString(int len) { + std::string ret; + ret.resize(len); + for (int i = 0; i < len; i++) { + ret[i] = static_cast(' ' + Uniform(95)); // ' ' .. 
'~' + } + return ret; +} + } // namespace ROCKSDB_NAMESPACE diff --git a/util/random.h b/util/random.h index 246d6c2ff..5f6eaf51e 100644 --- a/util/random.h +++ b/util/random.h @@ -86,6 +86,12 @@ class Random { return Uniform(1 << Uniform(max_log + 1)); } + // Returns a random string of length "len" + std::string RandomString(int len); + + // Generates a random string of len bytes using human-readable characters + std::string HumanReadableString(int len); + // Returns a Random instance for use by the current thread without // additional locking static Random* GetTLSInstance(); diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc index 126f63b79..9e8aeb176 100644 --- a/utilities/backupable/backupable_db_test.cc +++ b/utilities/backupable/backupable_db_test.cc @@ -446,8 +446,7 @@ class FileManager : public EnvWrapper { } for (uint64_t i = 0; i < bytes_to_corrupt; ++i) { - std::string tmp; - test::RandomString(&rnd_, 1, &tmp); + std::string tmp = rnd_.RandomString(1); file_contents[rnd_.Next() % file_contents.size()] = tmp[0]; } return WriteToFile(fname, file_contents); diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 6ba672ed9..95533cd6a 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -5,6 +5,8 @@ #ifndef ROCKSDB_LITE +#include "utilities/blob_db/blob_db.h" + #include #include #include @@ -22,14 +24,13 @@ #include "file/sst_file_manager_impl.h" #include "port/port.h" #include "rocksdb/utilities/debug.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" #include "test_util/testharness.h" #include "util/cast_util.h" #include "util/random.h" #include "util/string_util.h" -#include "utilities/blob_db/blob_db.h" #include "utilities/blob_db/blob_db_impl.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { namespace blob_db { @@ -142,7 +143,7 @@ class BlobDBTest : public testing::Test { void PutRandomWithTTL(const std::string &key, uint64_t ttl, Random *rnd, std::map *data = nullptr) { int len = rnd->Next() % kMaxBlobSize + 1; - std::string value = test::RandomHumanReadableString(rnd, len); + std::string value = rnd->HumanReadableString(len); ASSERT_OK( blob_db_->PutWithTTL(WriteOptions(), Slice(key), Slice(value), ttl)); if (data != nullptr) { @@ -153,7 +154,7 @@ class BlobDBTest : public testing::Test { void PutRandomUntil(const std::string &key, uint64_t expiration, Random *rnd, std::map *data = nullptr) { int len = rnd->Next() % kMaxBlobSize + 1; - std::string value = test::RandomHumanReadableString(rnd, len); + std::string value = rnd->HumanReadableString(len); ASSERT_OK(blob_db_->PutUntil(WriteOptions(), Slice(key), Slice(value), expiration)); if (data != nullptr) { @@ -169,7 +170,7 @@ class BlobDBTest : public testing::Test { void PutRandom(DB *db, const std::string &key, Random *rnd, std::map *data = nullptr) { int len = rnd->Next() % kMaxBlobSize + 1; - std::string value = test::RandomHumanReadableString(rnd, len); + std::string value = rnd->HumanReadableString(len); ASSERT_OK(db->Put(WriteOptions(), Slice(key), Slice(value))); if (data != nullptr) { (*data)[key] = value; @@ -180,7 +181,7 @@ class BlobDBTest : public testing::Test { const std::string &key, Random *rnd, WriteBatch *batch, std::map *data = nullptr) { int len = rnd->Next() % kMaxBlobSize + 1; - std::string value = test::RandomHumanReadableString(rnd, len); + std::string value = rnd->HumanReadableString(len); ASSERT_OK(batch->Put(key, value)); 
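The two new Random members differ only in their alphabet: RandomString draws from the full printable ASCII range, while HumanReadableString sticks to lowercase letters, which is why the blob_db and readahead tests in this diff use it for values that may end up in logs. A small standalone sketch contrasting the two (seed and lengths are arbitrary):

#include <cassert>
#include <string>

#include "util/random.h"

int main() {
  ROCKSDB_NAMESPACE::Random rnd(301);

  // Full printable range: every byte is in ' ' .. '~' (offset by Uniform(95)).
  std::string printable = rnd.RandomString(32);
  for (char c : printable) {
    assert(c >= ' ' && c <= '~');
  }

  // Human-readable: only 'a' .. 'z' (offset by Uniform(26)).
  std::string readable = rnd.HumanReadableString(32);
  for (char c : readable) {
    assert(c >= 'a' && c <= 'z');
  }
  return 0;
}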
if (data != nullptr) { (*data)[key] = value; @@ -1079,7 +1080,7 @@ TEST_F(BlobDBTest, InlineSmallValues) { uint64_t expiration = rnd.Next() % kMaxExpiration; int len = is_small_value ? 50 : 200; std::string key = "key" + ToString(i); - std::string value = test::RandomHumanReadableString(&rnd, len); + std::string value = rnd.HumanReadableString(len); std::string blob_index; data[key] = value; SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1; @@ -1186,8 +1187,7 @@ TEST_F(BlobDBTest, UserCompactionFilter) { oss << "key" << std::setw(4) << std::setfill('0') << i; const std::string key(oss.str()); - const std::string value( - test::RandomHumanReadableString(&rnd, (int)value_size)); + const std::string value = rnd.HumanReadableString((int)value_size); const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1; ASSERT_OK(Put(key, value)); @@ -1264,8 +1264,7 @@ TEST_F(BlobDBTest, UserCompactionFilter_BlobIOError) { oss << "key" << std::setw(4) << std::setfill('0') << i; const std::string key(oss.str()); - const std::string value( - test::RandomHumanReadableString(&rnd, kValueSize)); + const std::string value = rnd.HumanReadableString(kValueSize); const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1; ASSERT_OK(Put(key, value)); @@ -1319,7 +1318,7 @@ TEST_F(BlobDBTest, FilterExpiredBlobIndex) { uint64_t expiration = rnd.Next() % kMaxExpiration; int len = is_small_value ? 10 : 200; std::string key = "key" + ToString(rnd.Next() % kNumKeys); - std::string value = test::RandomHumanReadableString(&rnd, len); + std::string value = rnd.HumanReadableString(len); if (!has_ttl) { if (is_small_value) { std::string blob_entry; @@ -1440,7 +1439,7 @@ TEST_F(BlobDBTest, FilterForFIFOEviction) { // Insert some small values that will be inlined. for (int i = 0; i < 1000; i++) { std::string key = "key" + ToString(i); - std::string value = test::RandomHumanReadableString(&rnd, 50); + std::string value = rnd.HumanReadableString(50); uint64_t ttl = rnd.Next() % 120 + 1; ASSERT_OK(PutWithTTL(key, value, ttl, &data)); if (ttl >= 60) { @@ -1548,8 +1547,7 @@ TEST_F(BlobDBTest, GarbageCollection) { oss << "key" << std::setw(4) << std::setfill('0') << i; const std::string key(oss.str()); - const std::string value( - test::RandomHumanReadableString(&rnd, kLargeValueSize)); + const std::string value = rnd.HumanReadableString(kLargeValueSize); const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1; ASSERT_OK(Put(key, value)); @@ -1566,8 +1564,7 @@ TEST_F(BlobDBTest, GarbageCollection) { // First, add a large TTL value will be written to its own TTL blob file. { const std::string key("key2000"); - const std::string value( - test::RandomHumanReadableString(&rnd, kLargeValueSize)); + const std::string value = rnd.HumanReadableString(kLargeValueSize); const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1; ASSERT_OK(PutUntil(key, value, kExpiration)); @@ -1583,8 +1580,7 @@ TEST_F(BlobDBTest, GarbageCollection) { // Now add a small TTL value (which will be inlined). { const std::string key("key3000"); - const std::string value( - test::RandomHumanReadableString(&rnd, kSmallValueSize)); + const std::string value = rnd.HumanReadableString(kSmallValueSize); const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1; ASSERT_OK(PutUntil(key, value, kExpiration)); @@ -1600,8 +1596,7 @@ TEST_F(BlobDBTest, GarbageCollection) { // value). 
{ const std::string key("key4000"); - const std::string value( - test::RandomHumanReadableString(&rnd, kSmallValueSize)); + const std::string value = rnd.HumanReadableString(kSmallValueSize); const SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1; ASSERT_OK(Put(key, value)); diff --git a/utilities/checkpoint/checkpoint_test.cc b/utilities/checkpoint/checkpoint_test.cc index eb6b0f12b..823a169bd 100644 --- a/utilities/checkpoint/checkpoint_test.cc +++ b/utilities/checkpoint/checkpoint_test.cc @@ -9,6 +9,7 @@ // Syncpoint prevents us building and running tests in release #ifndef ROCKSDB_LITE +#include "rocksdb/utilities/checkpoint.h" #ifndef OS_WIN #include @@ -16,17 +17,18 @@ #include #include #include + #include "db/db_impl/db_impl.h" +#include "file/file_util.h" #include "port/port.h" #include "port/stack_trace.h" #include "rocksdb/db.h" #include "rocksdb/env.h" -#include "rocksdb/utilities/checkpoint.h" #include "rocksdb/utilities/transaction_db.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "utilities/fault_injection_env.h" namespace ROCKSDB_NAMESPACE { class CheckpointTest : public testing::Test { @@ -69,7 +71,7 @@ class CheckpointTest : public testing::Test { env_->DeleteDir(snapshot_tmp_name); Reopen(options); export_path_ = test::PerThreadDBPath("/export"); - test::DestroyDir(env_, export_path_); + DestroyDir(env_, export_path_); cfh_reverse_comp_ = nullptr; metadata_ = nullptr; } @@ -94,7 +96,7 @@ class CheckpointTest : public testing::Test { options.db_paths.emplace_back(dbname_ + "_4", 0); EXPECT_OK(DestroyDB(dbname_, options)); EXPECT_OK(DestroyDB(snapshot_name_, options)); - test::DestroyDir(env_, export_path_); + DestroyDir(env_, export_path_); } // Return the current option configuration. @@ -347,7 +349,7 @@ TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) { export_path_, &metadata_)); verify_files_exported(*metadata_, 1); ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name()); - test::DestroyDir(env_, export_path_); + DestroyDir(env_, export_path_); delete metadata_; metadata_ = nullptr; @@ -358,7 +360,7 @@ TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) { export_path_, &metadata_)); verify_files_exported(*metadata_, 2); ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name()); - test::DestroyDir(env_, export_path_); + DestroyDir(env_, export_path_); delete metadata_; metadata_ = nullptr; delete checkpoint; @@ -404,7 +406,7 @@ TEST_F(CheckpointTest, ExportColumnFamilyNegativeTest) { ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(), export_path_, &metadata_), Status::InvalidArgument("Specified export_dir exists")); - test::DestroyDir(env_, export_path_); + DestroyDir(env_, export_path_); // Export with invalid directory specification export_path_ = ""; diff --git a/test_util/fault_injection_test_env.cc b/utilities/fault_injection_env.cc similarity index 99% rename from test_util/fault_injection_test_env.cc rename to utilities/fault_injection_env.cc index d9c2167b4..06ce6b352 100644 --- a/test_util/fault_injection_test_env.cc +++ b/utilities/fault_injection_env.cc @@ -11,10 +11,12 @@ // the last "sync". It then checks for data loss errors by purposely dropping // file data (or entire files) not protected by a "sync". 
-#include "test_util/fault_injection_test_env.h" +#include "utilities/fault_injection_env.h" + #include #include +#include "util/random.h" namespace ROCKSDB_NAMESPACE { // Assume a filename, and not a directory name like "/foo/bar/" diff --git a/test_util/fault_injection_test_env.h b/utilities/fault_injection_env.h similarity index 98% rename from test_util/fault_injection_test_env.h rename to utilities/fault_injection_env.h index 9cc33a8d3..0de73cbe2 100644 --- a/test_util/fault_injection_test_env.h +++ b/utilities/fault_injection_env.h @@ -17,16 +17,12 @@ #include #include -#include "db/version_set.h" -#include "env/mock_env.h" #include "file/filename.h" -#include "rocksdb/db.h" #include "rocksdb/env.h" #include "util/mutexlock.h" -#include "util/random.h" namespace ROCKSDB_NAMESPACE { - +class Random; class TestWritableFile; class FaultInjectionTestEnv; diff --git a/test_util/fault_injection_test_fs.cc b/utilities/fault_injection_fs.cc similarity index 99% rename from test_util/fault_injection_test_fs.cc rename to utilities/fault_injection_fs.cc index f08ab5ef9..5ee901523 100644 --- a/test_util/fault_injection_test_fs.cc +++ b/utilities/fault_injection_fs.cc @@ -14,11 +14,15 @@ // FileSystem related operations, by specify the "IOStatus Error", a specific // error can be returned when file system is not activated. -#include "test_util/fault_injection_test_fs.h" +#include "utilities/fault_injection_fs.h" + #include #include + +#include "env/composite_env_wrapper.h" #include "port/lang.h" #include "port/stack_trace.h" +#include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -501,8 +505,7 @@ IOStatus FaultInjectionTestFS::InjectError(ErrorOperation op, // The randomly generated string could be identical to the // original one, so retry do { - str = DBTestBase::RandomString(&ctx->rand, - static_cast(len)); + str = ctx->rand.RandomString(static_cast(len)); } while (str == std::string(scratch + offset, len)); memcpy(scratch + offset, str.data(), len); break; diff --git a/test_util/fault_injection_test_fs.h b/utilities/fault_injection_fs.h similarity index 99% rename from test_util/fault_injection_test_fs.h rename to utilities/fault_injection_fs.h index 1ac0d6269..07b7efdad 100644 --- a/test_util/fault_injection_test_fs.h +++ b/utilities/fault_injection_fs.h @@ -21,15 +21,11 @@ #include #include -#include "db/db_test_util.h" -#include "db/version_set.h" -#include "env/mock_env.h" #include "file/filename.h" #include "include/rocksdb/file_system.h" -#include "rocksdb/db.h" -#include "rocksdb/env.h" #include "util/mutexlock.h" #include "util/random.h" +#include "util/thread_local.h" namespace ROCKSDB_NAMESPACE { diff --git a/utilities/memory/memory_test.cc b/utilities/memory/memory_test.cc index 9e253df44..914900362 100644 --- a/utilities/memory/memory_test.cc +++ b/utilities/memory/memory_test.cc @@ -13,6 +13,7 @@ #include "table/block_based/block_based_table_factory.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "util/random.h" #include "util/string_util.h" namespace ROCKSDB_NAMESPACE { @@ -25,12 +26,6 @@ class MemoryTest : public testing::Test { std::string GetDBName(int id) { return kDbDir + "db_" + ToString(id); } - std::string RandomString(int len) { - std::string r; - test::RandomString(&rnd_, len, &r); - return r; - } - void UpdateUsagesHistory(const std::vector& dbs) { std::map usage_by_type; ASSERT_OK(GetApproximateMemoryUsageByType(dbs, &usage_by_type)); @@ -122,9 +117,9 @@ TEST_F(MemoryTest, SharedBlockCacheTotal) { for (int p = 0; p < 
opt.min_write_buffer_number_to_merge / 2; ++p) { for (int i = 0; i < kNumDBs; ++i) { for (int j = 0; j < 100; ++j) { - keys_by_db[i].emplace_back(RandomString(kKeySize)); + keys_by_db[i].emplace_back(rnd_.RandomString(kKeySize)); dbs[i]->Put(WriteOptions(), keys_by_db[i].back(), - RandomString(kValueSize)); + rnd_.RandomString(kValueSize)); } dbs[i]->Flush(FlushOptions()); } @@ -181,8 +176,8 @@ TEST_F(MemoryTest, MemTableAndTableReadersTotal) { for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) { for (int i = 0; i < kNumDBs; ++i) { for (auto* handle : vec_handles[i]) { - dbs[i]->Put(WriteOptions(), handle, RandomString(kKeySize), - RandomString(kValueSize)); + dbs[i]->Put(WriteOptions(), handle, rnd_.RandomString(kKeySize), + rnd_.RandomString(kValueSize)); UpdateUsagesHistory(dbs); } } @@ -208,7 +203,7 @@ TEST_F(MemoryTest, MemTableAndTableReadersTotal) { for (int j = 0; j < 100; ++j) { std::string value; - dbs[i]->Get(ReadOptions(), RandomString(kKeySize), &value); + dbs[i]->Get(ReadOptions(), rnd_.RandomString(kKeySize), &value); } UpdateUsagesHistory(dbs); diff --git a/utilities/option_change_migration/option_change_migration_test.cc b/utilities/option_change_migration/option_change_migration_test.cc index 5bc883ff7..a03cabac9 100644 --- a/utilities/option_change_migration/option_change_migration_test.cc +++ b/utilities/option_change_migration/option_change_migration_test.cc @@ -8,9 +8,13 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "rocksdb/utilities/option_change_migration.h" + #include + #include "db/db_test_util.h" #include "port/stack_trace.h" +#include "util/random.h" + namespace ROCKSDB_NAMESPACE { class DBOptionChangeMigrationTests @@ -200,7 +204,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate3) { Random rnd(301); for (int num = 0; num < 20; num++) { for (int i = 0; i < 50; i++) { - ASSERT_OK(Put(Key(num * 100 + i), RandomString(&rnd, 900))); + ASSERT_OK(Put(Key(num * 100 + i), rnd.RandomString(900))); } Flush(); dbfull()->TEST_WaitForCompact(); @@ -274,7 +278,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate4) { Random rnd(301); for (int num = 0; num < 20; num++) { for (int i = 0; i < 50; i++) { - ASSERT_OK(Put(Key(num * 100 + i), RandomString(&rnd, 900))); + ASSERT_OK(Put(Key(num * 100 + i), rnd.RandomString(900))); } Flush(); dbfull()->TEST_WaitForCompact(); @@ -370,7 +374,7 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) { Random rnd(301); for (int num = 0; num < 20; num++) { for (int i = 0; i < 50; i++) { - ASSERT_OK(Put(Key(num * 100 + i), RandomString(&rnd, 900))); + ASSERT_OK(Put(Key(num * 100 + i), rnd.RandomString(900))); } } Flush(); diff --git a/utilities/persistent_cache/persistent_cache_test.h b/utilities/persistent_cache/persistent_cache_test.h index 47611ecd3..394c64964 100644 --- a/utilities/persistent_cache/persistent_cache_test.h +++ b/utilities/persistent_cache/persistent_cache_test.h @@ -24,6 +24,7 @@ #include "rocksdb/cache.h" #include "table/block_based/block_builder.h" #include "test_util/testharness.h" +#include "util/random.h" #include "utilities/persistent_cache/volatile_tier_impl.h" namespace ROCKSDB_NAMESPACE { @@ -255,7 +256,7 @@ class PersistentCacheDBTest : public DBTestBase { std::string str; for (int i = 0; i < num_iter; i++) { if (i % 4 == 0) { // high compression ratio - str = RandomString(&rnd, 1000); + str = rnd.RandomString(1000); } values->push_back(str); ASSERT_OK(Put(1, Key(i), (*values)[i])); diff --git a/utilities/transactions/transaction_lock_mgr_test.cc 
b/utilities/transactions/transaction_lock_mgr_test.cc index f42caed86..e67b453ca 100644 --- a/utilities/transactions/transaction_lock_mgr_test.cc +++ b/utilities/transactions/transaction_lock_mgr_test.cc @@ -6,6 +6,8 @@ #ifndef ROCKSDB_LITE #include "utilities/transactions/transaction_lock_mgr.h" + +#include "file/file_util.h" #include "port/port.h" #include "port/stack_trace.h" #include "rocksdb/utilities/transaction_db.h" @@ -36,7 +38,7 @@ class TransactionLockMgrTest : public testing::Test { void TearDown() override { delete db_; - EXPECT_OK(test::DestroyDir(env_, db_dir_)); + EXPECT_OK(DestroyDir(env_, db_dir_)); } PessimisticTransaction* NewTxn( diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index f153b9a14..30058a757 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -13,25 +13,24 @@ #include #include "db/db_impl/db_impl.h" +#include "port/port.h" #include "rocksdb/db.h" #include "rocksdb/options.h" #include "rocksdb/perf_context.h" #include "rocksdb/utilities/transaction.h" #include "rocksdb/utilities/transaction_db.h" #include "table/mock_table.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" #include "test_util/testharness.h" #include "test_util/testutil.h" #include "test_util/transaction_test_util.h" #include "util/random.h" #include "util/string_util.h" +#include "utilities/fault_injection_env.h" #include "utilities/merge_operators.h" #include "utilities/merge_operators/string_append/stringappend.h" #include "utilities/transactions/pessimistic_transaction_db.h" -#include "port/port.h" - using std::string; namespace ROCKSDB_NAMESPACE { diff --git a/utilities/transactions/transaction_test.h b/utilities/transactions/transaction_test.h index 4fc0422c7..2f3ab2ba8 100644 --- a/utilities/transactions/transaction_test.h +++ b/utilities/transactions/transaction_test.h @@ -12,25 +12,24 @@ #include #include "db/db_impl/db_impl.h" +#include "port/port.h" #include "rocksdb/db.h" #include "rocksdb/options.h" #include "rocksdb/utilities/transaction.h" #include "rocksdb/utilities/transaction_db.h" #include "table/mock_table.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" #include "test_util/testharness.h" #include "test_util/testutil.h" #include "test_util/transaction_test_util.h" #include "util/random.h" #include "util/string_util.h" +#include "utilities/fault_injection_env.h" #include "utilities/merge_operators.h" #include "utilities/merge_operators/string_append/stringappend.h" #include "utilities/transactions/pessimistic_transaction_db.h" #include "utilities/transactions/write_unprepared_txn_db.h" -#include "port/port.h" - namespace ROCKSDB_NAMESPACE { // Return true if the ith bit is set in combination represented by comb diff --git a/utilities/transactions/write_prepared_transaction_test.cc b/utilities/transactions/write_prepared_transaction_test.cc index 8adca08e3..a999c71d4 100644 --- a/utilities/transactions/write_prepared_transaction_test.cc +++ b/utilities/transactions/write_prepared_transaction_test.cc @@ -5,8 +5,6 @@ #ifndef ROCKSDB_LITE -#include "utilities/transactions/transaction_test.h" - #include #include #include @@ -16,6 +14,7 @@ #include "db/db_impl/db_impl.h" #include "db/dbformat.h" +#include "port/port.h" #include "rocksdb/db.h" #include "rocksdb/options.h" #include "rocksdb/types.h" @@ -23,7 +22,6 @@ #include "rocksdb/utilities/transaction.h" #include "rocksdb/utilities/transaction_db.h" 
#include "table/mock_table.h" -#include "test_util/fault_injection_test_env.h" #include "test_util/sync_point.h" #include "test_util/testharness.h" #include "test_util/testutil.h" @@ -31,13 +29,13 @@ #include "util/mutexlock.h" #include "util/random.h" #include "util/string_util.h" +#include "utilities/fault_injection_env.h" #include "utilities/merge_operators.h" #include "utilities/merge_operators/string_append/stringappend.h" #include "utilities/transactions/pessimistic_transaction_db.h" +#include "utilities/transactions/transaction_test.h" #include "utilities/transactions/write_prepared_txn_db.h" -#include "port/port.h" - using std::string; namespace ROCKSDB_NAMESPACE { @@ -277,8 +275,8 @@ TEST(WriteBatchWithIndex, SubBatchCnt) { for (size_t k = 0; k < 10; k++) { // 10 key per batch size_t ki = static_cast(rnd.Uniform(TOTAL_KEYS)); Slice key = Slice(keys[ki]); - std::string buffer; - Slice value = Slice(test::RandomString(&rnd, 16, &buffer)); + std::string tmp = rnd.RandomString(16); + Slice value = Slice(tmp); rndbatch.Put(key, value); } SubBatchCounter batch_counter(comparators);