Remove racially charged terms "whitelist" and "blacklist" (#7008)

Summary:
We don't need them.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7008

Test Plan: "make check" and ensure "make crash_test" starts

Reviewed By: ajkr

Differential Revision: D22143838

Pulled By: pdillinger

fbshipit-source-id: 72c8e16603abc59f4954e304466bc4dc1f58f94e
Author: Peter Dillinger (committed by Facebook GitHub Bot)
parent a607f3efaa
commit 88b4210701
Changed files (12; lines changed in parentheses):
  Makefile (18)
  db/db_memtable_test.cc (4)
  db_stress_tool/db_stress_common.h (2)
  db_stress_tool/db_stress_gflags.cc (4)
  db_stress_tool/db_stress_test_base.cc (4)
  db_stress_tool/db_stress_tool.cc (2)
  env/io_posix.cc (12)
  options/options_settable_test.cc (68)
  test_util/sync_point.cc (2)
  test_util/sync_point.h (2)
  test_util/sync_point_impl.cc (2)
  tools/db_crashtest.py (4)

@@ -675,9 +675,9 @@ ifdef COMPILE_WITH_UBSAN
TESTS := $(shell echo $(TESTS) | sed 's/\boptions_settable_test\b//g')
endif
ifdef ASSERT_STATUS_CHECKED
# This is a new check for which we will add support incrementally. The
# whitelist can be removed once support is fully added.
TESTS_WHITELIST = \
# This is a new check for which we will add support incrementally. This
# list can be removed once support is fully added.
TESTS_PASSING_ASC = \
arena_test \
autovector_test \
blob_file_addition_test \
@@ -723,8 +723,8 @@ ifdef ASSERT_STATUS_CHECKED
work_queue_test \
write_controller_test \
TESTS := $(filter $(TESTS_WHITELIST),$(TESTS))
PARALLEL_TEST := $(filter $(TESTS_WHITELIST),$(PARALLEL_TEST))
TESTS := $(filter $(TESTS_PASSING_ASC),$(TESTS))
PARALLEL_TEST := $(filter $(TESTS_PASSING_ASC),$(PARALLEL_TEST))
endif
SUBSET := $(TESTS)
ifdef ROCKSDBTESTS_START
@@ -978,7 +978,7 @@ J ?= 100%
# Use this regexp to select the subset of tests whose names match.
tests-regexp = .
ifeq ($(PRINT_PARALLEL_OUTPUTS), 1)
parallel_com = '{}'
else
parallel_com = '{} >& t/log-{/}'
@@ -1002,7 +1002,7 @@ check_0:
awk '{ if ($$7 != 0 || $$8 != 0) { if ($$7 == "Exitval") { h = $$0; } else { if (!f) print h; print; f = 1 } } } END { if(f) exit 1; }' < LOG ; \
if [ $$parallel_retcode -ne 0 ] ; then exit 1 ; fi
valgrind-blacklist-regexp = InlineSkipTest.ConcurrentInsert|TransactionStressTest.DeadlockStress|DBCompactionTest.SuggestCompactRangeNoTwoLevel0Compactions|BackupableDBTest.RateLimiting|DBTest.CloseSpeedup|DBTest.ThreadStatusFlush|DBTest.RateLimitingTest|DBTest.EncodeDecompressedBlockSizeTest|FaultInjectionTest.UninstalledCompaction|HarnessTest.Randomized|ExternalSSTFileTest.CompactDuringAddFileRandom|ExternalSSTFileTest.IngestFileWithGlobalSeqnoRandomized|MySQLStyleTransactionTest.TransactionStressTest
valgrind-exclude-regexp = InlineSkipTest.ConcurrentInsert|TransactionStressTest.DeadlockStress|DBCompactionTest.SuggestCompactRangeNoTwoLevel0Compactions|BackupableDBTest.RateLimiting|DBTest.CloseSpeedup|DBTest.ThreadStatusFlush|DBTest.RateLimitingTest|DBTest.EncodeDecompressedBlockSizeTest|FaultInjectionTest.UninstalledCompaction|HarnessTest.Randomized|ExternalSSTFileTest.CompactDuringAddFileRandom|ExternalSSTFileTest.IngestFileWithGlobalSeqnoRandomized|MySQLStyleTransactionTest.TransactionStressTest
.PHONY: valgrind_check_0
valgrind_check_0:
@@ -1017,7 +1017,7 @@ valgrind_check_0:
} \
| $(prioritize_long_running_tests) \
| grep -E '$(tests-regexp)' \
| grep -E -v '$(valgrind-blacklist-regexp)' \
| grep -E -v '$(valgrind-exclude-regexp)' \
| build_tools/gnu_parallel -j$(J) --plain --joblog=LOG $$eta --gnu \
'(if [[ "{}" == "./"* ]] ; then $(DRIVER) {}; else {}; fi) ' \
'>& t/valgrind_log-{/}'
@@ -1059,7 +1059,7 @@ ifndef ASSERT_STATUS_CHECKED # not yet working with these tests
sh tools/rocksdb_dump_test.sh
endif
endif
endif
endif
ifndef SKIP_FORMAT_BUCK_CHECKS
$(MAKE) check-format
$(MAKE) check-buck-targets

@@ -303,14 +303,14 @@ TEST_F(DBMemTableTest, InsertWithHint) {
ASSERT_EQ(hint_bar, rep->last_hint_in());
ASSERT_EQ(hint_bar, rep->last_hint_out());
ASSERT_EQ(5, rep->num_insert_with_hint());
ASSERT_OK(Put("whitelisted", "vvv"));
ASSERT_OK(Put("NotInPrefixDomain", "vvv"));
ASSERT_EQ(5, rep->num_insert_with_hint());
ASSERT_EQ("foo_v1", Get("foo_k1"));
ASSERT_EQ("foo_v2", Get("foo_k2"));
ASSERT_EQ("foo_v3", Get("foo_k3"));
ASSERT_EQ("bar_v1", Get("bar_k1"));
ASSERT_EQ("bar_v2", Get("bar_k2"));
ASSERT_EQ("vvv", Get("whitelisted"));
ASSERT_EQ("vvv", Get("NotInPrefixDomain"));
}
TEST_F(DBMemTableTest, ColumnFamilyId) {

@@ -161,7 +161,7 @@ DECLARE_bool(statistics);
DECLARE_bool(sync);
DECLARE_bool(use_fsync);
DECLARE_int32(kill_random_test);
DECLARE_string(kill_prefix_blacklist);
DECLARE_string(kill_exclude_prefixes);
DECLARE_bool(disable_wal);
DECLARE_uint64(recycle_log_file_num);
DECLARE_int64(target_file_size_base);

@@ -414,10 +414,10 @@ static const bool FLAGS_kill_random_test_dummy __attribute__((__unused__)) =
RegisterFlagValidator(&FLAGS_kill_random_test, &ValidateInt32Positive);
extern int rocksdb_kill_odds;
DEFINE_string(kill_prefix_blacklist, "",
DEFINE_string(kill_exclude_prefixes, "",
"If non-empty, kill points with prefix in the list given will be"
" skipped. Items are comma-separated.");
extern std::vector<std::string> rocksdb_kill_prefix_blacklist;
extern std::vector<std::string> rocksdb_kill_exclude_prefixes;
DEFINE_bool(disable_wal, false, "If true, do not write WAL for write.");

@@ -1717,9 +1717,9 @@ void StressTest::PrintEnv() const {
fprintf(stdout, "Memtablerep : %s\n", memtablerep);
fprintf(stdout, "Test kill odd : %d\n", rocksdb_kill_odds);
if (!rocksdb_kill_prefix_blacklist.empty()) {
if (!rocksdb_kill_exclude_prefixes.empty()) {
fprintf(stdout, "Skipping kill points prefixes:\n");
for (auto& p : rocksdb_kill_prefix_blacklist) {
for (auto& p : rocksdb_kill_exclude_prefixes) {
fprintf(stdout, " %s\n", p.c_str());
}
}

@@ -247,7 +247,7 @@ int db_stress_tool(int argc, char** argv) {
}
rocksdb_kill_odds = FLAGS_kill_random_test;
rocksdb_kill_prefix_blacklist = SplitString(FLAGS_kill_prefix_blacklist);
rocksdb_kill_exclude_prefixes = SplitString(FLAGS_kill_exclude_prefixes);
unsigned int levels = FLAGS_max_key_len;
std::vector<std::string> weights;
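(Aside on how the renamed flag is consumed: FLAGS_kill_exclude_prefixes holds a comma-separated list that SplitString turns into the rocksdb_kill_exclude_prefixes vector. A minimal sketch of such a splitter follows; SplitCommaSeparated is an illustrative stand-in, not db_stress's actual SplitString implementation.)

#include <sstream>
#include <string>
#include <vector>

// Illustrative stand-in for db_stress's SplitString: turns a flag value such
// as "WritableFileWriter::Append,WritableFileWriter::WriteBuffered" into a
// vector of prefixes, skipping empty items.
std::vector<std::string> SplitCommaSeparated(const std::string& value) {
  std::vector<std::string> prefixes;
  std::stringstream ss(value);
  std::string item;
  while (std::getline(ss, item, ',')) {
    if (!item.empty()) {
      prefixes.push_back(item);
    }
  }
  return prefixes;
}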

env/io_posix.cc

@@ -150,11 +150,11 @@ bool PosixPositionedWrite(int fd, const char* buf, size_t nbyte, off_t offset) {
#endif
bool IsSyncFileRangeSupported(int fd) {
// The approach taken in this function is to build a blacklist of cases where
// we know `sync_file_range` definitely will not work properly despite passing
// the compile-time check (`ROCKSDB_RANGESYNC_PRESENT`). If we are unsure, or
// if any of the checks fail in unexpected ways, we allow `sync_file_range` to
// be used. This way should minimize risk of impacting existing use cases.
// This function tracks and checks for cases where we know `sync_file_range`
// definitely will not work properly despite passing the compile-time check
// (`ROCKSDB_RANGESYNC_PRESENT`). If we are unsure, or if any of the checks
// fail in unexpected ways, we allow `sync_file_range` to be used. This way
// should minimize risk of impacting existing use cases.
struct statfs buf;
int ret = fstatfs(fd, &buf);
assert(ret == 0);
@@ -176,7 +176,7 @@ bool IsSyncFileRangeSupported(int fd) {
// ("Function not implemented").
return false;
}
// None of the cases on the blacklist matched, so allow `sync_file_range` use.
// None of the known cases matched, so allow `sync_file_range` use.
return true;
}
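(The rewritten comment above keeps the same logic: only filesystems known to misbehave are refused, and anything uncertain falls through to allowing sync_file_range. A minimal sketch of that pattern follows, assuming Linux headers and a locally defined ZFS magic constant; the real IsSyncFileRangeSupported in env/io_posix.cc checks more cases and guards its includes per platform.)

#include <sys/vfs.h>  // fstatfs and struct statfs on Linux

// Assumed ZFS filesystem magic number; io_posix.cc defines a similar constant
// when the system headers do not provide one.
constexpr long kZfsSuperMagic = 0x2fc12fc1;

// Sketch of the exclusion pattern: return false only for cases known to be
// broken; when unsure, allow sync_file_range to be used.
bool SyncFileRangeLooksSupported(int fd) {
  struct statfs buf;
  if (fstatfs(fd, &buf) != 0) {
    // Could not identify the filesystem; err on the side of allowing.
    return true;
  }
  if (buf.f_type == kZfsSuperMagic) {
    // ZFS on Linux is known not to implement sync_file_range properly.
    return false;
  }
  // No known-bad case matched.
  return true;
}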

@@ -42,10 +42,10 @@ const char kSpecialChar = 'z';
typedef std::vector<std::pair<size_t, size_t>> OffsetGap;
void FillWithSpecialChar(char* start_ptr, size_t total_size,
const OffsetGap& blacklist,
const OffsetGap& excluded,
char special_char = kSpecialChar) {
size_t offset = 0;
for (auto& pair : blacklist) {
for (auto& pair : excluded) {
std::memset(start_ptr + offset, special_char, pair.first - offset);
offset = pair.first + pair.second;
}
@@ -53,10 +53,10 @@ void FillWithSpecialChar(char* start_ptr, size_t total_size,
}
int NumUnsetBytes(char* start_ptr, size_t total_size,
const OffsetGap& blacklist) {
const OffsetGap& excluded) {
int total_unset_bytes_base = 0;
size_t offset = 0;
for (auto& pair : blacklist) {
for (auto& pair : excluded) {
for (char* ptr = start_ptr + offset; ptr < start_ptr + pair.first; ptr++) {
if (*ptr == kSpecialChar) {
total_unset_bytes_base++;
@@ -72,11 +72,11 @@ int NumUnsetBytes(char* start_ptr, size_t total_size,
return total_unset_bytes_base;
}
// Return true iff two structs are the same except blacklist fields.
// Return true iff two structs are the same except excluded fields.
bool CompareBytes(char* start_ptr1, char* start_ptr2, size_t total_size,
const OffsetGap& blacklist) {
const OffsetGap& excluded) {
size_t offset = 0;
for (auto& pair : blacklist) {
for (auto& pair : excluded) {
for (; offset < pair.first; offset++) {
if (*(start_ptr1 + offset) != *(start_ptr2 + offset)) {
return false;
@@ -99,11 +99,11 @@ bool CompareBytes(char* start_ptr1, char* start_ptr2, size_t total_size,
// GetBlockBasedTableOptionsFromString() and add the option to the input string
// passed to the GetBlockBasedTableOptionsFromString() in this test.
// If it is a complicated type, you also need to add the field to
// kBbtoBlacklist, and maybe add customized verification for it.
// kBbtoExcluded, and maybe add customized verification for it.
TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
// Items in the form of <offset, size>. Need to be in ascending order
// and not overlapping. Needs to be updated if a new pointer-option is added.
const OffsetGap kBbtoBlacklist = {
const OffsetGap kBbtoExcluded = {
{offsetof(struct BlockBasedTableOptions, flush_block_policy_factory),
sizeof(std::shared_ptr<FlushBlockPolicyFactory>)},
{offsetof(struct BlockBasedTableOptions, block_cache),
@@ -128,20 +128,20 @@ TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
// copy a well-constructed struct to this memory and see how many special
// bytes are left.
BlockBasedTableOptions* bbto = new (bbto_ptr) BlockBasedTableOptions();
FillWithSpecialChar(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoBlacklist);
FillWithSpecialChar(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoExcluded);
// It is based on the compiler behavior that padding bytes are not changed
// when copying the struct. It's prone to failure when compiler behavior
// changes. We verify there are unset bytes to detect the case.
*bbto = BlockBasedTableOptions();
int unset_bytes_base =
NumUnsetBytes(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoBlacklist);
NumUnsetBytes(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoExcluded);
ASSERT_GT(unset_bytes_base, 0);
bbto->~BlockBasedTableOptions();
// Construct the base option passed into
// GetBlockBasedTableOptionsFromString().
bbto = new (bbto_ptr) BlockBasedTableOptions();
FillWithSpecialChar(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoBlacklist);
FillWithSpecialChar(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoExcluded);
// This option is not settable:
bbto->use_delta_encoding = true;
@@ -149,7 +149,7 @@ TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
BlockBasedTableOptions* new_bbto =
new (new_bbto_ptr) BlockBasedTableOptions();
FillWithSpecialChar(new_bbto_ptr, sizeof(BlockBasedTableOptions),
kBbtoBlacklist);
kBbtoExcluded);
// Need to update the option string if a new option is added.
ASSERT_OK(GetBlockBasedTableOptionsFromString(
@@ -178,7 +178,7 @@ TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
ASSERT_EQ(unset_bytes_base,
NumUnsetBytes(new_bbto_ptr, sizeof(BlockBasedTableOptions),
kBbtoBlacklist));
kBbtoExcluded));
ASSERT_TRUE(new_bbto->block_cache.get() != nullptr);
ASSERT_TRUE(new_bbto->block_cache_compressed.get() != nullptr);
@@ -198,9 +198,9 @@ TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
// GetDBOptionsFromString() and add the option to the input string passed to
// DBOptionsFromString()in this test.
// If it is a complicated type, you also need to add the field to
// kDBOptionsBlacklist, and maybe add customized verification for it.
// kDBOptionsExcluded, and maybe add customized verification for it.
TEST_F(OptionsSettableTest, DBOptionsAllFieldsSettable) {
const OffsetGap kDBOptionsBlacklist = {
const OffsetGap kDBOptionsExcluded = {
{offsetof(struct DBOptions, env), sizeof(Env*)},
{offsetof(struct DBOptions, rate_limiter),
sizeof(std::shared_ptr<RateLimiter>)},
@@ -228,22 +228,22 @@ TEST_F(OptionsSettableTest, DBOptionsAllFieldsSettable) {
// copy a well-constructed struct to this memory and see how many special
// bytes are left.
DBOptions* options = new (options_ptr) DBOptions();
FillWithSpecialChar(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
FillWithSpecialChar(options_ptr, sizeof(DBOptions), kDBOptionsExcluded);
// It is based on the compiler behavior that padding bytes are not changed
// when copying the struct. It's prone to failure when compiler behavior
// changes. We verify there are unset bytes to detect the case.
*options = DBOptions();
int unset_bytes_base =
NumUnsetBytes(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
NumUnsetBytes(options_ptr, sizeof(DBOptions), kDBOptionsExcluded);
ASSERT_GT(unset_bytes_base, 0);
options->~DBOptions();
options = new (options_ptr) DBOptions();
FillWithSpecialChar(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
FillWithSpecialChar(options_ptr, sizeof(DBOptions), kDBOptionsExcluded);
char* new_options_ptr = new char[sizeof(DBOptions)];
DBOptions* new_options = new (new_options_ptr) DBOptions();
FillWithSpecialChar(new_options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
FillWithSpecialChar(new_options_ptr, sizeof(DBOptions), kDBOptionsExcluded);
// Need to update the option string if a new option is added.
ASSERT_OK(
@@ -327,7 +327,7 @@ TEST_F(OptionsSettableTest, DBOptionsAllFieldsSettable) {
new_options));
ASSERT_EQ(unset_bytes_base, NumUnsetBytes(new_options_ptr, sizeof(DBOptions),
kDBOptionsBlacklist));
kDBOptionsExcluded));
options->~DBOptions();
new_options->~DBOptions();
@@ -349,12 +349,12 @@ inline int offset_of(T1 T2::*member) {
// GetColumnFamilyOptionsFromString() and add the option to the input
// string passed to GetColumnFamilyOptionsFromString()in this test.
// If it is a complicated type, you also need to add the field to
// kColumnFamilyOptionsBlacklist, and maybe add customized verification
// kColumnFamilyOptionsExcluded, and maybe add customized verification
// for it.
TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
// options in the blacklist need to appear in the same order as in
// options in the excluded set need to appear in the same order as in
// ColumnFamilyOptions.
const OffsetGap kColumnFamilyOptionsBlacklist = {
const OffsetGap kColumnFamilyOptionsExcluded = {
{offset_of(&ColumnFamilyOptions::inplace_callback),
sizeof(UpdateStatus(*)(char*, uint32_t*, Slice, std::string*))},
{offset_of(
@@ -393,7 +393,7 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
// bytes are left.
ColumnFamilyOptions* options = new (options_ptr) ColumnFamilyOptions();
FillWithSpecialChar(options_ptr, sizeof(ColumnFamilyOptions),
kColumnFamilyOptionsBlacklist);
kColumnFamilyOptionsExcluded);
// It is based on the compiler behavior that padding bytes are not changed
// when copying the struct. It's prone to failure when compiler behavior
@@ -405,13 +405,13 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
options->max_mem_compaction_level = 0;
int unset_bytes_base = NumUnsetBytes(options_ptr, sizeof(ColumnFamilyOptions),
kColumnFamilyOptionsBlacklist);
kColumnFamilyOptionsExcluded);
ASSERT_GT(unset_bytes_base, 0);
options->~ColumnFamilyOptions();
options = new (options_ptr) ColumnFamilyOptions();
FillWithSpecialChar(options_ptr, sizeof(ColumnFamilyOptions),
kColumnFamilyOptionsBlacklist);
kColumnFamilyOptionsExcluded);
// Following options are not settable through
// GetColumnFamilyOptionsFromString():
@@ -427,7 +427,7 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
ColumnFamilyOptions* new_options =
new (new_options_ptr) ColumnFamilyOptions();
FillWithSpecialChar(new_options_ptr, sizeof(ColumnFamilyOptions),
kColumnFamilyOptionsBlacklist);
kColumnFamilyOptionsExcluded);
// Need to update the option string if a new option is added.
ASSERT_OK(GetColumnFamilyOptionsFromString(
@@ -490,7 +490,7 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
ASSERT_EQ(unset_bytes_base,
NumUnsetBytes(new_options_ptr, sizeof(ColumnFamilyOptions),
kColumnFamilyOptionsBlacklist));
kColumnFamilyOptionsExcluded));
ColumnFamilyOptions rnd_filled_options = *new_options;
@@ -502,7 +502,7 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
// Test copying to mutable and immutable options and copy back the mutable
// part.
const OffsetGap kMutableCFOptionsBlacklist = {
const OffsetGap kMutableCFOptionsExcluded = {
{offset_of(&MutableCFOptions::prefix_extractor),
sizeof(std::shared_ptr<const SliceTransform>)},
{offset_of(&MutableCFOptions::max_bytes_for_level_multiplier_additional),
@@ -517,16 +517,16 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
const char kMySpecialChar = 'x';
char* mcfo1_ptr = new char[sizeof(MutableCFOptions)];
FillWithSpecialChar(mcfo1_ptr, sizeof(MutableCFOptions),
kMutableCFOptionsBlacklist, kMySpecialChar);
kMutableCFOptionsExcluded, kMySpecialChar);
char* mcfo2_ptr = new char[sizeof(MutableCFOptions)];
FillWithSpecialChar(mcfo2_ptr, sizeof(MutableCFOptions),
kMutableCFOptionsBlacklist, kMySpecialChar);
kMutableCFOptionsExcluded, kMySpecialChar);
// A clean ColumnFamilyOptions is constructed after its memory is filled with
// the same special char as the initial one, so that the padding bytes are the
// same.
char* cfo_clean_ptr = new char[sizeof(ColumnFamilyOptions)];
FillWithSpecialChar(cfo_clean_ptr, sizeof(ColumnFamilyOptions),
kColumnFamilyOptionsBlacklist);
kColumnFamilyOptionsExcluded);
rnd_filled_options.num_levels = 66;
ColumnFamilyOptions* cfo_clean = new (cfo_clean_ptr) ColumnFamilyOptions();
@@ -536,7 +536,7 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
MutableCFOptions* mcfo2 = new (mcfo2_ptr) MutableCFOptions(cfo_back);
ASSERT_TRUE(CompareBytes(mcfo1_ptr, mcfo2_ptr, sizeof(MutableCFOptions),
kMutableCFOptionsBlacklist));
kMutableCFOptionsExcluded));
cfo_clean->~ColumnFamilyOptions();
mcfo1->~MutableCFOptions();
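(All of the options_settable_test changes above revolve around one technique: fill the options struct's memory with a sentinel byte, skip the byte ranges of fields that cannot be set from a string (the renamed *Excluded lists), then count how many sentinel bytes survive after the struct is written through the string API. The toy program below sketches that idea; ToyOptions and the helper names are illustrative and are not the test's actual code.)

#include <cstddef>
#include <cstdio>
#include <cstring>
#include <new>
#include <utility>
#include <vector>

// Toy illustration of the sentinel-byte technique in options_settable_test.cc.
// <offset, size> pairs name excluded byte ranges.
using OffsetGap = std::vector<std::pair<size_t, size_t>>;
constexpr char kSentinel = 'z';

struct ToyOptions {
  int max_files = 0;
  void* cache = nullptr;  // pointer field: excluded from the byte check
  bool paranoid = false;
};

// Fill the buffer with the sentinel, skipping the excluded ranges.
void FillSkippingExcluded(char* p, size_t total, const OffsetGap& excluded) {
  size_t offset = 0;
  for (const auto& gap : excluded) {
    std::memset(p + offset, kSentinel, gap.first - offset);
    offset = gap.first + gap.second;
  }
  std::memset(p + offset, kSentinel, total - offset);
}

// Count bytes outside the excluded ranges that still hold the sentinel.
int CountSentinelBytes(const char* p, size_t total, const OffsetGap& excluded) {
  int count = 0;
  size_t offset = 0;
  auto scan = [&](size_t end) {
    for (; offset < end; ++offset) {
      if (p[offset] == kSentinel) ++count;
    }
  };
  for (const auto& gap : excluded) {
    scan(gap.first);
    offset = gap.first + gap.second;
  }
  scan(total);
  return count;
}

int main() {
  const OffsetGap excluded = {
      {offsetof(ToyOptions, cache), sizeof(void*)},
  };
  alignas(ToyOptions) char buf[sizeof(ToyOptions)];
  FillSkippingExcluded(buf, sizeof(ToyOptions), excluded);
  // Constructing ToyOptions in place writes its members but, depending on the
  // compiler, may leave padding bytes holding the sentinel; the real test
  // compares this count before and after parsing an options string.
  new (buf) ToyOptions();
  std::printf("surviving sentinel bytes: %d\n",
              CountSentinelBytes(buf, sizeof(ToyOptions), excluded));
  return 0;
}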

@@ -7,7 +7,7 @@
#include "test_util/sync_point_impl.h"
int rocksdb_kill_odds = 0;
std::vector<std::string> rocksdb_kill_prefix_blacklist;
std::vector<std::string> rocksdb_kill_exclude_prefixes;
#ifndef NDEBUG
namespace ROCKSDB_NAMESPACE {

@@ -17,7 +17,7 @@
// If non-zero, kill at various points in source code with probability 1/this
extern int rocksdb_kill_odds;
// If kill point has a prefix on this list, will skip killing.
extern std::vector<std::string> rocksdb_kill_prefix_blacklist;
extern std::vector<std::string> rocksdb_kill_exclude_prefixes;
#ifdef NDEBUG
// empty in release build

@@ -10,7 +10,7 @@ namespace ROCKSDB_NAMESPACE {
void TestKillRandom(std::string kill_point, int odds,
const std::string& srcfile, int srcline) {
for (auto& p : rocksdb_kill_prefix_blacklist) {
for (auto& p : rocksdb_kill_exclude_prefixes) {
if (kill_point.substr(0, p.length()) == p) {
return;
}
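(The hunk above is where the renamed vector is consumed: a kill point is skipped when its name starts with any excluded prefix. A standalone sketch of that check, using hypothetical names rather than the actual sync-point plumbing:)

#include <string>
#include <vector>

// Hypothetical standalone version of the check in TestKillRandom: return true
// if the kill point should be skipped because its name begins with one of the
// excluded prefixes, e.g. any kill point whose name begins with
// "WritableFileWriter::Append".
bool KillPointExcluded(const std::string& kill_point,
                       const std::vector<std::string>& exclude_prefixes) {
  for (const auto& prefix : exclude_prefixes) {
    if (kill_point.compare(0, prefix.size(), prefix) == 0) {
      return true;
    }
  }
  return false;
}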

@@ -468,7 +468,7 @@ def whitebox_crash_main(args, unknown_args):
my_kill_odd = kill_random_test // 10 + 1
additional_opts.update({
"kill_random_test": my_kill_odd,
"kill_prefix_blacklist": "WritableFileWriter::Append,"
"kill_exclude_prefixes": "WritableFileWriter::Append,"
+ "WritableFileWriter::WriteBuffered",
})
elif kill_mode == 2:
@@ -476,7 +476,7 @@ def whitebox_crash_main(args, unknown_args):
# is too small.
additional_opts.update({
"kill_random_test": (kill_random_test // 5000 + 1),
"kill_prefix_blacklist": "WritableFileWriter::Append,"
"kill_exclude_prefixes": "WritableFileWriter::Append,"
"WritableFileWriter::WriteBuffered,"
"PosixMmapFile::Allocate,WritableFileWriter::Flush",
})
