clang-format for db/compaction (#10882)

Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/10882

Reviewed By: riversand963

Differential Revision: D40724867

Pulled By: jay-zhuang

fbshipit-source-id: 7f387724f8cd07d8d2b90566a515a4e9078d21f1
Branch: main
Author: Jay Zhuang (committed by Facebook GitHub Bot)
Parent: a1a1dc6659
Commit: b36ec37a4b
7 changed files:

  1. db/compaction/compaction_iterator.cc          (16 lines changed)
  2. db/compaction/compaction_iterator_test.cc      (3 lines changed)
  3. db/compaction/compaction_job.cc               (15 lines changed)
  4. db/compaction/compaction_job.h                 (4 lines changed)
  5. db/compaction/compaction_job_stats_test.cc   (217 lines changed)
  6. db/compaction/compaction_job_test.cc           (3 lines changed)
  7. db/compaction/compaction_picker_universal.cc   (3 lines changed)
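For reference, a formatting-only pass like this one is typically produced by running clang-format in place over the touched files and committing the result. A minimal sketch of that workflow in Python (assuming a clang-format binary on PATH and a RocksDB checkout as the working directory; the file list mirrors this commit, but the exact invocation is not part of it):

    import subprocess

    files = [
        "db/compaction/compaction_iterator.cc",
        "db/compaction/compaction_iterator_test.cc",
        "db/compaction/compaction_job.cc",
        "db/compaction/compaction_job.h",
        "db/compaction/compaction_job_stats_test.cc",
        "db/compaction/compaction_job_test.cc",
        "db/compaction/compaction_picker_universal.cc",
    ]

    # -i rewrites each file in place; --style=file picks up the repository's
    # .clang-format configuration rather than a hard-coded style.
    subprocess.run(["clang-format", "-i", "--style=file", *files], check=True)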

db/compaction/compaction_iterator.cc

@@ -825,8 +825,8 @@ void CompactionIterator::NextFromInput() {
                  cmp_with_history_ts_low_ < 0)) &&
                 bottommost_level_) {
       // Handle the case where we have a delete key at the bottom most level
-      // We can skip outputting the key iff there are no subsequent puts for this
-      // key
+      // We can skip outputting the key iff there are no subsequent puts for
+      // this key
       assert(!compaction_ || compaction_->KeyNotExistsBeyondOutputLevel(
                                  ikey_.user_key, &level_ptrs_));
       ParsedInternalKey next_ikey;
@@ -853,8 +853,8 @@ void CompactionIterator::NextFromInput() {
                 DefinitelyNotInSnapshot(next_ikey.sequence, prev_snapshot))) {
         AdvanceInputIter();
       }
-      // If you find you still need to output a row with this key, we need to output the
-      // delete too
+      // If you find you still need to output a row with this key, we need to
+      // output the delete too
       if (input_.Valid() &&
           (ParseInternalKey(input_.key(), &next_ikey, allow_data_in_errors_)
                .ok()) &&
@@ -1212,8 +1212,8 @@ inline SequenceNumber CompactionIterator::findEarliestVisibleSnapshot(
     ROCKS_LOG_FATAL(info_log_,
                     "No snapshot left in findEarliestVisibleSnapshot");
   }
-  auto snapshots_iter = std::lower_bound(
-      snapshots_->begin(), snapshots_->end(), in);
+  auto snapshots_iter =
+      std::lower_bound(snapshots_->begin(), snapshots_->end(), in);
   assert(prev_snapshot != nullptr);
   if (snapshots_iter == snapshots_->begin()) {
     *prev_snapshot = 0;
@@ -1228,8 +1228,8 @@ inline SequenceNumber CompactionIterator::findEarliestVisibleSnapshot(
     }
   }
   if (snapshot_checker_ == nullptr) {
-    return snapshots_iter != snapshots_->end()
-        ? *snapshots_iter : kMaxSequenceNumber;
+    return snapshots_iter != snapshots_->end() ? *snapshots_iter
+                                               : kMaxSequenceNumber;
   }
   bool has_released_snapshot = !released_snapshots_.empty();
   for (; snapshots_iter != snapshots_->end(); ++snapshots_iter) {

db/compaction/compaction_iterator_test.cc

@@ -203,7 +203,8 @@ class TestSnapshotChecker : public SnapshotChecker {
  public:
   explicit TestSnapshotChecker(
       SequenceNumber last_committed_sequence,
-      const std::unordered_map<SequenceNumber, SequenceNumber>& snapshots = {{}})
+      const std::unordered_map<SequenceNumber, SequenceNumber>& snapshots =
+          {{}})
       : last_committed_sequence_(last_committed_sequence),
         snapshots_(snapshots) {}

db/compaction/compaction_job.cc

@@ -714,11 +714,12 @@ Status CompactionJob::Run() {
         break;
       }
       // Verify that the table is usable
-      // We set for_compaction to false and don't OptimizeForCompactionTableRead
-      // here because this is a special case after we finish the table building
-      // No matter whether use_direct_io_for_flush_and_compaction is true,
-      // we will regard this verification as user reads since the goal is
-      // to cache it here for further user reads
+      // We set for_compaction to false and don't
+      // OptimizeForCompactionTableRead here because this is a special case
+      // after we finish the table building No matter whether
+      // use_direct_io_for_flush_and_compaction is true, we will regard this
+      // verification as user reads since the goal is to cache it here for
+      // further user reads
       ReadOptions read_options;
       InternalIterator* iter = cfd->table_cache()->NewIterator(
           read_options, file_options_, cfd->internal_comparator(),
@@ -764,8 +765,8 @@ Status CompactionJob::Run() {
       }
     };
     for (size_t i = 1; i < compact_->sub_compact_states.size(); i++) {
-      thread_pool.emplace_back(verify_table,
-          std::ref(compact_->sub_compact_states[i].status));
+      thread_pool.emplace_back(
+          verify_table, std::ref(compact_->sub_compact_states[i].status));
     }
     verify_table(compact_->sub_compact_states[0].status);
     for (auto& thread : thread_pool) {

db/compaction/compaction_job.h

@@ -265,8 +265,8 @@ class CompactionJob {
   void RecordDroppedKeys(const CompactionIterationStats& c_iter_stats,
                          CompactionJobStats* compaction_job_stats = nullptr);

-  void UpdateCompactionInputStatsHelper(
-      int* num_files, uint64_t* bytes_read, int input_level);
+  void UpdateCompactionInputStatsHelper(int* num_files, uint64_t* bytes_read,
+                                        int input_level);

   void NotifyOnSubcompactionBegin(SubcompactionState* sub_compact);

db/compaction/compaction_job_stats_test.cc

@@ -155,8 +155,7 @@ class CompactionJobStatsTest : public testing::Test,
     ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
   }

-  Status TryReopenWithColumnFamilies(
-      const std::vector<std::string>& cfs,
+  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                      const std::vector<Options>& options) {
     Close();
     EXPECT_EQ(cfs.size(), options.size());
@@ -175,9 +174,7 @@ class CompactionJobStatsTest : public testing::Test,
     return TryReopenWithColumnFamilies(cfs, v_opts);
   }

-  void Reopen(const Options& options) {
-    ASSERT_OK(TryReopen(options));
-  }
+  void Reopen(const Options& options) { ASSERT_OK(TryReopen(options)); }

   void Close() {
     for (auto h : handles_) {
@@ -226,9 +223,7 @@ class CompactionJobStatsTest : public testing::Test,
     return db_->Put(wo, handles_[cf], k, v);
   }

-  Status Delete(const std::string& k) {
-    return db_->Delete(WriteOptions(), k);
-  }
+  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }

   Status Delete(int cf, const std::string& k) {
     return db_->Delete(WriteOptions(), handles_[cf], k);
@@ -338,16 +333,16 @@ class CompactionJobStatsTest : public testing::Test,
     }
   }

-  static void SetDeletionCompactionStats(
-      CompactionJobStats *stats, uint64_t input_deletions,
-      uint64_t expired_deletions, uint64_t records_replaced) {
+  static void SetDeletionCompactionStats(CompactionJobStats* stats,
+                                         uint64_t input_deletions,
+                                         uint64_t expired_deletions,
+                                         uint64_t records_replaced) {
     stats->num_input_deletion_records = input_deletions;
     stats->num_expired_deletion_records = expired_deletions;
     stats->num_records_replaced = records_replaced;
   }

-  void MakeTableWithKeyValues(
-      Random* rnd, uint64_t smallest, uint64_t largest,
+  void MakeTableWithKeyValues(Random* rnd, uint64_t smallest, uint64_t largest,
                               int key_size, int value_size, uint64_t interval,
                               double ratio, int cf = 0) {
     for (auto key = smallest; key < largest; key += interval) {
@@ -361,9 +356,9 @@ class CompactionJobStatsTest : public testing::Test,
   // rounds of keys are inserted into the database, as per the behavior
   // of the DeletionStatsTest.
   void SelectivelyDeleteKeys(uint64_t smallest, uint64_t largest,
-      uint64_t interval, int deletion_interval, int key_size,
-      uint64_t cutoff_key_num, CompactionJobStats* stats, int cf = 0) {
+                             uint64_t interval, int deletion_interval,
+                             int key_size, uint64_t cutoff_key_num,
+                             CompactionJobStats* stats, int cf = 0) {
     // interval needs to be >= 2 so that deletion entries can be inserted
     // that are intended to not result in an actual key deletion by using
     // an offset of 1 from another existing key
@@ -399,8 +394,7 @@ class CompactionJobStatsTest : public testing::Test,
     num_expired++;

    ASSERT_OK(Flush(cf));
-    SetDeletionCompactionStats(stats, deletions_made, num_expired,
-        num_deleted);
+    SetDeletionCompactionStats(stats, deletions_made, num_expired, num_deleted);
  }
};
@@ -444,21 +438,16 @@ class CompactionJobStatsChecker : public EventListener {
     // time
     ASSERT_GT(current_stats.elapsed_micros, 0U);

-    ASSERT_EQ(current_stats.num_input_records,
-              stats.num_input_records);
-    ASSERT_EQ(current_stats.num_input_files,
-              stats.num_input_files);
+    ASSERT_EQ(current_stats.num_input_records, stats.num_input_records);
+    ASSERT_EQ(current_stats.num_input_files, stats.num_input_files);
     ASSERT_EQ(current_stats.num_input_files_at_output_level,
               stats.num_input_files_at_output_level);

-    ASSERT_EQ(current_stats.num_output_records,
-              stats.num_output_records);
-    ASSERT_EQ(current_stats.num_output_files,
-              stats.num_output_files);
+    ASSERT_EQ(current_stats.num_output_records, stats.num_output_records);
+    ASSERT_EQ(current_stats.num_output_files, stats.num_output_files);

     ASSERT_EQ(current_stats.is_full_compaction, stats.is_full_compaction);
-    ASSERT_EQ(current_stats.is_manual_compaction,
-              stats.is_manual_compaction);
+    ASSERT_EQ(current_stats.is_manual_compaction, stats.is_manual_compaction);

     // file size
     double kFileSizeBias = compression_enabled_ ? 0.20 : 0.10;
@@ -475,17 +464,13 @@ class CompactionJobStatsChecker : public EventListener {
     ASSERT_EQ(current_stats.total_input_raw_value_bytes,
               stats.total_input_raw_value_bytes);

-    ASSERT_EQ(current_stats.num_records_replaced,
-              stats.num_records_replaced);
+    ASSERT_EQ(current_stats.num_records_replaced, stats.num_records_replaced);

-    ASSERT_EQ(current_stats.num_corrupt_keys,
-              stats.num_corrupt_keys);
+    ASSERT_EQ(current_stats.num_corrupt_keys, stats.num_corrupt_keys);

-    ASSERT_EQ(
-        std::string(current_stats.smallest_output_key_prefix),
+    ASSERT_EQ(std::string(current_stats.smallest_output_key_prefix),
               std::string(stats.smallest_output_key_prefix));
-    ASSERT_EQ(
-        std::string(current_stats.largest_output_key_prefix),
+    ASSERT_EQ(std::string(current_stats.largest_output_key_prefix),
               std::string(stats.largest_output_key_prefix));
   }
@@ -497,9 +482,7 @@ class CompactionJobStatsChecker : public EventListener {
     expected_stats_.push(stats);
   }

-  void EnableCompression(bool flag) {
-    compression_enabled_ = flag;
-  }
+  void EnableCompression(bool flag) { compression_enabled_ = flag; }

   bool verify_next_comp_io_stats() const { return verify_next_comp_io_stats_; }
@@ -517,45 +500,37 @@ class CompactionJobDeletionStatsChecker : public CompactionJobStatsChecker {
   // Verifies whether two CompactionJobStats match.
   void Verify(const CompactionJobStats& current_stats,
               const CompactionJobStats& stats) override {
-    ASSERT_EQ(
-        current_stats.num_input_deletion_records,
+    ASSERT_EQ(current_stats.num_input_deletion_records,
               stats.num_input_deletion_records);
-    ASSERT_EQ(
-        current_stats.num_expired_deletion_records,
+    ASSERT_EQ(current_stats.num_expired_deletion_records,
               stats.num_expired_deletion_records);
-    ASSERT_EQ(
-        current_stats.num_records_replaced,
-        stats.num_records_replaced);
+    ASSERT_EQ(current_stats.num_records_replaced, stats.num_records_replaced);
-    ASSERT_EQ(current_stats.num_corrupt_keys,
-              stats.num_corrupt_keys);
+    ASSERT_EQ(current_stats.num_corrupt_keys, stats.num_corrupt_keys);
   }
 };

 namespace {

-uint64_t EstimatedFileSize(
-    uint64_t num_records, size_t key_size, size_t value_size,
-    double compression_ratio = 1.0,
+uint64_t EstimatedFileSize(uint64_t num_records, size_t key_size,
+                           size_t value_size, double compression_ratio = 1.0,
                            size_t block_size = 4096,
                            int bloom_bits_per_key = 10) {
   const size_t kPerKeyOverhead = 8;
   const size_t kFooterSize = 512;

-  uint64_t data_size =
-      static_cast<uint64_t>(
-          num_records * (key_size + value_size * compression_ratio +
-                         kPerKeyOverhead));
+  uint64_t data_size = static_cast<uint64_t>(
+      num_records *
+      (key_size + value_size * compression_ratio + kPerKeyOverhead));

-  return data_size + kFooterSize
-      + num_records * bloom_bits_per_key / 8  // filter block
+  return data_size + kFooterSize +
+         num_records * bloom_bits_per_key / 8      // filter block
          + data_size * (key_size + 8) / block_size;  // index block
 }

 namespace {

-void CopyPrefix(
-    const Slice& src, size_t prefix_length, std::string* dst) {
+void CopyPrefix(const Slice& src, size_t prefix_length, std::string* dst) {
   assert(prefix_length > 0);
   size_t length = src.size() > prefix_length ? prefix_length : src.size();
   dst->assign(src.data(), length);
@@ -581,28 +556,24 @@ CompactionJobStats NewManualCompactionJobStats(
   stats.num_output_files = num_output_files;

   stats.total_input_bytes =
-      EstimatedFileSize(
-          num_input_records / num_input_files,
-          key_size, value_size, compression_ratio) * num_input_files;
+      EstimatedFileSize(num_input_records / num_input_files, key_size,
+                        value_size, compression_ratio) *
+      num_input_files;
   stats.total_output_bytes =
-      EstimatedFileSize(
-          num_output_records / num_output_files,
-          key_size, value_size, compression_ratio) * num_output_files;
-  stats.total_input_raw_key_bytes =
-      num_input_records * (key_size + 8);
-  stats.total_input_raw_value_bytes =
-      num_input_records * value_size;
+      EstimatedFileSize(num_output_records / num_output_files, key_size,
+                        value_size, compression_ratio) *
+      num_output_files;
+  stats.total_input_raw_key_bytes = num_input_records * (key_size + 8);
+  stats.total_input_raw_value_bytes = num_input_records * value_size;

   stats.is_full_compaction = is_full;
   stats.is_manual_compaction = is_manual;

   stats.num_records_replaced = num_records_replaced;

-  CopyPrefix(smallest_key,
-             CompactionJobStats::kMaxPrefixLength,
+  CopyPrefix(smallest_key, CompactionJobStats::kMaxPrefixLength,
              &stats.smallest_output_key_prefix);
-  CopyPrefix(largest_key,
-             CompactionJobStats::kMaxPrefixLength,
+  CopyPrefix(largest_key, CompactionJobStats::kMaxPrefixLength,
              &stats.largest_output_key_prefix);

   return stats;
@@ -662,11 +633,9 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
     // 1st Phase: generate "num_L0_files" L0 files.
     int num_L0_files = 0;
-    for (uint64_t start_key = key_base;
-         start_key <= key_base * kTestScale;
+    for (uint64_t start_key = key_base; start_key <= key_base * kTestScale;
          start_key += key_base) {
-      MakeTableWithKeyValues(
-          &rnd, start_key, start_key + key_base - 1,
+      MakeTableWithKeyValues(&rnd, start_key, start_key + key_base - 1,
                              kKeySize, kValueSize, key_interval,
                              compression_ratio, 1);
       snprintf(buf, kBufSize, "%d", ++num_L0_files);
@@ -684,13 +653,9 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
          start_key += key_base, count++) {
       smallest_key = Key(start_key, 10);
       largest_key = Key(start_key + key_base - key_interval, 10);
-      stats_checker->AddExpectedStats(
-          NewManualCompactionJobStats(
-              smallest_key, largest_key,
-              1, 0, num_keys_per_L0_file,
-              kKeySize, kValueSize,
-              1, num_keys_per_L0_file,
-              compression_ratio, 0));
+      stats_checker->AddExpectedStats(NewManualCompactionJobStats(
+          smallest_key, largest_key, 1, 0, num_keys_per_L0_file, kKeySize,
+          kValueSize, 1, num_keys_per_L0_file, compression_ratio, 0));
       ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
       TEST_Compact(0, 1, smallest_key, largest_key);
       snprintf(buf, kBufSize, "%d,%d", num_L0_files - count, count);
@@ -701,14 +666,10 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
     int num_remaining_L0 = num_L0_files - L0_compaction_count;
     smallest_key = Key(key_base * (L0_compaction_count + 1), 10);
     largest_key = Key(key_base * (kTestScale + 1) - key_interval, 10);
-    stats_checker->AddExpectedStats(
-        NewManualCompactionJobStats(
-            smallest_key, largest_key,
-            num_remaining_L0,
-            0, num_keys_per_L0_file * num_remaining_L0,
-            kKeySize, kValueSize,
-            1, num_keys_per_L0_file * num_remaining_L0,
-            compression_ratio, 0));
+    stats_checker->AddExpectedStats(NewManualCompactionJobStats(
+        smallest_key, largest_key, num_remaining_L0, 0,
+        num_keys_per_L0_file * num_remaining_L0, kKeySize, kValueSize, 1,
+        num_keys_per_L0_file * num_remaining_L0, compression_ratio, 0));
     ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
     TEST_Compact(0, 1, smallest_key, largest_key);
@@ -719,13 +680,11 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
     // 3rd Phase: generate sparse L0 files (wider key-range, same num of keys)
     int sparseness = 2;
-    for (uint64_t start_key = key_base;
-         start_key <= key_base * kTestScale;
+    for (uint64_t start_key = key_base; start_key <= key_base * kTestScale;
          start_key += key_base * sparseness) {
       MakeTableWithKeyValues(
-          &rnd, start_key, start_key + key_base * sparseness - 1,
-          kKeySize, kValueSize,
-          key_base * sparseness / num_keys_per_L0_file,
+          &rnd, start_key, start_key + key_base * sparseness - 1, kKeySize,
+          kValueSize, key_base * sparseness / num_keys_per_L0_file,
           compression_ratio, 1);
       snprintf(buf, kBufSize, "%d,%d", ++num_L0_files, num_L1_files);
       ASSERT_EQ(std::string(buf), FilesPerLevel(1));
@@ -737,21 +696,15 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
     // output files without coordinating to see if the output could fit into
     // a smaller number of files like it does when it runs sequentially
     int num_output_files = options.max_subcompactions > 1 ? 2 : 1;
-    for (uint64_t start_key = key_base;
-         num_L0_files > 1;
+    for (uint64_t start_key = key_base; num_L0_files > 1;
          start_key += key_base * sparseness) {
       smallest_key = Key(start_key, 10);
-      largest_key =
-          Key(start_key + key_base * sparseness - key_interval, 10);
-      stats_checker->AddExpectedStats(
-          NewManualCompactionJobStats(
-              smallest_key, largest_key,
-              3, 2, num_keys_per_L0_file * 3,
-              kKeySize, kValueSize,
-              num_output_files,
+      largest_key = Key(start_key + key_base * sparseness - key_interval, 10);
+      stats_checker->AddExpectedStats(NewManualCompactionJobStats(
+          smallest_key, largest_key, 3, 2, num_keys_per_L0_file * 3, kKeySize,
+          kValueSize, num_output_files,
           num_keys_per_L0_file * 2,  // 1/3 of the data will be updated.
-          compression_ratio,
-          num_keys_per_L0_file));
+          compression_ratio, num_keys_per_L0_file));
       ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
       Compact(1, smallest_key, largest_key);
       if (options.max_subcompactions == 1) {
@@ -766,14 +719,10 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
     // In the first sub-compaction, we expect L0 compaction.
     smallest_key = Key(key_base, 10);
     largest_key = Key(key_base * (kTestScale + 1) - key_interval, 10);
-    stats_checker->AddExpectedStats(
-        NewManualCompactionJobStats(
-            Key(key_base * (kTestScale + 1 - sparseness), 10), largest_key,
-            2, 1, num_keys_per_L0_file * 3,
-            kKeySize, kValueSize,
-            1, num_keys_per_L0_file * 2,
-            compression_ratio,
-            num_keys_per_L0_file));
+    stats_checker->AddExpectedStats(NewManualCompactionJobStats(
+        Key(key_base * (kTestScale + 1 - sparseness), 10), largest_key, 2, 1,
+        num_keys_per_L0_file * 3, kKeySize, kValueSize, 1,
+        num_keys_per_L0_file * 2, compression_ratio, num_keys_per_L0_file));
     ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
     Compact(1, smallest_key, largest_key);
@@ -881,13 +830,10 @@ TEST_P(CompactionJobStatsTest, DeletionStatsTest) {
     // Stage 1: Generate several L0 files and then send them to L2 by
     // using CompactRangeOptions and CompactRange(). These files will
     // have a strict subset of the keys from the full key-range
-    for (uint64_t start_key = key_base;
-         start_key <= key_base * kTestScale / 2;
+    for (uint64_t start_key = key_base; start_key <= key_base * kTestScale / 2;
          start_key += key_base) {
-      MakeTableWithKeyValues(
-          &rnd, start_key, start_key + key_base - 1,
-          kKeySize, kValueSize, key_interval,
-          compression_ratio, 1);
+      MakeTableWithKeyValues(&rnd, start_key, start_key + key_base - 1, kKeySize,
+                             kValueSize, key_interval, compression_ratio, 1);
     }

     CompactRangeOptions cr_options;
@@ -897,13 +843,10 @@ TEST_P(CompactionJobStatsTest, DeletionStatsTest) {
     ASSERT_GT(NumTableFilesAtLevel(2, 1), 0);

     // Stage 2: Generate files including keys from the entire key range
-    for (uint64_t start_key = key_base;
-         start_key <= key_base * kTestScale;
+    for (uint64_t start_key = key_base; start_key <= key_base * kTestScale;
          start_key += key_base) {
-      MakeTableWithKeyValues(
-          &rnd, start_key, start_key + key_base - 1,
-          kKeySize, kValueSize, key_interval,
-          compression_ratio, 1);
+      MakeTableWithKeyValues(&rnd, start_key, start_key + key_base - 1, kKeySize,
+                             kValueSize, key_interval, compression_ratio, 1);
     }

     // Send these L0 files to L1
@@ -919,8 +862,8 @@ TEST_P(CompactionJobStatsTest, DeletionStatsTest) {
     // there are files with the same key range in L0, L1, and L2
     int deletion_interval = 3;
     CompactionJobStats first_compaction_stats;
-    SelectivelyDeleteKeys(key_base, largest_key_num,
-        key_interval, deletion_interval, kKeySize, cutoff_key_num,
+    SelectivelyDeleteKeys(key_base, largest_key_num, key_interval,
+                          deletion_interval, kKeySize, cutoff_key_num,
                           &first_compaction_stats, 1);

     stats_checker->AddExpectedStats(first_compaction_stats);
@@ -932,8 +875,7 @@
 namespace {
 int GetUniversalCompactionInputUnits(uint32_t num_flushes) {
   uint32_t compaction_input_units;
-  for (compaction_input_units = 1;
-       num_flushes >= compaction_input_units;
+  for (compaction_input_units = 1; num_flushes >= compaction_input_units;
        compaction_input_units *= 2) {
     if ((num_flushes & compaction_input_units) != 0) {
       return compaction_input_units > 1 ? compaction_input_units : 0;
@@ -998,13 +940,10 @@ TEST_P(CompactionJobStatsTest, UniversalCompactionTest) {
   }
   ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 3U);

-  for (uint64_t start_key = key_base;
-       start_key <= key_base * kTestScale;
+  for (uint64_t start_key = key_base; start_key <= key_base * kTestScale;
        start_key += key_base) {
-    MakeTableWithKeyValues(
-        &rnd, start_key, start_key + key_base - 1,
-        kKeySize, kValueSize, key_interval,
-        compression_ratio, 1);
+    MakeTableWithKeyValues(&rnd, start_key, start_key + key_base - 1, kKeySize,
+                           kValueSize, key_interval, compression_ratio, 1);
     ASSERT_OK(static_cast_with_check<DBImpl>(db_)->TEST_WaitForCompact());
   }
   ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 0U);

db/compaction/compaction_job_test.cc

@@ -499,8 +499,7 @@ class CompactionJobTestBase : public testing::Test {
         // This is how the key will look like once it's written in bottommost
         // file
-        InternalKey bottommost_internal_key(
-            key, 0, kTypeValue);
+        InternalKey bottommost_internal_key(key, 0, kTypeValue);
         if (corrupt_id(k)) {
           test::CorruptKeyType(&internal_key);

db/compaction/compaction_picker_universal.cc

@@ -308,7 +308,8 @@ void UniversalCompactionBuilder::SortedRun::Dump(char* out_buf,
   if (file->fd.GetPathId() == 0 || !print_path) {
     snprintf(out_buf, out_buf_size, "file %" PRIu64, file->fd.GetNumber());
   } else {
-    snprintf(out_buf, out_buf_size, "file %" PRIu64
+    snprintf(out_buf, out_buf_size,
+             "file %" PRIu64
              "(path "
              "%" PRIu32 ")",
              file->fd.GetNumber(), file->fd.GetPathId());
