fix readampbitmap tests

Summary:
Fix the test failures in ReadAmpBitmap and ReadAmpBitmapLiveInCacheAfterDBClose.
Verified by running ReadAmpBitmapLiveInCacheAfterDBClose individually and by running make check.
Closes https://github.com/facebook/rocksdb/pull/2271

Differential Revision: D5038133

Pulled By: lightmark

fbshipit-source-id: 803cd6f45ccfdd14a9d9473c8af311033e164be8
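Why the updated test asserts the two cases differently: with `read_amp_bytes_per_bit = 1` the bitmap tracks every byte, so after reading back the whole DB the useful-bytes ticker equals the loaded-bytes ticker exactly (`ASSERT_EQ`), while with 16 bytes per bit the estimate is coarser, so the test only requires the ratio to be within 1% of 1.0 (`ASSERT_NEAR(..., 1, .01)`). Below is a minimal sketch of that ratio, computed from the same statistics tickers the test reads; the `ReadAmpRatio` helper is hypothetical and not part of this patch.

```cpp
#include <cstdint>
#include <memory>

#include "rocksdb/statistics.h"

// Hypothetical helper (not in the patch): the read-amplification ratio that
// the test's assertions compare against 1.0.
double ReadAmpRatio(const std::shared_ptr<rocksdb::Statistics>& stats) {
  // Estimate of loaded block bytes that were actually used to serve reads.
  uint64_t useful =
      stats->getTickerCount(rocksdb::READ_AMP_ESTIMATE_USEFUL_BYTES);
  // Total bytes of data blocks loaded into the block cache.
  uint64_t loaded = stats->getTickerCount(rocksdb::READ_AMP_TOTAL_READ_BYTES);
  return loaded == 0 ? 0.0 : static_cast<double>(useful) / loaded;
}
```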
main
Aaron Gao authored 7 years ago; committed by Facebook Github Bot
parent be421b0b16
commit 492fc49a86

1 changed file: db/db_test2.cc (135 lines changed)

@@ -1728,82 +1728,93 @@ TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) {
     // the blocks again regardless of them being already in the cache
     return;
   }
-  std::shared_ptr<Cache> lru_cache = NewLRUCache(1024 * 1024 * 1024);
-  std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
-
-  Options options = CurrentOptions();
-  BlockBasedTableOptions bbto;
-  // Disable delta encoding to make it easier to calculate read amplification
-  bbto.use_delta_encoding = false;
-  // Huge block cache to make it easier to calculate read amplification
-  bbto.block_cache = lru_cache;
-  bbto.read_amp_bytes_per_bit = 16;
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  options.statistics = stats;
-  DestroyAndReopen(options);
-
-  const int kNumEntries = 10000;
-
-  Random rnd(301);
-  for (int i = 0; i < kNumEntries; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
-  }
-  ASSERT_OK(Flush());
-
-  Close();
-  Reopen(options);
-
-  uint64_t total_useful_bytes = 0;
-  std::set<int> read_keys;
-  std::string value;
-  // Iter1: Read half the DB, Read even keys
-  // Key(0), Key(2), Key(4), Key(6), Key(8), ...
-  for (int i = 0; i < kNumEntries; i += 2) {
-    std::string k = Key(i);
-    ASSERT_OK(db_->Get(ReadOptions(), k, &value));
-
-    if (read_keys.find(i) == read_keys.end()) {
-      auto ik = InternalKey(k, 0, ValueType::kTypeValue);
-      total_useful_bytes += GetEncodedEntrySize(ik.size(), value.size());
-      read_keys.insert(i);
-    }
-  }
-
-  size_t total_useful_bytes_iter1 =
-      options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
-  size_t total_loaded_bytes_iter1 =
-      options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
-
-  Close();
-  std::shared_ptr<Statistics> new_statistics = rocksdb::CreateDBStatistics();
-  // Destroy old statistics obj that the blocks in lru_cache are pointing to
-  options.statistics.reset();
-  // Use the statistics object that we just created
-  options.statistics = new_statistics;
-  Reopen(options);
-
-  // Iter2: Read half the DB, Read odd keys
-  // Key(1), Key(3), Key(5), Key(7), Key(9), ...
-  for (int i = 1; i < kNumEntries; i += 2) {
-    std::string k = Key(i);
-    ASSERT_OK(db_->Get(ReadOptions(), k, &value));
-
-    if (read_keys.find(i) == read_keys.end()) {
-      auto ik = InternalKey(k, 0, ValueType::kTypeValue);
-      total_useful_bytes += GetEncodedEntrySize(ik.size(), value.size());
-      read_keys.insert(i);
-    }
-  }
-
-  size_t total_useful_bytes_iter2 =
-      options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
-  size_t total_loaded_bytes_iter2 =
-      options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
-
-  // We reached read_amp of 100% because we read all the keys in the DB
-  ASSERT_EQ(total_useful_bytes_iter1 + total_useful_bytes_iter2,
-            total_loaded_bytes_iter1 + total_loaded_bytes_iter2);
+  uint32_t bytes_per_bit[2] = {1, 16};
+  for (size_t k = 0; k < 2; k++) {
+    std::shared_ptr<Cache> lru_cache = NewLRUCache(1024 * 1024 * 1024);
+    std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
+
+    Options options = CurrentOptions();
+    BlockBasedTableOptions bbto;
+    // Disable delta encoding to make it easier to calculate read amplification
+    bbto.use_delta_encoding = false;
+    // Huge block cache to make it easier to calculate read amplification
+    bbto.block_cache = lru_cache;
+    bbto.read_amp_bytes_per_bit = bytes_per_bit[k];
+    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+    options.statistics = stats;
+    DestroyAndReopen(options);
+
+    const int kNumEntries = 10000;
+
+    Random rnd(301);
+    for (int i = 0; i < kNumEntries; i++) {
+      ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
+    }
+    ASSERT_OK(Flush());
+
+    Close();
+    Reopen(options);
+
+    uint64_t total_useful_bytes = 0;
+    std::set<int> read_keys;
+    std::string value;
+    // Iter1: Read half the DB, Read even keys
+    // Key(0), Key(2), Key(4), Key(6), Key(8), ...
+    for (int i = 0; i < kNumEntries; i += 2) {
+      std::string key = Key(i);
+      ASSERT_OK(db_->Get(ReadOptions(), key, &value));
+
+      if (read_keys.find(i) == read_keys.end()) {
+        auto internal_key = InternalKey(key, 0, ValueType::kTypeValue);
+        total_useful_bytes +=
+            GetEncodedEntrySize(internal_key.size(), value.size());
+        read_keys.insert(i);
+      }
+    }
+
+    size_t total_useful_bytes_iter1 =
+        options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
+    size_t total_loaded_bytes_iter1 =
+        options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
+
+    Close();
+    std::shared_ptr<Statistics> new_statistics = rocksdb::CreateDBStatistics();
+    // Destroy old statistics obj that the blocks in lru_cache are pointing to
+    options.statistics.reset();
+    // Use the statistics object that we just created
+    options.statistics = new_statistics;
+    Reopen(options);
+
+    // Iter2: Read half the DB, Read odd keys
+    // Key(1), Key(3), Key(5), Key(7), Key(9), ...
+    for (int i = 1; i < kNumEntries; i += 2) {
+      std::string key = Key(i);
+      ASSERT_OK(db_->Get(ReadOptions(), key, &value));
+
+      if (read_keys.find(i) == read_keys.end()) {
+        auto internal_key = InternalKey(key, 0, ValueType::kTypeValue);
+        total_useful_bytes +=
+            GetEncodedEntrySize(internal_key.size(), value.size());
+        read_keys.insert(i);
+      }
+    }
+
+    size_t total_useful_bytes_iter2 =
+        options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
+    size_t total_loaded_bytes_iter2 =
+        options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
+
+    // Read amp is on average 100% since we read all what we loaded in memory
+    if (k == 0) {
+      ASSERT_EQ(total_useful_bytes_iter1 + total_useful_bytes_iter2,
+                total_loaded_bytes_iter1 + total_loaded_bytes_iter2);
+    } else {
+      ASSERT_NEAR((total_useful_bytes_iter1 + total_useful_bytes_iter2) * 1.0f /
+                      (total_loaded_bytes_iter1 + total_loaded_bytes_iter2),
+                  1, .01);
+    }
+  }
 }
 #endif  // !OS_SOLARIS
