From 65855dd8d4d8756eec6c43b6faa3abbfb6ea8a86 Mon Sep 17 00:00:00 2001
From: Mark Callaghan
Date: Fri, 26 Oct 2012 13:00:42 -0700
Subject: [PATCH] Normalize compaction stats by time in compaction

Summary:
I used server uptime to compute per-level IO throughput rates. I
intended to use time spent doing compaction at that level. This fixes
that.

Task ID: #

Blame Rev:

Test Plan:
run db_bench, look at results

Revert Plan:

Database Impact:

Memcache Impact:

Other Notes:

EImportant:

- begin *PUBLIC* platform impact section -
Bugzilla: #
- end platform impact -

Reviewers: dhruba

Reviewed By: dhruba

Differential Revision: https://reviews.facebook.net/D6237
---
 db/db_impl.cc | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 0c77a26a3..8919e75fe 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -1594,8 +1594,9 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
             stats_[level].bytes_readnp1 / 1048576.0,
             bytes_new / 1048576.0,
             amplify,
-            bytes_read / 1048576.0 / seconds_up,
-            stats_[level].bytes_written / 1048576.0 / seconds_up,
+            (bytes_read / 1048576.0) / (stats_[level].micros / 1000000.0),
+            (stats_[level].bytes_written / 1048576.0) /
+                (stats_[level].micros / 1000000.0),
             stats_[level].files_in_leveln,
             stats_[level].files_in_levelnp1,
             stats_[level].files_out_levelnp1,
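
Illustration (not part of the patch): a minimal standalone C++ sketch of the
normalization change above. Per-level throughput is now divided by the time
that level spent in compaction (stats_[level].micros) instead of by total
server uptime (seconds_up). The LevelStats struct and the sample numbers below
are assumptions made up for the example; they only mirror the names used in
the diff, not the actual DBImpl code.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical per-level counters, loosely modeled on the fields the
    // patch touches (micros, bytes_read, bytes_written).
    struct LevelStats {
      uint64_t micros;         // wall-clock time this level spent compacting
      uint64_t bytes_read;     // bytes read by compactions at this level
      uint64_t bytes_written;  // bytes written by compactions at this level
    };

    int main() {
      const double seconds_up = 3600.0;        // server uptime: 1 hour
      const LevelStats s{120 * 1000000ULL,     // but only 120 s of compaction
                         4ULL << 30,           // 4 GB read
                         4ULL << 30};          // 4 GB written

      // Old formula: normalize by server uptime. A level that compacts
      // rarely looks artificially slow.
      const double rd_old = (s.bytes_read / 1048576.0) / seconds_up;

      // New formula (this patch): normalize by time spent compacting the
      // level, reporting the throughput achieved while compaction ran.
      const double rd_new = (s.bytes_read / 1048576.0) / (s.micros / 1000000.0);

      std::printf("Rd(MB/s) by uptime:     %9.1f\n", rd_old);  // ~1.1 MB/s
      std::printf("Rd(MB/s) by compaction: %9.1f\n", rd_new);  // ~34.1 MB/s
      return 0;
    }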