Add a histogram stat for memtable flush

Summary:
Add a new histogram stat, rocksdb.db.flush.micros, that records the time
spent flushing a memtable to disk.
Closes https://github.com/facebook/rocksdb/pull/3269

Differential Revision: D6559496

Pulled By: anand1976

fbshipit-source-id: f5c771ba2568630458751795e8c37a493ff9b14d
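A minimal sketch of how an application could read the new histogram through the existing Statistics interface (not part of this change; the DB path and key below are placeholders):

#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // The histogram is only populated when a Statistics object is attached.
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/flush_micros_example", &db);
  assert(s.ok());

  // Write a key and force a memtable flush so FLUSH_TIME gets a sample.
  db->Put(rocksdb::WriteOptions(), "key", "value");
  db->Flush(rocksdb::FlushOptions());

  // Query the new histogram; values are reported in microseconds.
  rocksdb::HistogramData hist;
  options.statistics->histogramData(rocksdb::FLUSH_TIME, &hist);

  delete db;
  return 0;
}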
Branch: main
Author: Anand Ananthabhotla, committed 7 years ago by Facebook Github Bot
Parent: 95583e1532
Commit: fccc12f386
Files changed:
  db/flush_job.cc              |  1
  db/flush_job_test.cc         | 22
  include/rocksdb/statistics.h |  3
  java/rocksjni/portal.h       |  6

db/flush_job.cc

@@ -364,6 +364,7 @@ Status FlushJob::WriteLevel0Table() {
   InternalStats::CompactionStats stats(1);
   stats.micros = db_options_.env->NowMicros() - start_micros;
   stats.bytes_written = meta_.fd.GetFileSize();
+  MeasureTime(stats_, FLUSH_TIME, stats.micros);
   cfd_->internal_stats()->AddCompactionStats(0 /* level */, stats);
   cfd_->internal_stats()->AddCFStats(InternalStats::BYTES_FLUSHED,
                                      meta_.fd.GetFileSize());

db/flush_job_test.cc

@@ -40,6 +40,7 @@ class FlushJobTest : public testing::Test {
     EXPECT_OK(env_->CreateDirIfMissing(dbname_));
     db_options_.db_paths.emplace_back(dbname_,
                                       std::numeric_limits<uint64_t>::max());
+    db_options_.statistics = rocksdb::CreateDBStatistics();
     // TODO(icanadi) Remove this once we mock out VersionSet
     NewDB();
     std::vector<ColumnFamilyDescriptor> column_families;
@@ -138,16 +139,22 @@ TEST_F(FlushJobTest, NonEmpty) {
   EventLogger event_logger(db_options_.info_log.get());
   SnapshotChecker* snapshot_checker = nullptr;  // not relavant
-  FlushJob flush_job(
-      dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), env_options_, versions_.get(), &mutex_,
-      &shutting_down_, {}, kMaxSequenceNumber, snapshot_checker, &job_context,
-      nullptr, nullptr, nullptr, kNoCompression, nullptr, &event_logger, true);
+  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
+                     db_options_, *cfd->GetLatestMutableCFOptions(),
+                     env_options_, versions_.get(), &mutex_, &shutting_down_,
+                     {}, kMaxSequenceNumber, snapshot_checker, &job_context,
+                     nullptr, nullptr, nullptr, kNoCompression,
+                     db_options_.statistics.get(), &event_logger, true);
+  HistogramData hist;
   FileMetaData fd;
   mutex_.Lock();
   flush_job.PickMemTable();
   ASSERT_OK(flush_job.Run(&fd));
   mutex_.Unlock();
+  db_options_.statistics->histogramData(FLUSH_TIME, &hist);
+  ASSERT_GT(hist.average, 0.0);
   ASSERT_EQ(ToString(0), fd.smallest.user_key().ToString());
   ASSERT_EQ("9999a",
             fd.largest.user_key().ToString());  // range tombstone end key
@@ -210,12 +217,15 @@ TEST_F(FlushJobTest, Snapshots) {
                      env_options_, versions_.get(), &mutex_, &shutting_down_,
                      snapshots, kMaxSequenceNumber, snapshot_checker,
                      &job_context, nullptr, nullptr, nullptr, kNoCompression,
-                     nullptr, &event_logger, true);
+                     db_options_.statistics.get(), &event_logger, true);
   mutex_.Lock();
   flush_job.PickMemTable();
   ASSERT_OK(flush_job.Run());
   mutex_.Unlock();
   mock_table_factory_->AssertSingleFile(inserted_keys);
+  HistogramData hist;
+  db_options_.statistics->histogramData(FLUSH_TIME, &hist);
+  ASSERT_GT(hist.average, 0.0);
   job_context.Clean();
 }

include/rocksdb/statistics.h

@@ -510,6 +510,8 @@ enum Histograms : uint32_t {
   BLOB_DB_COMPRESSION_MICROS,
   // BlobDB decompression time.
   BLOB_DB_DECOMPRESSION_MICROS,
+  // Time spent flushing memtable to disk
+  FLUSH_TIME,
   HISTOGRAM_ENUM_MAX,  // TODO(ldemailly): enforce HistogramsNameMap match
 };
@@ -560,6 +562,7 @@ const std::vector<std::pair<Histograms, std::string>> HistogramsNameMap = {
     {BLOB_DB_GC_MICROS, "rocksdb.blobdb.gc.micros"},
     {BLOB_DB_COMPRESSION_MICROS, "rocksdb.blobdb.compression.micros"},
     {BLOB_DB_DECOMPRESSION_MICROS, "rocksdb.blobdb.decompression.micros"},
+    {FLUSH_TIME, "rocksdb.db.flush.micros"},
 };

 struct HistogramData {
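The string registered in HistogramsNameMap above is the name that appears when the statistics object is dumped. A rough sketch of pulling the full dump (the helper function below is illustrative only, not part of this change):

#include <cstdio>
#include <memory>
#include <string>

#include "rocksdb/statistics.h"

// Prints every ticker and histogram recorded by `stats`; once a flush has
// run, the dump includes a line for "rocksdb.db.flush.micros" with its
// percentiles, count, and sum.
void DumpStats(const std::shared_ptr<rocksdb::Statistics>& stats) {
  std::string dump = stats->ToString();
  std::printf("%s\n", dump.c_str());
}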

java/rocksjni/portal.h

@@ -2806,8 +2806,10 @@ class HistogramTypeJni {
         return 0x1D;
       case rocksdb::Histograms::READ_NUM_MERGE_OPERANDS:
         return 0x1E;
-      case rocksdb::Histograms::HISTOGRAM_ENUM_MAX:
+      case rocksdb::Histograms::FLUSH_TIME:
         return 0x1F;
+      case rocksdb::Histograms::HISTOGRAM_ENUM_MAX:
+        return 0x20;
       default:
         // undefined/default
@@ -2882,6 +2884,8 @@ class HistogramTypeJni {
       case 0x1E:
         return rocksdb::Histograms::READ_NUM_MERGE_OPERANDS;
       case 0x1F:
+        return rocksdb::Histograms::FLUSH_TIME;
+      case 0x20:
         return rocksdb::Histograms::HISTOGRAM_ENUM_MAX;
       default:
