[Report the #gets and #founds in db_stress]

Summary:
Also added some comments and fixed some bugs in
stats reporting. The stats now match the expected values.

Test Plan:
[nponnekanti@dev902 /data/users/nponnekanti/rocksdb] ./db_stress --test_batches_snapshots=1 --ops_per_thread=1000 --threads=1 --max_key=320
LevelDB version     : 1.5
Number of threads   : 1
Ops per thread      : 1000
Read percentage     : 10
Delete percentage   : 30
Max key             : 320
Ratio #ops/#keys    : 3
Num times DB reopens: 10
Batches/snapshots   : 1
Num keys per lock   : 4
Compression         : snappy
------------------------------------------------
No lock creation because test_batches_snapshots set
2013/03/04-15:58:56  Starting database operations
2013/03/04-15:58:56  Reopening database for the 1th time
2013/03/04-15:58:56  Reopening database for the 2th time
2013/03/04-15:58:56  Reopening database for the 3th time
2013/03/04-15:58:56  Reopening database for the 4th time
Created bg thread 0x7f4542bff700
2013/03/04-15:58:56  Reopening database for the 5th time
2013/03/04-15:58:56  Reopening database for the 6th time
2013/03/04-15:58:56  Reopening database for the 7th time
2013/03/04-15:58:57  Reopening database for the 8th time
2013/03/04-15:58:57  Reopening database for the 9th time
2013/03/04-15:58:57  Reopening database for the 10th time
2013/03/04-15:58:57  Reopening database for the 11th time
2013/03/04-15:58:57  Limited verification already done during gets
Stress Test : 1811.551 micros/op 552 ops/sec
            : Wrote 0.10 MB (0.05 MB/sec) (598% of 1011 ops)
            : Wrote 6050 times
            : Deleted 3050 times
            : 500/900 gets found the key
            : Got errors 0 times

[nponnekanti@dev902 /data/users/nponnekanti/rocksdb] ./db_stress --ops_per_thread=1000 --threads=1 --max_key=320
LevelDB version     : 1.5
Number of threads   : 1
Ops per thread      : 1000
Read percentage     : 10
Delete percentage   : 30
Max key             : 320
Ratio #ops/#keys    : 3
Num times DB reopens: 10
Batches/snapshots   : 0
Num keys per lock   : 4
Compression         : snappy
------------------------------------------------
Creating 80 locks
2013/03/04-15:58:17  Starting database operations
2013/03/04-15:58:17  Reopening database for the 1th time
2013/03/04-15:58:17  Reopening database for the 2th time
2013/03/04-15:58:17  Reopening database for the 3th time
2013/03/04-15:58:17  Reopening database for the 4th time
Created bg thread 0x7fc0f5bff700
2013/03/04-15:58:17  Reopening database for the 5th time
2013/03/04-15:58:17  Reopening database for the 6th time
2013/03/04-15:58:18  Reopening database for the 7th time
2013/03/04-15:58:18  Reopening database for the 8th time
2013/03/04-15:58:18  Reopening database for the 9th time
2013/03/04-15:58:18  Reopening database for the 10th time
2013/03/04-15:58:18  Reopening database for the 11th time
2013/03/04-15:58:18  Starting verification
Stress Test : 1836.258 micros/op 544 ops/sec
            : Wrote 0.01 MB (0.01 MB/sec) (59% of 1011 ops)
            : Wrote 605 times
            : Deleted 305 times
            : 50/90 gets found the key
            : Got errors 0 times
2013/03/04-15:58:18  Verification successful

Revert Plan: OK

Task ID: #

Reviewers: emayanke, dhruba

Reviewed By: emayanke

CC: leveldb

Differential Revision: https://reviews.facebook.net/D9081
main
Vamsi Ponnekanti 12 years ago
parent 3eed5c9c01
commit 8ade935971
  1. 57
      tools/db_stress.cc

@ -1,7 +1,7 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
//
//The test uses an array to compare against values written to the database. //The test uses an array to compare against values written to the database.
//Keys written to the array are in 1:1 correspondence to the actual values in //Keys written to the array are in 1:1 correspondence to the actual values in
//the database according to the formula in the function GenerateValue //the database according to the formula in the function GenerateValue
@ -11,6 +11,8 @@
//the positions in the array. Therefore to shorten/elongate the amount of time //the positions in the array. Therefore to shorten/elongate the amount of time
//that this test runs for, you should change the settings: //that this test runs for, you should change the settings:
//FLAGS_max_key, FLAGS_ops_per_thread, (sometimes also FLAGS_threads) //FLAGS_max_key, FLAGS_ops_per_thread, (sometimes also FLAGS_threads)
//NOTE that if FLAGS_test_batches_snapshots is set, the test behaves a little
//differently. See comment header for the flag.
#include <sys/types.h> #include <sys/types.h>
#include <stdio.h> #include <stdio.h>
@ -178,6 +180,7 @@ class Stats {
long done_; long done_;
long writes_; long writes_;
long deletes_; long deletes_;
long gets_;
long founds_; long founds_;
long errors_; long errors_;
int next_report_; int next_report_;
@ -194,6 +197,7 @@ class Stats {
done_ = 0; done_ = 0;
writes_ = 0; writes_ = 0;
deletes_ = 0; deletes_ = 0;
gets_ = 0;
founds_ = 0; founds_ = 0;
errors_ = 0; errors_ = 0;
bytes_ = 0; bytes_ = 0;
@ -208,6 +212,9 @@ class Stats {
done_ += other.done_; done_ += other.done_;
writes_ += other.writes_; writes_ += other.writes_;
deletes_ += other.deletes_; deletes_ += other.deletes_;
gets_ += other.gets_;
founds_ += other.founds_;
errors_ += other.errors_;
bytes_ += other.bytes_; bytes_ += other.bytes_;
seconds_ += other.seconds_; seconds_ += other.seconds_;
if (other.start_ < start_) start_ = other.start_; if (other.start_ < start_) start_ = other.start_;
@ -254,8 +261,9 @@ class Stats {
deletes_ += n; deletes_ += n;
} }
void AddFounds(int n) { void AddGets(int ngets, int nfounds) {
founds_ += n; founds_ += nfounds;
gets_ += ngets;
} }
void AddErrors(int n) { void AddErrors(int n) {
@ -279,8 +287,9 @@ class Stats {
seconds_ * 1e6 / done_, (long)throughput); seconds_ * 1e6 / done_, (long)throughput);
fprintf(stdout, "%-12s: Wrote %.2f MB (%.2f MB/sec) (%ld%% of %ld ops)\n", fprintf(stdout, "%-12s: Wrote %.2f MB (%.2f MB/sec) (%ld%% of %ld ops)\n",
"", bytes_mb, rate, (100*writes_)/done_, done_); "", bytes_mb, rate, (100*writes_)/done_, done_);
fprintf(stdout, "%-12s: Wrote %ld times\n", "", writes_);
fprintf(stdout, "%-12s: Deleted %ld times\n", "", deletes_); fprintf(stdout, "%-12s: Deleted %ld times\n", "", deletes_);
fprintf(stdout, "%-12s: Found lookups %ld times\n", "", founds_); fprintf(stdout, "%-12s: %ld/%ld gets found the key\n", "", founds_, gets_);
fprintf(stdout, "%-12s: Got errors %ld times\n", "", errors_); fprintf(stdout, "%-12s: Got errors %ld times\n", "", errors_);
if (FLAGS_histogram) { if (FLAGS_histogram) {
@ -512,8 +521,13 @@ class StressTest {
} }
now = FLAGS_env->NowMicros(); now = FLAGS_env->NowMicros();
fprintf(stdout, "%s Starting verification\n", if (FLAGS_test_batches_snapshots) {
FLAGS_env->TimeToString((uint64_t) now/1000000).c_str()); fprintf(stdout, "%s Limited verification already done during gets\n",
FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());
} else {
fprintf(stdout, "%s Starting verification\n",
FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());
}
shared.SetStartVerify(); shared.SetStartVerify();
shared.GetCondVar()->SignalAll(); shared.GetCondVar()->SignalAll();
@ -532,9 +546,10 @@ class StressTest {
threads[i] = nullptr; threads[i] = nullptr;
} }
double now = FLAGS_env->NowMicros(); double now = FLAGS_env->NowMicros();
fprintf(stdout, "%s Verification successful\n", if (!FLAGS_test_batches_snapshots) {
FLAGS_env->TimeToString((uint64_t) now/1000000).c_str()); fprintf(stdout, "%s Verification successful\n",
FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());
}
PrintStatistics(); PrintStatistics();
} }
@ -666,6 +681,7 @@ class StressTest {
// find more errors if any // find more errors if any
} else if (s.IsNotFound()) { } else if (s.IsNotFound()) {
values[i] = ""; values[i] = "";
thread->stats.AddGets(1, 0);
} else { } else {
values[i] = *value; values[i] = *value;
@ -676,7 +692,7 @@ class StressTest {
expected_prefix, actual_prefix); expected_prefix, actual_prefix);
} }
(values[i])[0] = ' '; // blank out the differing character (values[i])[0] = ' '; // blank out the differing character
thread->stats.AddFounds(1); thread->stats.AddGets(1, 1);
} }
} }
db_->ReleaseSnapshot(readoptionscopy.snapshot); db_->ReleaseSnapshot(readoptionscopy.snapshot);
@ -720,7 +736,8 @@ class StressTest {
else { else {
thread->shared->GetCondVar()->Wait(); thread->shared->GetCondVar()->Wait();
} }
thread->stats.Start(); // Commenting this out as we don't want to reset stats on each open.
// thread->stats.Start();
} }
} }
@ -730,9 +747,21 @@ class StressTest {
unsigned int probability_operation = thread->rand.Uniform(100); unsigned int probability_operation = thread->rand.Uniform(100);
if (probability_operation < FLAGS_readpercent) { if (probability_operation < FLAGS_readpercent) {
// read load // read load
FLAGS_test_batches_snapshots ? if (!FLAGS_test_batches_snapshots) {
MultiGet(thread, read_opts, key, &from_db) : Status s = db_->Get(read_opts, key, &from_db);
db_->Get(read_opts, key, &from_db); if (s.ok()) {
// found case
thread->stats.AddGets(1, 1);
} else if (s.IsNotFound()) {
// not found case
thread->stats.AddGets(1, 0);
} else {
// errors case
thread->stats.AddErrors(1);
}
} else {
MultiGet(thread, read_opts, key, &from_db);
}
} else if (probability_operation < FLAGS_delpercent + FLAGS_readpercent) { } else if (probability_operation < FLAGS_delpercent + FLAGS_readpercent) {
//introduce delete load //introduce delete load
if (!FLAGS_test_batches_snapshots) { if (!FLAGS_test_batches_snapshots) {

Loading…
Cancel
Save