@@ -132,7 +132,7 @@ DEFINE_bool(analyze_single_delete, false, "Analyze the SingleDelete query.");
 DEFINE_bool(analyze_range_delete, false, "Analyze the DeleteRange query.");
 DEFINE_bool(analyze_merge, false, "Analyze the Merge query.");
 DEFINE_bool(analyze_iterator, false,
-            " Analyze the iterate query like seek() and seekForPrev().");
+            " Analyze the iterate query like Seek() and SeekForPrev().");
 DEFINE_bool(analyze_multiget, false,
             " Analyze the MultiGet query. NOTE: for"
             " MultiGet, we analyze each KV-pair read in one MultiGet query. "
@@ -280,13 +280,14 @@ TraceAnalyzer::TraceAnalyzer(std::string& trace_path, std::string& output_path,
   total_access_keys_ = 0;
   total_gets_ = 0;
   total_writes_ = 0;
+  total_seeks_ = 0;
+  total_seek_prevs_ = 0;
+  total_multigets_ = 0;
   trace_create_time_ = 0;
   begin_time_ = 0;
   end_time_ = 0;
   time_series_start_ = 0;
   cur_time_sec_ = 0;
-  // Set the default trace file version as version 0.2
-  trace_file_version_ = 2;
   if (FLAGS_sample_ratio > 1.0 || FLAGS_sample_ratio <= 0) {
     sample_max_ = 1;
   } else {
@@ -360,7 +361,11 @@ TraceAnalyzer::~TraceAnalyzer() {}
 Status TraceAnalyzer::PrepareProcessing() {
   Status s;
   // Prepare the trace reader
-  s = NewFileTraceReader(env_, env_options_, trace_name_, &trace_reader_);
+  if (trace_reader_ == nullptr) {
+    s = NewFileTraceReader(env_, env_options_, trace_name_, &trace_reader_);
+  } else {
+    s = trace_reader_->Reset();
+  }
   if (!s.ok()) {
     return s;
   }
@@ -451,8 +456,9 @@ Status TraceAnalyzer::StartProcessing() {
     fprintf(stderr, "Cannot read the header\n");
     return s;
   }
-  s = TracerHelper::ParseTraceHeader(header, &trace_file_version_,
-                                     &db_version_);
+  // Set the default trace file version as version 0.2
+  int trace_file_version = 2;
+  s = TracerHelper::ParseTraceHeader(header, &trace_file_version, &db_version_);
   if (!s.ok()) {
     return s;
   }
@@ -469,96 +475,29 @@ Status TraceAnalyzer::StartProcessing() {
       break;
     }
-
-    total_requests_++;
     end_time_ = trace.ts;
     if (trace.type == kTraceEnd) {
       break;
     }
+    // Do not count TraceEnd (if there is one)
+    total_requests_++;
 
     std::unique_ptr<TraceRecord> record;
-    switch (trace.type) {
-      case kTraceWrite: {
-        s = TracerHelper::DecodeWriteRecord(&trace, trace_file_version_,
-                                            &record);
-        if (!s.ok()) {
-          return s;
-        }
-        total_writes_++;
-        c_time_ = trace.ts;
-        std::unique_ptr<WriteQueryTraceRecord> r(
-            reinterpret_cast<WriteQueryTraceRecord*>(record.release()));
-        // Note that, if the write happens in a transaction,
-        // 'Write' will be called twice, one for Prepare, one for
-        // Commit. Thus, in the trace, for the same WriteBatch, there
-        // will be two reords if it is in a transaction. Here, we only
-        // process the reord that is committed. If write is non-transaction,
-        // HasBeginPrepare()==false, so we process it normally.
-        WriteBatch batch(r->GetWriteBatchRep().ToString());
-        if (batch.HasBeginPrepare() && !batch.HasCommit()) {
-          continue;
-        }
-        TraceWriteHandler write_handler(this);
-        s = batch.Iterate(&write_handler);
-        if (!s.ok()) {
-          fprintf(stderr, "Cannot process the write batch in the trace\n");
-          return s;
-        }
-        break;
-      }
-      case kTraceGet: {
-        s = TracerHelper::DecodeGetRecord(&trace, trace_file_version_, &record);
-        if (!s.ok()) {
-          return s;
-        }
-        total_gets_++;
-        std::unique_ptr<GetQueryTraceRecord> r(
-            reinterpret_cast<GetQueryTraceRecord*>(record.release()));
-        s = HandleGet(r->GetColumnFamilyID(), r->GetKey(), r->GetTimestamp(),
-                      1);
-        if (!s.ok()) {
-          fprintf(stderr, "Cannot process the get in the trace\n");
-          return s;
-        }
-        break;
-      }
-      case kTraceIteratorSeek:
-      case kTraceIteratorSeekForPrev: {
-        s = TracerHelper::DecodeIterRecord(&trace, trace_file_version_,
-                                           &record);
-        if (!s.ok()) {
-          return s;
-        }
-        std::unique_ptr<IteratorSeekQueryTraceRecord> r(
-            reinterpret_cast<IteratorSeekQueryTraceRecord*>(record.release()));
-        s = HandleIter(r->GetColumnFamilyID(), r->GetKey(), r->GetTimestamp(),
-                       r->GetTraceType());
-        if (!s.ok()) {
-          fprintf(stderr, "Cannot process the iterator in the trace\n");
-          return s;
-        }
-        break;
-      }
-      case kTraceMultiGet: {
-        s = TracerHelper::DecodeMultiGetRecord(&trace, trace_file_version_,
-                                               &record);
-        if (!s.ok()) {
-          return s;
-        }
-        std::unique_ptr<MultiGetQueryTraceRecord> r(
-            reinterpret_cast<MultiGetQueryTraceRecord*>(record.release()));
-        s = HandleMultiGet(r->GetColumnFamilyIDs(), r->GetKeys(),
-                           r->GetTimestamp());
-        break;
-      }
-      default: {
-        // Skip unsupported types
-        break;
-      }
-    }
+    s = TracerHelper::DecodeTraceRecord(&trace, trace_file_version, &record);
+    if (s.IsNotSupported()) {
+      continue;
+    }
+    if (!s.ok()) {
+      return s;
+    }
+    s = record->Accept(this, nullptr);
+    if (!s.ok()) {
+      fprintf(stderr, "Cannot process the TraceRecord\n");
+      return s;
+    }
   }
   if (s.IsIncomplete()) {
     // Fix it: Reaching eof returns Incomplete status at the moment.
     //
     return Status::OK();
   }
   return s;
@@ -1555,14 +1494,41 @@ Status TraceAnalyzer::CloseOutputFiles() {
   return s;
 }
 
-// Handle the Get request in the trace
-Status TraceAnalyzer::HandleGet(uint32_t column_family_id, const Slice& key,
-                                const uint64_t& ts, const uint32_t& get_ret) {
+Status TraceAnalyzer::Handle(const WriteQueryTraceRecord& record,
+                             std::unique_ptr<TraceRecordResult>* /*result*/) {
+  total_writes_++;
+  // Note that, if the write happens in a transaction,
+  // 'Write' will be called twice, one for Prepare, one for
+  // Commit. Thus, in the trace, for the same WriteBatch, there
+  // will be two records if it is in a transaction. Here, we only
+  // process the reord that is committed. If write is non-transaction,
+  // HasBeginPrepare()==false, so we process it normally.
+  WriteBatch batch(record.GetWriteBatchRep().ToString());
+  if (batch.HasBeginPrepare() && !batch.HasCommit()) {
+    return Status::OK();
+  }
+  c_time_ = record.GetTimestamp();
+  Status s = batch.Iterate(this);
+  if (!s.ok()) {
+    fprintf(stderr, "Cannot process the write batch in the trace\n");
+    return s;
+  }
+  return Status::OK();
+}
+
+Status TraceAnalyzer::Handle(const GetQueryTraceRecord& record,
+                             std::unique_ptr<TraceRecordResult>* /*result*/) {
+  total_gets_++;
+
+  uint32_t cf_id = record.GetColumnFamilyID();
+  Slice key = record.GetKey();
+  uint64_t ts = record.GetTimestamp();
+
   Status s;
   size_t value_size = 0;
   if (FLAGS_convert_to_human_readable_trace && trace_sequence_f_) {
-    s = WriteTraceSequence(TraceOperationType::kGet, column_family_id, key,
-                           value_size, ts);
+    s = WriteTraceSequence(TraceOperationType::kGet, cf_id, key, value_size,
+                           ts);
     if (!s.ok()) {
       return Status::Corruption("Failed to write the trace sequence to file");
     }
@@ -1580,11 +1546,109 @@ Status TraceAnalyzer::HandleGet(uint32_t column_family_id, const Slice& key,
   if (!ta_[TraceOperationType::kGet].enabled) {
     return Status::OK();
   }
-  if (get_ret == 1) {
-    value_size = 10;
-  }
-  s = KeyStatsInsertion(TraceOperationType::kGet, column_family_id,
-                        key.ToString(), value_size, ts);
+  value_size = 10;
+  s = KeyStatsInsertion(TraceOperationType::kGet, cf_id, key.ToString(),
+                        value_size, ts);
+  if (!s.ok()) {
+    return Status::Corruption("Failed to insert key statistics");
+  }
+  return s;
+}
+
+Status TraceAnalyzer::Handle(const IteratorSeekQueryTraceRecord& record,
+                             std::unique_ptr<TraceRecordResult>* /*result*/) {
+  uint32_t cf_id = record.GetColumnFamilyID();
+  Slice key = record.GetKey();
+  uint64_t ts = record.GetTimestamp();
+
+  // To do: add lower/upper bounds
+
+  Status s;
+  size_t value_size = 0;
+  int type = -1;
+  if (record.GetTraceType() == kTraceIteratorSeek) {
+    type = TraceOperationType::kIteratorSeek;
+    total_seeks_++;
+  } else if (record.GetTraceType() == kTraceIteratorSeekForPrev) {
+    type = TraceOperationType::kIteratorSeekForPrev;
+    total_seek_prevs_++;
+  } else {
+    return s;
+  }
+  if (type == -1) {
+    return s;
+  }
+
+  if (FLAGS_convert_to_human_readable_trace && trace_sequence_f_) {
+    s = WriteTraceSequence(type, cf_id, key, value_size, ts);
+    if (!s.ok()) {
+      return Status::Corruption("Failed to write the trace sequence to file");
+    }
+  }
+
+  if (ta_[type].sample_count >= sample_max_) {
+    ta_[type].sample_count = 0;
+  }
+  if (ta_[type].sample_count > 0) {
+    ta_[type].sample_count++;
+    return Status::OK();
+  }
+  ta_[type].sample_count++;
+
+  if (!ta_[type].enabled) {
+    return Status::OK();
+  }
+  s = KeyStatsInsertion(type, cf_id, key.ToString(), value_size, ts);
+  if (!s.ok()) {
+    return Status::Corruption("Failed to insert key statistics");
+  }
+  return s;
+}
+
+Status TraceAnalyzer::Handle(const MultiGetQueryTraceRecord& record,
+                             std::unique_ptr<TraceRecordResult>* /*result*/) {
+  total_multigets_++;
+
+  std::vector<uint32_t> cf_ids = record.GetColumnFamilyIDs();
+  std::vector<Slice> keys = record.GetKeys();
+  uint64_t ts = record.GetTimestamp();
+
+  Status s;
+  size_t value_size = 0;
+  if (cf_ids.size() != keys.size()) {
+    // The size does not match is not the error of tracing and anayzing, we just
+    // report it to the user. The analyzing continues.
+    printf("The CF ID vector size does not match the keys vector size!\n");
+  }
+  size_t vector_size = std::min(cf_ids.size(), keys.size());
+  if (FLAGS_convert_to_human_readable_trace && trace_sequence_f_) {
+    for (size_t i = 0; i < vector_size; i++) {
+      assert(i < cf_ids.size() && i < keys.size());
+      s = WriteTraceSequence(TraceOperationType::kMultiGet, cf_ids[i], keys[i],
+                             value_size, ts);
+    }
+    if (!s.ok()) {
+      return Status::Corruption("Failed to write the trace sequence to file");
+    }
+  }
+
+  if (ta_[TraceOperationType::kMultiGet].sample_count >= sample_max_) {
+    ta_[TraceOperationType::kMultiGet].sample_count = 0;
+  }
+  if (ta_[TraceOperationType::kMultiGet].sample_count > 0) {
+    ta_[TraceOperationType::kMultiGet].sample_count++;
+    return Status::OK();
+  }
+  ta_[TraceOperationType::kMultiGet].sample_count++;
+
+  if (!ta_[TraceOperationType::kMultiGet].enabled) {
+    return Status::OK();
+  }
+  for (size_t i = 0; i < vector_size; i++) {
+    assert(i < cf_ids.size() && i < keys.size());
+    s = KeyStatsInsertion(TraceOperationType::kMultiGet, cf_ids[i],
+                          keys[i].ToString(), value_size, ts);
+  }
   if (!s.ok()) {
     return Status::Corruption("Failed to insert key statistics");
   }
@@ -1592,8 +1656,8 @@ Status TraceAnalyzer::HandleGet(uint32_t column_family_id, const Slice& key,
 }
 
 // Handle the Put request in the write batch of the trace
-Status TraceAnalyzer::HandlePut(uint32_t column_family_id, const Slice& key,
-                                const Slice& value) {
+Status TraceAnalyzer::PutCF(uint32_t column_family_id, const Slice& key,
+                            const Slice& value) {
   Status s;
   size_t value_size = value.ToString().size();
   if (FLAGS_convert_to_human_readable_trace && trace_sequence_f_) {
@@ -1625,8 +1689,7 @@ Status TraceAnalyzer::HandlePut(uint32_t column_family_id, const Slice& key,
 }
 
 // Handle the Delete request in the write batch of the trace
-Status TraceAnalyzer::HandleDelete(uint32_t column_family_id,
-                                   const Slice& key) {
+Status TraceAnalyzer::DeleteCF(uint32_t column_family_id, const Slice& key) {
   Status s;
   size_t value_size = 0;
   if (FLAGS_convert_to_human_readable_trace && trace_sequence_f_) {
@@ -1658,8 +1721,8 @@ Status TraceAnalyzer::HandleDelete(uint32_t column_family_id,
 }
 
 // Handle the SingleDelete request in the write batch of the trace
-Status TraceAnalyzer::HandleSingleDelete(uint32_t column_family_id,
-                                         const Slice& key) {
+Status TraceAnalyzer::SingleDeleteCF(uint32_t column_family_id,
+                                     const Slice& key) {
   Status s;
   size_t value_size = 0;
   if (FLAGS_convert_to_human_readable_trace && trace_sequence_f_) {
@@ -1691,9 +1754,9 @@ Status TraceAnalyzer::HandleSingleDelete(uint32_t column_family_id,
 }
 
 // Handle the DeleteRange request in the write batch of the trace
-Status TraceAnalyzer::HandleDeleteRange(uint32_t column_family_id,
-                                        const Slice& begin_key,
-                                        const Slice& end_key) {
+Status TraceAnalyzer::DeleteRangeCF(uint32_t column_family_id,
+                                    const Slice& begin_key,
+                                    const Slice& end_key) {
   Status s;
   size_t value_size = 0;
   if (FLAGS_convert_to_human_readable_trace && trace_sequence_f_) {
@@ -1727,8 +1790,8 @@ Status TraceAnalyzer::HandleDeleteRange(uint32_t column_family_id,
 }
 
 // Handle the Merge request in the write batch of the trace
-Status TraceAnalyzer::HandleMerge(uint32_t column_family_id, const Slice& key,
-                                  const Slice& value) {
+Status TraceAnalyzer::MergeCF(uint32_t column_family_id, const Slice& key,
+                              const Slice& value) {
   Status s;
   size_t value_size = value.ToString().size();
   if (FLAGS_convert_to_human_readable_trace && trace_sequence_f_) {
@@ -1759,95 +1822,6 @@ Status TraceAnalyzer::HandleMerge(uint32_t column_family_id, const Slice& key,
   return s;
 }
 
-// Handle the Iterator request in the trace
-Status TraceAnalyzer::HandleIter(uint32_t column_family_id, const Slice& key,
-                                 const uint64_t& ts, TraceType trace_type) {
-  Status s;
-  size_t value_size = 0;
-  int type = -1;
-  if (trace_type == kTraceIteratorSeek) {
-    type = TraceOperationType::kIteratorSeek;
-  } else if (trace_type == kTraceIteratorSeekForPrev) {
-    type = TraceOperationType::kIteratorSeekForPrev;
-  } else {
-    return s;
-  }
-  if (type == -1) {
-    return s;
-  }
-
-  if (FLAGS_convert_to_human_readable_trace && trace_sequence_f_) {
-    s = WriteTraceSequence(type, column_family_id, key, value_size, ts);
-    if (!s.ok()) {
-      return Status::Corruption("Failed to write the trace sequence to file");
-    }
-  }
-
-  if (ta_[type].sample_count >= sample_max_) {
-    ta_[type].sample_count = 0;
-  }
-  if (ta_[type].sample_count > 0) {
-    ta_[type].sample_count++;
-    return Status::OK();
-  }
-  ta_[type].sample_count++;
-
-  if (!ta_[type].enabled) {
-    return Status::OK();
-  }
-  s = KeyStatsInsertion(type, column_family_id, key.ToString(), value_size, ts);
-  if (!s.ok()) {
-    return Status::Corruption("Failed to insert key statistics");
-  }
-  return s;
-}
-
-// Handle MultiGet queries in the trace
-Status TraceAnalyzer::HandleMultiGet(
-    const std::vector<uint32_t>& column_family_ids,
-    const std::vector<Slice>& keys, const uint64_t& ts) {
-  Status s;
-  size_t value_size = 0;
-  if (column_family_ids.size() != keys.size()) {
-    // The size does not match is not the error of tracing and anayzing, we just
-    // report it to the user. The analyzing continues.
-    printf("The CF ID vector size does not match the keys vector size!\n");
-  }
-  size_t vector_size = std::min(column_family_ids.size(), keys.size());
-  if (FLAGS_convert_to_human_readable_trace && trace_sequence_f_) {
-    for (size_t i = 0; i < vector_size; i++) {
-      assert(i < column_family_ids.size() && i < keys.size());
-      s = WriteTraceSequence(TraceOperationType::kMultiGet,
-                             column_family_ids[i], keys[i], value_size, ts);
-    }
-    if (!s.ok()) {
-      return Status::Corruption("Failed to write the trace sequence to file");
-    }
-  }
-
-  if (ta_[TraceOperationType::kMultiGet].sample_count >= sample_max_) {
-    ta_[TraceOperationType::kMultiGet].sample_count = 0;
-  }
-  if (ta_[TraceOperationType::kMultiGet].sample_count > 0) {
-    ta_[TraceOperationType::kMultiGet].sample_count++;
-    return Status::OK();
-  }
-  ta_[TraceOperationType::kMultiGet].sample_count++;
-
-  if (!ta_[TraceOperationType::kMultiGet].enabled) {
-    return Status::OK();
-  }
-  for (size_t i = 0; i < vector_size; i++) {
-    assert(i < column_family_ids.size() && i < keys.size());
-    s = KeyStatsInsertion(TraceOperationType::kMultiGet, column_family_ids[i],
-                          keys[i].ToString(), value_size, ts);
-  }
-  if (!s.ok()) {
-    return Status::Corruption("Failed to insert key statistics");
-  }
-  return s;
-}
-
 // Before the analyzer is closed, the requested general statistic results are
 // printed out here. In current stage, these information are not output to
 // the files.
@@ -1999,8 +1973,11 @@ void TraceAnalyzer::PrintStatistics() {
   printf("The statistics related to query number need to times: %u\n",
          sample_max_);
   printf("Total_requests: %" PRIu64 " Total_accessed_keys: %" PRIu64
-         " Total_gets: %" PRIu64 " Total_write_batch: %" PRIu64 "\n",
-         total_requests_, total_access_keys_, total_gets_, total_writes_);
+         " Total_gets: %" PRIu64 " Total_write_batches: %" PRIu64
+         " Total_seeks: %" PRIu64 " Total_seek_for_prevs: %" PRIu64
+         " Total_multigets: %" PRIu64 "\n",
+         total_requests_, total_access_keys_, total_gets_, total_writes_,
+         total_seeks_, total_seek_prevs_, total_multigets_);
   for (int type = 0; type < kTaTypeNum; type++) {
     if (!ta_[type].enabled) {
       continue;