Allow EventLogger to directly log from a JSONWriter.

Summary:
Allow EventLogger to directly log from a JSONWriter.  This allows a
JSONWriter to be shared by EventLogger and, potentially, EventListener,
which is an important step toward integrating EventLogger and EventListener.

This patch also rewrites EventLoggerHelpers::LogTableFileCreation() to use
the new API while producing identical log output.
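
As a minimal sketch of the new calling pattern (not part of the patch): the
function name LogExampleEvent and the event name are made up, the includes
assume JSONWriter and EventLogger live in util/event_logger.h and the helpers
in db/event_logger_helpers.h, and whether a final EndObject() is needed before
Log() depends on JSONWriter's bracket handling, which this diff does not show.

    #include "db/event_logger_helpers.h"
    #include "util/event_logger.h"

    namespace rocksdb {
    // Build one JSON document and hand it to the EventLogger directly; a
    // future EventListener could consume the same JSONWriter (hypothetical).
    void LogExampleEvent(EventLogger* event_logger, int job_id) {
      JSONWriter jwriter;
      EventLoggerHelpers::AppendCurrentTime(&jwriter);
      jwriter << "job" << job_id
              << "event" << "example_event";
      // A closing jwriter.EndObject() may be required here, depending on
      // JSONWriter's contract (not shown in this diff).
      event_logger->Log(jwriter);
    }
    }  // namespace rocksdb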

Test Plan:
Run db_bench in debug mode and make sure the log is correct and no
assertions fail.

Reviewers: sdong, anthony, igor

Reviewed By: igor

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D38709
branch main
author Yueh-Hsuan Chiang (10 years ago)
parent 7a3577519f
commit 7fee8775a4
db/event_logger_helpers.cc   (64 changes)
db/event_logger_helpers.h    (1 change)
util/event_logger.cc         (28 changes)
util/event_logger.h          (7 changes)

@@ -11,36 +11,50 @@ namespace {
inline double SafeDivide(double a, double b) { return b == 0.0 ? 0 : a / b; }
}  // namespace
void EventLoggerHelpers::AppendCurrentTime(JSONWriter* jwriter) {
  *jwriter << "time_micros"
           << std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::system_clock::now().time_since_epoch()).count();
}
void EventLoggerHelpers::LogTableFileCreation(
    EventLogger* event_logger, int job_id, uint64_t file_number,
    uint64_t file_size, const TableProperties& table_properties) {
  auto stream = event_logger->Log();
  stream << "job" << job_id << "event"
         << "table_file_creation"
         << "file_number" << file_number << "file_size" << file_size
         << "table_properties";
  stream.StartObject();
  // basic properties:
  stream << "data_size" << table_properties.data_size
         << "index_size" << table_properties.index_size
         << "filter_size" << table_properties.filter_size
         << "raw_key_size" << table_properties.raw_key_size
         << "raw_average_key_size" << SafeDivide(table_properties.raw_key_size,
                                                 table_properties.num_entries)
         << "raw_value_size" << table_properties.raw_value_size
         << "raw_average_value_size" << SafeDivide(
                table_properties.raw_value_size, table_properties.num_entries)
         << "num_data_blocks" << table_properties.num_data_blocks
         << "num_entries" << table_properties.num_entries
         << "filter_policy_name" << table_properties.filter_policy_name;
  // user collected properties
  for (const auto& prop : table_properties.user_collected_properties) {
    stream << prop.first << prop.second;
  JSONWriter jwriter;
  AppendCurrentTime(&jwriter);
  jwriter << "job" << job_id
          << "event" << "table_file_creation"
          << "file_number" << file_number
          << "file_size" << file_size;
  // table_properties
  {
    jwriter << "table_properties";
    jwriter.StartObject();
    // basic properties:
    jwriter << "data_size" << table_properties.data_size
            << "index_size" << table_properties.index_size
            << "filter_size" << table_properties.filter_size
            << "raw_key_size" << table_properties.raw_key_size
            << "raw_average_key_size" << SafeDivide(
                   table_properties.raw_key_size,
                   table_properties.num_entries)
            << "raw_value_size" << table_properties.raw_value_size
            << "raw_average_value_size" << SafeDivide(
                   table_properties.raw_value_size, table_properties.num_entries)
            << "num_data_blocks" << table_properties.num_data_blocks
            << "num_entries" << table_properties.num_entries
            << "filter_policy_name" << table_properties.filter_policy_name;
    // user collected properties
    for (const auto& prop : table_properties.user_collected_properties) {
      jwriter << prop.first << prop.second;
    }
    jwriter.EndObject();
  }
  stream.EndObject();
  event_logger->Log(jwriter);
}
}  // namespace rocksdb
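
For context only, a hypothetical caller of the rewritten helper: the function
name ReportNewTableFile, the literal numbers, and the filter-name string are
example values, while the TableProperties fields and the
LogTableFileCreation() signature come from the diff above. The resulting line
in the info log is the EVENT_LOG_v1 prefix followed by the JSON document.

    #include "db/event_logger_helpers.h"
    #include "rocksdb/table_properties.h"
    #include "util/event_logger.h"

    namespace rocksdb {
    // Fill in a TableProperties instance and let the helper emit the
    // "table_file_creation" event through the given EventLogger.
    void ReportNewTableFile(EventLogger* event_logger) {
      TableProperties props;
      props.data_size = 4096;
      props.num_entries = 100;
      props.filter_policy_name = "rocksdb.BuiltinBloomFilter";
      EventLoggerHelpers::LogTableFileCreation(
          event_logger, /*job_id=*/1, /*file_number=*/7,
          /*file_size=*/8192, props);
    }
    }  // namespace rocksdb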

@@ -11,6 +11,7 @@ namespace rocksdb {
class EventLoggerHelpers {
 public:
  static void AppendCurrentTime(JSONWriter* json_writer);
  static void LogTableFileCreation(EventLogger* event_logger, int job_id,
                                   uint64_t file_number, uint64_t file_size,
                                   const TableProperties& table_properties);

@@ -18,7 +18,6 @@
namespace rocksdb {
const char* kEventLoggerPrefix = "EVENT_LOG_v1";
EventLoggerStream::EventLoggerStream(Logger* logger)
    : logger_(logger), log_buffer_(nullptr), json_writer_(nullptr) {}
@@ -33,14 +32,35 @@ EventLoggerStream::~EventLoggerStream() {
    printf("%s\n", json_writer_->Get().c_str());
#else
    if (logger_) {
      Log(logger_, "%s %s", kEventLoggerPrefix, json_writer_->Get().c_str());
      EventLogger::Log(logger_, *json_writer_);
    } else if (log_buffer_) {
      LogToBuffer(log_buffer_, "%s %s", kEventLoggerPrefix,
                  json_writer_->Get().c_str());
      EventLogger::LogToBuffer(log_buffer_, *json_writer_);
    }
#endif
    delete json_writer_;
  }
}
void EventLogger::Log(const JSONWriter& jwriter) {
  Log(logger_, jwriter);
}
void EventLogger::Log(Logger* logger, const JSONWriter& jwriter) {
#ifdef ROCKSDB_PRINT_EVENTS_TO_STDOUT
  printf("%s\n", jwriter.Get().c_str());
#else
  rocksdb::Log(logger, "%s %s", Prefix(), jwriter.Get().c_str());
#endif
}
void EventLogger::LogToBuffer(
    LogBuffer* log_buffer, const JSONWriter& jwriter) {
#ifdef ROCKSDB_PRINT_EVENTS_TO_STDOUT
  printf("%s\n", jwriter.Get().c_str());
#else
  assert(log_buffer);
  rocksdb::LogToBuffer(log_buffer, "%s %s", Prefix(), jwriter.Get().c_str());
#endif
}
}  // namespace rocksdb
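
The LogToBuffer() overload defers the write until the surrounding LogBuffer is
flushed, which is useful in jobs that must not write to the info log
immediately. A sketch under the assumptions that LogBuffer is constructed from
an InfoLogLevel and a Logger* and flushed via FlushBufferToLog() (as in
util/log_buffer.h); BufferedEventExample and the event name are made up, and
the EndObject() caveat from the first sketch applies here as well.

    #include "rocksdb/env.h"
    #include "util/event_logger.h"
    #include "util/log_buffer.h"

    namespace rocksdb {
    void BufferedEventExample(Logger* info_log) {
      LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, info_log);
      JSONWriter jwriter;
      jwriter << "event" << "buffered_example";
      // Queued into the buffer, not yet written to the info log.
      EventLogger::LogToBuffer(&log_buffer, jwriter);
      // Emits the buffered "EVENT_LOG_v1 {...}" line to info_log.
      log_buffer.FlushBufferToLog();
    }
    }  // namespace rocksdb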

@@ -157,11 +157,18 @@ class EventLoggerStream {
// "file_size": 1909699}
class EventLogger {
 public:
  static const char* Prefix() {
    return "EVENT_LOG_v1";
  }
  explicit EventLogger(Logger* logger) : logger_(logger) {}
  EventLoggerStream Log() { return EventLoggerStream(logger_); }
  EventLoggerStream LogToBuffer(LogBuffer* log_buffer) {
    return EventLoggerStream(log_buffer);
  }
  void Log(const JSONWriter& jwriter);
  static void Log(Logger* logger, const JSONWriter& jwriter);
  static void LogToBuffer(LogBuffer* log_buffer, const JSONWriter& jwriter);
 private:
  Logger* logger_;
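
Note that the existing stream style remains available alongside the new
overloads: a temporary EventLoggerStream still opens and closes the JSON
document itself, and on destruction it now routes through the same static
EventLogger::Log() / LogToBuffer() shown above, so both styles share one
prefix and format. A small illustrative snippet (the function and event names
are made up):

    #include "util/event_logger.h"

    namespace rocksdb {
    void TwoStyles(EventLogger* event_logger) {
      // Stream style: the temporary EventLoggerStream writes the finished
      // document when it goes out of scope at the end of this statement.
      event_logger->Log() << "event" << "stream_style_example";

      // Direct style: the caller owns the JSONWriter and passes it in.
      JSONWriter jwriter;
      jwriter << "event" << "jsonwriter_style_example";
      event_logger->Log(jwriter);
    }
    }  // namespace rocksdb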
