Adapt benchmark result script to new fields. (#10120)

Summary:
Recently merged CI benchmark scripts were failing.

There has been a major revision of the fields in the benchmark output. The upload script expects certain fields to exist, sanity-checks them, and transforms some of them (e.g. converting the date to the format OpenSearch expects), so the script needs to change to match.

Also add more exception checking and debug logging so that it is more obvious when this happens again.

We have deleted the existing report.tsv from the benchmark machine. By default, new results are appended to an existing report.tsv, so when the fields change, later rows no longer match the header, and the upload dies halfway through the report file at the point where the format diverges.
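To make that failure mode concrete, here is a minimal sketch of an appended report.tsv straddling the field revision (field names abbreviated, values invented):

    # Illustrative only: report.tsv appended across the format change.
    rows = [
        'test_name\ttest_date\tops_sec',    # header written by the old format
        'fillseq\t2022-06-10\t80000',       # old-format row, matches the header
        'test\tdate\tops_sec\tp99.9',       # new header appended by the new format
        'fillseq\t2022-06-17\t82000\t42',   # new-format row: wrong names, extra column
    ]
    # Parsing every row against the first header mislabels the later rows,
    # so the upload fails partway through the file.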

Reinstate the config.yml entries for running the benchmarks, so we can once again test the pipeline in situ.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10120

Reviewed By: pdillinger

Differential Revision: D37314908

Pulled By: jay-zhuang

fbshipit-source-id: 34f5243fee694b75c6838eb55d3398e4273254b2
main
Alan Paxton, committed by Facebook GitHub Bot
parent 36fefd7e22
commit a16e2ff82a

Files changed:
  .circleci/config.yml (16 lines)
  build_tools/benchmark_log_tool.py (58 lines)

@@ -206,7 +206,11 @@ commands:
           destination: test_logs
       - run:
           name: Send benchmark report to visualisation
-          command: ./build_tools/benchmark_log_tool.py --tsvfile /tmp/benchmark-results/report.tsv --esdocument https://search-rocksdb-bench-k2izhptfeap2hjfxteolsgsynm.us-west-2.es.amazonaws.com/bench_test3/_doc
+          command: |
+            set +e
+            set +o pipefail
+            ./build_tools/benchmark_log_tool.py --tsvfile /tmp/benchmark-results/report.tsv --esdocument https://search-rocksdb-bench-k2izhptfeap2hjfxteolsgsynm.us-west-2.es.amazonaws.com/bench_test3/_doc
+            true

 executors:
   windows-2xlarge:
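The new multi-line command deliberately ignores the uploader's exit status (`set +e`, `set +o pipefail`, trailing `true`) so that a malformed report cannot fail the whole CI job. A rough Python equivalent of that behaviour, reusing the path and URL from the step above:

    import subprocess

    # Run the uploader but do not let its exit status propagate,
    # mirroring `set +e` and the trailing `true` in the CI step.
    subprocess.run(
        ['./build_tools/benchmark_log_tool.py',
         '--tsvfile', '/tmp/benchmark-results/report.tsv',
         '--esdocument', 'https://search-rocksdb-bench-k2izhptfeap2hjfxteolsgsynm.us-west-2.es.amazonaws.com/bench_test3/_doc'],
        check=False)  # check=False: a non-zero exit does not raise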
@@ -964,6 +968,16 @@ workflows:
   build-fuzzers:
     jobs:
       - build-fuzzers
+  benchmark-linux:
+    triggers:
+      - schedule:
+          cron: "0 * * * *"
+          filters:
+            branches:
+              only:
+                - main
+    jobs:
+      - benchmark-linux
   nightly:
     triggers:
       - schedule:
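The `cron: "0 * * * *"` trigger runs the reinstated benchmark job at the top of every hour, on main only. A quick way to sanity-check a cron expression, using the third-party croniter package purely as an illustration (it is not a dependency of this PR):

    from datetime import datetime
    from croniter import croniter  # third-party; illustration only

    it = croniter("0 * * * *", datetime(2022, 6, 20, 12, 30))
    print(it.get_next(datetime))   # 2022-06-20 13:00:00, top of the next hour
    print(it.get_next(datetime))   # 2022-06-20 14:00:00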

@@ -30,45 +30,56 @@ class BenchmarkResultException(Exception):

 class BenchmarkUtils:
-    expected_keys = ['ops_sec', 'mb_sec', 'total_size_gb', 'level0_size_gb', 'sum_gb', 'write_amplification',
-                     'write_mbps', 'usec_op', 'percentile_50', 'percentile_75',
-                     'percentile_99', 'percentile_99.9', 'percentile_99.99', 'uptime',
-                     'stall_time', 'stall_percent', 'test_name', 'test_date', 'rocksdb_version',
-                     'job_id', 'timestamp']
-    metric_keys = ['ops_sec', 'mb_sec', 'total_size_gb', 'level0_size_gb', 'sum_gb', 'write_amplification',
-                   'write_mbps', 'usec_op', 'percentile_50', 'percentile_75',
-                   'percentile_99', 'percentile_99.9', 'percentile_99.99', 'uptime',
-                   'stall_time', 'stall_percent']
+    expected_keys = ['ops_sec', 'mb_sec', 'lsm_sz', 'blob_sz', 'c_wgb', 'w_amp',
+                     'c_mbps', 'c_wsecs', 'c_csecs', 'b_rgb', 'b_wgb', 'usec_op',
+                     'p50', 'p99', 'p99.9', 'p99.99', 'pmax',
+                     'uptime', 'stall%', 'Nstall', 'u_cpu', 's_cpu', 'rss', 'test', 'date', 'version', 'job_id']

     def sanity_check(row):
-        if not 'test_name' in row:
+        if not 'test' in row:
+            logging.debug(f"not 'test' in row: {row}")
             return False
-        if row['test_name'] == '':
+        if row['test'] == '':
+            logging.debug(f"row['test'] == '': {row}")
             return False
-        if not 'test_date' in row:
+        if not 'date' in row:
+            logging.debug(f"not 'date' in row: {row}")
             return False
         if not 'ops_sec' in row:
+            logging.debug(f"not 'ops_sec' in row: {row}")
             return False
         try:
             v = int(row['ops_sec'])
         except (ValueError, TypeError):
+            logging.debug(f"int(row['ops_sec']): {row}")
+            return False
+        try:
+            (_, _) = parser.parse(row['date'], fuzzy_with_tokens=True)
+        except (parser.ParserError):
+            logging.error(f"parser.parse((row['date']): not a valid format for date in row: {row}")
             return False
         return True

     def conform_opensearch(row):
-        (dt, _) = parser.parse(row['test_date'], fuzzy_with_tokens=True)
+        (dt, _) = parser.parse(row['date'], fuzzy_with_tokens=True)
         row['test_date'] = dt.isoformat()
         return dict((key.replace('.', '_'), value)
                     for (key, value) in row.items())


 class ResultParser:
-    def __init__(self, field="(\w|[+-:.])+", intrafield="(\s)+", separator="\t"):
+    def __init__(self, field="(\w|[+-:.%])+", intrafield="(\s)+", separator="\t"):
         self.field = re.compile(field)
         self.intra = re.compile(intrafield)
         self.sep = re.compile(separator)

+    def ignore(self, l_in: str):
+        if len(l_in) == 0:
+            return True
+        if l_in[0:1] == '#':
+            return True
+        return False
+
     def line(self, l_in: str):
         '''Parse a line into items
         Being clever about separators
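To see the renamed fields in action, a minimal sketch (row values invented, run inside this module) of what `sanity_check` and `conform_opensearch` now accept and produce:

    from dateutil import parser  # the script's existing date dependency

    row = {'test': 'fillseq', 'date': '2022-06-17T11:00:00',
           'ops_sec': '82000', 'p99.9': '42'}
    assert BenchmarkUtils.sanity_check(row)   # required fields present, date parses
    doc = BenchmarkUtils.conform_opensearch(row)
    # 'date' is normalised into an ISO-8601 'test_date' field, and dots in
    # key names are replaced so OpenSearch accepts them: 'p99.9' -> 'p99_9'
    print(doc['test_date'], doc['p99_9'])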
@@ -102,7 +113,7 @@ class ResultParser:

     def parse(self, lines):
         '''Parse something that iterates lines'''
-        rows = [self.line(line) for line in lines]
+        rows = [self.line(line) for line in lines if not self.ignore(line)]
         header = rows[0]
         width = len(header)
         records = [{k: v for (k, v) in itertools.zip_longest(
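With the new `ignore` method, the parser now skips blank and `#`-comment lines before matching rows against the header. A small self-test (input lines invented; the expected output assumes `line` splits fields on the default tab separator):

    lines = [
        '# comment emitted by the benchmark driver -- now skipped',
        'test\tdate\tops_sec',
        'fillseq\t2022-06-17T11:00:00\t82000',
    ]
    records = ResultParser().parse(lines)
    print(records[0])
    # {'test': 'fillseq', 'date': '2022-06-17T11:00:00', 'ops_sec': '82000'}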
@@ -123,7 +134,7 @@ def load_report_from_tsv(filename: str):
 def push_report_to_opensearch(report, esdocument):
     sanitized = [BenchmarkUtils.conform_opensearch(row)
                  for row in report if BenchmarkUtils.sanity_check(row)]
-    logging.debug(f"upload {len(sanitized)} benchmarks to opensearch")
+    logging.debug(f"upload {len(sanitized)} sane of {len(report)} benchmarks to opensearch")
     for single_benchmark in sanitized:
         logging.debug(f"upload benchmark: {single_benchmark}")
         response = requests.post(
@@ -133,6 +144,13 @@ def push_report_to_opensearch(report, esdocument):
             f"Sent to OpenSearch, status: {response.status_code}, result: {response.text}")
         response.raise_for_status()

+
+def push_report_to_null(report):
+    for row in report:
+        if BenchmarkUtils.sanity_check(row):
+            logging.debug(f"row {row}")
+            conformed = BenchmarkUtils.conform_opensearch(row)
+            logging.debug(f"conformed row {conformed}")

 def main():
     '''Tool for fetching, parsing and uploading benchmark results to OpenSearch / ElasticSearch
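`push_report_to_null` gives a dry-run path: it exercises the same sanity checks and conformance as a real upload, but sends nothing. For example (path taken from the CI step above):

    report = load_report_from_tsv('/tmp/benchmark-results/report.tsv')
    push_report_to_null(report)   # parse, sanity-check and conform rows; upload nothing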
@@ -151,11 +169,15 @@ def main():
     parser.add_argument('--tsvfile', default='build_tools/circle_api_scraper_input.txt',
                         help='File from which to read tsv report')
     parser.add_argument('--esdocument', help='ElasticSearch/OpenSearch document URL to upload report into')
+    parser.add_argument('--upload', choices=['opensearch', 'none'], default='opensearch')
     args = parser.parse_args()
     logging.debug(f"Arguments: {args}")
     reports = load_report_from_tsv(args.tsvfile)
-    push_report_to_opensearch(reports, args.esdocument)
+    if (args.upload == 'opensearch'):
+        push_report_to_opensearch(reports, args.esdocument)
+    else:
+        push_report_to_null(reports)

 if __name__ == '__main__':
     sys.exit(main())
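With the new `--upload` flag the same dry run is available from the command line. A hypothetical invocation, driven here through `main()` for illustration:

    # Equivalent to: ./build_tools/benchmark_log_tool.py \
    #     --tsvfile /tmp/benchmark-results/report.tsv --upload none
    import sys
    sys.argv = ['benchmark_log_tool.py',
                '--tsvfile', '/tmp/benchmark-results/report.tsv',
                '--upload', 'none']
    main()   # parses and sanity-checks the report, uploads nothing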
