update buckifier for new json format and updated macros (#9643)

Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/9643

Reviewed By: jay-zhuang

Differential Revision: D34543573

fbshipit-source-id: fec0c81ece37ca5eb958cef13ac9657cca6338b7
Author: Patrick Somaru (committed by Facebook GitHub Bot)
parent 33742c2a9f
commit af6cb50bc4
Files changed:
  1. TARGETS (7677)
  2. buckifier/buckify_rocksdb.py (28)
  3. buckifier/targets_builder.py (4)
  4. buckifier/targets_cfg.py (2)

TARGETS (7677)

(diff suppressed because it is too large)
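For orientation, the entry shape the new parsing code expects from buckifier/bench.json looks roughly like the following, shown as the Python value json.load() would return. The field names (name, benchmarks, expected_runtime_one_iter, sl_iterations, regression_threshold) are taken from the diff below; the suite, binary, benchmark, and metric values are made up for illustration:

    # Hypothetical bench.json entry, as loaded by json.load() in
    # generate_targets(). Metric lists may mix plain metric names with
    # dict-valued entries; the buckifier drops the dicts (first hunk below).
    [
        {
            "name": "example_bench_suite",
            "benchmarks": {
                "db_bench": {
                    "fillseq": ["ops_per_sec", {"some_override": 0.1}]
                }
            },
            "expected_runtime_one_iter": 1200,
            "sl_iterations": 3,
            "regression_threshold": 0.05
        }
    ]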

buckifier/buckify_rocksdb.py

@@ -210,14 +210,34 @@ def generate_targets(repo_path, deps_map):
         with open(f"{repo_path}/buckifier/bench.json") as json_file:
             fast_fancy_bench_config_list = json.load(json_file)
             for config_dict in fast_fancy_bench_config_list:
-                TARGETS.add_fancy_bench_config(config_dict['name'],config_dict['benchmarks'], False, config_dict['expected_runtime'])
+                clean_benchmarks = {}
+                benchmarks = config_dict['benchmarks']
+                for binary, benchmark_dict in benchmarks.items():
+                    clean_benchmarks[binary] = {}
+                    for benchmark, overloaded_metric_list in benchmark_dict.items():
+                        clean_benchmarks[binary][benchmark] = []
+                        for metric in overloaded_metric_list:
+                            if not isinstance(metric, dict):
+                                clean_benchmarks[binary][benchmark].append(metric)
+                TARGETS.add_fancy_bench_config(config_dict['name'], clean_benchmarks, False, config_dict['expected_runtime_one_iter'], config_dict['sl_iterations'], config_dict['regression_threshold'])
         with open(f"{repo_path}/buckifier/bench-slow.json") as json_file:
             slow_fancy_bench_config_list = json.load(json_file)
             for config_dict in slow_fancy_bench_config_list:
-                TARGETS.add_fancy_bench_config(config_dict['name']+"_slow",config_dict['benchmarks'], True, config_dict['expected_runtime'])
-    except (FileNotFoundError, KeyError):
+                clean_benchmarks = {}
+                benchmarks = config_dict['benchmarks']
+                for binary, benchmark_dict in benchmarks.items():
+                    clean_benchmarks[binary] = {}
+                    for benchmark, overloaded_metric_list in benchmark_dict.items():
+                        clean_benchmarks[binary][benchmark] = []
+                        for metric in overloaded_metric_list:
+                            if not isinstance(metric, dict):
+                                clean_benchmarks[binary][benchmark].append(metric)
+            for config_dict in slow_fancy_bench_config_list:
+                TARGETS.add_fancy_bench_config(config_dict['name']+"_slow", clean_benchmarks, True, config_dict['expected_runtime_one_iter'], config_dict['sl_iterations'], config_dict['regression_threshold'])
+    # it is better servicelab experiments break
+    # than rocksdb github ci
+    except Exception:
         pass
     TARGETS.add_test_header()
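The cleanup pass added above is easier to read in isolation. A minimal runnable sketch, assuming a made-up benchmarks mapping (the loop structure and the isinstance filter match the added lines; the data is illustrative):

    # Dict-valued entries (e.g. per-metric overrides) are dropped so that
    # only plain metric names reach the fancy_bench_wrapper template.
    benchmarks = {
        "db_bench": {
            "fillseq": ["ops_per_sec", {"some_override": 0.1}, "p99_latency"],
        }
    }

    clean_benchmarks = {}
    for binary, benchmark_dict in benchmarks.items():
        clean_benchmarks[binary] = {}
        for benchmark, overloaded_metric_list in benchmark_dict.items():
            clean_benchmarks[binary][benchmark] = []
            for metric in overloaded_metric_list:
                if not isinstance(metric, dict):
                    clean_benchmarks[binary][benchmark].append(metric)

    print(clean_benchmarks)
    # {'db_bench': {'fillseq': ['ops_per_sec', 'p99_latency']}}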

buckifier/targets_builder.py

@@ -92,12 +92,14 @@ add_c_test_wrapper()
 # will not be included.
 """)

-    def add_fancy_bench_config(self, name, bench_config, slow, expected_runtime):
+    def add_fancy_bench_config(self, name, bench_config, slow, expected_runtime, sl_iterations, regression_threshold):
         self.targets_file.write(targets_cfg.fancy_bench_template.format(
             name=name,
             bench_config=pprint.pformat(bench_config),
             slow=slow,
             expected_runtime=expected_runtime,
+            sl_iterations=sl_iterations,
+            regression_threshold=regression_threshold
         ).encode("utf-8"))

     def register_test(self,
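With the two extra parameters, a call into the builder takes the shape sketched below. The argument values are illustrative, not taken from the repo's bench.json; in the real flow they come from the config_dict fields read in buckify_rocksdb.py:

    TARGETS.add_fancy_bench_config(
        "example_bench_suite",                       # name
        {"db_bench": {"fillseq": ["ops_per_sec"]}},  # cleaned bench_config
        False,                                       # slow
        1200,                                        # expected_runtime (one iteration)
        3,                                           # sl_iterations
        0.05,                                        # regression_threshold
    )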

buckifier/targets_cfg.py

@@ -41,6 +41,6 @@ cpp_unittest_wrapper(name="{test_name}",
 """
 fancy_bench_template = """
-fancy_bench_wrapper(suite_name="{name}", binary_to_bench_to_metric_list_map={bench_config}, slow={slow}, expected_runtime={expected_runtime})
+fancy_bench_wrapper(suite_name="{name}", binary_to_bench_to_metric_list_map={bench_config}, slow={slow}, expected_runtime={expected_runtime}, sl_iterations={sl_iterations}, regression_threshold={regression_threshold})
 """
