add TimeBenchmarks evaluation script
m-atalla committed Jun 22, 2024
1 parent 02af178 commit 768095c
Showing 1 changed file with 77 additions and 0 deletions.
src/test-suite/TimeBenchmarks.py (77 additions, 0 deletions)
@@ -0,0 +1,77 @@
# Copyright (c) 2024, The Contributors of IR2Vec.
#
# Part of the IR2Vec project. This software is available under the BSD 4-Clause
# License. Please see LICENSE file in the top-level directory for more details.
#
import csv
import time
import argparse
import subprocess
from pathlib import Path


SEED_VERSION = "llvm17"
# IR2Vec binary relative to this script: "../../build/bin/ir2vec"
IR2VEC_PATH = (Path(__file__).parents[2] / "build/bin/ir2vec").resolve()


def evaluate_benchmark(benchmark_path: Path, mode: str, repeat: int):
    """Run ir2vec on one benchmark `repeat` times and return the mean runtime in ms."""
    command = [IR2VEC_PATH.resolve(),
               f"-{mode}",
               "-level", "p",
               "-o", "/dev/null",
               benchmark_path.resolve()]
    trials_time = []
    for _ in range(repeat):
        start = time.perf_counter_ns()

        subprocess.run(command)

        # duration converted from ns to ms
        duration = (time.perf_counter_ns() - start) / 1_000_000
        trials_time.append(duration)
    avg_time = sum(trials_time) / repeat
    return round(avg_time, 2)


if __name__ == "__main__":
    default_repeat = 5
    default_ir_path = (
        Path(__file__).parent / f"PE-benchmarks-llfiles-{SEED_VERSION}"
    )

    parser = argparse.ArgumentParser(
        prog="TimeBenchmarks",
        description="Performance evaluation script for `PE-benchmarks`")

    parser.add_argument("--benchmarks_path", type=str,
                        default=default_ir_path.resolve(),
                        help="Path to the benchmarks' \".ll\" files. "
                             f"(default={default_ir_path})")

    parser.add_argument("--repeat", type=int, default=default_repeat,
                        help="Number of times each benchmark is evaluated; "
                             "the reported result is the average runtime. "
                             f"(default={default_repeat})")

    parser.add_argument("-o", "--output_file", type=str,
                        default=f"TimeTaken-{int(time.time())}.csv",
                        help="Output file name, generated relative to the "
                             "directory this script is executed from. "
                             "(default=\"TimeTaken-<CURRENT_TIME>.csv\")")
    args = parser.parse_args()

    benchmarks_ll_path = Path(args.benchmarks_path)

    print("Saving results to:", Path(args.output_file).resolve())
    # newline="" avoids blank rows on platforms where csv adds its own line endings
    with open(args.output_file, "w", newline="") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(["Benchmarks", "FA", "SYM"])  # header; times are in ms
        rows = []
        for ll_file in benchmarks_ll_path.glob("*.ll"):
            benchmark_name = ll_file.stem
            fa_time = evaluate_benchmark(ll_file, "fa", args.repeat)
            sym_time = evaluate_benchmark(ll_file, "sym", args.repeat)
            rows.append([benchmark_name, fa_time, sym_time])
        # sort rows by the average flow-aware (FA) time
        rows = sorted(rows, key=lambda x: x[1])
        writer.writerows(rows)
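
For reference, a minimal usage sketch of the timing helper on its own. The import path and the `example.ll` file below are illustrative assumptions, not part of this commit; it presumes the ir2vec binary has already been built under `build/bin` and that the snippet is run from `src/test-suite`:

from pathlib import Path

from TimeBenchmarks import evaluate_benchmark  # assumes src/test-suite is on sys.path

ll_file = Path("example.ll")  # hypothetical benchmark file, not shipped with the repo
fa_ms = evaluate_benchmark(ll_file, "fa", 3)    # flow-aware embeddings, 3 trials
sym_ms = evaluate_benchmark(ll_file, "sym", 3)  # symbolic embeddings, 3 trials
print(f"{ll_file.stem}: FA={fa_ms} ms, SYM={sym_ms} ms")

Each call spawns the ir2vec binary `repeat` times and returns the average wall-clock time in milliseconds, matching the values the script writes into the FA and SYM columns of the CSV.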
