
Commit af42634

tests/run-multitests.py: Support outputting test metrics.
If a multitest calls `multitest.output_metric(...)` then that output will be collected separately, not considered as part of the test verification output, and instead be printed at the end.  This is useful for tests that want to output performance/timing metrics that may change from one run to the next.

Signed-off-by: Damien George <damien@micropython.org>
1 parent 31e7a05 commit af42634
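
For context, a minimal sketch of how a multitest might use the new hook. The file name, instance bodies and metric string below are hypothetical and only for illustration; the only piece that comes from this commit is `multitest.output_metric(...)` (the `multitest` class is injected into the test by the runner, and `time.ticks_ms()` is the usual MicroPython timing call):

    # multi_net/example_perf.py (hypothetical test, not part of this commit)
    import time


    def instance0():
        multitest.next()
        t0 = time.ticks_ms()
        # ... do the work being measured ...
        dt = time.ticks_diff(time.ticks_ms(), t0)
        # Deterministic output is still verified against the truth run as usual.
        print("done")
        # Timing varies from run to run, so report it as a metric rather than as output.
        multitest.output_metric("elapsed {} ms".format(dt))


    def instance1():
        multitest.next()
        print("done")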

File tree

1 file changed: +16 -3 lines changed


tests/run-multitests.py

Lines changed: 16 additions & 3 deletions
@@ -93,6 +93,9 @@ def get_network_ip():
     @staticmethod
     def expect_reboot(resume, delay_ms=0):
         print("WAIT_FOR_REBOOT", resume, delay_ms)
+    @staticmethod
+    def output_metric(data):
+        print("OUTPUT_METRIC", data)
 
 {}
 
@@ -312,6 +315,7 @@ def run_test_on_instances(test_file, num_instances, instances):
     skip = False
     injected_globals = ""
     output = [[] for _ in range(num_instances)]
+    output_metrics = []
 
     # If the test calls get_network_ip() then inject HOST_IP so that devices can know
     # the IP address of the host. Do this lazily to not require a TCP/IP connection
@@ -400,6 +404,8 @@ def run_test_on_instances(test_file, num_instances, instances):
                     for instance2 in instances:
                         if instance2 is not instance:
                             instance2.write(bytes(out, "ascii") + b"\r\n")
+                elif out.startswith("OUTPUT_METRIC "):
+                    output_metrics.append(out.split(" ", 1)[1])
                 else:
                     output[idx].append(out)
             if err is not None:
@@ -421,7 +427,7 @@
         output_str += "--- instance{} ---\n".format(idx)
         output_str += "\n".join(lines) + "\n"
 
-    return error, skip, output_str
+    return error, skip, output_str, output_metrics
 
 
 def wait_for_reboot(instance, extra_timeout_ms=0):
@@ -481,7 +487,9 @@ def run_tests(test_files, instances_truth, instances_test):
         sys.stdout.flush()
 
         # Run test on test instances
-        error, skip, output_test = run_test_on_instances(test_file, num_instances, instances_test)
+        error, skip, output_test, output_metrics = run_test_on_instances(
+            test_file, num_instances, instances_test
+        )
 
         if not skip:
             # Check if truth exists in a file, and read it in
@@ -491,7 +499,7 @@
                     output_truth = f.read()
             else:
                 # Run test on truth instances to get expected output
-                _, _, output_truth = run_test_on_instances(
+                _, _, output_truth, _ = run_test_on_instances(
                     test_file, num_instances, instances_truth
                 )
 
@@ -520,6 +528,11 @@
                 print("### DIFF ###")
                 print_diff(output_truth, output_test)
 
+        # Print test output metrics, if there are any.
+        if output_metrics:
+            for metric in output_metrics:
+                print(test_file, ": ", metric, sep="")
+
         if cmd_args.show_output:
             print()
 
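
Taken together, the device-side helper and the host-side handling form a small line protocol: the injected class prints a line starting with "OUTPUT_METRIC ", the runner strips that prefix and stores the remainder, and after the pass/fail verdict it prints each metric prefixed by the test file name. A rough illustration of that flow, with a made-up metric value and test name:

    out = "OUTPUT_METRIC elapsed 42 ms"   # line as printed by the device-side helper
    metric = out.split(" ", 1)[1]         # host keeps "elapsed 42 ms", excluded from verification output
    print("multi_net/example_perf.py", ": ", metric, sep="")
    # prints: multi_net/example_perf.py: elapsed 42 ms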