From: Ian Rogers
Date: Fri, 10 Jan 2025 04:57:35 +0000 (-0800)
Subject: perf test: Add a runs-per-test flag
X-Git-Tag: v6.14-rc1~29^2~16
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=1c0d9816e9cb9548c74e04971300ec9cecf2c0d7;p=linux-block.git

perf test: Add a runs-per-test flag

To detect flakes it is useful to run tests more than once. Add a
runs-per-test flag that will run each test multiple times.

Example output:
```
$ perf test -r 3 lbr -v
122: perf record LBR tests : Ok
122: perf record LBR tests : Ok
122: perf record LBR tests : Ok
```

Update the documentation for the runs-per-test option.

Signed-off-by: Ian Rogers
Reviewed-by: Namhyung Kim
Cc: James Clark
Link: https://lore.kernel.org/r/20250110045736.598281-5-irogers@google.com
Signed-off-by: Namhyung Kim
---

diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt
index 2e40869b64de..85f868c324ff 100644
--- a/tools/perf/Documentation/perf-test.txt
+++ b/tools/perf/Documentation/perf-test.txt
@@ -37,6 +37,11 @@ OPTIONS
 	tests are run sequentially, but other tests are run in parallel to
 	speed execution.
 
+-r::
+--runs-per-test::
+	Run each test the given number of times, by default once. This
+	option can be useful to determine if a test is flaky.
+
 -F::
 --dont-fork::
 	Do not fork child for each test, run all tests within single process, this
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index c6071c4db741..14d30a5053be 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -42,6 +42,8 @@ static bool dont_fork;
 /* Fork the tests in parallel and wait for their completion. */
 static bool sequential;
 
+/* Number of times each test is run. */
+static unsigned int runs_per_test = 1;
 const char *dso_to_test;
 const char *test_objdump_path = "objdump";
 
@@ -485,7 +487,7 @@ static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
 			len = strlen(test_description(*t, i));
 			if (width < len)
 				width = len;
-			num_tests++;
+			num_tests += runs_per_test;
 		}
 	}
 	child_tests = calloc(num_tests, sizeof(*child_tests));
@@ -549,16 +551,18 @@ static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
 			continue;
 		}
 
-		test_suite__for_each_test_case(*t, curr_test_case) {
-			if (!perf_test__matches(test_description(*t, curr_test_case),
-						curr_suite, argc, argv))
-				continue;
-
-			err = start_test(*t, curr_suite, curr_test_case,
-					 &child_tests[child_test_num++],
-					 width, pass);
-			if (err)
-				goto err_out;
+		for (unsigned int run = 0; run < runs_per_test; run++) {
+			test_suite__for_each_test_case(*t, curr_test_case) {
+				if (!perf_test__matches(test_description(*t, curr_test_case),
+							curr_suite, argc, argv))
+					continue;
+
+				err = start_test(*t, curr_suite, curr_test_case,
+						 &child_tests[child_test_num++],
+						 width, pass);
+				if (err)
+					goto err_out;
+			}
 		}
 	}
 	if (!sequential) {
@@ -698,6 +702,8 @@ int cmd_test(int argc, const char **argv)
 		    "Do not fork for testcase"),
 	OPT_BOOLEAN('S', "sequential", &sequential,
 		    "Run the tests one after another rather than in parallel"),
+	OPT_UINTEGER('r', "runs-per-test", &runs_per_test,
+		     "Run each test the given number of times, default 1"),
 	OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
 	OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
 	OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),