Currently the seccomp benchmark selftest produces non-standard output: while it makes a number of checks on the performance it observes, the results have to be parsed by humans. This means that automated systems running this suite of tests are almost certainly ignoring the results, which isn't ideal for spotting problems. Let's rework things so that each check the program does is reported as a test result to the framework.
Signed-off-by: Mark Brown <broonie@kernel.org>
---
Mark Brown (2):
      kselftest/seccomp: Use kselftest output functions for benchmark
      kselftest/seccomp: Report each expectation we assert as a KTAP test
 .../testing/selftests/seccomp/seccomp_benchmark.c | 105 +++++++++++++--------
 1 file changed, 65 insertions(+), 40 deletions(-)
---
base-commit: 2cc14f52aeb78ce3f29677c2de1f06c0e91471ab
change-id: 20231219-b4-kselftest-seccomp-benchmark-ktap-357603823708
Best regards,
In preparation for trying to output the test results themselves in TAP format, rework all the prints in the benchmark to use the kselftest output functions. The uses of system() all produce single line output, so we can avoid having to fully manage the child process and continue to use system() by simply printing an empty message before we invoke system(): the empty ksft_print_msg() emits just the "# " comment prefix without a newline, so the line system() writes completes it as a valid KTAP comment. We also leave one printf() used to complete a line of output in place.
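To illustrate the trick, here is a minimal standalone sketch (not part of the patch; the relative include path assumes the file sits in a selftests subdirectory next to kselftest.h):

#include <stdlib.h>

#include "../kselftest.h"

int main(void)
{
	/*
	 * ksft_print_msg("") emits only the "# " TAP comment prefix,
	 * with no trailing newline, so the single line that system()
	 * writes to stdout continues it and the combined line parses
	 * as a KTAP comment rather than stray output.
	 */
	ksft_print_msg("");
	system("uname -a");

	return 0;
}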
Signed-off-by: Mark Brown <broonie@kernel.org>
---
 .../testing/selftests/seccomp/seccomp_benchmark.c | 45 ++++++++++++----------
 1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c
index 5b5c9d558dee..93168dd2c1e3 100644
--- a/tools/testing/selftests/seccomp/seccomp_benchmark.c
+++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c
@@ -38,10 +38,10 @@ unsigned long long timing(clockid_t clk_id, unsigned long long samples)
 	i *= 1000000000ULL;
 	i += finish.tv_nsec - start.tv_nsec;
 
-	printf("%lu.%09lu - %lu.%09lu = %llu (%.1fs)\n",
-	       finish.tv_sec, finish.tv_nsec,
-	       start.tv_sec, start.tv_nsec,
-	       i, (double)i / 1000000000.0);
+	ksft_print_msg("%lu.%09lu - %lu.%09lu = %llu (%.1fs)\n",
+		       finish.tv_sec, finish.tv_nsec,
+		       start.tv_sec, start.tv_nsec,
+		       i, (double)i / 1000000000.0);
 
 	return i;
 }
@@ -53,7 +53,7 @@ unsigned long long calibrate(void)
 	pid_t pid, ret;
 	int seconds = 15;
 
-	printf("Calibrating sample size for %d seconds worth of syscalls ...\n", seconds);
+	ksft_print_msg("Calibrating sample size for %d seconds worth of syscalls ...\n", seconds);
 
 	samples = 0;
 	pid = getpid();
@@ -102,14 +102,14 @@ long compare(const char *name_one, const char *name_eval, const char *name_two,
 {
 	bool good;
 
-	printf("\t%s %s %s (%lld %s %lld): ", name_one, name_eval, name_two,
-	       (long long)one, name_eval, (long long)two);
+	ksft_print_msg("\t%s %s %s (%lld %s %lld): ", name_one, name_eval, name_two,
+		       (long long)one, name_eval, (long long)two);
 	if (one > INT_MAX) {
-		printf("Miscalculation! Measurement went negative: %lld\n", (long long)one);
+		ksft_print_msg("Miscalculation! Measurement went negative: %lld\n", (long long)one);
 		return 1;
 	}
 	if (two > INT_MAX) {
-		printf("Miscalculation! Measurement went negative: %lld\n", (long long)two);
+		ksft_print_msg("Miscalculation! Measurement went negative: %lld\n", (long long)two);
 		return 1;
 	}
 
@@ -145,12 +145,15 @@ int main(int argc, char *argv[])
 
 	setbuf(stdout, NULL);
 
-	printf("Running on:\n");
+	ksft_print_msg("Running on:\n");
+	ksft_print_msg("");
 	system("uname -a");
 
-	printf("Current BPF sysctl settings:\n");
+	ksft_print_msg("Current BPF sysctl settings:\n");
 	/* Avoid using "sysctl" which may not be installed. */
+	ksft_print_msg("");
 	system("grep -H . /proc/sys/net/core/bpf_jit_enable");
+	ksft_print_msg("");
 	system("grep -H . /proc/sys/net/core/bpf_jit_harden");
 
 	if (argc > 1)
@@ -158,11 +161,11 @@ int main(int argc, char *argv[])
 	else
 		samples = calibrate();
 
-	printf("Benchmarking %llu syscalls...\n", samples);
+	ksft_print_msg("Benchmarking %llu syscalls...\n", samples);
 
 	/* Native call */
 	native = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-	printf("getpid native: %llu ns\n", native);
+	ksft_print_msg("getpid native: %llu ns\n", native);
 
 	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 	assert(ret == 0);
@@ -172,33 +175,33 @@ int main(int argc, char *argv[])
 	assert(ret == 0);
 
 	bitmap1 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-	printf("getpid RET_ALLOW 1 filter (bitmap): %llu ns\n", bitmap1);
+	ksft_print_msg("getpid RET_ALLOW 1 filter (bitmap): %llu ns\n", bitmap1);
 
 	/* Second filter resulting in a bitmap */
 	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
 	assert(ret == 0);
 
 	bitmap2 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-	printf("getpid RET_ALLOW 2 filters (bitmap): %llu ns\n", bitmap2);
+	ksft_print_msg("getpid RET_ALLOW 2 filters (bitmap): %llu ns\n", bitmap2);
 
 	/* Third filter, can no longer be converted to bitmap */
 	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 	assert(ret == 0);
 
 	filter1 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-	printf("getpid RET_ALLOW 3 filters (full): %llu ns\n", filter1);
+	ksft_print_msg("getpid RET_ALLOW 3 filters (full): %llu ns\n", filter1);
 
 	/* Fourth filter, can not be converted to bitmap because of filter 3 */
 	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
 	assert(ret == 0);
 
 	filter2 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-	printf("getpid RET_ALLOW 4 filters (full): %llu ns\n", filter2);
+	ksft_print_msg("getpid RET_ALLOW 4 filters (full): %llu ns\n", filter2);
 
 	/* Estimations */
 #define ESTIMATE(fmt, var, what) do {				\
 		var = (what);					\
-		printf("Estimated " fmt ": %llu ns\n", var);	\
+		ksft_print_msg("Estimated " fmt ": %llu ns\n", var);	\
 		if (var > INT_MAX)				\
 			goto more_samples;			\
 	} while (0)
@@ -218,7 +221,7 @@ int main(int argc, char *argv[])
 	ESTIMATE("seccomp per-filter overhead (filters / 4)", per_filter2,
 		 (filter2 - native - entry) / 4);
 
-	printf("Expectations:\n");
+	ksft_print_msg("Expectations:\n");
 	ret |= compare("native", "≤", "1 bitmap", native, le, bitmap1);
 	bits = compare("native", "≤", "1 filter", native, le, filter1);
 	if (bits)
@@ -230,7 +233,7 @@ int main(int argc, char *argv[])
 	bits = compare("1 bitmapped", "≈", "2 bitmapped",
 		       bitmap1 - native, approx, bitmap2 - native);
 	if (bits) {
-		printf("Skipping constant action bitmap expectations: they appear unsupported.\n");
+		ksft_print_msg("Skipping constant action bitmap expectations: they appear unsupported.\n");
 		goto out;
 	}
 
@@ -242,7 +245,7 @@ int main(int argc, char *argv[])
 		goto out;
 
 more_samples:
-	printf("Saw unexpected benchmark result. Try running again with more samples?\n");
+	ksft_print_msg("Saw unexpected benchmark result. Try running again with more samples?\n");
 out:
 	return 0;
 }
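With these conversions every diagnostic line carries the "# " KTAP comment prefix, so a run looks something like the following (hypothetical machine and sample count, shown only for the shape of the output):

# Running on:
# Linux example 6.7.0-rc6 #1 SMP x86_64 GNU/Linux
# Current BPF sysctl settings:
# /proc/sys/net/core/bpf_jit_enable:1
# /proc/sys/net/core/bpf_jit_harden:0
# Benchmarking 1000000 syscalls...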
The seccomp benchmark test makes a number of checks on the performance it measures and logs them to the output, but does so in a custom format which none of the automated test runners understand, meaning the chances that anyone is paying attention are slim. Let's additionally log each result in KTAP format so that automated systems parsing the test output will see each comparison as a test case. The original logs are left in place since they provide the actual numbers for analysis.
As part of this, rework the flow of the main program so that when we skip tests we still log all the tests we skip, since the standard KTAP headers and footers include counts of the number of expected and run tests.
---
 .../testing/selftests/seccomp/seccomp_benchmark.c | 62 +++++++++++++++-------
 1 file changed, 42 insertions(+), 20 deletions(-)
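With the plan set to 7, the result stream comes out roughly as below (the test names are built from the compare() arguments; the outcomes shown are hypothetical):

TAP version 13
1..7
ok 1 native ≤ 1 bitmap
ok 2 native ≤ 1 filter
not ok 3 per-filter (last 2 diff) ≈ per-filter (filters / 4)
ok 4 1 bitmapped ≈ 2 bitmapped
ok 5 entry ≈ 1 bitmapped
ok 6 entry ≈ 2 bitmapped
not ok 7 native + entry + (per filter * 4) ≈ 4 filters total
# Totals: pass:5 fail:2 xfail:0 xpass:0 skip:0 error:0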
diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c
index 93168dd2c1e3..436a527b8235 100644
--- a/tools/testing/selftests/seccomp/seccomp_benchmark.c
+++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c
@@ -98,24 +98,36 @@ bool le(int i_one, int i_two)
 }
 
 long compare(const char *name_one, const char *name_eval, const char *name_two,
-	     unsigned long long one, bool (*eval)(int, int), unsigned long long two)
+	     unsigned long long one, bool (*eval)(int, int), unsigned long long two,
+	     bool skip)
 {
 	bool good;
 
+	if (skip) {
+		ksft_test_result_skip("%s %s %s\n", name_one, name_eval,
+				      name_two);
+		return 0;
+	}
+
 	ksft_print_msg("\t%s %s %s (%lld %s %lld): ", name_one, name_eval, name_two,
 		       (long long)one, name_eval, (long long)two);
 	if (one > INT_MAX) {
 		ksft_print_msg("Miscalculation! Measurement went negative: %lld\n", (long long)one);
-		return 1;
+		good = false;
+		goto out;
 	}
 	if (two > INT_MAX) {
 		ksft_print_msg("Miscalculation! Measurement went negative: %lld\n", (long long)two);
-		return 1;
+		good = false;
+		goto out;
 	}
 
 	good = eval(one, two);
 	printf("%s\n", good ? "✔️" : "❌");
 
+out:
+	ksft_test_result(good, "%s %s %s\n", name_one, name_eval, name_two);
+
 	return good ? 0 : 1;
 }
 
@@ -142,9 +154,13 @@ int main(int argc, char *argv[])
 	unsigned long long samples, calc;
 	unsigned long long native, filter1, filter2, bitmap1, bitmap2;
 	unsigned long long entry, per_filter1, per_filter2;
+	bool skip = false;
 
 	setbuf(stdout, NULL);
 
+	ksft_print_header();
+	ksft_set_plan(7);
+
 	ksft_print_msg("Running on:\n");
 	ksft_print_msg("");
 	system("uname -a");
@@ -202,8 +218,10 @@ int main(int argc, char *argv[])
 #define ESTIMATE(fmt, var, what) do {				\
 		var = (what);					\
 		ksft_print_msg("Estimated " fmt ": %llu ns\n", var);	\
-		if (var > INT_MAX)				\
-			goto more_samples;			\
+		if (var > INT_MAX) {				\
+			skip = true;				\
+			ret |= 1;				\
+		}						\
 	} while (0)
 
 	ESTIMATE("total seccomp overhead for 1 bitmapped filter", calc,
@@ -222,30 +240,34 @@ int main(int argc, char *argv[])
 		 (filter2 - native - entry) / 4);
 
 	ksft_print_msg("Expectations:\n");
-	ret |= compare("native", "≤", "1 bitmap", native, le, bitmap1);
-	bits = compare("native", "≤", "1 filter", native, le, filter1);
+	ret |= compare("native", "≤", "1 bitmap", native, le, bitmap1,
+		       skip);
+	bits = compare("native", "≤", "1 filter", native, le, filter1,
+		       skip);
 	if (bits)
-		goto more_samples;
+		skip = true;
 
 	ret |= compare("per-filter (last 2 diff)", "≈", "per-filter (filters / 4)",
-		       per_filter1, approx, per_filter2);
+		       per_filter1, approx, per_filter2, skip);
 
 	bits = compare("1 bitmapped", "≈", "2 bitmapped",
-		       bitmap1 - native, approx, bitmap2 - native);
+		       bitmap1 - native, approx, bitmap2 - native, skip);
 	if (bits) {
 		ksft_print_msg("Skipping constant action bitmap expectations: they appear unsupported.\n");
-		goto out;
+		skip = true;
 	}
 
-	ret |= compare("entry", "≈", "1 bitmapped", entry, approx, bitmap1 - native);
-	ret |= compare("entry", "≈", "2 bitmapped", entry, approx, bitmap2 - native);
+	ret |= compare("entry", "≈", "1 bitmapped", entry, approx,
+		       bitmap1 - native, skip);
+	ret |= compare("entry", "≈", "2 bitmapped", entry, approx,
+		       bitmap2 - native, skip);
 	ret |= compare("native + entry + (per filter * 4)", "≈", "4 filters total",
-		       entry + (per_filter1 * 4) + native, approx, filter2);
-	if (ret == 0)
-		goto out;
+		       entry + (per_filter1 * 4) + native, approx, filter2,
+		       skip);
 
-more_samples:
-	ksft_print_msg("Saw unexpected benchmark result. Try running again with more samples?\n");
-out:
-	return 0;
+	if (ret) {
+		ksft_print_msg("Saw unexpected benchmark result. Try running again with more samples?\n");
+	}
+
+	ksft_finished();
 }
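For reference, the helpers used above come from tools/testing/selftests/kselftest.h; this minimal standalone sketch (placeholder test names and trivial conditions, not part of the patch) shows the same reporting pattern:

#include <stdbool.h>

#include "../kselftest.h"

int main(void)
{
	bool skip = false;

	ksft_print_header();	/* emits "TAP version 13" */
	ksft_set_plan(2);	/* emits "1..2" */

	/* Pass or fail is chosen by the condition and counted against the plan. */
	ksft_test_result(1 + 1 == 2, "%s\n", "arithmetic");

	/* A skipped test still consumes a test number, keeping the plan honest. */
	if (skip)
		ksft_test_result_skip("%s\n", "second check");
	else
		ksft_test_result(true, "%s\n", "second check");

	ksft_finished();	/* prints totals and exits with pass/fail status */
}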
On 2023-12-19 21:21, Mark Brown wrote:
> Currently the seccomp benchmark selftest produces non-standard output: while it makes a number of checks on the performance it observes, the results have to be parsed by humans. This means that automated systems running this suite of tests are almost certainly ignoring the results, which isn't ideal for spotting problems. Let's rework things so that each check the program does is reported as a test result to the framework.
> Signed-off-by: Mark Brown <broonie@kernel.org>
Tested-by: Anders Roxell <anders.roxell@linaro.org>
Tested these patches on today's tag and they work as expected.
seccomp_seccomp_benchmark_native_1_bitmap pass
seccomp_seccomp_benchmark_native_1_filter pass
seccomp_seccomp_benchmark_per-filter_last_2_diff_per-filter_filters_4 fail
seccomp_seccomp_benchmark_1_bitmapped_2_bitmapped pass
seccomp_seccomp_benchmark_entry_1_bitmapped pass
seccomp_seccomp_benchmark_entry_2_bitmapped fail
seccomp_seccomp_benchmark_native_entry_per_filter_4_4_filters_total fail
Now with this change it shows up in the results.
Cheers, Anders