On Mon, Feb 10, 2025 at 8:01 AM Rasmus Villemoes linux@rasmusvillemoes.dk wrote:
On Fri, Feb 07 2025, Tamir Duberstein tamird@gmail.com wrote:
[...]
If/when you do re-roll a v3, can you split the defconfig changes off to a separate patch? It's a little annoying to scroll through all those mechanical one-liner diffs to get to the actual changes.
Yep. I'll split them into one for m68k and another for powerpc. Geert, I'll move your Acked-by to the m68k patch.
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1af972a92d06..4834cac1eb8f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2427,6 +2427,23 @@ config ASYNC_RAID6_TEST
 config TEST_HEXDUMP
 	tristate "Test functions located in the hexdump module at runtime"
+config PRINTF_KUNIT_TEST
+	tristate "KUnit test printf() family of functions at runtime" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  Enable this option to test the printf functions at boot.
Well, not necessarily at boot unless built-in?
+	  KUnit tests run during boot and output the results to the debug log
+	  in TAP format (http://testanything.org/). Only useful for kernel devs
+	  running the KUnit test harness, and not intended for inclusion into a
+	  production build.
+
+	  For more information on KUnit and unit tests in general please refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+	  If unsure, say N.
I see that this is copy-pasted from other kunit config items, but not all of them have all this boilerplate, and I don't think it's useful (and the mentions of "at boot" and "during boot" are actively misleading). Other kunit config items are shorter and more to the point, e.g.
Enable this to turn on 'list_sort()' function test. This test is executed only once during system boot (so affects only boot time), or at module load time. If unsure, say N.
Very good point. Truthfully, this boilerplate is the result of checkpatch.pl nagging that the help paragraph should be at least a certain length. I'll drop it and just write "Enable this option to test the printf functions at runtime.".
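Roughly, the whole entry would then shrink to something like this (whether to keep the trailing "If unsure, say N." line is still open):

config PRINTF_KUNIT_TEST
	tristate "KUnit test printf() family of functions at runtime" if !KUNIT_ALL_TESTS
	depends on KUNIT
	default KUNIT_ALL_TESTS
	help
	  Enable this option to test the printf functions at runtime.

	  If unsure, say N.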
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
diff --git a/lib/test_printf.c b/lib/printf_kunit.c
similarity index 89%
rename from lib/test_printf.c
rename to lib/printf_kunit.c
index 59dbe4f9a4cb..fe6f4df92dd5 100644
--- a/lib/test_printf.c
+++ b/lib/printf_kunit.c
@@ -5,7 +5,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
+#include <kunit/test.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/printk.h>
@@ -25,8 +25,6 @@
 #include <linux/property.h>
-#include "../tools/testing/selftests/kselftest_module.h"
 #define BUF_SIZE 256
 #define PAD_SIZE 16
 #define FILL_CHAR '$'
@@ -37,72 +35,71 @@
 	block \
 	__diag_pop();
-KSTM_MODULE_GLOBALS();
+static char *test_buffer;
+static char *alloced_buffer;
+static struct kunit *kunittest;
-static char *test_buffer __initdata;
-static char *alloced_buffer __initdata;
+#define tc_fail(fmt, ...) \
+	KUNIT_FAIL(kunittest, fmt, ##__VA_ARGS__)
-static int __printf(4, 0) __init
+static void __printf(4, 0)
 do_test(int bufsize, const char *expect, int elen,
 	const char *fmt, va_list ap)
 {
 	va_list aq;
 	int ret, written;
-	total_tests++;
 	memset(alloced_buffer, FILL_CHAR, BUF_SIZE + 2*PAD_SIZE);
 	va_copy(aq, ap);
 	ret = vsnprintf(test_buffer, bufsize, fmt, aq);
 	va_end(aq);
 	if (ret != elen) {
-		pr_warn("vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n",
+		tc_fail("vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n",
 			bufsize, fmt, ret, elen);
-		return 1;
+		return;
 	}
 	if (memchr_inv(alloced_buffer, FILL_CHAR, PAD_SIZE)) {
-		pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n", bufsize, fmt);
-		return 1;
+		tc_fail("vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n", bufsize, fmt);
+		return;
 	}
 	if (!bufsize) {
 		if (memchr_inv(test_buffer, FILL_CHAR, BUF_SIZE + PAD_SIZE)) {
-			pr_warn("vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n",
+			tc_fail("vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n",
 				fmt);
-			return 1;
 		}
-		return 0;
+		return;
 	}
 	written = min(bufsize-1, elen);
 	if (test_buffer[written]) {
-		pr_warn("vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n",
+		tc_fail("vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n",
 			bufsize, fmt);
-		return 1;
+		return;
 	}
 	if (memchr_inv(test_buffer + written + 1, FILL_CHAR, bufsize - (written + 1))) {
-		pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
+		tc_fail("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
 			bufsize, fmt);
-		return 1;
+		return;
 	}
 	if (memchr_inv(test_buffer + bufsize, FILL_CHAR, BUF_SIZE + PAD_SIZE - bufsize)) {
-		pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n", bufsize, fmt);
-		return 1;
+		tc_fail("vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n", bufsize, fmt);
+		return;
 	}
 	if (memcmp(test_buffer, expect, written)) {
-		pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
+		tc_fail("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
 			bufsize, fmt, test_buffer, written, expect);
-		return 1;
+		return;
 	}
-	return 0;
 }
-static void __printf(3, 4) __init
+static void __printf(3, 4)
 __test(const char *expect, int elen, const char *fmt, ...)
 {
 	va_list ap;
@@ -110,9 +107,8 @@ __test(const char *expect, int elen, const char *fmt, ...)
 	char *p;
 	if (elen >= BUF_SIZE) {
-		pr_err("error in test suite: expected output length %d too long. Format was '%s'.\n",
-		       elen, fmt);
-		failed_tests++;
+		tc_fail("error in test suite: expected output length %d too long. Format was '%s'.\n",
+			elen, fmt);
 		return;
 	}
@@ -124,19 +120,17 @@ __test(const char *expect, int elen, const char *fmt, ...)
 	 * enough and 0), and then we also test that kvasprintf would
 	 * be able to print it as expected.
 	 */
-	failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
+	do_test(BUF_SIZE, expect, elen, fmt, ap);
 	rand = get_random_u32_inclusive(1, elen + 1);
 	/* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
-	failed_tests += do_test(rand, expect, elen, fmt, ap);
-	failed_tests += do_test(0, expect, elen, fmt, ap);
+	do_test(rand, expect, elen, fmt, ap);
+	do_test(0, expect, elen, fmt, ap);
 	p = kvasprintf(GFP_KERNEL, fmt, ap);
 	if (p) {
-		total_tests++;
 		if (memcmp(p, expect, elen+1)) {
-			pr_warn("kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
+			tc_fail("kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
 				fmt, p, expect);
-			failed_tests++;
 		}
 		kfree(p);
 	}
So reading through this, is there really any reason to drop those existing total_tests and failed_tests tallies? And since you need to keep the control flow the same anyway, keeping the return types, the "return 1"/"return 0" statements, and their use in updating failed_tests would also reduce the patch size quite significantly.
So they no longer come from some KSTM_MODULE_GLOBALS(), and thus need to be explicitly declared in this module, but that's exactly how it was originally until someone decided to lift that from the printf suite to kstm.
Yes, they'd need to be explicitly initialized to 0 during kunit_init since they are no longer use-once __initdata variables, and some kunit_exit logic would need to "manually" report them.
If I understand your concern correctly, you only really care about `total_tests`, right? I think it's slightly unidiomatic to count tests outside the framework this way but it's not a hill I'll die on.
Just to elaborate on why I think this is unidiomatic: the KUnit test runner script that produces human-readable output doesn't emit logs unless something fails. In the success case, you'd get (before the next patch that breaks this into many tests):
[09:34:15] ==================== printf (1 subtest) ====================
[09:34:15] [PASSED] printf_test
[09:34:15] ===================== [PASSED] printf ======================
[09:34:15] ============================================================
[09:34:15] Testing complete. Ran 1 tests: passed: 1
but the raw output does contain the tally:
KTAP version 1
1..1
    KTAP version 1
    # Subtest: printf
    # module: printf_kunit
    1..1
    ok 1 printf_test
# printf: ran 448 tests
ok 1 printf
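Concretely, keeping the tally would end up looking roughly like this (untested sketch; the printf_suite_init/printf_suite_exit names, the pr_info reporting, and the printf_test_cases array are just my stand-ins for what KSTM_MODULE_GLOBALS() and the kselftest_module.h boilerplate provided):

static unsigned int total_tests;
static unsigned int failed_tests;

/* do_test()/__test() keep bumping total_tests/failed_tests as before. */

static int printf_suite_init(struct kunit_suite *suite)
{
	/* No longer use-once __initdata, so reset explicitly per run. */
	total_tests = 0;
	failed_tests = 0;
	return 0;
}

static void printf_suite_exit(struct kunit_suite *suite)
{
	/* "Manually" report the tally that KUnit itself doesn't track. */
	pr_info("ran %u tests, %u failed\n", total_tests, failed_tests);
}

static struct kunit_suite printf_test_suite = {
	.name = "printf",
	.suite_init = printf_suite_init,
	.suite_exit = printf_suite_exit,
	.test_cases = printf_test_cases,	/* defined elsewhere in the file */
};

kunit_test_suite(printf_test_suite);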
So assuming you're OK with this compromise, I'll go ahead and spin v3.
Thanks for the review!