This adds the conversion of the runtime tests of the check_*_overflow functions, from `lib/test_overflow.c` to KUnit tests.
A log similar to the one seen in dmesg when running test_overflow can be seen in `test.log`.
Signed-off-by: Vitor Massaru Iha vitor@massaru.org --- lib/Kconfig.debug | 17 ++ lib/Makefile | 1 + lib/kunit_overflow_test.c | 590 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 608 insertions(+) create mode 100644 lib/kunit_overflow_test.c
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1f4ab7a2bdee..72fcfe1f24a4 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2075,6 +2075,23 @@ config SYSCTL_KUNIT_TEST
If unsure, say N.
+config OVERFLOW_KUNIT_TEST + tristate "KUnit test for overflow" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + This builds the overflow KUnit tests. + + KUnit tests run during boot and output the results to the debug log + in TAP format (http://testanything.org/). Only useful for kernel devs + running KUnit test harness and are not for inclusion into a production + build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + config LIST_KUNIT_TEST tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS depends on KUNIT diff --git a/lib/Makefile b/lib/Makefile index 685aee60de1d..a3290adc0019 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -309,3 +309,4 @@ obj-$(CONFIG_OBJAGG) += objagg.o
# KUnit tests obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o +obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += kunit_overflow_test.o diff --git a/lib/kunit_overflow_test.c b/lib/kunit_overflow_test.c new file mode 100644 index 000000000000..c3eb8f0d3d50 --- /dev/null +++ b/lib/kunit_overflow_test.c @@ -0,0 +1,590 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code is the conversion of the overflow test in runtime to KUnit tests. + */ + +/* + * Test cases for arithmetic overflow checks. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <kunit/test.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/overflow.h> +#include <linux/vmalloc.h> + +#define DEFINE_TEST_ARRAY(t) \ + static const struct test_ ## t { \ + t a, b; \ + t sum, diff, prod; \ + bool s_of, d_of, p_of; \ + } t ## _tests[] + +DEFINE_TEST_ARRAY(u8) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U8_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U8_MAX, U8_MAX, 1, 0, false, true, false}, + {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false}, + {1, U8_MAX, 0, 2, U8_MAX, true, true, false}, + {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false}, + {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true}, + + {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true}, + {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true}, + + {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false}, + {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true}, + {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false}, + {1U << 7, 1U << 7, 0, 0, 0, true, false, true}, + + {48, 32, 80, 16, 0, false, false, true}, + {128, 128, 0, 0, 0, true, false, true}, + {123, 234, 101, 145, 110, true, true, true}, +}; + +DEFINE_TEST_ARRAY(u16) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U16_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, 
+ {0, U16_MAX, U16_MAX, 1, 0, false, true, false}, + {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false}, + {1, U16_MAX, 0, 2, U16_MAX, true, true, false}, + {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false}, + {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true}, + + {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true}, + {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true}, + + {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false}, + {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true}, + {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false}, + {1U << 15, 1U << 15, 0, 0, 0, true, false, true}, + + {123, 234, 357, 65425, 28782, false, true, false}, + {1234, 2345, 3579, 64425, 10146, false, true, true}, +}; + +DEFINE_TEST_ARRAY(u32) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U32_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U32_MAX, U32_MAX, 1, 0, false, true, false}, + {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false}, + {1, U32_MAX, 0, 2, U32_MAX, true, true, false}, + {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false}, + {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true}, + + {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true}, + {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true}, + + {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false}, + {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true}, + {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false}, + {1U << 31, 1U << 31, 0, 0, 0, true, false, true}, + + {-2U, 1U, -1U, -3U, -2U, false, false, false}, + {-4U, 5U, 1U, -9U, -20U, true, false, true}, +}; + +DEFINE_TEST_ARRAY(u64) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U64_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U64_MAX, U64_MAX, 1, 0, false, true, false}, + {U64_MAX, 0, U64_MAX, 
U64_MAX, 0, false, false, false}, + {1, U64_MAX, 0, 2, U64_MAX, true, true, false}, + {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false}, + {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true}, + + {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true}, + {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true}, + + {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false}, + {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true}, + {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false}, + {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true}, + {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */, + 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL, + false, true, false}, + {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true}, +}; + +DEFINE_TEST_ARRAY(s8) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false}, + {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false}, + {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false}, + {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false}, + + {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true}, + {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true}, + {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false}, + {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false}, + {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false}, + {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false}, + + {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false}, + {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false}, + {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false}, + {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false}, + + {S8_MIN, S8_MIN, 0, 0, 0, true, false, true}, + {S8_MAX, S8_MAX, -2, 0, 1, true, false, true}, + + {-4, -32, -36, 28, -128, false, false, true}, + {-4, 32, 28, -36, -128, false, false, false}, +}; + +DEFINE_TEST_ARRAY(s16) = { + {0, 0, 0, 0, 0, false, 
false, false}, + + {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false}, + {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false}, + {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false}, + {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false}, + + {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true}, + {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true}, + {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false}, + {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false}, + {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false}, + {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false}, + + {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false}, + {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false}, + {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false}, + {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false}, + + {S16_MIN, S16_MIN, 0, 0, 0, true, false, true}, + {S16_MAX, S16_MAX, -2, 0, 1, true, false, true}, +}; + +DEFINE_TEST_ARRAY(s32) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false}, + {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false}, + {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false}, + {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false}, + + {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true}, + {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true}, + {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false}, + {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false}, + {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false}, + {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false}, + + {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false}, + {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false}, + {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false}, + {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false}, + + {S32_MIN, 
S32_MIN, 0, 0, 0, true, false, true}, + {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, +}; + +DEFINE_TEST_ARRAY(s64) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false}, + {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false}, + {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false}, + {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false}, + + {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true}, + {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true}, + {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false}, + {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false}, + {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false}, + {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false}, + + {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false}, + {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false}, + {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false}, + {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false}, + + {S64_MIN, S64_MIN, 0, 0, 0, true, false, true}, + {S64_MAX, S64_MAX, -2, 0, 1, true, false, true}, + + {-1, -1, -2, 0, 1, false, false, false}, + {-1, -128, -129, 127, 128, false, false, false}, + {-128, -1, -129, -127, 128, false, false, false}, + {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, +}; + +#define check_one_op(test, t, fmt, op, sym, a, b, r, of) do { \ + t _r; \ + bool _of; \ + \ + _of = check_ ## op ## _overflow(a, b, &_r); \ + if (_of != of) { \ + KUNIT_FAIL(test, "Expected "fmt" "sym" "fmt \ + " to%s overflow (type %s)\n", \ + a, b, of ? 
"" : " not", #t); \ + } \ + if (_r != r) { \ + KUNIT_FAIL(test, "Expected "fmt" "sym" "fmt" == " \ + fmt", got "fmt" (type %s)\n", \ + a, b, r, _r, #t); \ + } \ +} while (0) + +#define DEFINE_TEST_FUNC(test, t, fmt) \ +static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \ +{ \ + check_one_op(test, t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ + check_one_op(test, t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \ + check_one_op(test, t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \ + check_one_op(test, t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \ + check_one_op(test, t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \ +} \ + \ +static void test_ ## t ## _overflow(struct kunit *test) \ +{ \ + unsigned i; \ + \ + kunit_warn(test, "%-3s: %zu arithmetic tests\n", #t, \ + ARRAY_SIZE(t ## _tests)); \ + for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \ + do_test_ ## t(test, &t ## _tests[i]); \ +} + +DEFINE_TEST_FUNC(test, u8, "%d"); +DEFINE_TEST_FUNC(test, s8, "%d"); +DEFINE_TEST_FUNC(test, u16, "%d"); +DEFINE_TEST_FUNC(test, s16, "%d"); +DEFINE_TEST_FUNC(test, u32, "%u"); +DEFINE_TEST_FUNC(test, s32, "%d"); +#if BITS_PER_LONG == 64 +DEFINE_TEST_FUNC(test, u64, "%llu"); +DEFINE_TEST_FUNC(test, s64, "%lld"); +#endif + +static void overflow_calculation_test(struct kunit *test) +{ + + test_u8_overflow(test); + test_s8_overflow(test); + test_s8_overflow(test); + test_u16_overflow(test); + test_s16_overflow(test); + test_u32_overflow(test); + test_s32_overflow(test); +#if BITS_PER_LONG == 64 + test_u64_overflow(test); + test_s64_overflow(test); +#endif +} + +static void overflow_shift_test(struct kunit *test) +{ +/* Args are: value, shift, type, expected result, overflow expected */ +#define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \ + int __failed = 0; \ + typeof(a) __a = (a); \ + typeof(s) __s = (s); \ + t __e = (expect); \ + t __d; \ + bool __of = check_shl_overflow(__a, __s, &__d); \ + if (__of != of) { \ + KUNIT_FAIL(test, "Expected (%s)(%s << 
%s) to%s overflow\n", \ + #t, #a, #s, of ? "" : " not"); \ + __failed = 1; \ + } else if (!__of && __d != __e) { \ + KUNIT_FAIL(test, "Expected (%s)(%s << %s) == %s\n", \ + #t, #a, #s, #expect); \ + if ((t)-1 < 0) \ + KUNIT_FAIL(test, "got %lld\n", (s64)__d); \ + else \ + KUNIT_FAIL(test, "got %llu\n", (u64)__d); \ + __failed = 1; \ + } \ + if (!__failed) \ + kunit_info(test, "ok: (%s)(%s << %s) == %s\n", #t, #a, #s, \ + of ? "overflow" : #expect); \ + __failed; \ +}) + + /* Sane shifts. */ + TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false); + TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false); + TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false); + TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false); + TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false); + TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false); + TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false); + TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false); + TEST_ONE_SHIFT(1, 0, int, 1 << 0, false); + TEST_ONE_SHIFT(1, 16, int, 1 << 16, false); + TEST_ONE_SHIFT(1, 30, int, 1 << 30, false); + TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false); + TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false); + TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false); + TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false); + TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false); + TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false); + TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false); + TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false); + TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false); + TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false); + TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false); + TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false); + TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false); + TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false); + TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, + 0xFFFFFFFFULL << 32, false); + + /* Sane shift: start and end with 0, without a too-wide shift. 
*/ + TEST_ONE_SHIFT(0, 7, u8, 0, false); + TEST_ONE_SHIFT(0, 15, u16, 0, false); + TEST_ONE_SHIFT(0, 31, unsigned int, 0, false); + TEST_ONE_SHIFT(0, 31, u32, 0, false); + TEST_ONE_SHIFT(0, 63, u64, 0, false); + + /* Sane shift: start and end with 0, without reaching signed bit. */ + TEST_ONE_SHIFT(0, 6, s8, 0, false); + TEST_ONE_SHIFT(0, 14, s16, 0, false); + TEST_ONE_SHIFT(0, 30, int, 0, false); + TEST_ONE_SHIFT(0, 30, s32, 0, false); + TEST_ONE_SHIFT(0, 62, s64, 0, false); + + /* Overflow: shifted the bit off the end. */ + TEST_ONE_SHIFT(1, 8, u8, 0, true); + TEST_ONE_SHIFT(1, 16, u16, 0, true); + TEST_ONE_SHIFT(1, 32, unsigned int, 0, true); + TEST_ONE_SHIFT(1, 32, u32, 0, true); + TEST_ONE_SHIFT(1, 64, u64, 0, true); + + /* Overflow: shifted into the signed bit. */ + TEST_ONE_SHIFT(1, 7, s8, 0, true); + TEST_ONE_SHIFT(1, 15, s16, 0, true); + TEST_ONE_SHIFT(1, 31, int, 0, true); + TEST_ONE_SHIFT(1, 31, s32, 0, true); + TEST_ONE_SHIFT(1, 63, s64, 0, true); + + /* Overflow: high bit falls off unsigned types. */ + /* 10010110 */ + TEST_ONE_SHIFT(150, 1, u8, 0, true); + /* 1000100010010110 */ + TEST_ONE_SHIFT(34966, 1, u16, 0, true); + /* 10000100000010001000100010010110 */ + TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true); + TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true); + /* 1000001000010000010000000100000010000100000010001000100010010110 */ + TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true); + + /* Overflow: bit shifted into signed bit on signed types. */ + /* 01001011 */ + TEST_ONE_SHIFT(75, 1, s8, 0, true); + /* 0100010001001011 */ + TEST_ONE_SHIFT(17483, 1, s16, 0, true); + /* 01000010000001000100010001001011 */ + TEST_ONE_SHIFT(1107575883, 1, s32, 0, true); + TEST_ONE_SHIFT(1107575883, 1, int, 0, true); + /* 0100000100001000001000000010000001000010000001000100010001001011 */ + TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true); + + /* Overflow: bit shifted past signed bit on signed types. 
*/ + /* 01001011 */ + TEST_ONE_SHIFT(75, 2, s8, 0, true); + /* 0100010001001011 */ + TEST_ONE_SHIFT(17483, 2, s16, 0, true); + /* 01000010000001000100010001001011 */ + TEST_ONE_SHIFT(1107575883, 2, s32, 0, true); + TEST_ONE_SHIFT(1107575883, 2, int, 0, true); + /* 0100000100001000001000000010000001000010000001000100010001001011 */ + TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true); + + /* Overflow: values larger than destination type. */ + TEST_ONE_SHIFT(0x100, 0, u8, 0, true); + TEST_ONE_SHIFT(0xFF, 0, s8, 0, true); + TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true); + TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true); + TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true); + TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true); + TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true); + TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true); + TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true); + + /* Nonsense: negative initial value. */ + TEST_ONE_SHIFT(-1, 0, s8, 0, true); + TEST_ONE_SHIFT(-1, 0, u8, 0, true); + TEST_ONE_SHIFT(-5, 0, s16, 0, true); + TEST_ONE_SHIFT(-5, 0, u16, 0, true); + TEST_ONE_SHIFT(-10, 0, int, 0, true); + TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true); + TEST_ONE_SHIFT(-100, 0, s32, 0, true); + TEST_ONE_SHIFT(-100, 0, u32, 0, true); + TEST_ONE_SHIFT(-10000, 0, s64, 0, true); + TEST_ONE_SHIFT(-10000, 0, u64, 0, true); + + /* Nonsense: negative shift values. */ + TEST_ONE_SHIFT(0, -5, s8, 0, true); + TEST_ONE_SHIFT(0, -5, u8, 0, true); + TEST_ONE_SHIFT(0, -10, s16, 0, true); + TEST_ONE_SHIFT(0, -10, u16, 0, true); + TEST_ONE_SHIFT(0, -15, int, 0, true); + TEST_ONE_SHIFT(0, -15, unsigned int, 0, true); + TEST_ONE_SHIFT(0, -20, s32, 0, true); + TEST_ONE_SHIFT(0, -20, u32, 0, true); + TEST_ONE_SHIFT(0, -30, s64, 0, true); + TEST_ONE_SHIFT(0, -30, u64, 0, true); + + /* Overflow: shifted at or beyond entire type's bit width. 
*/ + TEST_ONE_SHIFT(0, 8, u8, 0, true); + TEST_ONE_SHIFT(0, 9, u8, 0, true); + TEST_ONE_SHIFT(0, 8, s8, 0, true); + TEST_ONE_SHIFT(0, 9, s8, 0, true); + TEST_ONE_SHIFT(0, 16, u16, 0, true); + TEST_ONE_SHIFT(0, 17, u16, 0, true); + TEST_ONE_SHIFT(0, 16, s16, 0, true); + TEST_ONE_SHIFT(0, 17, s16, 0, true); + TEST_ONE_SHIFT(0, 32, u32, 0, true); + TEST_ONE_SHIFT(0, 33, u32, 0, true); + TEST_ONE_SHIFT(0, 32, int, 0, true); + TEST_ONE_SHIFT(0, 33, int, 0, true); + TEST_ONE_SHIFT(0, 32, s32, 0, true); + TEST_ONE_SHIFT(0, 33, s32, 0, true); + TEST_ONE_SHIFT(0, 64, u64, 0, true); + TEST_ONE_SHIFT(0, 65, u64, 0, true); + TEST_ONE_SHIFT(0, 64, s64, 0, true); + TEST_ONE_SHIFT(0, 65, s64, 0, true); + + /* + * Corner case: for unsigned types, we fail when we've shifted + * through the entire width of bits. For signed types, we might + * want to match this behavior, but that would mean noticing if + * we shift through all but the signed bit, and this is not + * currently detected (but we'll notice an overflow into the + * signed bit). So, for now, we will test this condition but + * mark it as not expected to overflow. + */ + TEST_ONE_SHIFT(0, 7, s8, 0, false); + TEST_ONE_SHIFT(0, 15, s16, 0, false); + TEST_ONE_SHIFT(0, 31, int, 0, false); + TEST_ONE_SHIFT(0, 31, s32, 0, false); + TEST_ONE_SHIFT(0, 63, s64, 0, false); +} + +/* + * Deal with the various forms of allocator arguments. See comments above + * the DEFINE_TEST_ALLOC() instances for mapping of the "bits". 
+ */ +#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN) +#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP) +#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE) +#define alloc000(alloc, arg, sz) alloc(sz) +#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE) +#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP) +#define free0(free, arg, ptr) free(ptr) +#define free1(free, arg, ptr) free(arg, ptr) + +/* Wrap around to 16K */ +#define TEST_SIZE (5 * 4096) + +#define DEFINE_TEST_ALLOC(test, func, free_func, want_arg, want_gfp, want_node) \ +static void test_ ## func (struct kunit *test, void *arg) \ +{ \ + volatile size_t a = TEST_SIZE; \ + volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \ + void *ptr; \ + \ + /* Tiny allocation test. */ \ + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1); \ + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ptr); \ + if (!ptr) { \ + kunit_err(test, #func " failed regular allocation?!\n"); \ + } \ + free ## want_arg (free_func, arg, ptr); \ + \ + /* Wrapped allocation test. */ \ + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, a * b); \ + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ptr); \ + if (!ptr) { \ + kunit_err(test, #func " unexpectedly failed bad wrapping?!\n"); \ + } \ + free ## want_arg (free_func, arg, ptr); \ + \ + /* Saturated allocation test. */ \ + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ + array_size(a, b)); \ + KUNIT_ASSERT_TRUE(test, IS_ERR_OR_NULL(ptr)); \ + if (ptr) { \ + kunit_err(test, #func " missed saturation!\n"); \ + free ## want_arg (free_func, arg, ptr); \ + } \ + kunit_info(test, #func " detected saturation\n"); \ +} + +/* + * Allocator uses a trailing node argument -------------------+ (e.g. kmalloc_node()) + * Allocator uses the gfp_t argument ----------------------+ | (e.g. kmalloc()) + * Allocator uses a special leading argument -----------+ | | (e.g. 
devm_kmalloc()) + * | | | + */ +DEFINE_TEST_ALLOC(test, kmalloc, kfree, 0, 1, 0); +DEFINE_TEST_ALLOC(test, kmalloc_node, kfree, 0, 1, 1); +DEFINE_TEST_ALLOC(test, kzalloc, kfree, 0, 1, 0); +DEFINE_TEST_ALLOC(test, kzalloc_node, kfree, 0, 1, 1); +DEFINE_TEST_ALLOC(test, vmalloc, vfree, 0, 0, 0); +DEFINE_TEST_ALLOC(test, vmalloc_node, vfree, 0, 0, 1); +DEFINE_TEST_ALLOC(test, vzalloc, vfree, 0, 0, 0); +DEFINE_TEST_ALLOC(test, vzalloc_node, vfree, 0, 0, 1); +DEFINE_TEST_ALLOC(test, kvmalloc, kvfree, 0, 1, 0); +DEFINE_TEST_ALLOC(test, kvmalloc_node, kvfree, 0, 1, 1); +DEFINE_TEST_ALLOC(test, kvzalloc, kvfree, 0, 1, 0); +DEFINE_TEST_ALLOC(test, kvzalloc_node, kvfree, 0, 1, 1); +DEFINE_TEST_ALLOC(test, devm_kmalloc, devm_kfree, 1, 1, 0); +DEFINE_TEST_ALLOC(test, devm_kzalloc, devm_kfree, 1, 1, 0); + +static void overflow_allocation_test(struct kunit *test) +{ + const char device_name[] = "overflow-test"; + struct device *dev; + + /* Create dummy device for devm_kmalloc()-family tests. */ + dev = root_device_register(device_name); + if (IS_ERR(dev)) + kunit_warn(test, "Cannot register test device\n"); + + test_kmalloc(test, NULL); + test_kmalloc_node(test, NULL); + test_kzalloc(test, NULL); + test_kzalloc_node(test, NULL); + test_kvmalloc(test, NULL); + test_kvmalloc_node(test, NULL); + test_kvzalloc(test, NULL); + test_kvzalloc_node(test, NULL); + test_vmalloc(test, NULL); + test_vmalloc_node(test, NULL); + test_vzalloc(test, NULL); + test_vzalloc_node(test, NULL); + test_devm_kmalloc(test, dev); + test_devm_kzalloc(test, dev); + + device_unregister(dev); +} + +static struct kunit_case overflow_test_cases[] = { + KUNIT_CASE(overflow_calculation_test), + KUNIT_CASE(overflow_shift_test), + KUNIT_CASE(overflow_allocation_test), + {} +}; + +static struct kunit_suite overflow_test_suite = { + .name = "overflow", + .test_cases = overflow_test_cases, +}; + +kunit_test_suites(&overflow_test_suite); + +MODULE_LICENSE("GPL v2");
On Thu, Jun 11, 2020 at 2:55 PM Vitor Massaru Iha vitor@massaru.org wrote:
This adds the conversion of the runtime tests of the check_*_overflow functions, from `lib/test_overflow.c` to KUnit tests.
A log similar to the one seen in dmesg when running test_overflow can be seen in `test.log`.
Signed-off-by: Vitor Massaru Iha vitor@massaru.org
lib/Kconfig.debug | 17 ++ lib/Makefile | 1 + lib/kunit_overflow_test.c | 590 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 608 insertions(+) create mode 100644 lib/kunit_overflow_test.c
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1f4ab7a2bdee..72fcfe1f24a4 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2075,6 +2075,23 @@ config SYSCTL_KUNIT_TEST
If unsure, say N.
+config OVERFLOW_KUNIT_TEST
tristate "KUnit test for overflow" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
This builds the overflow KUnit tests.
KUnit tests run during boot and output the results to the debug log
in TAP format (http://testanything.org/). Only useful for kernel devs
running KUnit test harness and are not for inclusion into a production
build.
For more information on KUnit and unit tests in general please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
Since this is a migration of the overflow runtime test, I feel like you should delete the old runtime test (`lib/test_overflow.c` and its Kconfig/Makefile entries) in this same patch. I also think it would probably make this easier to read for Kees and the other maintainers, since it highlights what you are changing in the test.
config LIST_KUNIT_TEST tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS depends on KUNIT diff --git a/lib/Makefile b/lib/Makefile index 685aee60de1d..a3290adc0019 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -309,3 +309,4 @@ obj-$(CONFIG_OBJAGG) += objagg.o
# KUnit tests obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o +obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += kunit_overflow_test.o diff --git a/lib/kunit_overflow_test.c b/lib/kunit_overflow_test.c new file mode 100644 index 000000000000..c3eb8f0d3d50 --- /dev/null +++ b/lib/kunit_overflow_test.c @@ -0,0 +1,590 @@ +// SPDX-License-Identifier: GPL-2.0 +/*
- This code is the conversion of the overflow test in runtime to KUnit tests.
- */
+/*
- Test cases for arithmetic overflow checks.
- */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <kunit/test.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/overflow.h> +#include <linux/vmalloc.h>
+#define DEFINE_TEST_ARRAY(t) \
static const struct test_ ## t { \
t a, b; \
t sum, diff, prod; \
bool s_of, d_of, p_of; \
} t ## _tests[]
+DEFINE_TEST_ARRAY(u8) = {
{0, 0, 0, 0, 0, false, false, false},
{1, 1, 2, 0, 1, false, false, false},
{0, 1, 1, U8_MAX, 0, false, true, false},
{1, 0, 1, 1, 0, false, false, false},
{0, U8_MAX, U8_MAX, 1, 0, false, true, false},
{U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false},
{1, U8_MAX, 0, 2, U8_MAX, true, true, false},
{U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false},
{U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true},
{U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true},
{U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true},
{1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false},
{1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true},
{1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false},
{1U << 7, 1U << 7, 0, 0, 0, true, false, true},
{48, 32, 80, 16, 0, false, false, true},
{128, 128, 0, 0, 0, true, false, true},
{123, 234, 101, 145, 110, true, true, true},
+};
+DEFINE_TEST_ARRAY(u16) = {
{0, 0, 0, 0, 0, false, false, false},
{1, 1, 2, 0, 1, false, false, false},
{0, 1, 1, U16_MAX, 0, false, true, false},
{1, 0, 1, 1, 0, false, false, false},
{0, U16_MAX, U16_MAX, 1, 0, false, true, false},
{U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false},
{1, U16_MAX, 0, 2, U16_MAX, true, true, false},
{U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false},
{U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true},
{U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true},
{U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true},
{1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false},
{1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true},
{1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false},
{1U << 15, 1U << 15, 0, 0, 0, true, false, true},
{123, 234, 357, 65425, 28782, false, true, false},
{1234, 2345, 3579, 64425, 10146, false, true, true},
+};
+DEFINE_TEST_ARRAY(u32) = {
{0, 0, 0, 0, 0, false, false, false},
{1, 1, 2, 0, 1, false, false, false},
{0, 1, 1, U32_MAX, 0, false, true, false},
{1, 0, 1, 1, 0, false, false, false},
{0, U32_MAX, U32_MAX, 1, 0, false, true, false},
{U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false},
{1, U32_MAX, 0, 2, U32_MAX, true, true, false},
{U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false},
{U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true},
{U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true},
{U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true},
{1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false},
{1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true},
{1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false},
{1U << 31, 1U << 31, 0, 0, 0, true, false, true},
{-2U, 1U, -1U, -3U, -2U, false, false, false},
{-4U, 5U, 1U, -9U, -20U, true, false, true},
+};
+DEFINE_TEST_ARRAY(u64) = {
{0, 0, 0, 0, 0, false, false, false},
{1, 1, 2, 0, 1, false, false, false},
{0, 1, 1, U64_MAX, 0, false, true, false},
{1, 0, 1, 1, 0, false, false, false},
{0, U64_MAX, U64_MAX, 1, 0, false, true, false},
{U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false},
{1, U64_MAX, 0, 2, U64_MAX, true, true, false},
{U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false},
{U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true},
{U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true},
{U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true},
{1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false},
{1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true},
{1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false},
{1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true},
{1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */,
11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL,
false, true, false},
{-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true},
+};
+DEFINE_TEST_ARRAY(s8) = {
{0, 0, 0, 0, 0, false, false, false},
{0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false},
{S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false},
{0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false},
{S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false},
{-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true},
{S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true},
{-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false},
{S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false},
{-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false},
{-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false},
{1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false},
{S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false},
{1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false},
{S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false},
{S8_MIN, S8_MIN, 0, 0, 0, true, false, true},
{S8_MAX, S8_MAX, -2, 0, 1, true, false, true},
{-4, -32, -36, 28, -128, false, false, true},
{-4, 32, 28, -36, -128, false, false, false},
+};
+DEFINE_TEST_ARRAY(s16) = {
{0, 0, 0, 0, 0, false, false, false},
{0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false},
{S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false},
{0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false},
{S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false},
{-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true},
{S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true},
{-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false},
{S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false},
{-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false},
{-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false},
{1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false},
{S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false},
{1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false},
{S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false},
{S16_MIN, S16_MIN, 0, 0, 0, true, false, true},
{S16_MAX, S16_MAX, -2, 0, 1, true, false, true},
+};
+DEFINE_TEST_ARRAY(s32) = {
{0, 0, 0, 0, 0, false, false, false},
{0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false},
{S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false},
{0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false},
{S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false},
{-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true},
{S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true},
{-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false},
{S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false},
{-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false},
{-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false},
{1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false},
{S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false},
{1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false},
{S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false},
{S32_MIN, S32_MIN, 0, 0, 0, true, false, true},
{S32_MAX, S32_MAX, -2, 0, 1, true, false, true},
+};
+DEFINE_TEST_ARRAY(s64) = {
{0, 0, 0, 0, 0, false, false, false},
{0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false},
{S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false},
{0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false},
{S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false},
{-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true},
{S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true},
{-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false},
{S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false},
{-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false},
{-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false},
{1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false},
{S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false},
{1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false},
{S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false},
{S64_MIN, S64_MIN, 0, 0, 0, true, false, true},
{S64_MAX, S64_MAX, -2, 0, 1, true, false, true},
{-1, -1, -2, 0, 1, false, false, false},
{-1, -128, -129, 127, 128, false, false, false},
{-128, -1, -129, -127, 128, false, false, false},
{0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false},
+};
/*
 * Run a single check_<op>_overflow() call and report a KUnit failure when
 * either the returned overflow flag or the computed result disagrees with
 * the expected values from the test table.
 */
#define check_one_op(test, t, fmt, op, sym, a, b, r, of) do {	\
	t _r;							\
	bool _of;						\
								\
	_of = check_ ## op ## _overflow(a, b, &_r);		\
	if (_of != of) {					\
		KUNIT_FAIL(test, "Expected "fmt" "sym" "fmt	\
			" to%s overflow (type %s)\n",		\
			a, b, of ? "" : " not", #t);		\
	}							\
	if (_r != r) {						\
		KUNIT_FAIL(test, "Expected "fmt" "sym" "fmt" == " \
			fmt", got "fmt" (type %s)\n",		\
			a, b, r, _r, #t);			\
	}							\
} while (0)
/*
 * Generate, for type <t>:
 *  - do_test_<t>(): checks one table entry, exercising the commutativity
 *    of + and * by testing both argument orders;
 *  - test_<t>_overflow(): walks the whole <t>_tests[] table.
 */
#define DEFINE_TEST_FUNC(test, t, fmt)					\
static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \
{									\
	check_one_op(test, t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
	check_one_op(test, t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
	check_one_op(test, t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
	check_one_op(test, t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
	check_one_op(test, t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
}									\
									\
static void test_ ## t ## _overflow(struct kunit *test)			\
{									\
	unsigned i;							\
									\
	kunit_warn(test, "%-3s: %zu arithmetic tests\n", #t,		\
		ARRAY_SIZE(t ## _tests));				\
	for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i)			\
		do_test_ ## t(test, &t ## _tests[i]);			\
}
+DEFINE_TEST_FUNC(test, u8, "%d"); +DEFINE_TEST_FUNC(test, s8, "%d"); +DEFINE_TEST_FUNC(test, u16, "%d"); +DEFINE_TEST_FUNC(test, s16, "%d"); +DEFINE_TEST_FUNC(test, u32, "%u"); +DEFINE_TEST_FUNC(test, s32, "%d"); +#if BITS_PER_LONG == 64 +DEFINE_TEST_FUNC(test, u64, "%llu"); +DEFINE_TEST_FUNC(test, s64, "%lld"); +#endif
/*
 * KUnit case: run the arithmetic overflow tables for every integer type.
 * (The original patch called test_s8_overflow() twice by accident; each
 * type is now exercised exactly once.)
 */
static void overflow_calculation_test(struct kunit *test)
{
	test_u8_overflow(test);
	test_s8_overflow(test);
	test_u16_overflow(test);
	test_s16_overflow(test);
	test_u32_overflow(test);
	test_s32_overflow(test);
#if BITS_PER_LONG == 64
	test_u64_overflow(test);
	test_s64_overflow(test);
#endif
}
+static void overflow_shift_test(struct kunit *test) +{ +/* Args are: value, shift, type, expected result, overflow expected */ +#define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \
int __failed = 0; \
typeof(a) __a = (a); \
typeof(s) __s = (s); \
t __e = (expect); \
t __d; \
bool __of = check_shl_overflow(__a, __s, &__d); \
if (__of != of) { \
KUNIT_FAIL(test, "Expected (%s)(%s << %s) to%s overflow\n", \
I mentioned this offline: I am of two minds on this. Part of me would like to make it more KUnit-y, but part of me thinks that it is best to not change the test more than necessary. I am fine either way, I just wanted to draw attention to it for other reviewers who may care.
#t, #a, #s, of ? "" : " not"); \
__failed = 1; \
} else if (!__of && __d != __e) { \
KUNIT_FAIL(test, "Expected (%s)(%s << %s) == %s\n", \
#t, #a, #s, #expect); \
if ((t)-1 < 0) \
KUNIT_FAIL(test, "got %lld\n", (s64)__d); \
else \
KUNIT_FAIL(test, "got %llu\n", (u64)__d); \
__failed = 1; \
} \
if (!__failed) \
kunit_info(test, "ok: (%s)(%s << %s) == %s\n", #t, #a, #s, \
of ? "overflow" : #expect); \
__failed; \
+})
/* Sane shifts. */
TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64,
0xFFFFFFFFULL << 32, false);
/* Sane shift: start and end with 0, without a too-wide shift. */
TEST_ONE_SHIFT(0, 7, u8, 0, false);
TEST_ONE_SHIFT(0, 15, u16, 0, false);
TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
TEST_ONE_SHIFT(0, 31, u32, 0, false);
TEST_ONE_SHIFT(0, 63, u64, 0, false);
/* Sane shift: start and end with 0, without reaching signed bit. */
TEST_ONE_SHIFT(0, 6, s8, 0, false);
TEST_ONE_SHIFT(0, 14, s16, 0, false);
TEST_ONE_SHIFT(0, 30, int, 0, false);
TEST_ONE_SHIFT(0, 30, s32, 0, false);
TEST_ONE_SHIFT(0, 62, s64, 0, false);
/* Overflow: shifted the bit off the end. */
TEST_ONE_SHIFT(1, 8, u8, 0, true);
TEST_ONE_SHIFT(1, 16, u16, 0, true);
TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
TEST_ONE_SHIFT(1, 32, u32, 0, true);
TEST_ONE_SHIFT(1, 64, u64, 0, true);
/* Overflow: shifted into the signed bit. */
TEST_ONE_SHIFT(1, 7, s8, 0, true);
TEST_ONE_SHIFT(1, 15, s16, 0, true);
TEST_ONE_SHIFT(1, 31, int, 0, true);
TEST_ONE_SHIFT(1, 31, s32, 0, true);
TEST_ONE_SHIFT(1, 63, s64, 0, true);
/* Overflow: high bit falls off unsigned types. */
/* 10010110 */
TEST_ONE_SHIFT(150, 1, u8, 0, true);
/* 1000100010010110 */
TEST_ONE_SHIFT(34966, 1, u16, 0, true);
/* 10000100000010001000100010010110 */
TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
/* 1000001000010000010000000100000010000100000010001000100010010110 */
TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
/* Overflow: bit shifted into signed bit on signed types. */
/* 01001011 */
TEST_ONE_SHIFT(75, 1, s8, 0, true);
/* 0100010001001011 */
TEST_ONE_SHIFT(17483, 1, s16, 0, true);
/* 01000010000001000100010001001011 */
TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
/* 0100000100001000001000000010000001000010000001000100010001001011 */
TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
/* Overflow: bit shifted past signed bit on signed types. */
/* 01001011 */
TEST_ONE_SHIFT(75, 2, s8, 0, true);
/* 0100010001001011 */
TEST_ONE_SHIFT(17483, 2, s16, 0, true);
/* 01000010000001000100010001001011 */
TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
/* 0100000100001000001000000010000001000010000001000100010001001011 */
TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
/* Overflow: values larger than destination type. */
TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
/* Nonsense: negative initial value. */
TEST_ONE_SHIFT(-1, 0, s8, 0, true);
TEST_ONE_SHIFT(-1, 0, u8, 0, true);
TEST_ONE_SHIFT(-5, 0, s16, 0, true);
TEST_ONE_SHIFT(-5, 0, u16, 0, true);
TEST_ONE_SHIFT(-10, 0, int, 0, true);
TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
TEST_ONE_SHIFT(-100, 0, s32, 0, true);
TEST_ONE_SHIFT(-100, 0, u32, 0, true);
TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
/* Nonsense: negative shift values. */
TEST_ONE_SHIFT(0, -5, s8, 0, true);
TEST_ONE_SHIFT(0, -5, u8, 0, true);
TEST_ONE_SHIFT(0, -10, s16, 0, true);
TEST_ONE_SHIFT(0, -10, u16, 0, true);
TEST_ONE_SHIFT(0, -15, int, 0, true);
TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
TEST_ONE_SHIFT(0, -20, s32, 0, true);
TEST_ONE_SHIFT(0, -20, u32, 0, true);
TEST_ONE_SHIFT(0, -30, s64, 0, true);
TEST_ONE_SHIFT(0, -30, u64, 0, true);
/* Overflow: shifted at or beyond entire type's bit width. */
TEST_ONE_SHIFT(0, 8, u8, 0, true);
TEST_ONE_SHIFT(0, 9, u8, 0, true);
TEST_ONE_SHIFT(0, 8, s8, 0, true);
TEST_ONE_SHIFT(0, 9, s8, 0, true);
TEST_ONE_SHIFT(0, 16, u16, 0, true);
TEST_ONE_SHIFT(0, 17, u16, 0, true);
TEST_ONE_SHIFT(0, 16, s16, 0, true);
TEST_ONE_SHIFT(0, 17, s16, 0, true);
TEST_ONE_SHIFT(0, 32, u32, 0, true);
TEST_ONE_SHIFT(0, 33, u32, 0, true);
TEST_ONE_SHIFT(0, 32, int, 0, true);
TEST_ONE_SHIFT(0, 33, int, 0, true);
TEST_ONE_SHIFT(0, 32, s32, 0, true);
TEST_ONE_SHIFT(0, 33, s32, 0, true);
TEST_ONE_SHIFT(0, 64, u64, 0, true);
TEST_ONE_SHIFT(0, 65, u64, 0, true);
TEST_ONE_SHIFT(0, 64, s64, 0, true);
TEST_ONE_SHIFT(0, 65, s64, 0, true);
/*
* Corner case: for unsigned types, we fail when we've shifted
* through the entire width of bits. For signed types, we might
* want to match this behavior, but that would mean noticing if
* we shift through all but the signed bit, and this is not
* currently detected (but we'll notice an overflow into the
* signed bit). So, for now, we will test this condition but
* mark it as not expected to overflow.
*/
TEST_ONE_SHIFT(0, 7, s8, 0, false);
TEST_ONE_SHIFT(0, 15, s16, 0, false);
TEST_ONE_SHIFT(0, 31, int, 0, false);
TEST_ONE_SHIFT(0, 31, s32, 0, false);
TEST_ONE_SHIFT(0, 63, s64, 0, false);
+}
/*
 * Deal with the various forms of allocator arguments. See comments above
 * the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
 */
#define alloc_GFP		(GFP_KERNEL | __GFP_NOWARN)
#define alloc010(alloc, arg, sz)	alloc(sz, alloc_GFP)
#define alloc011(alloc, arg, sz)	alloc(sz, alloc_GFP, NUMA_NO_NODE)
#define alloc000(alloc, arg, sz)	alloc(sz)
#define alloc001(alloc, arg, sz)	alloc(sz, NUMA_NO_NODE)
#define alloc110(alloc, arg, sz)	alloc(arg, sz, alloc_GFP)
#define free0(free, arg, ptr)		free(ptr)
#define free1(free, arg, ptr)		free(arg, ptr)

/* Wrap around to 16K */
#define TEST_SIZE	(5 * 4096)
/*
 * Generate test_<func>(): verify that <func> succeeds for a tiny and a
 * (wrapping) large request, but returns NULL for a size computed with the
 * saturating array_size() helper. Bail out early when a plain allocation
 * fails, since the remaining checks would then be meaningless.
 */
#define DEFINE_TEST_ALLOC(test, func, free_func, want_arg, want_gfp, want_node) \
static void test_ ## func (struct kunit *test, void *arg)		\
{									\
	volatile size_t a = TEST_SIZE;					\
	volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1;			\
	void *ptr;							\
									\
	/* Tiny allocation test. */					\
	ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1); \
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ptr);			\
	if (!ptr) {							\
		kunit_err(test, #func " failed regular allocation?!\n"); \
		return;							\
	}								\
	free ## want_arg (free_func, arg, ptr);				\
									\
	/* Wrapped allocation test. */					\
	ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, a * b); \
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ptr);			\
	if (!ptr) {							\
		kunit_err(test, #func " unexpectedly failed bad wrapping?!\n"); \
		return;							\
	}								\
	free ## want_arg (free_func, arg, ptr);				\
									\
	/* Saturated allocation test. */				\
	ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg,	\
			array_size(a, b));				\
	KUNIT_ASSERT_TRUE(test, IS_ERR_OR_NULL(ptr));			\
	if (ptr) {							\
		kunit_err(test, #func " missed saturation!\n");		\
		free ## want_arg (free_func, arg, ptr);			\
		return;							\
	}								\
	kunit_info(test, #func " detected saturation\n");		\
}
+/*
- Allocator uses a trailing node argument -------------------+ (e.g. kmalloc_node())
- Allocator uses the gfp_t argument ----------------------+ | (e.g. kmalloc())
- Allocator uses a special leading argument -----------+ | | (e.g. devm_kmalloc())
| | |
- */
+DEFINE_TEST_ALLOC(test, kmalloc, kfree, 0, 1, 0); +DEFINE_TEST_ALLOC(test, kmalloc_node, kfree, 0, 1, 1); +DEFINE_TEST_ALLOC(test, kzalloc, kfree, 0, 1, 0); +DEFINE_TEST_ALLOC(test, kzalloc_node, kfree, 0, 1, 1); +DEFINE_TEST_ALLOC(test, vmalloc, vfree, 0, 0, 0); +DEFINE_TEST_ALLOC(test, vmalloc_node, vfree, 0, 0, 1); +DEFINE_TEST_ALLOC(test, vzalloc, vfree, 0, 0, 0); +DEFINE_TEST_ALLOC(test, vzalloc_node, vfree, 0, 0, 1); +DEFINE_TEST_ALLOC(test, kvmalloc, kvfree, 0, 1, 0); +DEFINE_TEST_ALLOC(test, kvmalloc_node, kvfree, 0, 1, 1); +DEFINE_TEST_ALLOC(test, kvzalloc, kvfree, 0, 1, 0); +DEFINE_TEST_ALLOC(test, kvzalloc_node, kvfree, 0, 1, 1); +DEFINE_TEST_ALLOC(test, devm_kmalloc, devm_kfree, 1, 1, 0); +DEFINE_TEST_ALLOC(test, devm_kzalloc, devm_kfree, 1, 1, 0);
+static void overflow_allocation_test(struct kunit *test) +{
const char device_name[] = "overflow-test";
struct device *dev;
/* Create dummy device for devm_kmalloc()-family tests. */
dev = root_device_register(device_name);
if (IS_ERR(dev))
kunit_warn(test, "Cannot register test device\n");
test_kmalloc(test, NULL);
test_kmalloc_node(test, NULL);
test_kzalloc(test, NULL);
test_kzalloc_node(test, NULL);
test_kvmalloc(test, NULL);
test_kvmalloc_node(test, NULL);
test_kvzalloc(test, NULL);
test_kvzalloc_node(test, NULL);
test_vmalloc(test, NULL);
test_vmalloc_node(test, NULL);
test_vzalloc(test, NULL);
test_vzalloc_node(test, NULL);
test_devm_kmalloc(test, dev);
test_devm_kzalloc(test, dev);
device_unregister(dev);
+}
+static struct kunit_case overflow_test_cases[] = {
KUNIT_CASE(overflow_calculation_test),
KUNIT_CASE(overflow_shift_test),
KUNIT_CASE(overflow_allocation_test),
{}
+};
+static struct kunit_suite overflow_test_suite = {
.name = "overflow",
.test_cases = overflow_test_cases,
+};
+kunit_test_suites(&overflow_test_suite);
+MODULE_LICENSE("GPL v2");
2.26.2
On Thu, Jun 11, 2020 at 06:55:01PM -0300, Vitor Massaru Iha wrote:
This adds the conversion of the runtime tests of the check_*_overflow functions from `lib/test_overflow.c` to KUnit tests.
A log similar to the one seen in dmesg when running test_overflow can be seen in `test.log`.
Signed-off-by: Vitor Massaru Iha vitor@massaru.org
lib/Kconfig.debug | 17 ++ lib/Makefile | 1 + lib/kunit_overflow_test.c | 590 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 608 insertions(+) create mode 100644 lib/kunit_overflow_test.c
What tree is this based on? I can't apply it to v5.7, linux-next, nor Linus's latest. I've fixed it up to apply to linux-next for now. :)
Looking at linux-next, though, I am reminded of my agony over naming:
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += kunit_overflow_test.o
*-test test_* *_test
This has to get fixed now, and the naming convention needs to be documented. For old tests, the preferred naming was test_*. For kunit, I think it should be kunit_* (and no trailing _test; that's redundant).
For this bikeshed, I think it should be kunit_overflow.c
For the CONFIG name, it seems to be suggested in docs to be *_KUNIT_TEST:
... menuconfig). From there, you can enable any KUnit tests you want: they usually have config options ending in ``_KUNIT_TEST``. ...
I think much stronger language needs to be added to "Writing your first test" (which actually recommends the wrong thing: "config MISC_EXAMPLE_TEST"). And then doesn't specify a module file name, though it hints at one:
... obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o ...
So, please, let's get this documented: we really really need a single naming convention for these.
For Kconfig in the tree, I see:
drivers/base/Kconfig:config PM_QOS_KUNIT_TEST drivers/base/test/Kconfig:config KUNIT_DRIVER_PE_TEST fs/ext4/Kconfig:config EXT4_KUNIT_TESTS lib/Kconfig.debug:config SYSCTL_KUNIT_TEST lib/Kconfig.debug:config OVERFLOW_KUNIT_TEST lib/Kconfig.debug:config LIST_KUNIT_TEST lib/Kconfig.debug:config LINEAR_RANGES_TEST lib/kunit/Kconfig:menuconfig KUNIT lib/kunit/Kconfig:config KUNIT_DEBUGFS lib/kunit/Kconfig:config KUNIT_TEST lib/kunit/Kconfig:config KUNIT_EXAMPLE_TEST lib/kunit/Kconfig:config KUNIT_ALL_TESTS
Which is:
*_KUNIT_TEST KUNIT_*_TEST KUNIT_*_TESTS *_TEST
Nooooo. ;)
If it should all be *_KUNIT_TEST, let's do that. I think just *_KUNIT would be sufficient (again, adding the word "test" to "kunit" is redundant). And it absolutely should not be a prefix, otherwise it'll get sorted away from the thing it's named after. So my preference is here would be CONFIG_OVERFLOW_KUNIT. (Yes the old convention was CONFIG_TEST_*, but those things tended to be regression tests, not unit tests.)
Please please, can we fix this before we add anything more?
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1f4ab7a2bdee..72fcfe1f24a4 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2075,6 +2075,23 @@ config SYSCTL_KUNIT_TEST
If unsure, say N.
+config OVERFLOW_KUNIT_TEST
- tristate "KUnit test for overflow" if !KUNIT_ALL_TESTS
- depends on KUNIT
- default KUNIT_ALL_TESTS
- help
This builds the overflow KUnit tests.
KUnit tests run during boot and output the results to the debug log
in TAP format (http://testanything.org/). Only useful for kernel devs
running KUnit test harness and are not for inclusion into a production
build.
For more information on KUnit and unit tests in general please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
config LIST_KUNIT_TEST tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS depends on KUNIT
Regarding output:
[ 36.611358] TAP version 14 [ 36.611953] # Subtest: overflow [ 36.611954] 1..3 ... [ 36.622914] # overflow_calculation_test: s64: 21 arithmetic tests [ 36.624020] ok 1 - overflow_calculation_test ... [ 36.731096] # overflow_shift_test: ok: (s64)(0 << 63) == 0 [ 36.731840] ok 2 - overflow_shift_test ... [ 36.750294] kunit_try_catch: vmalloc: allocation failure: 18446744073709551615 bytes, mode:0xcc0(GFP_KERNEL), nodemask=(null),cpuset=/,mems_allowed=0 ... [ 36.805350] # overflow_allocation_test: devm_kzalloc detected saturation [ 36.807763] ok 3 - overflow_allocation_test [ 36.807765] ok 1 - overflow
A few things here....
- On the outer test report, there is no "plan" line (I was expecting "1..1"). Technically it's optional, but it seems like the information is available. :)
- The subtest should have its own "TAP version 14" line, and it should be using the diagnostic line prefix for the top-level test (this is what kselftest is doing).
- There is no way to distinguish top-level TAP output from kernel log lines. I think we should stick with the existing marker, which is "# ", so that kernel output has no way to be interpreted as TAP details -- unless it intentionally starts adding "#"s. ;)
- There is no summary line (to help humans). For example, the kselftest API produces a final pass/fail report.
Taken together, I was expecting the output to be:
[ 36.611358] # TAP version 14 [ 36.611953] # 1..1 [ 36.611958] # # TAP version 14 [ 36.611954] # # 1..3 ... [ 36.622914] # # # overflow_calculation_test: s64: 21 arithmetic tests [ 36.624020] # # ok 1 - overflow_calculation_test ... [ 36.731096] # # # overflow_shift_test: ok: (s64)(0 << 63) == 0 [ 36.731840] # # ok 2 - overflow_shift_test ... [ 36.750294] kunit_try_catch: vmalloc: allocation failure: 18446744073709551615 bytes, mode:0xcc0(GFP_KERNEL), nodemask=(null),cpuset=/,mems_allowed=0 ... [ 36.805350] # # # overflow_allocation_test: devm_kzalloc detected saturation [ 36.807763] # # ok 3 - overflow_allocation_test [ 36.807763] # # # overflow: PASS [ 36.807765] # ok 1 - overflow [ 36.807767] # # kunit: PASS
But I assume there are threads on this that I've not read... :)
Now, speaking to actual behavior, I love it. :) All the tests are there (and then some -- noted below).
diff --git a/lib/Makefile b/lib/Makefile index 685aee60de1d..a3290adc0019 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -309,3 +309,4 @@ obj-$(CONFIG_OBJAGG) += objagg.o
# KUnit tests obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o +obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += kunit_overflow_test.o diff --git a/lib/kunit_overflow_test.c b/lib/kunit_overflow_test.c new file mode 100644 index 000000000000..c3eb8f0d3d50 --- /dev/null +++ b/lib/kunit_overflow_test.c
A lot of this file is unchanged, so I would suggest doing this as a "git mv lib/test_overflow.c lib/kunit_overflow.c" and then put the changes into the file. Then it should be easier to track git history, etc.
Without this, it's a lot harder to review this patch since I'm just looking at a 590 new lines. ;) Really, it's a diff (which I'll paste here for the code review...)
--- a/lib/test_overflow.c 2020-06-12 14:07:11.161999209 -0700 +++ b/lib/kunit_overflow_test.c 2020-06-12 14:07:27.950183116 -0700 @@ -1,17 +1,18 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT +// SPDX-License-Identifier: GPL-2.0
Please don't change the license.
+/*
- This code is the conversion of the overflow test in runtime to KUnit tests.
- */
This can be left off.
/*
- Test cases for arithmetic overflow checks.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <kunit/test.h> #include <linux/device.h> #include <linux/init.h> -#include <linux/kernel.h> #include <linux/mm.h> -#include <linux/module.h> #include <linux/overflow.h> -#include <linux/slab.h> -#include <linux/types.h> #include <linux/vmalloc.h> #define DEFINE_TEST_ARRAY(t) \ @@ -19,7 +20,7 @@ t a, b; \ t sum, diff, prod; \ bool s_of, d_of, p_of; \
- } t ## _tests[] __initconst
- } t ## _tests[]
Why drop the __initconst?
DEFINE_TEST_ARRAY(u8) = { {0, 0, 0, 0, 0, false, false, false}, @@ -44,6 +45,7 @@ {128, 128, 0, 0, 0, true, false, true}, {123, 234, 101, 145, 110, true, true, true}, };
Style nit: I'd like to avoid the blank lines here.
DEFINE_TEST_ARRAY(u16) = { {0, 0, 0, 0, 0, false, false, false}, {1, 1, 2, 0, 1, false, false, false}, @@ -66,6 +68,7 @@ {123, 234, 357, 65425, 28782, false, true, false}, {1234, 2345, 3579, 64425, 10146, false, true, true}, };
DEFINE_TEST_ARRAY(u32) = { {0, 0, 0, 0, 0, false, false, false}, {1, 1, 2, 0, 1, false, false, false}, @@ -163,6 +166,7 @@ {S16_MIN, S16_MIN, 0, 0, 0, true, false, true}, {S16_MAX, S16_MAX, -2, 0, 1, true, false, true}, };
DEFINE_TEST_ARRAY(s32) = { {0, 0, 0, 0, 0, false, false, false}, @@ -186,6 +190,7 @@ {S32_MIN, S32_MIN, 0, 0, 0, true, false, true}, {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, };
DEFINE_TEST_ARRAY(s64) = { {0, 0, 0, 0, 0, false, false, false}, @@ -215,254 +220,243 @@ {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, }; -#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
- t _r; \
- bool _of; \
\
- _of = check_ ## op ## _overflow(a, b, &_r); \
- if (_of != of) { \
pr_warn("expected "fmt" "sym" "fmt \
" to%s overflow (type %s)\n", \
a, b, of ? "" : " not", #t); \
err = 1; \
- } \
- if (_r != r) { \
pr_warn("expected "fmt" "sym" "fmt" == " \
fmt", got "fmt" (type %s)\n", \
a, b, r, _r, #t); \
err = 1; \
- } \
+#define check_one_op(test, t, fmt, op, sym, a, b, r, of) do { \
- t _r; \
- bool _of; \
\
- _of = check_ ## op ## _overflow(a, b, &_r); \
- if (_of != of) { \
KUNIT_FAIL(test, "Expected "fmt" "sym" "fmt \
" to%s overflow (type %s)\n", \
a, b, of ? "" : " not", #t); \
- } \
- if (_r != r) { \
KUNIT_FAIL(test, "Expected "fmt" "sym" "fmt" == " \
fmt", got "fmt" (type %s)\n", \
a, b, r, _r, #t); \
- } \
} while (0)
The trailing \ makes this more awkward to diff, but one thing I'm not quite seeing is why "test" needs to be added. It's not a variable in these macros. i.e. it is used literally:
#define DEFINE_TEST_FUNC(test, t, fmt) \ static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \ { \ check_one_op(test, t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ ...
Only callers of the do_test_*() would need to be changed. I think all of these macros just need the pr_warn/KUNIT_FAIL changes, and the function prototypes updated to include struct kunit *test.
-#define DEFINE_TEST_FUNC(t, fmt) \ -static int __init do_test_ ## t(const struct test_ ## t *p) \ -{ \
- int err = 0; \
\
- check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
- check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
- check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
- check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
- check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
\
- return err; \
-} \
\
-static int __init test_ ## t ## _overflow(void) { \
- int err = 0; \
- unsigned i; \
\
- pr_info("%-3s: %zu arithmetic tests\n", #t, \
ARRAY_SIZE(t ## _tests)); \
- for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
err |= do_test_ ## t(&t ## _tests[i]); \
- return err; \
+#define DEFINE_TEST_FUNC(test, t, fmt) \ +static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \ +{ \
- check_one_op(test, t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
- check_one_op(test, t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
- check_one_op(test, t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
- check_one_op(test, t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
- check_one_op(test, t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
+} \
Then these all only need the prototype on the actual function changed.
\
+static void test_ ## t ## _overflow(struct kunit *test) \ +{ \
- unsigned i; \
\
- kunit_warn(test, "%-3s: %zu arithmetic tests\n", #t, \
ARRAY_SIZE(t ## _tests)); \
- for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
do_test_ ## t(test, &t ## _tests[i]); \
} -DEFINE_TEST_FUNC(u8, "%d"); -DEFINE_TEST_FUNC(s8, "%d"); -DEFINE_TEST_FUNC(u16, "%d"); -DEFINE_TEST_FUNC(s16, "%d"); -DEFINE_TEST_FUNC(u32, "%u"); -DEFINE_TEST_FUNC(s32, "%d"); +DEFINE_TEST_FUNC(test, u8, "%d"); +DEFINE_TEST_FUNC(test, s8, "%d"); +DEFINE_TEST_FUNC(test, u16, "%d"); +DEFINE_TEST_FUNC(test, s16, "%d"); +DEFINE_TEST_FUNC(test, u32, "%u"); +DEFINE_TEST_FUNC(test, s32, "%d"); #if BITS_PER_LONG == 64 -DEFINE_TEST_FUNC(u64, "%llu"); -DEFINE_TEST_FUNC(s64, "%lld"); +DEFINE_TEST_FUNC(test, u64, "%llu"); +DEFINE_TEST_FUNC(test, s64, "%lld"); #endif
And all the actual uses of the macros can be left unchanged.
-static int __init test_overflow_calculation(void) +static void overflow_calculation_test(struct kunit *test) {
- int err = 0;
- err |= test_u8_overflow();
- err |= test_s8_overflow();
- err |= test_u16_overflow();
- err |= test_s16_overflow();
- err |= test_u32_overflow();
- err |= test_s32_overflow();
- test_u8_overflow(test);
- test_s8_overflow(test);
- test_s8_overflow(test);
The s8 test got added twice here accidentally.
- test_u16_overflow(test);
- test_s16_overflow(test);
- test_u32_overflow(test);
- test_s32_overflow(test);
#if BITS_PER_LONG == 64
- err |= test_u64_overflow();
- err |= test_s64_overflow();
- test_u64_overflow(test);
- test_s64_overflow(test);
#endif
- return err;
}
I think it might be nice to keep the "err" vars around for a final report line (maybe per test)? (It would keep the diff churn way lower, too...)
So, yes! I like it. :) Most of my comments here have nothing to do with specifically this patch (sorry)! But I'd love to see a v2.
Thanks for doing this! I'm glad to see more TAP output. :)
On Sat, Jun 13, 2020 at 6:36 AM Kees Cook keescook@chromium.org wrote:
On Thu, Jun 11, 2020 at 06:55:01PM -0300, Vitor Massaru Iha wrote:
This adds the conversion of the runtime tests of the check_*_overflow functions, from `lib/test_overflow.c` to KUnit tests.
A log similar to the one seen in dmesg when running test_overflow can be seen in `test.log`.
Signed-off-by: Vitor Massaru Iha vitor@massaru.org
lib/Kconfig.debug | 17 ++ lib/Makefile | 1 + lib/kunit_overflow_test.c | 590 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 608 insertions(+) create mode 100644 lib/kunit_overflow_test.c
What tree is this based on? I can't apply it to v5.7, linux-next, nor Linus's latest. I've fixed it up to apply to linux-next for now. :)
This applies to the kselftest/kunit branch for me.
Looking at linux-next, though, I am reminded of my agony over naming:
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += kunit_overflow_test.o
*-test test_* *_test
This has to get fixed now, and the naming convention needs to be documented. For old tests, the preferred naming was test_*. For kunit, I think it should be kunit_* (and no trailing _test; that's redundant).
For for this bikeshed, I think it should be kunit_overflow.c
For the CONFIG name, it seems to be suggested in docs to be *_KUNIT_TEST:
... menuconfig). From there, you can enable any KUnit tests you want: they usually have config options ending in ``_KUNIT_TEST``. ...
Yeah, _KUNIT_TEST was what we've sort-of implicitly decided on for config names, but the documentation does need to happen. We haven't put as much thought into standardising the filenames much, though.
Both of these are slightly complicated by cases like this where tests are being ported from a non-KUnit test to KUnit. There's a small argument there for trying to keep the name the same, though personally I suspect consistency is more important.
I think much stronger language needs to be added to "Writing your first test" (which actually recommends the wrong thing: "config MISC_EXAMPLE_TEST"). And then doesn't specify a module file name, though it hints at one:
... obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o ...
So, please, let's get this documented: we really really need a single naming convention for these.
For Kconfig in the tree, I see:
drivers/base/Kconfig:config PM_QOS_KUNIT_TEST drivers/base/test/Kconfig:config KUNIT_DRIVER_PE_TEST fs/ext4/Kconfig:config EXT4_KUNIT_TESTS lib/Kconfig.debug:config SYSCTL_KUNIT_TEST lib/Kconfig.debug:config OVERFLOW_KUNIT_TEST lib/Kconfig.debug:config LIST_KUNIT_TEST lib/Kconfig.debug:config LINEAR_RANGES_TEST lib/kunit/Kconfig:menuconfig KUNIT lib/kunit/Kconfig:config KUNIT_DEBUGFS lib/kunit/Kconfig:config KUNIT_TEST lib/kunit/Kconfig:config KUNIT_EXAMPLE_TEST lib/kunit/Kconfig:config KUNIT_ALL_TESTS
Which is:
*_KUNIT_TEST KUNIT_*_TEST KUNIT_*_TESTS *_TEST
Nooooo. ;)
If it should all be *_KUNIT_TEST, let's do that. I think just *_KUNIT would be sufficient (again, adding the word "test" to "kunit" is redundant). And it absolutely should not be a prefix, otherwise it'll get sorted away from the thing it's named after. So my preference is here would be CONFIG_OVERFLOW_KUNIT. (Yes the old convention was CONFIG_TEST_*, but those things tended to be regression tests, not unit tests.)
Please please, can we fix this before we add anything more?
Alas, the plans to document test coding style / conventions kept getting pre-empted: I'll drag it back up to the top of the to-do list, and see if we can't prioritise it. I think we'd hoped to be able to catch these in review, but between a bit of forgetfulness and a few tests going upstream without our seeing them has made it obvious that doesn't work.
Once something's documented (and the suitable bikeshedding has subsided), we can consider renaming existing tests if that seems worthwhile.
My feeling is we'll go for: - Kconfig name: ~_KUNIT_TEST - filename: ~-test.c
At least for the initial draft documentation, as those seem to be most common, but I think a thread on that would probably be the best place to add it. This would also be a good opportunity to document the "standard" KUnit boilerplate help text in the Kconfig options.
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1f4ab7a2bdee..72fcfe1f24a4 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2075,6 +2075,23 @@ config SYSCTL_KUNIT_TEST
If unsure, say N.
+config OVERFLOW_KUNIT_TEST
tristate "KUnit test for overflow" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
This builds the overflow KUnit tests.
KUnit tests run during boot and output the results to the debug log
in TAP format (http://testanything.org/). Only useful for kernel devs
running KUnit test harness and are not for inclusion into a production
build.
For more information on KUnit and unit tests in general please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
config LIST_KUNIT_TEST tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS depends on KUNIT
Regarding output:
[ 36.611358] TAP version 14 [ 36.611953] # Subtest: overflow [ 36.611954] 1..3 ... [ 36.622914] # overflow_calculation_test: s64: 21 arithmetic tests [ 36.624020] ok 1 - overflow_calculation_test ... [ 36.731096] # overflow_shift_test: ok: (s64)(0 << 63) == 0 [ 36.731840] ok 2 - overflow_shift_test ... [ 36.750294] kunit_try_catch: vmalloc: allocation failure: 18446744073709551615 bytes, mode:0xcc0(GFP_KERNEL), nodemask=(null),cpuset=/,mems_allowed=0 ... [ 36.805350] # overflow_allocation_test: devm_kzalloc detected saturation [ 36.807763] ok 3 - overflow_allocation_test [ 36.807765] ok 1 - overflow
A few things here....
Tim Bird has just sent out an RFC for a "KTAP" specification, which we'll hope to support in KUnit: https://lore.kernel.org/linux-kselftest/CY4PR13MB1175B804E31E502221BC8163FD8...
That's probably where we'll end up trying to hash out exactly what this format should be. Fortunately, I don't think any of these will require any per-test work, just changes to the KUnit implementation.
- On the outer test report, there is no "plan" line (I was expecting "1..1"). Technically it's optional, but it seems like the information is available. :)
There's work underway to support this, but it's hit a few minor snags: https://lkml.org/lkml/2020/2/27/2155
- The subtest should have its own "TAP version 14" line, and it should be using the diagnostic line prefix for the top-level test (this is what kselftest is doing).
Alas, TAP itself hasn't standardised subtests. Personally, I think it doesn't fundamentally matter which way we do this (I actually prefer the way we're doing it currently slightly), but converging with what kselftest does would be ideal.
- There is no way to distinguish top-level TAP output from kernel log lines. I think we should stick with the existing marker, which is "# ", so that kernel output has no way to be interpreted as TAP details -- unless it intentionally starts adding "#"s. ;)
At the moment, we're doing this in KUnit tool by stripping anything before "TAP version 14" (e.g., the timestamp), and then only including lines which parse correctly (are a test plan, result, or a diagnostic line beginning with '#'). This has worked pretty well thus far, and we do have the ability to get results from debugfs instead of the kernel log, which won't have the same problems.
It's worth considering, though, particularly since our parser should handle this anyway without any changes.
- There is no summary line (to help humans). For example, the kselftest API produces a final pass/fail report.
Currently, we're relying on the kunit.py script to produce this, but it shouldn't be impossible to add to the kernel, particularly once the "KUnit Executor" changes mentioned above land.
Taken together, I was expecting the output to be:
[ 36.611358] # TAP version 14 [ 36.611953] # 1..1 [ 36.611958] # # TAP version 14 [ 36.611954] # # 1..3 ... [ 36.622914] # # # overflow_calculation_test: s64: 21 arithmetic tests [ 36.624020] # # ok 1 - overflow_calculation_test ... [ 36.731096] # # # overflow_shift_test: ok: (s64)(0 << 63) == 0 [ 36.731840] # # ok 2 - overflow_shift_test ... [ 36.750294] kunit_try_catch: vmalloc: allocation failure: 18446744073709551615 bytes, mode:0xcc0(GFP_KERNEL), nodemask=(null),cpuset=/,mems_allowed=0 ... [ 36.805350] # # # overflow_allocation_test: devm_kzalloc detected saturation [ 36.807763] # # ok 3 - overflow_allocation_test [ 36.807763] # # # overflow: PASS [ 36.807765] # ok 1 - overflow [ 36.807767] # # kunit: PASS
But I assume there are threads on this that I've not read... :)
These discussions all seem to be coming to a head now, so this is probably just the kick we need to prioritise this a bit more. The KTAP thread hasn't covered all of these (particularly the subtest stuff) yet, so I confess I hadn't realised the extent of the divergence between KUnit and kselftest here.
Cheers, -- David
On Sat, Jun 13, 2020 at 02:51:17PM +0800, David Gow wrote:
Yeah, _KUNIT_TEST was what we've sort-of implicitly decided on for config names, but the documentation does need to happen.
That works for me. It still feels redundant, but all I really want is a standard name. :)
We haven't put as much thought into standardising the filenames much, though.
I actually find this to be much more important because it is more end-user-facing (i.e. in module naming, in build logs, in scripts, on filesystem, etc -- CONFIG is basically only present during kernel build). Trying to do any sorting or greping really needs a way to find all the kunit pieces.
Both of these are slightly complicated by cases like this where tests are being ported from a non-KUnit test to KUnit. There's a small argument there for trying to keep the name the same, though personally I suspect consistency is more important.
Understood. I think consistency is preferred too, especially since the driving reason to make this conversions is to gain consistency with the actual tests themselves.
Alas, the plans to document test coding style / conventions kept getting pre-empted: I'll drag it back up to the top of the to-do list, and see if we can't prioritise it. I think we'd hoped to be able to catch these in review, but between a bit of forgetfulness and a few tests going upstream without our seeing them has made it obvious that doesn't work.
Once something's documented (and the suitable bikeshedding has subsided), we can consider renaming existing tests if that seems worthwhile.
Yes please! :)
My feeling is we'll go for:
- Kconfig name: ~_KUNIT_TEST
As mentioned, I'm fine with this, but prefer ~_KUNIT
- filename: ~-test.c
I really don't like this. Several reasons:
- it does not distinguish it from other tests -- there is no way to identify kunit tests from non-kunit tests from directory listings, build log greps, etc.
- the current "common" naming has been with a leading "test", ignoring kunit, tools/, and samples/:
53 test_*.c 27 *_test.c 19 *[a-z0-9]test.c 19 selftest*.c 16 test-*.c 11 *-test.c 11 test[a-z0-9]*.c 8 *-tests.c 5 *-selftest.c 4 *_test_*.c 1 *_selftest_*.c 1 *_selftests.c
(these counts might be a bit off -- my eyes started to cross while constructing regexes)
- dashes are converted to _ in module names, leading to some confusion between .c file and .ko file.
I'd strongly prefer ~_kunit.c, but could live with _kunit_test.c (even though it's redundant).
At least for the initial draft documentation, as those seem to be most common, but I think a thread on that would probably be the best place to add it.
I'm ready! :) (Subject updated)
This would also be a good opportunity to document the "standard" KUnit boilerplate help text in the Kconfig options.
Ah yeah, good idea.
CONFIG_PM_QOS_KUNIT_TEST
On Mon, Jun 15, 2020 at 1:48 AM Kees Cook keescook@chromium.org wrote:
On Sat, Jun 13, 2020 at 02:51:17PM +0800, David Gow wrote:
Yeah, _KUNIT_TEST was what we've sort-of implicitly decided on for config names, but the documentation does need to happen.
That works for me. It still feels redundant, but all I really want is a standard name. :)
We haven't put as much thought into standardising the filenames much, though.
I actually find this to be much more important because it is more end-user-facing (i.e. in module naming, in build logs, in scripts, on filesystem, etc -- CONFIG is basically only present during kernel build). Trying to do any sorting or greping really needs a way to find all the kunit pieces.
Certainly this is more of an issue now we support building KUnit tests as modules, rather than having them always be built-in.
Having some halfway consistent config-name <-> filename <-> test suite name could be useful down the line, too. Unfortunately, not necessarily a 1:1 mapping, e.g.: - CONFIG_KUNIT_TEST compiles both kunit-test.c and string-stream-test.c - kunit-test.c has several test suites within it: kunit-try-catch-test, kunit-resource-test & kunit-log-test. - CONFIG_EXT4_KUNIT_TESTS currently only builds ext4-inode-test.c, but as the plural name suggests, might build others later. - CONFIG_SECURITY_APPARMOR_KUNIT_TEST doesn't actually have its own source file: the test is built into policy_unpack.c - &cetera
Indeed, this made me quickly look up the names of suites, and there are a few inconsistencies there: - most have "-test" as a suffix - some have "_test" as a suffix - some have no suffix
(I'm inclined to say that these don't need a suffix at all.)
Within test suites, we're also largely prefixing all of the tests with a suite name (even if it's not actually the specified suite name). For example, CONFIG_PM_QOS_KUNIT_TEST builds drivers/base/power/qos-test.c which contains a suite called "qos-kunit-test", with tests prefixed "freq_qos_test_". Some of this clearly comes down to wanting to namespace things a bit more ("qos-test" as a name could refer to a few things, I imagine), but specifying how to do so consistently could help.
Both of these are slightly complicated by cases like this where tests are being ported from a non-KUnit test to KUnit. There's a small argument there for trying to keep the name the same, though personally I suspect consistency is more important.
Understood. I think consistency is preferred too, especially since the driving reason to make this conversions is to gain consistency with the actual tests themselves.
We'll go with that until someone objects: from what I recall, this is largely what people have been doing anyway.
Alas, the plans to document test coding style / conventions kept getting pre-empted: I'll drag it back up to the top of the to-do list, and see if we can't prioritise it. I think we'd hoped to be able to catch these in review, but between a bit of forgetfulness and a few tests going upstream without our seeing them has made it obvious that doesn't work.
Once something's documented (and the suitable bikeshedding has subsided), we can consider renaming existing tests if that seems worthwhile.
Yes please! :)
I'll see if I can find time to draft something this week, then. No promises, but hopefully there'll at least be something to build on.
My feeling is we'll go for:
- Kconfig name: ~_KUNIT_TEST
As mentioned, I'm fine with this, but prefer ~_KUNIT
- filename: ~-test.c
I really don't like this. Several reasons:
it does not distinguish it from other tests -- there is no way to identify kunit tests from non-kunit tests from directory listings, build log greps, etc.
the current "common" naming has been with a leading "test", ignoring kunit, tools/, and samples/:
53 test_*.c 27 *_test.c 19 *[a-z0-9]test.c 19 selftest*.c 16 test-*.c 11 *-test.c 11 test[a-z0-9]*.c 8 *-tests.c 5 *-selftest.c 4 *_test_*.c 1 *_selftest_*.c 1 *_selftests.c
(these counts might be a bit off -- my eyes started to cross while constructing regexes)
- dashes are converted to _ in module names, leading to some confusion between .c file and .ko file.
I'd strongly prefer ~_kunit.c, but could live with _kunit_test.c (even though it's redundant).
I suggested -test.c largely because it's seemed to be most popular out of existing KUnit tests (and certainly out of the ones that already had-KUNIT_TEST config suffixes), but I definitely see your points. I think that trying to stick to a "common" test naming is a bit contradictory with trying to distinguish KUnit tests from other tests, and I'm leaning to the side of distinguishing them, so I definitely could be converted to ~_kunit.c.
Brendan, any thoughts?
At least for the initial draft documentation, as those seem to be most common, but I think a thread on that would probably be the best place to add it.
I'm ready! :) (Subject updated)
This would also be a good opportunity to document the "standard" KUnit boilerplate help text in the Kconfig options.
Ah yeah, good idea.
-- Kees Cook
Cheers, -- David
On Tue, 16 Jun 2020, David Gow wrote:
CONFIG_PM_QOS_KUNIT_TEST
On Mon, Jun 15, 2020 at 1:48 AM Kees Cook keescook@chromium.org wrote:
On Sat, Jun 13, 2020 at 02:51:17PM +0800, David Gow wrote:
Yeah, _KUNIT_TEST was what we've sort-of implicitly decided on for config names, but the documentation does need to happen.
That works for me. It still feels redundant, but all I really want is a standard name. :)
We haven't put as much thought into standardising the filenames much, though.
I actually find this to be much more important because it is more end-user-facing (i.e. in module naming, in build logs, in scripts, on filesystem, etc -- CONFIG is basically only present during kernel build). Trying to do any sorting or greping really needs a way to find all the kunit pieces.
Certainly this is more of an issue now we support building KUnit tests as modules, rather than having them always be built-in.
Having some halfway consistent config-name <-> filename <-> test suite name could be useful down the line, too. Unfortunately, not necessarily a 1:1 mapping, e.g.:
- CONFIG_KUNIT_TEST compiles both kunit-test.c and string-stream-test.c
- kunit-test.c has several test suites within it:
kunit-try-catch-test, kunit-resource-test & kunit-log-test.
- CONFIG_EXT4_KUNIT_TESTS currently only builds ext4-inode-test.c, but
as the plural name suggests, might build others later.
- CONFIG_SECURITY_APPARMOR_KUNIT_TEST doesn't actually have its own
source file: the test is built into policy_unpack.c
- &cetera
Indeed, this made me quickly look up the names of suites, and there are a few inconsistencies there:
- most have "-test" as a suffix
- some have "_test" as a suffix
- some have no suffix
(I'm inclined to say that these don't need a suffix at all.)
A good convention for module names - which I _think_ is along the lines of what Kees is suggesting - might be something like
<subsystem>[_<optional_test-area>]_kunit.ko
So for example
kunit_test -> test_kunit.ko string_stream_test.ko -> test_string_stream_kunit.ko kunit_example_test -> example_kunit.ko ext4_inode_test.ko -> ext4_inode_kunit.ko
For the kunit selftests, "selftest_" might be a better name than "test_", as the latter might encourage people to reintroduce a redundant "test" into their module name.
Within test suites, we're also largely prefixing all of the tests with a suite name (even if it's not actually the specified suite name). For example, CONFIG_PM_QOS_KUNIT_TEST builds drivers/base/power/qos-test.c which contains a suite called "qos-kunit-test", with tests prefixed "freq_qos_test_". Some of this clearly comes down to wanting to namespace things a bit more ("qos-test" as a name could refer to a few things, I imagine), but specifying how to do so consistently could help.
Could we add some definitions to help standardize this? For example, adding a "subsystem" field to "struct kunit_suite"?
So for the ext4 tests the "subsystem" would be "ext4" and the name "inode" would specify the test area within that subsystem. For the KUnit selftests, the subsystem would be "test"/"selftest". Logging could utilize the subsystem definition to allow test writers to use less redundant test names too. For example the suite name logged could be constructed from the subsystem + area values associated with the kunit_suite, and individual test names could be shown as the suite area + test_name.
Thanks!
Alan
On Tue, Jun 16, 2020 at 5:40 PM Alan Maguire alan.maguire@oracle.com wrote:
On Tue, 16 Jun 2020, David Gow wrote:
CONFIG_PM_QOS_KUNIT_TEST
On Mon, Jun 15, 2020 at 1:48 AM Kees Cook keescook@chromium.org wrote:
On Sat, Jun 13, 2020 at 02:51:17PM +0800, David Gow wrote:
Yeah, _KUNIT_TEST was what we've sort-of implicitly decided on for config names, but the documentation does need to happen.
That works for me. It still feels redundant, but all I really want is a standard name. :)
We haven't put as much thought into standardising the filenames much, though.
I actually find this to be much more important because it is more end-user-facing (i.e. in module naming, in build logs, in scripts, on filesystem, etc -- CONFIG is basically only present during kernel build). Trying to do any sorting or greping really needs a way to find all the kunit pieces.
Certainly this is more of an issue now we support building KUnit tests as modules, rather than having them always be built-in.
Having some halfway consistent config-name <-> filename <-> test suite name could be useful down the line, too. Unfortunately, not necessarily a 1:1 mapping, e.g.:
- CONFIG_KUNIT_TEST compiles both kunit-test.c and string-stream-test.c
- kunit-test.c has several test suites within it:
kunit-try-catch-test, kunit-resource-test & kunit-log-test.
- CONFIG_EXT4_KUNIT_TESTS currently only builds ext4-inode-test.c, but
as the plural name suggests, might build others later.
- CONFIG_SECURITY_APPARMOR_KUNIT_TEST doesn't actually have its own
source file: the test is built into policy_unpack.c
- &cetera
Indeed, this made me quickly look up the names of suites, and there are a few inconsistencies there:
- most have "-test" as a suffix
- some have "_test" as a suffix
- some have no suffix
(I'm inclined to say that these don't need a suffix at all.)
A good convention for module names - which I _think_ is along the lines of what Kees is suggesting - might be something like
<subsystem>[_<optional_test-area>]_kunit.ko
So for example
kunit_test -> test_kunit.ko string_stream_test.ko -> test_string_stream_kunit.ko kunit_example_test -> example_kunit.ko ext4_inode_test.ko -> ext4_inode_kunit.ko
For the kunit selftests, "selftest_" might be a better name than "test_", as the latter might encourage people to reintroduce a redundant "test" into their module name.
I quite like the idea of having the deeper "subsystem:suite:test" hierarchy here. "selftest" possibly would be a bit confusing against kselftest for the KUnit tests -- personally I'd probably go with "kunit", even if that introduces a redundant-looking kunit into the module name.
So, this could look something like: - Kconfig name: CONFIG_<subsystem>_<suite>_KUNIT_TEST, or very possibly CONFIG_<subsystem>_KUNIT_TEST(S?) -- we already have something like that for the ext4 tests. - Source filename: <suite>_kunit.c within a subsystem directory. (We probably don't need to enforce suites being in separate files, but whatever file does contain the tests should at least end "kunit.c") - Module filename: <subsystem>_<suite>_kunit.ko, or <subsystem>_kunit.ko if all suites are built into the same module (or there's just one suite for the whole subsystem) - Suite name: Either <subsystem>_<suite> or have a separate subsystem field in kunit_suite. If there's only one suite for the subsystem, set suite==subsystem - Test names: Personally, I'd kind-of like to not prefix these at all, as they're already part of the suite. If we do want to, though, prefix them with <subsystem> and <suite>.
Within test suites, we're also largely prefixing all of the tests with a suite name (even if it's not actually the specified suite name). For example, CONFIG_PM_QOS_KUNIT_TEST builds drivers/base/power/qos-test.c which contains a suite called "qos-kunit-test", with tests prefixed "freq_qos_test_". Some of this clearly comes down to wanting to namespace things a bit more ("qos-test" as a name could refer to a few things, I imagine), but specifying how to do so consistently could help.
Could we add some definitions to help standardize this? For example, adding a "subsystem" field to "struct kunit_suite"?
So for the ext4 tests the "subsystem" would be "ext4" and the name "inode" would specify the test area within that subsystem. For the KUnit selftests, the subsystem would be "test"/"selftest". Logging could utilize the subsystem definition to allow test writers to use less redundant test names too. For example the suite name logged could be constructed from the subsystem + area values associated with the kunit_suite, and individual test names could be shown as the suite area + test_name.
As above, I quite like this. If we were really brave, we could actually nest the results into subsystem:area/suite:test using the TAP subtests stuff. Generating the longer suite name may be easier on people manually reading large test logs, though, as they wouldn't have to scroll quite as far to determine what subsystem they're in. (Again, something that could be solved with tooling, and probably less of a problem for people accessing results through debugfs.)
Cheers, -- David
On Tue, Jun 16, 2020 at 9:21 PM David Gow davidgow@google.com wrote:
On Tue, Jun 16, 2020 at 5:40 PM Alan Maguire alan.maguire@oracle.com wrote:
On Tue, 16 Jun 2020, David Gow wrote:
CONFIG_PM_QOS_KUNIT_TEST
On Mon, Jun 15, 2020 at 1:48 AM Kees Cook keescook@chromium.org wrote:
On Sat, Jun 13, 2020 at 02:51:17PM +0800, David Gow wrote:
Yeah, _KUNIT_TEST was what we've sort-of implicitly decided on for config names, but the documentation does need to happen.
That works for me. It still feels redundant, but all I really want is a standard name. :)
We haven't put as much thought into standardising the filenames much, though.
I actually find this to be much more important because it is more end-user-facing (i.e. in module naming, in build logs, in scripts, on filesystem, etc -- CONFIG is basically only present during kernel build). Trying to do any sorting or greping really needs a way to find all the kunit pieces.
Certainly this is more of an issue now we support building KUnit tests as modules, rather than having them always be built-in.
Having some halfway consistent config-name <-> filename <-> test suite name could be useful down the line, too. Unfortunately, not necessarily a 1:1 mapping, e.g.:
- CONFIG_KUNIT_TEST compiles both kunit-test.c and string-stream-test.c
- kunit-test.c has several test suites within it:
kunit-try-catch-test, kunit-resource-test & kunit-log-test.
- CONFIG_EXT4_KUNIT_TESTS currently only builds ext4-inode-test.c, but
as the plural name suggests, might build others later.
- CONFIG_SECURITY_APPARMOR_KUNIT_TEST doesn't actually have its own
source file: the test is built into policy_unpack.c
- &cetera
Indeed, this made me quickly look up the names of suites, and there are a few inconsistencies there:
- most have "-test" as a suffix
- some have "_test" as a suffix
- some have no suffix
(I'm inclined to say that these don't need a suffix at all.)
A good convention for module names - which I _think_ is along the lines of what Kees is suggesting - might be something like
<subsystem>[_<optional_test-area>]_kunit.ko
So for example
kunit_test -> test_kunit.ko string_stream_test.ko -> test_string_stream_kunit.ko kunit_example_test -> example_kunit.ko ext4_inode_test.ko -> ext4_inode_kunit.ko
For the kunit selftests, "selftest_" might be a better name than "test_", as the latter might encourage people to reintroduce a redundant "test" into their module name.
I quite like the idea of having the deeper "subsystem:suite:test" hierarchy here. "selftest" possibly would be a bit confusing against kselftest for the KUnit tests -- personally I'd probably go with "kunit", even if that introduces a redundant-looking kunit into the module name.
Ditto. My first reaction was that it would create too much nesting and subsystems are a poorly defined notion in the Linux kernel; however, after thinking about it some, I think we are already doing what you are proposing with namespacing in identifier names. It makes sense to reflect that in test organization and reporting.
So, this could look something like:
- Kconfig name: CONFIG_<subsystem>_<suite>_KUNIT_TEST, or very
possibly CONFIG_<subsystem>_KUNIT_TEST(S?) -- we already have something like that for the ext4 tests.
I think the biggest question there is whether we go with the every suite gets its own config approach or all suites in a subsystem are turned on by a single config. I don't think there are enough examples to determine what the community would prefer, and I can see advantages and disadvantages to both.
- Source filename: <suite>_kunit.c within a subsystem directory. (We
probably don't need to enforce suites being in separate files, but whatever file does contain the tests should at least end "kunit.c")
I am cool with changing *-test.c to *-kunit.c. The *-test.c was a hold over from when everything was prefixed TEST_* instead of KUNIT_* (back in the early days of the RFC). I never liked naming everything KUNIT_* or -kunit- whatever; it felt kind of egotistical to me (there was also always a part of me that hoped I would come up with a better name than KUnit, but that ship sailed a long time ago). However, purely logically speaking, I think naming all KUnit tests *-kunit.c makes more sense than anything else.
One question: the AppArmor KUnit tests are #included into another .c file when the tests are selected. Should tests #included in this manner be -kunit.h or should all tests be -kunit.c?
- Module filename: <subsystem>_<suite>_kunit.ko, or
<subsystem>_kunit.ko if all suites are built into the same module (or there's just one suite for the whole subsystem)
- Suite name: Either <subsystem>_<suite> or have a separate subsystem
field in kunit_suite. If there's only one suite for the subsystem, set suite==subsystem
No strong feelings here.
- Test names: Personally, I'd kind-of like to not prefix these at all,
as they're already part of the suite. If we do want to, though, prefix them with <subsystem> and <suite>.
Eh, I did that to remain consistent with the kernel naming conventions, but I think those have diverged too. If maintainers are cool with it, I agree that the prefixes are redundant on tests and generally way too long.
Within test suites, we're also largely prefixing all of the tests with a suite name (even if it's not actually the specified suite name). For example, CONFIG_PM_QOS_KUNIT_TEST builds drivers/base/power/qos-test.c which contains a suite called "qos-kunit-test", with tests prefixed "freq_qos_test_". Some of this clearly comes down to wanting to namespace things a bit more ("qos-test" as a name could refer to a few things, I imagine), but specifying how to do so consistently could help.
Could we add some definitions to help standardize this? For example, adding a "subsystem" field to "struct kunit_suite"?
So for the ext4 tests the "subsystem" would be "ext4" and the name "inode" would specify the test area within that subsystem. For the KUnit selftests, the subsystem would be "test"/"selftest". Logging could utilize the subsystem definition to allow test writers to use less redundant test names too. For example the suite name logged could be constructed from the subsystem + area values associated with the kunit_suite, and individual test names could be shown as the suite area + test_name.
As above, I quite like this. If we were really brave, we could actually nest the results into subsystem:area/suite:test using the TAP subtests stuff. Generating the longer suite name may be easier on people manually reading large test logs, though, as they wouldn't have to scroll quite as far to determine what subsystem they're in. (Again, something that could be solved with tooling, and probably less of a problem for people accessing results through debugfs.)
SGTM
On Thu, Jun 18, 2020 at 01:27:55PM -0700, Brendan Higgins wrote:
I am cool with changing *-test.c to *-kunit.c. The *-test.c was a hold
I am fine with basically any decision as long as there's a single naming convention, *except* for this part. Dashes in source files create confusion for module naming. Separators should be underscores. This is a standing pet-peeve of mine, and while I certainly can't fix it universally in the kernel, we can at least avoid creating an entire subsystem that gets this wrong for all modules. :)
To illustrate:
$ modinfo dvb-bt8xx filename: .../kernel/drivers/media/pci/bt8xx/dvb-bt8xx.ko ... name: dvb_bt8xx ^ does not match the .ko file, nor source.
Primarily my issue is the disconnect between "dmesg" output and finding the source. It's not like, a huge deal, but it bugs me. :) As in:
$ strings drivers/media/pci/bt8xx/dvb-bt8xx.o | grep 'Init Error' 4dvb_bt8xx: or51211: Init Error - Can't Reset DVR (%i)
All this said, if there really is some good reason to use dashes, I will get over it. :P
(And now that I've had to say all this "out loud", I wonder if maybe I could actually fix this all at the root cause: KBUILD_MOD_NAME... it is sometimes used for identifiers, which is why it does the underscore replacement... I wonder if it could be split into "name" and "identifier"...)
I'm in the process of writing up some documentation for this now.
I hope to post a draft soon, but here's the overview of what's going in it: - Test filenames should be <suite>_kunit.c - (If the suite name is too long/namespaced, the source filename may have prefixes removed, so long as the module name doesn't) - Config names should end in _KUNIT_TEST, and follow the other existing conventions (re: help text, default values, etc) - Suite names should be fully-qualified/unambiguous across the whole kernel (i.e., include driver/subsystem names) - Possibly look at the path to the code being tested as inspiration for the suite name. - Use underscores as separators, and don't decorate test/suite names with "test" or "kunit".
Still thinking about: - Whether to formalise a "subsystem", or just have it implicitly part of the suite name. - Whether to talk about having multiple suites in one file and/or Kconfig entry.
On Fri, Jun 19, 2020 at 4:28 AM Brendan Higgins brendanhiggins@google.com wrote:
On Tue, Jun 16, 2020 at 9:21 PM David Gow davidgow@google.com wrote:
On Tue, Jun 16, 2020 at 5:40 PM Alan Maguire alan.maguire@oracle.com wrote:
On Tue, 16 Jun 2020, David Gow wrote:
CONFIG_PM_QOS_KUNIT_TEST
On Mon, Jun 15, 2020 at 1:48 AM Kees Cook keescook@chromium.org wrote:
On Sat, Jun 13, 2020 at 02:51:17PM +0800, David Gow wrote:
Yeah, _KUNIT_TEST was what we've sort-of implicitly decided on for config names, but the documentation does need to happen.
That works for me. It still feels redundant, but all I really want is a standard name. :)
We haven't put as much thought into standardising the filenames much, though.
I actually find this to be much more important because it is more end-user-facing (i.e. in module naming, in build logs, in scripts, on filesystem, etc -- CONFIG is basically only present during kernel build). Trying to do any sorting or greping really needs a way to find all the kunit pieces.
Certainly this is more of an issue now we support building KUnit tests as modules, rather than having them always be built-in.
Having some halfway consistent config-name <-> filename <-> test suite name could be useful down the line, too. Unfortunately, not necessarily a 1:1 mapping, e.g.:
- CONFIG_KUNIT_TEST compiles both kunit-test.c and string-stream-test.c
- kunit-test.c has several test suites within it:
kunit-try-catch-test, kunit-resource-test & kunit-log-test.
- CONFIG_EXT4_KUNIT_TESTS currently only builds ext4-inode-test.c, but
as the plural name suggests, might build others later.
- CONFIG_SECURITY_APPARMOR_KUNIT_TEST doesn't actually have its own
source file: the test is built into policy_unpack.c
- &cetera
Indeed, this made me quickly look up the names of suites, and there are a few inconsistencies there:
- most have "-test" as a suffix
- some have "_test" as a suffix
- some have no suffix
(I'm inclined to say that these don't need a suffix at all.)
A good convention for module names - which I _think_ is along the lines of what Kees is suggesting - might be something like
<subsystem>[_<optional_test-area>]_kunit.ko
So for example
kunit_test -> test_kunit.ko
string_stream_test.ko -> test_string_stream_kunit.ko
kunit_example_test -> example_kunit.ko
ext4_inode_test.ko -> ext4_inode_kunit.ko
For the kunit selftests, "selftest_" might be a better name than "test_", as the latter might encourage people to reintroduce a redundant "test" into their module name.
I quite like the idea of having the deeper "subsystem:suite:test" hierarchy here. "selftest" possibly would be a bit confusing against kselftest for the KUnit tests -- personally I'd probably go with "kunit", even if that introduces a redundant-looking kunit into the module name.
Ditto. My first reaction was that it would create too much nesting and subsystems are a poorly defined notion in the Linux kernel; however, after thinking about it some, I think we are already doing what you are proposing with namespacing in identifier names. It makes sense to reflect that in test organization and reporting.
The real trick for documenting this is, as you say, defining subsystem. I suspect we'll be okay leaving this up to common sense, and perhaps suggesting the MAINTAINERS entry or file path as places to pull subsystem names from. Having the subsystem be the name of the module being tested for drivers, for example, makes some sense, too.
So, this could look something like:
- Kconfig name: CONFIG_<subsystem>_<suite>_KUNIT_TEST, or very
possibly CONFIG_<subsystem>_KUNIT_TEST(S?) -- we already have something like that for the ext4 tests.
I think the biggest question there is whether we go with the every suite gets its own config approach or all suites in a subsystem are turned on by a single config. I don't think there are enough examples to determine what the community would prefer, and I can see advantages and disadvantages to both.
Yeah: it's really difficult to tell what makes more sense when most subsystems will start off with just one suite. I had been favouring the config-per-suite approach, but I can definitely see that getting really cluttered. Maybe the right thing is to permit both?
- Source filename: <suite>_kunit.c within a subsystem directory. (We
probably don't need to enforce suites being in separate files, but whatever file does contain the tests should at least end "kunit.c")
I am cool with changing *-test.c to *-kunit.c. The *-test.c was a hold over from when everything was prefixed TEST_* instead of KUNIT_* (back in the early days of the RFC). I never liked naming everything KUNIT_* or -kunit- whatever; it felt kind of egotistical to me (there was also always a part of me that hoped I would come up with a better name than KUnit, but that ship sailed a long time ago). However, purely logically speaking, I think naming all KUnit tests *-kunit.c makes more sense than anything else.
Let's go with _kunit.c as a suffix, then.
One question: the AppArmor KUnit tests are #included into another .c file when the tests are selected. Should tests #included in this manner be -kunit.h or should all tests be -kunit.c?
I don't think this matters as much -- such tests can't be built as modules. I'd lean slightly towards it being _kunit.c, if only because it's a simpler rule, and the current AppArmor tests are in a .c file anyway. I think we do want to explicitly call out that this is something to be avoided if possible, anyway.
- Module filename: <subsystem>_<suite>_kunit.ko, or
<subsystem>_kunit.ko if all suites are built into the same module (or there's just one suite for the whole subsystem)
- Suite name: Either <subsystem>_<suite> or have a separate subsystem
field in kunit_suite. If there's only one suite for the subsystem, set suite==subsystem
No strong feelings here.
- Test names: Personally, I'd kind-of like to not prefix these at all,
as they're already part of the suite. If we do want to, though, prefix them with <subsystem> and <suite>.
Eh, I did that to remain consistent with the kernel naming conventions, but I think those have diverged too. If maintainers are cool with it, I agree that the prefixes are redundant on tests and generally way too long.
Do you have a link to the conventions you're talking about?
Within test suites, we're also largely prefixing all of the tests with a suite name (even if it's not actually the specified suite name). For example, CONFIG_PM_QOS_KUNIT_TEST builds drivers/base/power/qos-test.c which contains a suite called "qos-kunit-test", with tests prefixed "freq_qos_test_". Some of this clearly comes down to wanting to namespace things a bit more ("qos-test" as a name could refer to a few things, I imagine), but specifying how to do so consistently could help.
Could we add some definitions to help standardize this? For example, adding a "subsystem" field to "struct kunit_suite"?
So for the ext4 tests the "subsystem" would be "ext4" and the name "inode" would specify the test area within that subsystem. For the KUnit selftests, the subsystem would be "test"/"selftest". Logging could utilize the subsystem definition to allow test writers to use less redundant test names too. For example the suite name logged could be constructed from the subsystem + area values associated with the kunit_suite, and individual test names could be shown as the suite area + test_name.
As above, I quite like this. If we were really brave, we could actually nest the results into subsystem:area/suite:test using the TAP subtests stuff. Generating the longer suite name may be easier on people manually reading large test logs, though, as they wouldn't have to scroll quite as far to determine what subsystem they're in. (Again, something that could be solved with tooling, and probably less of a problem for people accessing results through debugfs.)
SGTM
Cheers, -- David
On Thu, Jun 18, 2020 at 11:39 PM David Gow davidgow@google.com wrote: [...]
On Fri, Jun 19, 2020 at 4:28 AM Brendan Higgins brendanhiggins@google.com wrote:
On Tue, Jun 16, 2020 at 9:21 PM David Gow davidgow@google.com wrote:
On Tue, Jun 16, 2020 at 5:40 PM Alan Maguire alan.maguire@oracle.com wrote:
On Tue, 16 Jun 2020, David Gow wrote:
[...]
keescook@chromium.org wrote:
On Sat, Jun 13, 2020 at 02:51:17PM +0800, David Gow wrote:
[...]
- Test names: Personally, I'd kind-of like to not prefix these at all,
as they're already part of the suite. If we do want to, though, prefix them with <subsystem> and <suite>.
Eh, I did that to remain consistent with the kernel naming conventions, but I think those have diverged too. If maintainers are cool with it, I agree that the prefixes are redundant on tests and generally way too long.
Do you have a link to the conventions you're talking about?
A link, no. This is one of those undocumented rules that most people follow.
The rule is something like this:
Global identifiers should be named: <subsystem_name_n>_<subsystem_name_n-1>_..._<subsystem_name_1>_<subsystem_name_0>_foo.
For example, let's say I am working on Synopsys' DesignWare I2C master driver. The outermost namespace is i2c, and because DesignWare is long, we might prefix each function with i2c_dw_*.
It is a practice that is not universally maintained around the kernel, but it seems to be the most common method of namespacing aside from just randomly throwing characters together in a prefix that hasn't been used before.
Anyway, standardized or not, that is the convention I was trying to follow.
On Fri, 2020-06-12 at 15:36 -0700, Kees Cook wrote:
On Thu, Jun 11, 2020 at 06:55:01PM -0300, Vitor Massaru Iha wrote:
This adds the convertion of the runtime tests of check_*_overflow fuctions, from `lib/test_overflow.c`to KUnit tests.
The log similar to the one seen in dmesg running test_overflow can be seen in `test.log`.
Signed-off-by: Vitor Massaru Iha vitor@massaru.org
lib/Kconfig.debug | 17 ++ lib/Makefile | 1 + lib/kunit_overflow_test.c | 590 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 608 insertions(+) create mode 100644 lib/kunit_overflow_test.c
What tree is this based on? I can't apply it to v5.7, linux-next, nor Linus's latest. I've fixed it up to apply to linux-next for now. :)
Looking at linux-next, though, I am reminded of my agony over naming:
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += kunit_overflow_test.o
*-test test_* *_test
This has to get fixed now, and the naming convention needs to be documented. For old tests, the preferred naming was test_*. For kunit, I think it should be kunit_* (and no trailing _test; that's redundant).
For this bikeshed, I think it should be kunit_overflow.c
For the CONFIG name, it seems to be suggested in docs to be *_KUNIT_TEST:
... menuconfig). From there, you can enable any KUnit tests you want: they usually have config options ending in ``_KUNIT_TEST``. ...
I think much stronger language needs to be added to "Writing your first test" (which actually recommends the wrong thing: "config MISC_EXAMPLE_TEST"). And then doesn't specify a module file name, though it hints at one:
... obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o ...
So, please, let's get this documented: we really really need a single naming convention for these.
For Kconfig in the tree, I see:
drivers/base/Kconfig:config PM_QOS_KUNIT_TEST drivers/base/test/Kconfig:config KUNIT_DRIVER_PE_TEST fs/ext4/Kconfig:config EXT4_KUNIT_TESTS lib/Kconfig.debug:config SYSCTL_KUNIT_TEST lib/Kconfig.debug:config OVERFLOW_KUNIT_TEST lib/Kconfig.debug:config LIST_KUNIT_TEST lib/Kconfig.debug:config LINEAR_RANGES_TEST lib/kunit/Kconfig:menuconfig KUNIT lib/kunit/Kconfig:config KUNIT_DEBUGFS lib/kunit/Kconfig:config KUNIT_TEST lib/kunit/Kconfig:config KUNIT_EXAMPLE_TEST lib/kunit/Kconfig:config KUNIT_ALL_TESTS
Which is:
*_KUNIT_TEST KUNIT_*_TEST KUNIT_*_TESTS *_TEST
Nooooo. ;)
If it should all be *_KUNIT_TEST, let's do that. I think just *_KUNIT would be sufficient (again, adding the word "test" to "kunit" is redundant). And it absolutely should not be a prefix, otherwise it'll get sorted away from the thing it's named after. So my preference is here would be CONFIG_OVERFLOW_KUNIT. (Yes the old convention was CONFIG_TEST_*, but those things tended to be regression tests, not unit tests.)
Please please, can we fix this before we add anything more?
Sure, I'll rewrite with _KUNIT_TEST, as David Gow suggested in the next emails.
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1f4ab7a2bdee..72fcfe1f24a4 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2075,6 +2075,23 @@ config SYSCTL_KUNIT_TEST
If unsure, say N.
+config OVERFLOW_KUNIT_TEST
- tristate "KUnit test for overflow" if !KUNIT_ALL_TESTS
- depends on KUNIT
- default KUNIT_ALL_TESTS
- help
This builds the overflow KUnit tests.
KUnit tests run during boot and output the results to the
debug log
in TAP format (http://testanything.org/). Only useful for
kernel devs
running KUnit test harness and are not for inclusion into a
production
build.
For more information on KUnit and unit tests in general
please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
config LIST_KUNIT_TEST tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS depends on KUNIT
Regarding output:
[ 36.611358] TAP version 14 [ 36.611953] # Subtest: overflow [ 36.611954] 1..3 ... [ 36.622914] # overflow_calculation_test: s64: 21 arithmetic tests [ 36.624020] ok 1 - overflow_calculation_test ... [ 36.731096] # overflow_shift_test: ok: (s64)(0 << 63) == 0 [ 36.731840] ok 2 - overflow_shift_test ... [ 36.750294] kunit_try_catch: vmalloc: allocation failure: 18446744073709551615 bytes, mode:0xcc0(GFP_KERNEL), nodemask=(null),cpuset=/,mems_allowed=0 ... [ 36.805350] # overflow_allocation_test: devm_kzalloc detected saturation [ 36.807763] ok 3 - overflow_allocation_test [ 36.807765] ok 1 - overflow
A few things here....
- On the outer test report, there is no "plan" line (I was expecting "1..1"). Technically it's optional, but it seems like the
information is available. :)
- The subtest should have its own "TAP version 14" line, and it
should be using the diagnostic line prefix for the top-level test (this is what kselftest is doing).
There is no way to distinguish top-level TAP output from kernel log lines. I think we should stick with the existing marker, which is "# ", so that kernel output has no way to be interpreted as TAP details -- unless it intentionally starts adding "#"s. ;)
There is no summary line (to help humans). For example, the
kselftest API produces a final pass/fail report.
Taken together, I was expecting the output to be:
[ 36.611358] # TAP version 14 [ 36.611953] # 1..1 [ 36.611958] # # TAP version 14 [ 36.611954] # # 1..3 ... [ 36.622914] # # # overflow_calculation_test: s64: 21 arithmetic tests [ 36.624020] # # ok 1 - overflow_calculation_test ... [ 36.731096] # # # overflow_shift_test: ok: (s64)(0 << 63) == 0 [ 36.731840] # # ok 2 - overflow_shift_test ... [ 36.750294] kunit_try_catch: vmalloc: allocation failure: 18446744073709551615 bytes, mode:0xcc0(GFP_KERNEL), nodemask=(null),cpuset=/,mems_allowed=0 ... [ 36.805350] # # # overflow_allocation_test: devm_kzalloc detected saturation [ 36.807763] # # ok 3 - overflow_allocation_test [ 36.807763] # # # overflow: PASS [ 36.807765] # ok 1 - overflow [ 36.807767] # # kunit: PASS
But I assume there are threads on this that I've not read... :)
Now, speaking to actual behavior, I love it. :) All the tests are there (and then some -- noted below).
diff --git a/lib/Makefile b/lib/Makefile index 685aee60de1d..a3290adc0019 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -309,3 +309,4 @@ obj-$(CONFIG_OBJAGG) += objagg.o
# KUnit tests obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o +obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += kunit_overflow_test.o diff --git a/lib/kunit_overflow_test.c b/lib/kunit_overflow_test.c new file mode 100644 index 000000000000..c3eb8f0d3d50 --- /dev/null +++ b/lib/kunit_overflow_test.c
A lot of this file is unchanged, so I would suggest doing this as a "git mv lib/test_overflow.c lib/kunit_overflow.c" and then put the changes into the file. Then it should be easier to track git history, etc.
Without this, it's a lot harder to review this patch since I'm just looking at a 590 new lines. ;) Really, it's a diff (which I'll paste here for the code review...)
Sure, I'll do it. I didn't know if the runtime tests were going to stay.
--- a/lib/test_overflow.c 2020-06-12 14:07:11.161999209 -0700 +++ b/lib/kunit_overflow_test.c 2020-06-12 14:07:27.950183116 -0700 @@ -1,17 +1,18 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT +// SPDX-License-Identifier: GPL-2.0
Please don't change the license.
Sure I'll fix it.
+/*
- This code is the conversion of the overflow test in runtime to
KUnit tests.
- */
This can be left off.
Sure I'll fix it.
/*
- Test cases for arithmetic overflow checks.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <kunit/test.h> #include <linux/device.h> #include <linux/init.h> -#include <linux/kernel.h> #include <linux/mm.h> -#include <linux/module.h> #include <linux/overflow.h> -#include <linux/slab.h> -#include <linux/types.h> #include <linux/vmalloc.h> #define DEFINE_TEST_ARRAY(t) \ @@ -19,7 +20,7 @@ t a, b; \ t sum, diff, prod; \ bool s_of, d_of, p_of; \
- } t ## _tests[] __initconst
- } t ## _tests[]
Why drop the __initconst?
I removed __initconst because of these warnings below, as it is used for the kernel during the module initialization, and I do not use the module initialization in this tests. Does this have any side effects in these tests?
WARNING: modpost: vmlinux.o(.text.unlikely+0x131b7): Section mismatch in reference from the function test_s8_overflow() to the variable .init.rodata:s8_tests The function test_s8_overflow() references the variable __initconst s8_tests. This is often because test_s8_overflow lacks a __initconst annotation or the annotation of s8_tests is wrong.
DEFINE_TEST_ARRAY(u8) = { {0, 0, 0, 0, 0, false, false, false}, @@ -44,6 +45,7 @@ {128, 128, 0, 0, 0, true, false, true}, {123, 234, 101, 145, 110, true, true, true}, };
Style nit: I'd like to avoid the blank lines here.
DEFINE_TEST_ARRAY(u16) = { {0, 0, 0, 0, 0, false, false, false}, {1, 1, 2, 0, 1, false, false, false}, @@ -66,6 +68,7 @@ {123, 234, 357, 65425, 28782, false, true, false}, {1234, 2345, 3579, 64425, 10146, false, true, true}, };
DEFINE_TEST_ARRAY(u32) = { {0, 0, 0, 0, 0, false, false, false}, {1, 1, 2, 0, 1, false, false, false}, @@ -163,6 +166,7 @@ {S16_MIN, S16_MIN, 0, 0, 0, true, false, true}, {S16_MAX, S16_MAX, -2, 0, 1, true, false, true}, };
DEFINE_TEST_ARRAY(s32) = { {0, 0, 0, 0, 0, false, false, false}, @@ -186,6 +190,7 @@ {S32_MIN, S32_MIN, 0, 0, 0, true, false, true}, {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, };
DEFINE_TEST_ARRAY(s64) = { {0, 0, 0, 0, 0, false, false, false}, @@ -215,254 +220,243 @@ {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, }; -#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
- t _r; \
- bool _of; \
\
- _of = check_ ## op ## _overflow(a, b, &_r); \
- if (_of != of) { \
pr_warn("expected "fmt" "sym" "fmt \
" to%s overflow (type %s)\n", \
a, b, of ? "" : " not", #t); \
err = 1; \
- } \
- if (_r != r) { \
pr_warn("expected "fmt" "sym" "fmt" == " \
fmt", got "fmt" (type %s)\n", \
a, b, r, _r, #t); \
err = 1; \
- } \
+#define check_one_op(test, t, fmt, op, sym, a, b, r, of) do { \
- t _r;
\
- bool _of; \
\
- _of = check_ ## op ## _overflow(a, b, &_r); \
- if (_of != of) { \
KUNIT_FAIL(test, "Expected "fmt" "sym" "fmt \
" to%s overflow (type %s)\n",
\
a, b, of ? "" : " not", #t);
\
- } \
- if (_r != r) {
\
KUNIT_FAIL(test, "Expected "fmt" "sym" "fmt" == " \
fmt", got "fmt" (type %s)\n",
\
a, b, r, _r, #t); \
- } \
} while (0)
The trailing \ makes this more awkward to diff, but one thing I'm not quite seeing is why "test" needs to be added. It's not a variable in these macros. i.e. it is used literally:
#define DEFINE_TEST_FUNC(test, t, fmt) \ static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \ { \ check_one_op(test, t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
...
Only callers of the do_test_*() would need to be changed. I think all of these macros just need the pr_warn/KUNIT_FAIL changes, and the function prototypes updated to include struct kunit *test.
-#define DEFINE_TEST_FUNC(t, fmt) \ -static int __init do_test_ ## t(const struct test_ ## t *p) \ -{ \
- int err = 0;
\
\
- check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of);
\
- check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of);
\
- check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of);
\
- check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of);
\
- check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of);
\
\
- return err; \
-} \
\ -static int __init test_ ## t ## _overflow(void) { \
- int err = 0;
\
- unsigned i; \
\
- pr_info("%-3s: %zu arithmetic tests\n", #t, \
ARRAY_SIZE(t ## _tests)); \
- for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i)
\
err |= do_test_ ## t(&t ## _tests[i]);
\
- return err; \
+#define DEFINE_TEST_FUNC(test, t, fmt) \ +static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \ +{ \
- check_one_op(test, t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
- check_one_op(test, t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
- check_one_op(test, t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
- check_one_op(test, t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
- check_one_op(test, t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
+} \
Then these all only need the prototype on the actual function changed.
\
+static void test_ ## t ## _overflow(struct kunit *test) \ +{ \
- unsigned i;
\
\
- kunit_warn(test, "%-3s: %zu arithmetic tests\n", #t, \
ARRAY_SIZE(t ## _tests));
\
- for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
do_test_ ## t(test, &t ## _tests[i]); \
} -DEFINE_TEST_FUNC(u8, "%d"); -DEFINE_TEST_FUNC(s8, "%d"); -DEFINE_TEST_FUNC(u16, "%d"); -DEFINE_TEST_FUNC(s16, "%d"); -DEFINE_TEST_FUNC(u32, "%u"); -DEFINE_TEST_FUNC(s32, "%d"); +DEFINE_TEST_FUNC(test, u8, "%d"); +DEFINE_TEST_FUNC(test, s8, "%d"); +DEFINE_TEST_FUNC(test, u16, "%d"); +DEFINE_TEST_FUNC(test, s16, "%d"); +DEFINE_TEST_FUNC(test, u32, "%u"); +DEFINE_TEST_FUNC(test, s32, "%d"); #if BITS_PER_LONG == 64 -DEFINE_TEST_FUNC(u64, "%llu"); -DEFINE_TEST_FUNC(s64, "%lld"); +DEFINE_TEST_FUNC(test, u64, "%llu"); +DEFINE_TEST_FUNC(test, s64, "%lld"); #endif
And all the actual uses of the macros can be left unchanged.
-static int __init test_overflow_calculation(void) +static void overflow_calculation_test(struct kunit *test) {
- int err = 0;
- err |= test_u8_overflow();
- err |= test_s8_overflow();
- err |= test_u16_overflow();
- err |= test_s16_overflow();
- err |= test_u32_overflow();
- err |= test_s32_overflow();
- test_u8_overflow(test);
- test_s8_overflow(test);
- test_s8_overflow(test);
The s8 test got added twice here accidentally.
- test_u16_overflow(test);
- test_s16_overflow(test);
- test_u32_overflow(test);
- test_s32_overflow(test);
#if BITS_PER_LONG == 64
- err |= test_u64_overflow();
- err |= test_s64_overflow();
- test_u64_overflow(test);
- test_s64_overflow(test);
#endif
- return err;
}
I think it might be nice to keep the "err" vars around for a final report line (maybe per test)? (It would keep the diff churn way lower, too...)
I will correct these other suggestions.
So, yes! I like it. :) Most of my comments here have nothing to do with specifically this patch (sorry)! But I'd love to see a v2.
Thanks for doing this! I'm glad to see more TAP output. :)
Thanks Kees, I'm learning a lot from you, and as I said privately with Brendan, I've never seen so much macro in a code. I learned a lot from it.
On Mon, Jun 15, 2020 at 01:30:42PM -0300, Vitor Massaru Iha wrote:
On Fri, 2020-06-12 at 15:36 -0700, Kees Cook wrote:
Why drop the __initconst?
I removed __initconst because of these warnings below, as it is used for the kernel during the module initialization, and I do not use the module initialization in this tests. Does this have any side effects in these tests?
WARNING: modpost: vmlinux.o(.text.unlikely+0x131b7): Section mismatch in reference from the function test_s8_overflow() to the variable .init.rodata:s8_tests The function test_s8_overflow() references the variable __initconst s8_tests. This is often because test_s8_overflow lacks a __initconst annotation or the annotation of s8_tests is wrong.
Ah, right. I assume there are build modes where the tests don't only live in the __init section any more due to how Kunit does its runtime? (Before all the tests ran from __init via module_init(), IIRC.)
So, yes! I like it. :) Most of my comments here have nothing to do with specifically this patch (sorry)! But I'd love to see a v2.
Thanks for doing this! I'm glad to see more TAP output. :)
Thanks Kees, I'm learning a lot from you, and as I said privately with Brendan, I've never seen so much macro in a code. I learned a lot from it.
Heh, yes, for that I must beg forgiveness. ;) The macros are rather wild, but it seemed the best way to avoid a ton of cut/paste code duplication.
On Fri, Jun 12, 2020 at 5:55 AM Vitor Massaru Iha vitor@massaru.org wrote:
This adds the convertion of the runtime tests of check_*_overflow fuctions, from `lib/test_overflow.c`to KUnit tests.
Nit: couple of minor typos here: convertion -> conversion, and fuctions -> functions
The log similar to the one seen in dmesg running test_overflow can be seen in `test.log`.
Signed-off-by: Vitor Massaru Iha vitor@massaru.org
I've tested that this builds and runs on my system, and it all seems to be working fine!
Tested-by: David Gow davidgow@google.com
lib/Kconfig.debug | 17 ++ lib/Makefile | 1 + lib/kunit_overflow_test.c | 590 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 608 insertions(+) create mode 100644 lib/kunit_overflow_test.c
Echoing what Brendan and Kees mentioned, it'd be nicer to have this replace the existing test, both because there's no need for redundant tests (one will get out-of-date), and so that we can have a nice diff showing the changes made as part of the conversion.
Cheers, -- David
On Sat, 2020-06-13 at 14:56 +0800, David Gow wrote:
On Fri, Jun 12, 2020 at 5:55 AM Vitor Massaru Iha vitor@massaru.org wrote:
This adds the convertion of the runtime tests of check_*_overflow fuctions, from `lib/test_overflow.c`to KUnit tests.
Nit: couple of minor typos here: convertion -> conversion, and fuctions -> functions
The log similar to the one seen in dmesg running test_overflow can be seen in `test.log`.
Signed-off-by: Vitor Massaru Iha vitor@massaru.org
I've tested that this builds and runs on my system, and it all seems to be working fine!
Tested-by: David Gow davidgow@google.com
lib/Kconfig.debug | 17 ++ lib/Makefile | 1 + lib/kunit_overflow_test.c | 590 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 608 insertions(+) create mode 100644 lib/kunit_overflow_test.c
Echoing what Brendan and Kees mentioned, it'd be nicer to have this replace the existing test, both because there's no need for redundant tests (one will get out-of-date), and so that we can have a nice diff showing the changes made as part of the conversion.
Cheers, -- David
Thank you David, I will fix your suggestions.
linux-kselftest-mirror@lists.linaro.org