Convert this unit test to a KUnit test.
Signed-off-by: Tamir Duberstein <tamird@gmail.com> --- I tested this using: $ tools/testing/kunit/kunit.py run --arch arm64 --make_options LLVM=1 ww_mutex
On success: ; [12:48:16] ================== ww_mutex (5 subtests) =================== ; [12:48:16] ======================= test_mutex ======================== ; [12:48:16] [PASSED] flags=0 ; [12:48:16] [PASSED] flags=1 ; [12:48:16] [PASSED] flags=2 ; [12:48:16] [PASSED] flags=3 ; [12:48:16] [PASSED] flags=4 ; [12:48:17] [PASSED] flags=5 ; [12:48:17] [PASSED] flags=6 ; [12:48:17] [PASSED] flags=7 ; [12:48:17] =================== [PASSED] test_mutex ==================== ; [12:48:17] ========================= test_aa ========================= ; [12:48:17] [PASSED] lock ; [12:48:17] [PASSED] trylock ; [12:48:17] ===================== [PASSED] test_aa ===================== ; [12:48:17] ======================== test_abba ======================== ; [12:48:17] [PASSED] trylock=0,resolve=0 ; [12:48:17] [PASSED] trylock=1,resolve=1 ; [12:48:17] [PASSED] trylock=0,resolve=0 ; [12:48:17] [PASSED] trylock=1,resolve=1 ; [12:48:17] ==================== [PASSED] test_abba ==================== ; [12:48:17] ======================= test_cycle ======================== ; [12:48:17] [PASSED] nthreads=2 ; [12:48:17] =================== [PASSED] test_cycle ==================== ; [12:48:21] ========================= stress ========================== ; [12:48:21] [PASSED] nlocks=16,nthreads_per_cpu=2,flags=1 ; [12:48:23] [PASSED] nlocks=16,nthreads_per_cpu=2,flags=2 ; [12:48:23] [PASSED] nlocks=2046,nthreads_per_cpu=3,flags=7 ; [12:48:23] ===================== [PASSED] stress ====================== ; [12:48:23] ==================== [PASSED] ww_mutex ===================== ; [12:48:23] ============================================================ ; [12:48:23] Testing complete. Ran 18 tests: passed: 18
On failure: --- kernel/locking/Makefile | 3 +- .../locking/{test-ww_mutex.c => ww_mutex_kunit.c} | 311 ++++++++++----------- lib/Kconfig.debug | 12 +- tools/testing/selftests/locking/ww_mutex.sh | 19 -- 4 files changed, 163 insertions(+), 182 deletions(-)
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 0db4093d17b8..26af3e88fffa 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -30,5 +30,6 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o -obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o obj-$(CONFIG_LOCK_EVENT_COUNTS) += lock_events.o + +obj-$(CONFIG_WW_MUTEX_KUNIT_TEST) += ww_mutex_kunit.o diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/ww_mutex_kunit.c similarity index 67% rename from kernel/locking/test-ww_mutex.c rename to kernel/locking/ww_mutex_kunit.c index bcb1b9fea588..ba89a4a702f9 100644 --- a/kernel/locking/test-ww_mutex.c +++ b/kernel/locking/ww_mutex_kunit.c @@ -3,10 +3,10 @@ * Module-based API test facility for ww_mutexes */
-#include <linux/kernel.h> - +#include <kunit/test.h> #include <linux/completion.h> #include <linux/delay.h> +#include <linux/kernel.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/prandom.h> @@ -54,12 +54,39 @@ static void test_mutex_work(struct work_struct *work) ww_mutex_unlock(&mtx->mutex); }
-static int __test_mutex(unsigned int flags) +static const unsigned int *gen_range( + unsigned int *storage, + const unsigned int min, + const unsigned int max, + const int *prev) +{ + if (prev != NULL) { + if (*prev >= max) + return NULL; + *storage = *prev + 1; + } else { + *storage = min; + } + return storage; +} + +static const void *test_mutex_gen_params(const void *prev, char *desc) +{ + static unsigned int storage; + const unsigned int *next = gen_range(&storage, 0, __TEST_MTX_LAST - 1, prev); + + if (next != NULL) + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "flags=%x", *next); + return next; +} + +static void test_mutex(struct kunit *test) { #define TIMEOUT (HZ / 16) + const unsigned int *param = test->param_value; + const unsigned int flags = *param; struct test_mutex mtx; struct ww_acquire_ctx ctx; - int ret;
ww_mutex_init(&mtx.mutex, &ww_class); if (flags & TEST_MTX_CTX) @@ -79,53 +106,42 @@ static int __test_mutex(unsigned int flags) if (flags & TEST_MTX_SPIN) { unsigned long timeout = jiffies + TIMEOUT;
- ret = 0; do { if (completion_done(&mtx.done)) { - ret = -EINVAL; + KUNIT_FAIL(test, "mutual exclusion failure"); break; } cond_resched(); } while (time_before(jiffies, timeout)); } else { - ret = wait_for_completion_timeout(&mtx.done, TIMEOUT); + KUNIT_EXPECT_EQ(test, wait_for_completion_timeout(&mtx.done, TIMEOUT), 0); } ww_mutex_unlock(&mtx.mutex); if (flags & TEST_MTX_CTX) ww_acquire_fini(&ctx);
- if (ret) { - pr_err("%s(flags=%x): mutual exclusion failure\n", - __func__, flags); - ret = -EINVAL; - } - flush_work(&mtx.work); destroy_work_on_stack(&mtx.work); - return ret; #undef TIMEOUT }
-static int test_mutex(void) +static const void *test_aa_gen_params(const void *prev, char *desc) { - int ret; - int i; + static unsigned int storage; + const unsigned int *next = gen_range(&storage, 0, 1, prev);
- for (i = 0; i < __TEST_MTX_LAST; i++) { - ret = __test_mutex(i); - if (ret) - return ret; - } - - return 0; + if (next != NULL) + snprintf(desc, KUNIT_PARAM_DESC_SIZE, *next ? "trylock" : "lock"); + return next; }
-static int test_aa(bool trylock) +static void test_aa(struct kunit *test) { + const unsigned int *param = test->param_value; + const bool trylock = *param; struct ww_mutex mutex; struct ww_acquire_ctx ctx; int ret; - const char *from = trylock ? "trylock" : "lock";
ww_mutex_init(&mutex, &ww_class); ww_acquire_init(&ctx, &ww_class); @@ -133,46 +149,42 @@ static int test_aa(bool trylock) if (!trylock) { ret = ww_mutex_lock(&mutex, &ctx); if (ret) { - pr_err("%s: initial lock failed!\n", __func__); + KUNIT_FAIL(test, "initial lock failed, ret=%d", ret); goto out; } } else { ret = !ww_mutex_trylock(&mutex, &ctx); if (ret) { - pr_err("%s: initial trylock failed!\n", __func__); + KUNIT_FAIL(test, "initial trylock failed, ret=%d", ret); goto out; } }
- if (ww_mutex_trylock(&mutex, NULL)) { - pr_err("%s: trylocked itself without context from %s!\n", __func__, from); + ret = ww_mutex_trylock(&mutex, NULL); + if (ret) { + KUNIT_FAIL(test, "trylocked itself without context, ret=%d", ret); ww_mutex_unlock(&mutex); - ret = -EINVAL; goto out; }
- if (ww_mutex_trylock(&mutex, &ctx)) { - pr_err("%s: trylocked itself with context from %s!\n", __func__, from); + ret = ww_mutex_trylock(&mutex, &ctx); + if (ret) { + KUNIT_FAIL(test, "trylocked itself with context, ret=%d", ret); ww_mutex_unlock(&mutex); - ret = -EINVAL; goto out; }
ret = ww_mutex_lock(&mutex, &ctx); if (ret != -EALREADY) { - pr_err("%s: missed deadlock for recursing, ret=%d from %s\n", - __func__, ret, from); + KUNIT_FAIL(test, "missed deadlock for recursing, ret=%d", ret); if (!ret) ww_mutex_unlock(&mutex); - ret = -EINVAL; goto out; }
ww_mutex_unlock(&mutex); - ret = 0; out: ww_acquire_fini(&ctx); - return ret; }
struct test_abba { @@ -217,11 +229,28 @@ static void test_abba_work(struct work_struct *work) abba->result = err; }
-static int test_abba(bool trylock, bool resolve) +static const void *test_abba_gen_params(const void *prev, char *desc) { + static unsigned int storage; + const unsigned int *next = gen_range(&storage, 0b00, 0b11, prev); + + if (next != NULL) { + const bool trylock = *next & 0b01 >> 0; + const bool resolve = *next & 0b10 >> 1; + + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "trylock=%d,resolve=%d", trylock, resolve); + } + return next; +} + +static void test_abba(struct kunit *test) +{ + const unsigned int *param = test->param_value; + const bool trylock = *param & 0b01 >> 0; + const bool resolve = *param & 0b10 >> 1; struct test_abba abba; struct ww_acquire_ctx ctx; - int err, ret; + int err;
ww_mutex_init(&abba.a_mutex, &ww_class); ww_mutex_init(&abba.b_mutex, &ww_class); @@ -259,21 +288,17 @@ static int test_abba(bool trylock, bool resolve) flush_work(&abba.work); destroy_work_on_stack(&abba.work);
- ret = 0; if (resolve) { if (err || abba.result) { - pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n", - __func__, err, abba.result); - ret = -EINVAL; + KUNIT_FAIL(test, "failed to resolve ABBA deadlock, A err=%d, B err=%d", + err, abba.result); } } else { if (err != -EDEADLK && abba.result != -EDEADLK) { - pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n", - __func__, err, abba.result); - ret = -EINVAL; + KUNIT_FAIL(test, "missed ABBA deadlock, A err=%d, B err=%d", + err, abba.result); } } - return ret; }
struct test_cycle { @@ -314,15 +339,25 @@ static void test_cycle_work(struct work_struct *work) cycle->result = err ?: erra; }
-static int __test_cycle(unsigned int nthreads) +static const void *test_cycle_gen_params(const void *prev, char *desc) { + static unsigned int storage; + const unsigned int *next = gen_range(&storage, 2, num_online_cpus(), prev); + + if (next != NULL) + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "nthreads=%d", *next); + return next; +} + +static void test_cycle(struct kunit *test) +{ + const unsigned int *param = test->param_value; + const unsigned int nthreads = *param; struct test_cycle *cycles; unsigned int n, last = nthreads - 1; - int ret;
- cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL); - if (!cycles) - return -ENOMEM; + cycles = kunit_kmalloc_array(test, nthreads, sizeof(*cycles), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, cycles);
for (n = 0; n < nthreads; n++) { struct test_cycle *cycle = &cycles[n]; @@ -348,41 +383,24 @@ static int __test_cycle(unsigned int nthreads)
flush_workqueue(wq);
- ret = 0; for (n = 0; n < nthreads; n++) { struct test_cycle *cycle = &cycles[n];
if (!cycle->result) continue;
- pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n", - n, nthreads, cycle->result); - ret = -EINVAL; + KUNIT_FAIL(test, "cyclic deadlock not resolved, ret[%d/%d] = %d", + n, nthreads, cycle->result); break; }
for (n = 0; n < nthreads; n++) ww_mutex_destroy(&cycles[n].a_mutex); - kfree(cycles); - return ret; -} - -static int test_cycle(unsigned int ncpus) -{ - unsigned int n; - int ret; - - for (n = 2; n <= ncpus + 1; n++) { - ret = __test_cycle(n); - if (ret) - return ret; - } - - return 0; }
struct stress { struct work_struct work; + struct kunit *test; struct ww_mutex *locks; unsigned long timeout; int nlocks; @@ -401,12 +419,12 @@ static inline u32 prandom_u32_below(u32 ceil) return ret; }
-static int *get_random_order(int count) +static int *get_random_order(struct kunit *test, int count) { int *order; int n, r;
- order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); + order = kunit_kmalloc_array(test, count, sizeof(*order), GFP_KERNEL); if (!order) return order;
@@ -435,7 +453,7 @@ static void stress_inorder_work(struct work_struct *work) struct ww_acquire_ctx ctx; int *order;
- order = get_random_order(nlocks); + order = get_random_order(stress->test, nlocks); if (!order) return;
@@ -472,13 +490,10 @@ static void stress_inorder_work(struct work_struct *work)
ww_acquire_fini(&ctx); if (err) { - pr_err_once("stress (%s) failed with %d\n", - __func__, err); + KUNIT_FAIL(stress->test, "lock[%d] failed, err=%d", n, err); break; } } while (!time_after(jiffies, stress->timeout)); - - kfree(order); }
struct reorder_lock { @@ -495,19 +510,16 @@ static void stress_reorder_work(struct work_struct *work) int *order; int n, err;
- order = get_random_order(stress->nlocks); - if (!order) - return; + order = get_random_order(stress->test, stress->nlocks); + KUNIT_ASSERT_NOT_NULL(stress->test, order);
for (n = 0; n < stress->nlocks; n++) { - ll = kmalloc(sizeof(*ll), GFP_KERNEL); - if (!ll) - goto out; + ll = kunit_kmalloc(stress->test, sizeof(*ll), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(stress->test, ll);
ll->lock = &stress->locks[order[n]]; list_add(&ll->link, &locks); } - kfree(order); order = NULL;
do { @@ -523,8 +535,7 @@ static void stress_reorder_work(struct work_struct *work) ww_mutex_unlock(ln->lock);
if (err != -EDEADLK) { - pr_err_once("stress (%s) failed with %d\n", - __func__, err); + KUNIT_FAIL(stress->test, "lock failed, err=%d", err); break; }
@@ -538,11 +549,6 @@ static void stress_reorder_work(struct work_struct *work)
ww_acquire_fini(&ctx); } while (!time_after(jiffies, stress->timeout)); - -out: - list_for_each_entry_safe(ll, ln, &locks, link) - kfree(ll); - kfree(order); }
static void stress_one_work(struct work_struct *work) @@ -558,8 +564,7 @@ static void stress_one_work(struct work_struct *work) dummy_load(stress); ww_mutex_unlock(lock); } else { - pr_err_once("stress (%s) failed with %d\n", - __func__, err); + KUNIT_FAIL(stress->test, "lock failed, err=%d", err); break; } } while (!time_after(jiffies, stress->timeout)); @@ -570,22 +575,41 @@ static void stress_one_work(struct work_struct *work) #define STRESS_ONE BIT(2) #define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
-static int stress(int nlocks, int nthreads, unsigned int flags) +struct stress_case { + int nlocks; + int nthreads_per_cpu; + unsigned int flags; +}; + +static const struct stress_case stress_cases[] = { + { 16, 2, STRESS_INORDER }, + { 16, 2, STRESS_REORDER }, + { 2046, hweight32(STRESS_ALL), STRESS_ALL }, +}; + +static void stress_case_to_desc(const struct stress_case *param, char *desc) { + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "nlocks=%d,nthreads_per_cpu=%d,flags=%x", + param->nlocks, param->nthreads_per_cpu, param->flags); +} + +KUNIT_ARRAY_PARAM(stress_cases, stress_cases, stress_case_to_desc); + +static void stress(struct kunit *test) +{ + const struct stress_case *param = test->param_value; + const int nlocks = param->nlocks; + int nthreads = param->nthreads_per_cpu * num_online_cpus(); + const unsigned int flags = param->flags; struct ww_mutex *locks; struct stress *stress_array; int n, count;
- locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL); - if (!locks) - return -ENOMEM; + locks = kunit_kmalloc_array(test, nlocks, sizeof(*locks), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, locks);
- stress_array = kmalloc_array(nthreads, sizeof(*stress_array), - GFP_KERNEL); - if (!stress_array) { - kfree(locks); - return -ENOMEM; - } + stress_array = kunit_kmalloc_array(test, nthreads, sizeof(*stress_array), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, stress_array);
for (n = 0; n < nlocks; n++) ww_mutex_init(&locks[n], &ww_class); @@ -617,6 +641,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags) stress = &stress_array[count++];
INIT_WORK(&stress->work, fn); + stress->test = test; stress->locks = locks; stress->nlocks = nlocks; stress->timeout = jiffies + 2*HZ; @@ -629,70 +654,42 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
for (n = 0; n < nlocks; n++) ww_mutex_destroy(&locks[n]); - kfree(stress_array); - kfree(locks); - - return 0; }
-static int __init test_ww_mutex_init(void) +static int ww_mutex_suite_init(struct kunit_suite *suite) { - int ncpus = num_online_cpus(); - int ret, i; - - printk(KERN_INFO "Beginning ww mutex selftests\n"); - - prandom_seed_state(&rng, get_random_u64()); - wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0); if (!wq) return -ENOMEM;
- ret = test_mutex(); - if (ret) - return ret; - - ret = test_aa(false); - if (ret) - return ret; - - ret = test_aa(true); - if (ret) - return ret; - - for (i = 0; i < 4; i++) { - ret = test_abba(i & 1, i & 2); - if (ret) - return ret; - } - - ret = test_cycle(ncpus); - if (ret) - return ret; - - ret = stress(16, 2*ncpus, STRESS_INORDER); - if (ret) - return ret; - - ret = stress(16, 2*ncpus, STRESS_REORDER); - if (ret) - return ret; - - ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL); - if (ret) - return ret; + prandom_seed_state(&rng, get_random_u64());
- printk(KERN_INFO "All ww mutex selftests passed\n"); return 0; }
-static void __exit test_ww_mutex_exit(void) +static void ww_mutex_suite_exit(struct kunit_suite *suite) { - destroy_workqueue(wq); + if (wq) + destroy_workqueue(wq); }
-module_init(test_ww_mutex_init); -module_exit(test_ww_mutex_exit); +static struct kunit_case ww_mutex_cases[] = { + KUNIT_CASE_PARAM(test_mutex, test_mutex_gen_params), + KUNIT_CASE_PARAM(test_aa, test_aa_gen_params), + KUNIT_CASE_PARAM(test_abba, test_abba_gen_params), + KUNIT_CASE_PARAM(test_cycle, test_cycle_gen_params), + KUNIT_CASE_PARAM_ATTR(stress, stress_cases_gen_params, {.speed = KUNIT_SPEED_SLOW}), + {}, +}; + +static struct kunit_suite ww_mutex_suite = { + .name = "ww_mutex", + .suite_init = ww_mutex_suite_init, + .suite_exit = ww_mutex_suite_exit, + .test_cases = ww_mutex_cases, +}; + +kunit_test_suite(ww_mutex_suite);
MODULE_LICENSE("GPL"); MODULE_AUTHOR("Intel Corporation"); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1af972a92d06..a7edcbdc1428 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1586,16 +1586,18 @@ config LOCK_TORTURE_TEST Say M if you want these torture tests to build as a module. Say N if you are unsure.
-config WW_MUTEX_SELFTEST - tristate "Wait/wound mutex selftests" +config WW_MUTEX_KUNIT_TEST + tristate "KUnit test for wait/wound mutex" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS help - This option provides a kernel module that runs tests on the - on the struct ww_mutex locking API. + This option provides a KUnit test that exercises the struct + ww_mutex locking API.
It is recommended to enable DEBUG_WW_MUTEX_SLOWPATH in conjunction with this test harness.
- Say M if you want these self tests to build as a module. + Say M if you want these tests to build as a module. Say N if you are unsure.
config SCF_TORTURE_TEST diff --git a/tools/testing/selftests/locking/ww_mutex.sh b/tools/testing/selftests/locking/ww_mutex.sh deleted file mode 100755 index 91e4ac7566af..000000000000 --- a/tools/testing/selftests/locking/ww_mutex.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -# Kselftest framework requirement - SKIP code is 4. -ksft_skip=4 - -# Runs API tests for struct ww_mutex (Wait/Wound mutexes) -if ! /sbin/modprobe -q -n test-ww_mutex; then - echo "ww_mutex: module test-ww_mutex is not found [SKIP]" - exit $ksft_skip -fi - -if /sbin/modprobe -q test-ww_mutex; then - /sbin/modprobe -q -r test-ww_mutex - echo "locking/ww_mutex: ok" -else - echo "locking/ww_mutex: [FAIL]" - exit 1 -fi
--- base-commit: a64dcfb451e254085a7daee5fe51bf22959d52d3 change-id: 20250208-ww_mutex-kunit-convert-71842a7a6be2
Best regards,
Hi Tamir,
On Mon, Feb 10, 2025 at 10:59:12AM -0500, Tamir Duberstein wrote:
Convert this unit test to a KUnit test.
I would like to know the pros and cons between kunit tests and kselftests, maybe someone Cced can answer that? It'll be good to put these in the commit log as well.
Regards, Boqun
Signed-off-by: Tamir Duberstein <tamird@gmail.com>
I tested this using: $ tools/testing/kunit/kunit.py run --arch arm64 --make_options LLVM=1 ww_mutex
On success: ; [12:48:16] ================== ww_mutex (5 subtests) =================== ; [12:48:16] ======================= test_mutex ======================== ; [12:48:16] [PASSED] flags=0 ; [12:48:16] [PASSED] flags=1 ; [12:48:16] [PASSED] flags=2 ; [12:48:16] [PASSED] flags=3 ; [12:48:16] [PASSED] flags=4 ; [12:48:17] [PASSED] flags=5 ; [12:48:17] [PASSED] flags=6 ; [12:48:17] [PASSED] flags=7 ; [12:48:17] =================== [PASSED] test_mutex ==================== ; [12:48:17] ========================= test_aa ========================= ; [12:48:17] [PASSED] lock ; [12:48:17] [PASSED] trylock ; [12:48:17] ===================== [PASSED] test_aa ===================== ; [12:48:17] ======================== test_abba ======================== ; [12:48:17] [PASSED] trylock=0,resolve=0 ; [12:48:17] [PASSED] trylock=1,resolve=1 ; [12:48:17] [PASSED] trylock=0,resolve=0 ; [12:48:17] [PASSED] trylock=1,resolve=1 ; [12:48:17] ==================== [PASSED] test_abba ==================== ; [12:48:17] ======================= test_cycle ======================== ; [12:48:17] [PASSED] nthreads=2 ; [12:48:17] =================== [PASSED] test_cycle ==================== ; [12:48:21] ========================= stress ========================== ; [12:48:21] [PASSED] nlocks=16,nthreads_per_cpu=2,flags=1 ; [12:48:23] [PASSED] nlocks=16,nthreads_per_cpu=2,flags=2 ; [12:48:23] [PASSED] nlocks=2046,nthreads_per_cpu=3,flags=7 ; [12:48:23] ===================== [PASSED] stress ====================== ; [12:48:23] ==================== [PASSED] ww_mutex ===================== ; [12:48:23] ============================================================ ; [12:48:23] Testing complete. Ran 18 tests: passed: 18
On failure:
[...]
On Tue, Feb 11, 2025 at 5:38 PM Boqun Feng <boqun.feng@gmail.com> wrote:
Hi Tamir,
Hi Boqun, thanks for taking a look.
On Mon, Feb 10, 2025 at 10:59:12AM -0500, Tamir Duberstein wrote:
Convert this unit test to a KUnit test.
I would like to know the pros and cons between kunit tests and kselftests, maybe someone Cced can answer that? It'll be good to put these in the commit log as well.
David Gow gave a pretty detailed answer in https://lore.kernel.org/all/CABVgOS=KZrM2dWyp1HzVS0zh7vquLxmTY2T2Ti53DQADrW+... for a similar patch.
David, what do you think about enumerating these reasons in the KUnit documentation? This is the 3rd of these patches that has received this question. It'd be a shame to have every commit enumerate KUnit's reasons for being.
Tamir
On Wed, 12 Feb 2025 at 06:43, Tamir Duberstein tamird@gmail.com wrote:
On Tue, Feb 11, 2025 at 5:38 PM Boqun Feng boqun.feng@gmail.com wrote:
Hi Tamir,
Hi Boqun, thanks for taking a look.
On Mon, Feb 10, 2025 at 10:59:12AM -0500, Tamir Duberstein wrote:
Convert this unit test to a KUnit test.
I would like to know the pros and cons between kunit tests and kselftests, maybe someone Cced can answer that? It'll be good to put these in the commit log as well.
David Gow gave a pretty detailed answer in https://lore.kernel.org/all/CABVgOS=KZrM2dWyp1HzVS0zh7vquLxmTY2T2Ti53DQADrW+... for a similar patch.
That previous answer covers a lot, but while I think the advantages (better tooling, easy architecture emulation, etc) are covered, I'll add a few extra caveats specific to this test here.
KUnit is really better suited to "unit tests" rather than "stress tests", and does have some limits when tests span multiple threads (assertions cannot kill other threads, the test context is not automatically applied, etc).
I don't think these would preclude you from using KUnit for ww_mutex testing -- people have done multithreaded stress tests in KUnit in the past when they really wanted to. It's definitely a trade-off rather than a strict win, so it may make sense if, as maintainers, you are already familiar with or want to use KUnit, but it's definitely up to you.
David, what do you think about enumerating these reasons in the KUnit documentation? This is the 3rd of these patches that has received this question. It'd be a shame to have every commit enumerate KUnit's reasons for being.
We have some existing documentation as to when KUnit and kselftest are more appropriate as a part of the 'Kernel Testing Guide' in Documentation/testing-overview.rst (though it focuses on the underlying differences in goals, rather than 'look at all of these useful features'), as well as an overview of why KUnit is useful on the KUnit webpage at http://kunit.dev/ (though without the direct kselftest comparison).
My hope was that porting tests from one framework to the other would be a rare, probably one-off occurrence, so the focus should be more on writing new tests, but clearly what we have is insufficient. And ideally some of the advantages of KUnit, particularly in the standardisation and tooling support, will continue to make their way to kselftest, so hopefully the gap between the two frameworks will disappear over time.
Nevertheless, I'll see if there's anything specific we want to add about test porting.
Cheers, -- David
Hi Tamir,
kernel test robot noticed the following build warnings:
url: https://github.com/intel-lab-lkp/linux/commits/Tamir-Duberstein/ww_mutex-con... base: a64dcfb451e254085a7daee5fe51bf22959d52d3 patch link: https://lore.kernel.org/r/20250210-ww_mutex-kunit-convert-v1-1-972f0201f71e%... patch subject: [PATCH] ww_mutex: convert self-test to KUnit config: i386-randconfig-141-20250212 (https://download.01.org/0day-ci/archive/20250212/202502121806.CS6r741y-lkp@i...) compiler: clang version 19.1.3 (https://github.com/llvm/llvm-project ab51eccf88f5321e7c60591c5546b254b6afab99)
If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot lkp@intel.com | Reported-by: Dan Carpenter dan.carpenter@linaro.org | Closes: https://lore.kernel.org/r/202502121806.CS6r741y-lkp@intel.com/
smatch warnings: kernel/locking/ww_mutex_kunit.c:238 test_abba_gen_params() warn: shift has higher precedence than mask kernel/locking/ww_mutex_kunit.c:249 test_abba() warn: shift has higher precedence than mask
vim +238 kernel/locking/ww_mutex_kunit.c
70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 231 daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 232 static const void *test_abba_gen_params(const void *prev, char *desc) daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 233 { daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 234 static unsigned int storage; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 235 const unsigned int *next = gen_range(&storage, 0b00, 0b11, prev); daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 236 daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 237 if (next != NULL) { daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 @238 const bool trylock = *next & 0b01 >> 0; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 239 const bool resolve = *next & 0b10 >> 1;
The shifts here are weird... A zero shift is strange but even the 1 shift is odd. The current code is equivalent to:
const bool resolve = *next & (0b10 >> 1);
But changing it to:
const bool resolve = (*next & 0b10) >> 1;
Doesn't make sense either... Probably that makes less sense actually. What are you trying to communicate with this code?
daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 240 daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 241 snprintf(desc, KUNIT_PARAM_DESC_SIZE, "trylock=%d,resolve=%d", trylock, resolve); daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 242 } daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 243 return next; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 244 } daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 245 daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 246 static void test_abba(struct kunit *test) 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 247 { daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 248 const unsigned int *param = test->param_value; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 @249 const bool trylock = *param & 0b01 >> 0; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 250 const bool resolve = *param & 0b10 >> 1;
Same.
70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 251 struct test_abba abba; 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 252 struct ww_acquire_ctx ctx; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 253 int err; 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 254 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 255 ww_mutex_init(&abba.a_mutex, &ww_class); 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 256 ww_mutex_init(&abba.b_mutex, &ww_class); 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 257 INIT_WORK_ONSTACK(&abba.work, test_abba_work);
Hi Dan,
On Wed, Feb 12, 2025 at 6:53 AM Dan Carpenter dan.carpenter@linaro.org wrote:
Hi Tamir,
kernel test robot noticed the following build warnings:
url: https://github.com/intel-lab-lkp/linux/commits/Tamir-Duberstein/ww_mutex-con... base: a64dcfb451e254085a7daee5fe51bf22959d52d3 patch link: https://lore.kernel.org/r/20250210-ww_mutex-kunit-convert-v1-1-972f0201f71e%... patch subject: [PATCH] ww_mutex: convert self-test to KUnit config: i386-randconfig-141-20250212 (https://download.01.org/0day-ci/archive/20250212/202502121806.CS6r741y-lkp@i...) compiler: clang version 19.1.3 (https://github.com/llvm/llvm-project ab51eccf88f5321e7c60591c5546b254b6afab99)
If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot lkp@intel.com | Reported-by: Dan Carpenter dan.carpenter@linaro.org | Closes: https://lore.kernel.org/r/202502121806.CS6r741y-lkp@intel.com/
smatch warnings: kernel/locking/ww_mutex_kunit.c:238 test_abba_gen_params() warn: shift has higher precedence than mask kernel/locking/ww_mutex_kunit.c:249 test_abba() warn: shift has higher precedence than mask
vim +238 kernel/locking/ww_mutex_kunit.c
70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 231 daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 232 static const void *test_abba_gen_params(const void *prev, char *desc) daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 233 { daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 234 static unsigned int storage; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 235 const unsigned int *next = gen_range(&storage, 0b00, 0b11, prev); daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 236 daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 237 if (next != NULL) { daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 @238 const bool trylock = *next & 0b01 >> 0; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 239 const bool resolve = *next & 0b10 >> 1;
The shifts here are weird... A zero shift is strange but even the 1 shift is odd. The current code is equivalent to:
const bool resolve = *next & (0b10 >> 1);
But changing it to:
const bool resolve = (*next & 0b10) >> 1;
Doesn't make sense either... Probably that makes less sense actually. What are you trying to communicate with this code?
Yeah, the bit shifting here is not necessary. I'll replace this with a proper bitfield.
daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 240 daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 241 snprintf(desc, KUNIT_PARAM_DESC_SIZE, "trylock=%d,resolve=%d", trylock, resolve); daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 242 } daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 243 return next; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 244 } daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 245 daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 246 static void test_abba(struct kunit *test) 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 247 { daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 248 const unsigned int *param = test->param_value; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 @249 const bool trylock = *param & 0b01 >> 0; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 250 const bool resolve = *param & 0b10 >> 1;
Same.
70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 251 struct test_abba abba; 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 252 struct ww_acquire_ctx ctx; daf92a37bd1117 kernel/locking/ww_mutex_kunit.c Tamir Duberstein 2025-02-10 253 int err; 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 254 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 255 ww_mutex_init(&abba.a_mutex, &ww_class); 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 256 ww_mutex_init(&abba.b_mutex, &ww_class); 70207686e492fb kernel/locking/test-ww_mutex.c Chris Wilson 2016-12-01 257 INIT_WORK_ONSTACK(&abba.work, test_abba_work);
-- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
As an aside, how can I compile with the warning settings used by kernel test robot?
Thanks. Tamir
On Wed, Feb 12, 2025 at 09:33:46AM -0500, Tamir Duberstein wrote:
As an aside, how can I compile with the warning settings used by kernel test robot?
This is a Smatch warning.
https://github.com/error27/smatch https://github.com/error27/smatch/blob/master/Documentation/smatch.rst
Run smatch/smatch_scripts/kchecker kernel/locking/ww_mutex_kunit.c
regards, dan carpenter
linux-kselftest-mirror@lists.linaro.org