diff --git a/tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c b/tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c
index cb6468adb..84683c547 100644
--- a/tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c
+++ b/tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c
@@ -16,6 +17,8 @@ static int verbose = 1;
 #define MY_POOL_SIZE 1024
+DEFINE_MUTEX(wait_for_tasklet);
[ ... ]
+/* Testing page_pool requires running under softirq.
+ * Running under a tasklet satisfy this, as tasklets are built on top of
+ * softirq.
+ */
+static void pp_tasklet_handler(struct tasklet_struct *t)
+{
+	uint32_t nr_loops = loops;
+
+	if (in_serving_softirq())
+		pr_warn("%s(): in_serving_softirq fast-path\n", __func__); // True
+	else
+		pr_warn("%s(): Cannot use page_pool fast-path\n", __func__);
+
+	if (enabled(bit_run_bench_tasklet01))
+		time_bench_loop(nr_loops, 0, "tasklet_page_pool01_fast_path",
+				NULL, time_bench_page_pool01_fast_path);
+
+	if (enabled(bit_run_bench_tasklet02))
+		time_bench_loop(nr_loops, 0, "tasklet_page_pool02_ptr_ring",
+				NULL, time_bench_page_pool02_ptr_ring);
+
+	if (enabled(bit_run_bench_tasklet03))
+		time_bench_loop(nr_loops, 0, "tasklet_page_pool03_slow", NULL,
+				time_bench_page_pool03_slow);
+
+	if (enabled(bit_run_bench_tasklet04))
+		time_bench_loop(nr_loops, 0, "tasklet_page_pool04_napi_aware",
+				NULL, time_bench_page_pool04_napi_aware);
+
+	mutex_unlock(&wait_for_tasklet); /* Module __init waiting on unlock */
^^^^^^^^^^^^^
Can mutex_unlock() be called from softirq context? pp_tasklet_handler() runs as a tasklet handler in softirq context, but mutexes are sleeping locks that may only be used in process context, and a mutex must be released by the task that acquired it -- here the lock is taken in module init and released by the tasklet. Would a completion (or a spinlock) be more appropriate here?
The synchronization pattern appears to be:

  bench_page_pool_simple_module_init() -> mutex_lock(&wait_for_tasklet)
  bench_page_pool_simple_module_init() -> tasklet_schedule(&pp_tasklet)
  bench_page_pool_simple_module_init() -> mutex_lock(&wait_for_tasklet)    [blocks]
  pp_tasklet_handler()                 -> mutex_unlock(&wait_for_tasklet)  [softirq context]
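A completion would express this handoff directly: complete() is explicitly safe to call from softirq context, and wait_for_completion() sleeps in process context in module init. A minimal, untested sketch of that shape, reusing the names from the patch (the benchmark calls are elided):

	#include <linux/completion.h>
	#include <linux/interrupt.h>

	static DECLARE_COMPLETION(tasklet_done);

	static void pp_tasklet_handler(struct tasklet_struct *t)
	{
		/* ... run the tasklet_page_pool0X benchmarks as before ... */

		/* Signal module init; complete() is safe from softirq context */
		complete(&tasklet_done);
	}

	static int __init bench_page_pool_simple_module_init(void)
	{
		/* ... */
		run_benchmark_tests();

		run_tasklet_tests();
		/* Sleep in process context until the tasklet has finished */
		wait_for_completion(&tasklet_done);

		return 0;
	}

A spinlock would also be softirq-safe, but waiting on one from module init would busy-wait; the completion keeps the sleeping semantics the mutex was (incorrectly) being used for.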
+}
+DECLARE_TASKLET_DISABLED(pp_tasklet, pp_tasklet_handler);
+
+static void run_tasklet_tests(void)
+{
+	tasklet_enable(&pp_tasklet);
+	/* "Async" schedule tasklet, which runs on the CPU that schedule it */
+	tasklet_schedule(&pp_tasklet);
+}
[ ... ]
@@ -251,12 +332,19 @@ static int __init bench_page_pool_simple_module_init(void)
 	run_benchmark_tests();
 
+	mutex_lock(&wait_for_tasklet);
+	run_tasklet_tests();
+	/* Sleep on mutex, waiting for tasklet to release */
+	mutex_lock(&wait_for_tasklet);
+
 	return 0;
 }
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19165940352