This series updates crc_kunit to use the same interrupt context testing strategy that I used in the crypto KUnit tests, i.e. testing CRC computation in hardirq, softirq, and task context concurrently. This detects issues related to use of the FPU/SIMD/vector registers.
To allow lib/crc/tests/ and lib/crypto/tests/ to share code, move the needed helper function to include/kunit/run-in-irq-context.h. include/kunit/ seems like the most relevant location for this sort of thing, but let me know if there is any other preference.
The third patch replaces the calls to crypto_simd_usable() in lib/crc/ with calls to the underlying functions, now that we have a better solution that doesn't rely on the test injecting values. (Note that crc_kunit wasn't actually using the injection solution, anyway.)
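For reference, here is a minimal sketch of how a test suite hooks into the shared helper (the state struct and check function below are made-up placeholders; kunit_run_irq_test() and its signature are from patch 1):

	#include <kunit/run-in-irq-context.h>

	/*
	 * Called concurrently from task, softirq, and hardirq context.
	 * Returning false reports a failure to KUnit.
	 */
	static bool my_irq_test_func(void *state_)
	{
		struct my_test_state *state = state_;

		return compute_and_check_one_value(state);
	}

	static void my_test(struct kunit *test)
	{
		struct my_test_state state = { /* precomputed expected values */ };

		kunit_run_irq_test(test, my_irq_test_func, 100000, &state);
	}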
I'd like to take this series via crc-next.
Eric Biggers (3):
  kunit, lib/crypto: Move run_irq_test() to common header
  lib/crc: crc_kunit: Test CRC computation in interrupt contexts
  lib/crc: Use underlying functions instead of crypto_simd_usable()
 include/kunit/run-in-irq-context.h    | 129 ++++++++++++++++++++++++++
 lib/crc/arm/crc-t10dif.h              |   6 +-
 lib/crc/arm/crc32.h                   |   6 +-
 lib/crc/arm64/crc-t10dif.h            |   6 +-
 lib/crc/arm64/crc32.h                 |  11 ++-
 lib/crc/powerpc/crc-t10dif.h          |   5 +-
 lib/crc/powerpc/crc32.h               |   5 +-
 lib/crc/tests/crc_kunit.c             |  62 +++++++++++-
 lib/crc/x86/crc-pclmul-template.h     |   3 +-
 lib/crc/x86/crc32.h                   |   2 +-
 lib/crypto/tests/hash-test-template.h | 123 +----------------------
 11 files changed, 206 insertions(+), 152 deletions(-)
 create mode 100644 include/kunit/run-in-irq-context.h
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
Rename run_irq_test() to kunit_run_irq_test() and move it to a public header so that it can be reused by crc_kunit.
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
 include/kunit/run-in-irq-context.h    | 129 ++++++++++++++++++++++++++
 lib/crypto/tests/hash-test-template.h | 123 +----------------------
 2 files changed, 133 insertions(+), 119 deletions(-)
 create mode 100644 include/kunit/run-in-irq-context.h
diff --git a/include/kunit/run-in-irq-context.h b/include/kunit/run-in-irq-context.h
new file mode 100644
index 0000000000000..108e96433ea45
--- /dev/null
+++ b/include/kunit/run-in-irq-context.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper function for testing code in interrupt contexts
+ *
+ * Copyright 2025 Google LLC
+ */
+#ifndef _KUNIT_RUN_IN_IRQ_CONTEXT_H
+#define _KUNIT_RUN_IN_IRQ_CONTEXT_H
+
+#include <kunit/test.h>
+#include <linux/timekeeping.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+
+#define KUNIT_IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
+
+struct kunit_irq_test_state {
+        bool (*func)(void *test_specific_state);
+        void *test_specific_state;
+        bool task_func_reported_failure;
+        bool hardirq_func_reported_failure;
+        bool softirq_func_reported_failure;
+        unsigned long hardirq_func_calls;
+        unsigned long softirq_func_calls;
+        struct hrtimer timer;
+        struct work_struct bh_work;
+};
+
+static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer)
+{
+        struct kunit_irq_test_state *state =
+                container_of(timer, typeof(*state), timer);
+
+        WARN_ON_ONCE(!in_hardirq());
+        state->hardirq_func_calls++;
+
+        if (!state->func(state->test_specific_state))
+                state->hardirq_func_reported_failure = true;
+
+        hrtimer_forward_now(&state->timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL);
+        queue_work(system_bh_wq, &state->bh_work);
+        return HRTIMER_RESTART;
+}
+
+static void kunit_irq_test_bh_work_func(struct work_struct *work)
+{
+        struct kunit_irq_test_state *state =
+                container_of(work, typeof(*state), bh_work);
+
+        WARN_ON_ONCE(!in_serving_softirq());
+        state->softirq_func_calls++;
+
+        if (!state->func(state->test_specific_state))
+                state->softirq_func_reported_failure = true;
+}
+
+/*
+ * Helper function which repeatedly runs the given @func in task, softirq, and
+ * hardirq context concurrently, and reports a failure to KUnit if any
+ * invocation of @func in any context returns false.  @func is passed
+ * @test_specific_state as its argument.  At most 3 invocations of @func will
+ * run concurrently: one in each of task, softirq, and hardirq context.
+ *
+ * The main purpose of this interrupt context testing is to validate fallback
+ * code paths that run in contexts where the normal code path cannot be used,
+ * typically due to the FPU or vector registers already being in-use in kernel
+ * mode.  These code paths aren't covered when the test code is executed only
+ * by the KUnit test runner thread in task context.  The reason for the
+ * concurrency is because merely using hardirq context is not sufficient to
+ * reach a fallback code path on some architectures; the hardirq actually has
+ * to occur while the FPU or vector unit was already in-use in kernel mode.
+ *
+ * Another purpose of this testing is to detect issues with the architecture's
+ * irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
+ * especially in softirq context when the softirq may have interrupted a task
+ * already using kernel-mode FPU or vector (if the arch didn't prevent that).
+ * Crypto functions are often executed in softirqs, so this is important.
+ */
+static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
+                                      int max_iterations,
+                                      void *test_specific_state)
+{
+        struct kunit_irq_test_state state = {
+                .func = func,
+                .test_specific_state = test_specific_state,
+        };
+        unsigned long end_jiffies;
+
+        /*
+         * Set up a hrtimer (the way we access hardirq context) and a work
+         * struct for the BH workqueue (the way we access softirq context).
+         */
+        hrtimer_setup_on_stack(&state.timer, kunit_irq_test_timer_func,
+                               CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+        INIT_WORK_ONSTACK(&state.bh_work, kunit_irq_test_bh_work_func);
+
+        /* Run for up to max_iterations or 1 second, whichever comes first. */
+        end_jiffies = jiffies + HZ;
+        hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL,
+                      HRTIMER_MODE_REL_HARD);
+        for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies);
+             i++) {
+                if (!func(test_specific_state))
+                        state.task_func_reported_failure = true;
+        }
+
+        /* Cancel the timer and work. */
+        hrtimer_cancel(&state.timer);
+        flush_work(&state.bh_work);
+
+        /* Sanity check: the timer and BH functions should have been run. */
+        KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0,
+                            "Timer function was not called");
+        KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0,
+                            "BH work function was not called");
+
+        /* Check for incorrect hash values reported from any context. */
+        KUNIT_EXPECT_FALSE_MSG(
+                test, state.task_func_reported_failure,
+                "Incorrect hash values reported from task context");
+        KUNIT_EXPECT_FALSE_MSG(
+                test, state.hardirq_func_reported_failure,
+                "Incorrect hash values reported from hardirq context");
+        KUNIT_EXPECT_FALSE_MSG(
+                test, state.softirq_func_reported_failure,
+                "Incorrect hash values reported from softirq context");
+}
+
+#endif /* _KUNIT_RUN_IN_IRQ_CONTEXT_H */
diff --git a/lib/crypto/tests/hash-test-template.h b/lib/crypto/tests/hash-test-template.h
index f437a0a9ac6cd..61b43e62779fb 100644
--- a/lib/crypto/tests/hash-test-template.h
+++ b/lib/crypto/tests/hash-test-template.h
@@ -3,15 +3,13 @@
  * Test cases for hash functions, including a benchmark.  This is included by
  * KUnit test suites that want to use it.  See sha512_kunit.c for an example.
  *
  * Copyright 2025 Google LLC
  */
+#include <kunit/run-in-irq-context.h>
 #include <kunit/test.h>
-#include <linux/hrtimer.h>
-#include <linux/timekeeping.h>
 #include <linux/vmalloc.h>
-#include <linux/workqueue.h>
 
 /* test_buf is a guarded buffer, i.e. &test_buf[TEST_BUF_LEN] is not mapped. */
 #define TEST_BUF_LEN 16384
 static u8 *test_buf;
 
@@ -317,123 +315,10 @@ static void test_hash_ctx_zeroization(struct kunit *test)
         HASH_FINAL(&ctx, test_buf);
         KUNIT_ASSERT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
                                "Hash context was not zeroized by finalization");
 }
 
-#define IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
-
-struct hash_irq_test_state {
-        bool (*func)(void *test_specific_state);
-        void *test_specific_state;
-        bool task_func_reported_failure;
-        bool hardirq_func_reported_failure;
-        bool softirq_func_reported_failure;
-        unsigned long hardirq_func_calls;
-        unsigned long softirq_func_calls;
-        struct hrtimer timer;
-        struct work_struct bh_work;
-};
-
-static enum hrtimer_restart hash_irq_test_timer_func(struct hrtimer *timer)
-{
-        struct hash_irq_test_state *state =
-                container_of(timer, typeof(*state), timer);
-
-        WARN_ON_ONCE(!in_hardirq());
-        state->hardirq_func_calls++;
-
-        if (!state->func(state->test_specific_state))
-                state->hardirq_func_reported_failure = true;
-
-        hrtimer_forward_now(&state->timer, IRQ_TEST_HRTIMER_INTERVAL);
-        queue_work(system_bh_wq, &state->bh_work);
-        return HRTIMER_RESTART;
-}
-
-static void hash_irq_test_bh_work_func(struct work_struct *work)
-{
-        struct hash_irq_test_state *state =
-                container_of(work, typeof(*state), bh_work);
-
-        WARN_ON_ONCE(!in_serving_softirq());
-        state->softirq_func_calls++;
-
-        if (!state->func(state->test_specific_state))
-                state->softirq_func_reported_failure = true;
-}
-
-/*
- * Helper function which repeatedly runs the given @func in task, softirq, and
- * hardirq context concurrently, and reports a failure to KUnit if any
- * invocation of @func in any context returns false.  @func is passed
- * @test_specific_state as its argument.  At most 3 invocations of @func will
- * run concurrently: one in each of task, softirq, and hardirq context.
- *
- * The main purpose of this interrupt context testing is to validate fallback
- * code paths that run in contexts where the normal code path cannot be used,
- * typically due to the FPU or vector registers already being in-use in kernel
- * mode.  These code paths aren't covered when the test code is executed only
- * by the KUnit test runner thread in task context.  The reason for the
- * concurrency is because merely using hardirq context is not sufficient to
- * reach a fallback code path on some architectures; the hardirq actually has
- * to occur while the FPU or vector unit was already in-use in kernel mode.
- *
- * Another purpose of this testing is to detect issues with the architecture's
- * irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
- * especially in softirq context when the softirq may have interrupted a task
- * already using kernel-mode FPU or vector (if the arch didn't prevent that).
- * Crypto functions are often executed in softirqs, so this is important.
- */
-static void run_irq_test(struct kunit *test, bool (*func)(void *),
-                         int max_iterations, void *test_specific_state)
-{
-        struct hash_irq_test_state state = {
-                .func = func,
-                .test_specific_state = test_specific_state,
-        };
-        unsigned long end_jiffies;
-
-        /*
-         * Set up a hrtimer (the way we access hardirq context) and a work
-         * struct for the BH workqueue (the way we access softirq context).
-         */
-        hrtimer_setup_on_stack(&state.timer, hash_irq_test_timer_func,
-                               CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
-        INIT_WORK_ONSTACK(&state.bh_work, hash_irq_test_bh_work_func);
-
-        /* Run for up to max_iterations or 1 second, whichever comes first. */
-        end_jiffies = jiffies + HZ;
-        hrtimer_start(&state.timer, IRQ_TEST_HRTIMER_INTERVAL,
-                      HRTIMER_MODE_REL_HARD);
-        for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies);
-             i++) {
-                if (!func(test_specific_state))
-                        state.task_func_reported_failure = true;
-        }
-
-        /* Cancel the timer and work. */
-        hrtimer_cancel(&state.timer);
-        flush_work(&state.bh_work);
-
-        /* Sanity check: the timer and BH functions should have been run. */
-        KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0,
-                            "Timer function was not called");
-        KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0,
-                            "BH work function was not called");
-
-        /* Check for incorrect hash values reported from any context. */
-        KUNIT_EXPECT_FALSE_MSG(
-                test, state.task_func_reported_failure,
-                "Incorrect hash values reported from task context");
-        KUNIT_EXPECT_FALSE_MSG(
-                test, state.hardirq_func_reported_failure,
-                "Incorrect hash values reported from hardirq context");
-        KUNIT_EXPECT_FALSE_MSG(
-                test, state.softirq_func_reported_failure,
-                "Incorrect hash values reported from softirq context");
-}
-
 #define IRQ_TEST_DATA_LEN 256
 #define IRQ_TEST_NUM_BUFFERS 3 /* matches max concurrency level */
 
 struct hash_irq_test1_state {
         u8 expected_hashes[IRQ_TEST_NUM_BUFFERS][HASH_SIZE];
@@ -467,11 +352,11 @@ static void test_hash_interrupt_context_1(struct kunit *test)
         rand_bytes(test_buf, IRQ_TEST_NUM_BUFFERS * IRQ_TEST_DATA_LEN);
         for (int i = 0; i < IRQ_TEST_NUM_BUFFERS; i++)
                 HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN,
                      state.expected_hashes[i]);
 
-        run_irq_test(test, hash_irq_test1_func, 100000, &state);
+        kunit_run_irq_test(test, hash_irq_test1_func, 100000, &state);
 }
 
 struct hash_irq_test2_hash_ctx {
         struct HASH_CTX hash_ctx;
         atomic_t in_use;
@@ -498,11 +383,11 @@ static bool hash_irq_test2_func(void *state_)
                         break;
         }
         if (WARN_ON_ONCE(ctx == &state->ctxs[ARRAY_SIZE(state->ctxs)])) {
                 /*
                  * This should never happen, as the number of contexts is equal
-                 * to the maximum concurrency level of run_irq_test().
+                 * to the maximum concurrency level of kunit_run_irq_test().
                  */
                 return false;
         }
 
         if (ctx->step == 0) {
@@ -564,11 +449,11 @@ static void test_hash_interrupt_context_2(struct kunit *test)
         }
         if (remaining)
                 state->update_lens[state->num_steps++] = remaining;
         state->num_steps += 2; /* for init and final */
 
-        run_irq_test(test, hash_irq_test2_func, 250000, state);
+        kunit_run_irq_test(test, hash_irq_test2_func, 250000, state);
 }
 
 #define UNKEYED_HASH_KUNIT_CASES				\
         KUNIT_CASE(test_hash_test_vectors),			\
         KUNIT_CASE(test_hash_all_lens_up_to_4096),		\
Test that if CRCs are computed in task, softirq, and hardirq context concurrently, then all results are as expected. Implement this using kunit_run_irq_test(), which is also used by the lib/crypto/ tests.
As with the corresponding lib/crypto/ tests, the purpose of this is to test fallback code paths and to exercise edge cases in the architecture's support for in-kernel FPU/SIMD/vector.
Remove the code from crc_test() that sometimes disabled interrupts, as that was just an incomplete attempt to achieve similar test coverage.
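In condensed form, the per-context check that gets plugged into kunit_run_irq_test() looks like this (the complete version is in the diff below):

	static bool crc_irq_test_func(void *state_)
	{
		struct crc_irq_test_state *state = state_;
		/* Cycle through the test messages to vary the data. */
		u32 i = (u32)atomic_inc_return(&state->seqno) % IRQ_TEST_NUM_BUFFERS;

		/* Recompute the CRC and compare it to the precomputed value. */
		return state->v->func(state->initial_crc,
				      &test_buffer[i * IRQ_TEST_DATA_LEN],
				      IRQ_TEST_DATA_LEN) == state->expected_crcs[i];
	}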
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
 lib/crc/tests/crc_kunit.c | 62 +++++++++++++++++++++++++++++++++------
 1 file changed, 53 insertions(+), 9 deletions(-)
diff --git a/lib/crc/tests/crc_kunit.c b/lib/crc/tests/crc_kunit.c
index f08d985d8860e..9a450e25ac811 100644
--- a/lib/crc/tests/crc_kunit.c
+++ b/lib/crc/tests/crc_kunit.c
@@ -4,10 +4,11 @@
  *
  * Copyright 2024 Google LLC
  *
  * Author: Eric Biggers <ebiggers@google.com>
  */
+#include <kunit/run-in-irq-context.h>
 #include <kunit/test.h>
 #include <linux/crc7.h>
 #include <linux/crc16.h>
 #include <linux/crc-t10dif.h>
 #include <linux/crc32.h>
@@ -139,19 +140,66 @@ static size_t generate_random_length(size_t max_length)
                 break;
         }
         return len % (max_length + 1);
 }
 
+#define IRQ_TEST_DATA_LEN 512
+#define IRQ_TEST_NUM_BUFFERS 3 /* matches max concurrency level */
+
+struct crc_irq_test_state {
+        const struct crc_variant *v;
+        u64 initial_crc;
+        u64 expected_crcs[IRQ_TEST_NUM_BUFFERS];
+        atomic_t seqno;
+};
+
+/*
+ * Compute the CRC of one of the test messages and verify that it matches the
+ * expected CRC from @state->expected_crcs.  To increase the chance of
+ * detecting problems, cycle through multiple messages.
+ */
+static bool crc_irq_test_func(void *state_)
+{
+        struct crc_irq_test_state *state = state_;
+        const struct crc_variant *v = state->v;
+        u32 i = (u32)atomic_inc_return(&state->seqno) % IRQ_TEST_NUM_BUFFERS;
+        u64 actual_crc = v->func(state->initial_crc,
+                                 &test_buffer[i * IRQ_TEST_DATA_LEN],
+                                 IRQ_TEST_DATA_LEN);
+
+        return actual_crc == state->expected_crcs[i];
+}
+
+/*
+ * Test that if CRCs are computed in task, softirq, and hardirq context
+ * concurrently, then all results are as expected.
+ */
+static void crc_interrupt_context_test(struct kunit *test,
+                                       const struct crc_variant *v)
+{
+        struct crc_irq_test_state state = {
+                .v = v,
+                .initial_crc = generate_random_initial_crc(v),
+        };
+
+        for (int i = 0; i < IRQ_TEST_NUM_BUFFERS; i++) {
+                state.expected_crcs[i] = crc_ref(
+                        v, state.initial_crc,
+                        &test_buffer[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN);
+        }
+
+        kunit_run_irq_test(test, crc_irq_test_func, 100000, &state);
+}
+
 /* Test that v->func gives the same CRCs as a reference implementation. */
 static void crc_test(struct kunit *test, const struct crc_variant *v)
 {
         size_t i;
 
         for (i = 0; i < CRC_KUNIT_NUM_TEST_ITERS; i++) {
                 u64 init_crc, expected_crc, actual_crc;
                 size_t len, offset;
-                bool nosimd;
 
                 init_crc = generate_random_initial_crc(v);
                 len = generate_random_length(CRC_KUNIT_MAX_LEN);
 
                 /* Generate a random offset. */
@@ -166,26 +214,22 @@ static void crc_test(struct kunit *test, const struct crc_variant *v)
 
                 if (rand32() % 8 == 0)
                         /* Refresh the data occasionally. */
                         prandom_bytes_state(&rng, &test_buffer[offset], len);
 
-                nosimd = rand32() % 8 == 0;
-
                 /*
                  * Compute the CRC, and verify that it equals the CRC computed
                  * by a simple bit-at-a-time reference implementation.
                  */
                 expected_crc = crc_ref(v, init_crc, &test_buffer[offset], len);
-                if (nosimd)
-                        local_irq_disable();
                 actual_crc = v->func(init_crc, &test_buffer[offset], len);
-                if (nosimd)
-                        local_irq_enable();
                 KUNIT_EXPECT_EQ_MSG(test, expected_crc, actual_crc,
-                                    "Wrong result with len=%zu offset=%zu nosimd=%d",
-                                    len, offset, nosimd);
+                                    "Wrong result with len=%zu offset=%zu",
+                                    len, offset);
         }
+
+        crc_interrupt_context_test(test, v);
 }
 
 static __always_inline void
 crc_benchmark(struct kunit *test,
               u64 (*crc_func)(u64 crc, const u8 *p, size_t len))
Since crc_kunit now tests the fallback code paths without using crypto_simd_disabled_for_test, make the CRC code call the underlying may_use_simd() and irq_fpu_usable() functions directly instead of crypto_simd_usable(). This eliminates an unnecessary layer.
Take the opportunity to add likely() and unlikely() annotations as well.
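The resulting pattern, sketched here for the x86 case (crc_pclmul() and crc_generic() stand in for the real per-variant functions; the other architectures use may_use_simd() with kernel_neon_begin()/end() or the VMX equivalents):

	/* Take the SIMD fast path only when the FPU is usable in this context. */
	if (likely(irq_fpu_usable())) {
		kernel_fpu_begin();
		crc = crc_pclmul(crc, p, len);	/* PCLMULQDQ implementation */
		kernel_fpu_end();
	} else {
		crc = crc_generic(crc, p, len);	/* scalar fallback, always safe */
	}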
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
 lib/crc/arm/crc-t10dif.h          |  6 ++----
 lib/crc/arm/crc32.h               |  6 ++----
 lib/crc/arm64/crc-t10dif.h        |  6 ++----
 lib/crc/arm64/crc32.h             | 11 ++++++-----
 lib/crc/powerpc/crc-t10dif.h      |  5 +++--
 lib/crc/powerpc/crc32.h           |  5 +++--
 lib/crc/x86/crc-pclmul-template.h |  3 +--
 lib/crc/x86/crc32.h               |  2 +-
 8 files changed, 20 insertions(+), 24 deletions(-)
diff --git a/lib/crc/arm/crc-t10dif.h b/lib/crc/arm/crc-t10dif.h
index 2edf7e9681d05..1a969f4024d47 100644
--- a/lib/crc/arm/crc-t10dif.h
+++ b/lib/crc/arm/crc-t10dif.h
@@ -3,12 +3,10 @@
  * Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
  *
  * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
  */
 
-#include <crypto/internal/simd.h>
-
 #include <asm/neon.h>
 #include <asm/simd.h>
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull);
@@ -21,19 +19,19 @@ asmlinkage void crc_t10dif_pmull8(u16 init_crc, const u8 *buf, size_t len,
 
 static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 {
         if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
                 if (static_branch_likely(&have_pmull)) {
-                        if (crypto_simd_usable()) {
+                        if (likely(may_use_simd())) {
                                 kernel_neon_begin();
                                 crc = crc_t10dif_pmull64(crc, data, length);
                                 kernel_neon_end();
                                 return crc;
                         }
                 } else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
                            static_branch_likely(&have_neon) &&
-                           crypto_simd_usable()) {
+                           likely(may_use_simd())) {
                         u8 buf[16] __aligned(16);
 
                         kernel_neon_begin();
                         crc_t10dif_pmull8(crc, data, length, buf);
                         kernel_neon_end();
diff --git a/lib/crc/arm/crc32.h b/lib/crc/arm/crc32.h
index 018007e162a2b..ae71aa60b7a74 100644
--- a/lib/crc/arm/crc32.h
+++ b/lib/crc/arm/crc32.h
@@ -5,12 +5,10 @@
  * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
  */
 
 #include <linux/cpufeature.h>
 
-#include <crypto/internal/simd.h>
-
 #include <asm/hwcap.h>
 #include <asm/neon.h>
 #include <asm/simd.h>
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32);
@@ -32,11 +30,11 @@ static inline u32 crc32_le_scalar(u32 crc, const u8 *p, size_t len)
 }
 
 static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
 {
         if (len >= PMULL_MIN_LEN + 15 &&
-            static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+            static_branch_likely(&have_pmull) && likely(may_use_simd())) {
                 size_t n = -(uintptr_t)p & 15;
 
                 /* align p to 16-byte boundary */
                 if (n) {
                         crc = crc32_le_scalar(crc, p, n);
@@ -61,11 +59,11 @@ static inline u32 crc32c_scalar(u32 crc, const u8 *p, size_t len)
 }
 
 static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 {
         if (len >= PMULL_MIN_LEN + 15 &&
-            static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+            static_branch_likely(&have_pmull) && likely(may_use_simd())) {
                 size_t n = -(uintptr_t)p & 15;
 
                 /* align p to 16-byte boundary */
                 if (n) {
                         crc = crc32c_scalar(crc, p, n);
diff --git a/lib/crc/arm64/crc-t10dif.h b/lib/crc/arm64/crc-t10dif.h
index c4521a7f1ee9b..435a990ec43c2 100644
--- a/lib/crc/arm64/crc-t10dif.h
+++ b/lib/crc/arm64/crc-t10dif.h
@@ -5,12 +5,10 @@
  * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
  */
 
 #include <linux/cpufeature.h>
 
-#include <crypto/internal/simd.h>
-
 #include <asm/neon.h>
 #include <asm/simd.h>
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_asimd);
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull);
@@ -23,19 +21,19 @@ asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
 
 static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 {
         if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
                 if (static_branch_likely(&have_pmull)) {
-                        if (crypto_simd_usable()) {
+                        if (likely(may_use_simd())) {
                                 kernel_neon_begin();
                                 crc = crc_t10dif_pmull_p64(crc, data, length);
                                 kernel_neon_end();
                                 return crc;
                         }
                 } else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
                            static_branch_likely(&have_asimd) &&
-                           crypto_simd_usable()) {
+                           likely(may_use_simd())) {
                         u8 buf[16];
 
                         kernel_neon_begin();
                         crc_t10dif_pmull_p8(crc, data, length, buf);
                         kernel_neon_end();
diff --git a/lib/crc/arm64/crc32.h b/lib/crc/arm64/crc32.h
index 6e5dec45f05d2..31e649cd40a2f 100644
--- a/lib/crc/arm64/crc32.h
+++ b/lib/crc/arm64/crc32.h
@@ -3,12 +3,10 @@
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/neon.h>
 #include <asm/simd.h>
 
-#include <crypto/internal/simd.h>
-
 // The minimum input length to consider the 4-way interleaved code path
 static const size_t min_len = 1024;
 
 asmlinkage u32 crc32_le_arm64(u32 crc, unsigned char const *p, size_t len);
 asmlinkage u32 crc32c_le_arm64(u32 crc, unsigned char const *p, size_t len);
@@ -21,11 +19,12 @@ asmlinkage u32 crc32_be_arm64_4way(u32 crc, unsigned char const *p, size_t len);
 static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
 {
         if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
                 return crc32_le_base(crc, p, len);
 
-        if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+        if (len >= min_len && cpu_have_named_feature(PMULL) &&
+            likely(may_use_simd())) {
                 kernel_neon_begin();
                 crc = crc32_le_arm64_4way(crc, p, len);
                 kernel_neon_end();
 
                 p += round_down(len, 64);
@@ -41,11 +40,12 @@ static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
 static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 {
         if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
                 return crc32c_base(crc, p, len);
 
-        if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+        if (len >= min_len && cpu_have_named_feature(PMULL) &&
+            likely(may_use_simd())) {
                 kernel_neon_begin();
                 crc = crc32c_le_arm64_4way(crc, p, len);
                 kernel_neon_end();
 
                 p += round_down(len, 64);
@@ -61,11 +61,12 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 static inline u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
 {
         if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
                 return crc32_be_base(crc, p, len);
 
-        if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+        if (len >= min_len && cpu_have_named_feature(PMULL) &&
+            likely(may_use_simd())) {
                 kernel_neon_begin();
                 crc = crc32_be_arm64_4way(crc, p, len);
                 kernel_neon_end();
 
                 p += round_down(len, 64);
diff --git a/lib/crc/powerpc/crc-t10dif.h b/lib/crc/powerpc/crc-t10dif.h
index 59e16804a6eae..e033d5f57bae2 100644
--- a/lib/crc/powerpc/crc-t10dif.h
+++ b/lib/crc/powerpc/crc-t10dif.h
@@ -4,12 +4,12 @@
  *
  * Copyright 2017, Daniel Axtens, IBM Corporation.
  * [based on crc32c-vpmsum_glue.c]
  */
 
+#include <asm/simd.h>
 #include <asm/switch_to.h>
-#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/jump_label.h>
 #include <linux/preempt.h>
 #include <linux/uaccess.h>
 
@@ -27,11 +27,12 @@ static inline u16 crc_t10dif_arch(u16 crci, const u8 *p, size_t len)
         unsigned int prealign;
         unsigned int tail;
         u32 crc = crci;
 
         if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
-            !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+            !static_branch_likely(&have_vec_crypto) ||
+            unlikely(!may_use_simd()))
                 return crc_t10dif_generic(crc, p, len);
 
         if ((unsigned long)p & VMX_ALIGN_MASK) {
                 prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
                 crc = crc_t10dif_generic(crc, p, prealign);
diff --git a/lib/crc/powerpc/crc32.h b/lib/crc/powerpc/crc32.h
index 811cc2e6ed24d..cc8fa3913d4e0 100644
--- a/lib/crc/powerpc/crc32.h
+++ b/lib/crc/powerpc/crc32.h
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
+#include <asm/simd.h>
 #include <asm/switch_to.h>
-#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/jump_label.h>
 #include <linux/preempt.h>
 #include <linux/uaccess.h>
 
@@ -22,11 +22,12 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 {
         unsigned int prealign;
         unsigned int tail;
 
         if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
-            !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+            !static_branch_likely(&have_vec_crypto) ||
+            unlikely(!may_use_simd()))
                 return crc32c_base(crc, p, len);
 
         if ((unsigned long)p & VMX_ALIGN_MASK) {
                 prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
                 crc = crc32c_base(crc, p, prealign);
diff --git a/lib/crc/x86/crc-pclmul-template.h b/lib/crc/x86/crc-pclmul-template.h
index 35c950d7010c2..02744831c6fac 100644
--- a/lib/crc/x86/crc-pclmul-template.h
+++ b/lib/crc/x86/crc-pclmul-template.h
@@ -10,11 +10,10 @@
 #ifndef _CRC_PCLMUL_TEMPLATE_H
 #define _CRC_PCLMUL_TEMPLATE_H
 
 #include <asm/cpufeatures.h>
 #include <asm/simd.h>
-#include <crypto/internal/simd.h>
 #include <linux/static_call.h>
 #include "crc-pclmul-consts.h"
 
 #define DECLARE_CRC_PCLMUL_FUNCS(prefix, crc_t)				\
 crc_t prefix##_pclmul_sse(crc_t crc, const u8 *p, size_t len,		\
@@ -55,11 +54,11 @@ static inline bool have_avx512(void)
  * the dcache than the table-based code is, a 16-byte cutoff seems to work well.
  */
 #define CRC_PCLMUL(crc, p, len, prefix, consts, have_pclmulqdq)		\
 do {									\
         if ((len) >= 16 && static_branch_likely(&(have_pclmulqdq)) &&	\
-            crypto_simd_usable()) {					\
+            likely(irq_fpu_usable())) {					\
                 const void *consts_ptr;				\
                                                                         \
                 consts_ptr = (consts).fold_across_128_bits_consts;	\
                 kernel_fpu_begin();					\
                 crc = static_call(prefix##_pclmul)((crc), (p), (len),	\
diff --git a/lib/crc/x86/crc32.h b/lib/crc/x86/crc32.h
index cea2c96d08d09..2c4a5976654ad 100644
--- a/lib/crc/x86/crc32.h
+++ b/lib/crc/x86/crc32.h
@@ -42,11 +42,11 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 
         if (!static_branch_likely(&have_crc32))
                 return crc32c_base(crc, p, len);
 
         if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN &&
-            static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
+            static_branch_likely(&have_pclmulqdq) && likely(irq_fpu_usable())) {
                 /*
                  * Long length, the vector registers are usable, and the CPU is
                  * 64-bit and supports both CRC32 and PCLMULQDQ instructions.
                  * It is worthwhile to divide the data into multiple streams,
                  * CRC them independently, and combine them using PCLMULQDQ.