Hello Sasha,
On 30.12.20 14:02, Sasha Levin wrote:
From: Linus Walleij <linus.walleij@linaro.org>
[ Upstream commit d6d51a96c7d63b7450860a3037f2d62388286a52 ]
Functions like memset()/memmove()/memcpy() do a lot of memory accesses.
If a bad pointer is passed to one of these functions, it is important to catch this. Compiler instrumentation cannot do this since these functions are written in assembly.
KASan replaces these memory functions with instrumented variants.
Unless someone actually wants this, I suggest dropping it.
It's a prerequisite patch for KASan support on ARM32, which is new in v5.11-rc1. Backporting it on its own doesn't add any value IMO.
Cheers,
Ahmad
The original functions are declared as weak symbols so that the strong definitions in mm/kasan/kasan.c can replace them.
The original functions have aliases with a '__' prefix in their name, so we can call the non-instrumented variant if needed.
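To make the interplay concrete, here is a simplified C sketch of the weak/alias arrangement (illustrative only: in the kernel the weak half is the ARM assembly in the diff below, the checking half lives in mm/kasan in a separate translation unit, and kasan_check_read()/kasan_check_write() stand in for KASan's real range checks):

#include <stddef.h>

/* The real, never-instrumented copy, as the assembly provides it;
 * memcpy is a weak alias for it, so a strong definition elsewhere
 * wins at link time. */
void *__memcpy(void *dest, const void *src, size_t n)
{
	char *d = dest;
	const char *s = src;

	while (n--)
		*d++ = *s++;
	return dest;
}
void *memcpy(void *dest, const void *src, size_t n)
	__attribute__((weak, alias("__memcpy")));

/* In another translation unit, KASan's strong definition replaces
 * the weak one: check both ranges, then delegate to the real copy. */
void *memcpy(void *dest, const void *src, size_t n)
{
	kasan_check_read(src, n);   /* report if any source byte is poisoned */
	kasan_check_write(dest, n); /* report if any destination byte is poisoned */
	return __memcpy(dest, src, n);
}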
We must use __memcpy()/__memset() in place of memcpy()/memset() when we copy .data to RAM and when we clear .bss, because kasan_early_init cannot be called before the initialization of .data and .bss.
For the kernel compression and EFI libstub's custom string libraries we need a special quirk: even if these are built without KASan enabled, they rely on the global headers for their custom string libraries, which means that e.g. memcpy() will be defined to __memcpy() and we get link failures. Since these implementations are written in C rather than assembly, we use e.g. __alias(memcpy) to redirect any users back to the local implementation.
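(For reference, the __alias() helper used for this is the kernel's wrapper around the compiler's alias attribute; in include/linux/compiler_attributes.h it is defined as:

#define __alias(symbol)	__attribute__((__alias__(#symbol)))

so __alias(memcpy) turns the local __memcpy() declaration into another name for the local memcpy().)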
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: kasan-dev@googlegroups.com
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Ard Biesheuvel <ardb@kernel.org> # QEMU/KVM/mach-virt/LPAE/8G
Tested-by: Florian Fainelli <f.fainelli@gmail.com> # Brahma SoCs
Tested-by: Ahmad Fatoum <a.fatoum@pengutronix.de> # i.MX6Q
Reported-by: Russell King - ARM Linux <rmk+kernel@armlinux.org.uk>
Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
Signed-off-by: Abbott Liu <liuwenliang@huawei.com>
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/arm/boot/compressed/string.c | 19 +++++++++++++++++++
 arch/arm/include/asm/string.h     | 26 ++++++++++++++++++++++++++
 arch/arm/kernel/head-common.S     |  4 ++--
 arch/arm/lib/memcpy.S             |  3 +++
 arch/arm/lib/memmove.S            |  5 ++++-
 arch/arm/lib/memset.S             |  3 +++
 6 files changed, 57 insertions(+), 3 deletions(-)
diff --git a/arch/arm/boot/compressed/string.c b/arch/arm/boot/compressed/string.c
index ade5079bebbf9..8c0fa276d9946 100644
--- a/arch/arm/boot/compressed/string.c
+++ b/arch/arm/boot/compressed/string.c
@@ -7,6 +7,25 @@
 
 #include <linux/string.h>
 
+/*
+ * The decompressor is built without KASan but uses the same redirects as the
+ * rest of the kernel when CONFIG_KASAN is enabled, defining e.g. memcpy()
+ * to __memcpy() but since we are not linking with the main kernel string
+ * library in the decompressor, that will lead to link failures.
+ *
+ * Undefine KASan's versions, define the wrapped functions and alias them to
+ * the right names so that when e.g. __memcpy() appear in the code, it will
+ * still be linked to this local version of memcpy().
+ */
+#ifdef CONFIG_KASAN
+#undef memcpy
+#undef memmove
+#undef memset
+void *__memcpy(void *__dest, __const void *__src, size_t __n) __alias(memcpy);
+void *__memmove(void *__dest, __const void *__src, size_t count) __alias(memmove);
+void *__memset(void *s, int c, size_t count) __alias(memset);
+#endif
+
 void *memcpy(void *__dest, __const void *__src, size_t __n)
 {
 	int i = 0;
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index 111a1d8a41ddf..6c607c68f3ad7 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -5,6 +5,9 @@
 /*
  * We don't do inline string functions, since the
  * optimised inline asm versions are not small.
+ *
+ * The __underscore versions of some functions are for KASan to be able
+ * to replace them with instrumented versions.
  */
 
 #define __HAVE_ARCH_STRRCHR
@@ -15,15 +18,18 @@ extern char * strchr(const char * s, int c);
 
 #define __HAVE_ARCH_MEMCPY
 extern void * memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *dest, const void *src, __kernel_size_t n);
 
 #define __HAVE_ARCH_MEMMOVE
 extern void * memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *dest, const void *src, __kernel_size_t n);
 
 #define __HAVE_ARCH_MEMCHR
 extern void * memchr(const void *, int, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMSET
 extern void * memset(void *, int, __kernel_size_t);
+extern void *__memset(void *s, int c, __kernel_size_t n);
 
 #define __HAVE_ARCH_MEMSET32
 extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
@@ -39,4 +45,24 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
 	return __memset64(p, v, n * 8, v >> 32);
 }
 
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * must use non-instrumented versions of the mem*
+ * functions named __memcpy() etc. All such kernel code has
+ * been tagged with KASAN_SANITIZE_file.o = n, which means
+ * that the address sanitization argument isn't passed to the
+ * compiler, and __SANITIZE_ADDRESS__ is not set. As a result
+ * these defines kick in.
+ */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
+
 #endif
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 4a3982812a401..6840c7c60a858 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -95,7 +95,7 @@ __mmap_switched:
 THUMB(	ldmia	r4!, {r0, r1, r2, r3} )
 THUMB(	mov	sp, r3 )
 	sub	r2, r2, r1
-	bl	memcpy				@ copy .data to RAM
+	bl	__memcpy			@ copy .data to RAM
 #endif
 
    ARM(	ldmia	r4!, {r0, r1, sp} )
@@ -103,7 +103,7 @@ __mmap_switched:
 THUMB(	mov	sp, r3 )
 	sub	r2, r1, r0
 	mov	r1, #0
-	bl	memset				@ clear .bss
+	bl	__memset			@ clear .bss
 
 	ldmia	r4, {r0, r1, r2, r3}
 	str	r9, [r0]			@ Save processor ID
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 09a333153dc66..ad4625d16e117 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -58,6 +58,8 @@
 /*
  * Prototype: void *memcpy(void *dest, const void *src, size_t n);
  */
 
+.weak memcpy
+ENTRY(__memcpy)
 ENTRY(mmiocpy)
 ENTRY(memcpy)
@@ -65,3 +67,4 @@ ENTRY(memcpy)
 
 ENDPROC(memcpy)
 ENDPROC(mmiocpy)
+ENDPROC(__memcpy)
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index b50e5770fb44d..fd123ea5a5a4a 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -24,12 +24,14 @@
  * occurring in the opposite direction.
  */
 
+.weak memmove
+ENTRY(__memmove)
 ENTRY(memmove)
 	UNWIND(	.fnstart			)
 
 		subs	ip, r0, r1
 		cmphi	r2, ip
-		bls	memcpy
+		bls	__memcpy
 
 		stmfd	sp!, {r0, r4, lr}
 	UNWIND(	.fnend				)
@@ -222,3 +224,4 @@ ENTRY(memmove)
 18:		backward_copy_shift	push=24	pull=8
 
 ENDPROC(memmove)
+ENDPROC(__memmove)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 6ca4535c47fb6..0e7ff0423f50b 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -13,6 +13,8 @@
 	.text
 	.align	5
 
+.weak memset
+ENTRY(__memset)
 ENTRY(mmioset)
 ENTRY(memset)
 UNWIND( .fnstart         )
@@ -132,6 +134,7 @@ UNWIND( .fnstart )
 UNWIND( .fnend   )
 ENDPROC(memset)
 ENDPROC(mmioset)
+ENDPROC(__memset)
 
 ENTRY(__memset32)
 UNWIND( .fnstart         )
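As background for the asm/string.h comment above (this is existing kernel build convention, not part of the patch): a file is excluded from instrumentation per object in its Makefile, along these lines:

# e.g. in mm/Makefile: build slub.o without ASan instrumentation, so
# __SANITIZE_ADDRESS__ is unset and the __memcpy()/__memset()
# redirects in <asm/string.h> apply to this file
KASAN_SANITIZE_slub.o := n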