From: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
A KASAN tag mismatch, possibly causing a kernel panic, can be observed on systems with tag-based KASAN enabled and with multiple NUMA nodes. It was reported on arm64 and reproduced on x86. It can be explained in the following points:
1. There can be more than one virtual memory chunk.
2. Chunk's base address has a tag.
3. The base address points at the first chunk and thus inherits the tag of the first chunk.
4. The subsequent chunks will be accessed with the tag from the first chunk.
5. Thus, the subsequent chunks need to have their tag set to match that of the first chunk.

A simplified user-space sketch of this scenario follows.
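The sketch is an illustration only: the shadow_tag array, the tag values, and the helper names below are simplified assumptions, not the kernel's KASAN implementation. It models how chunks reached through the first chunk's tagged base address end up being accessed with a tag that does not match their own shadow tag.

#include <stdint.h>
#include <stdio.h>

#define NR_CHUNKS 3

/* One "shadow" tag per chunk, as if each chunk were unpoisoned separately. */
static uint8_t shadow_tag[NR_CHUNKS];

/* Encode a tag in the top byte of an address, like SW_TAGS pointer tags. */
static uint64_t set_tag(uint64_t addr, uint8_t tag)
{
	return (addr & ~(0xffULL << 56)) | ((uint64_t)tag << 56);
}

static uint8_t get_tag(uint64_t addr)
{
	return addr >> 56;
}

/* A check fails when the pointer tag differs from the chunk's shadow tag. */
static void check_access(uint64_t addr, int chunk)
{
	if (get_tag(addr) != shadow_tag[chunk])
		printf("chunk %d: tag mismatch (ptr tag %#x vs shadow tag %#x)\n",
		       chunk, get_tag(addr), shadow_tag[chunk]);
}

int main(void)
{
	uint64_t base = 0x1000;      /* pretend chunk 0 starts here */
	uint64_t chunk_size = 0x100; /* pretend chunk stride */
	int i;

	/* Each chunk's shadow gets its own tag, as if unpoisoned separately. */
	for (i = 0; i < NR_CHUNKS; i++)
		shadow_tag[i] = 0xf0 + i;

	/* The caller only keeps the tagged base address of the first chunk. */
	base = set_tag(base, shadow_tag[0]);

	/* Subsequent chunks are reached via offsets from that base, so they
	 * are accessed with chunk 0's tag and mismatch their own shadow tag. */
	for (i = 0; i < NR_CHUNKS; i++)
		check_access(base + (uint64_t)i * chunk_size, i);

	return 0;
}

Built with a plain C compiler, this prints a mismatch for every chunk except the first, which is the situation the actual fix addresses by unpoisoning all chunks with the tag of the first chunk's address.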
Refactor the code by reusing __kasan_unpoison_vmalloc() in a new helper, in preparation for the actual fix.
Changelog v1 (after splitting off from the KASAN series):
- Rewrite first paragraph of the patch message to point at the user
  impact of the issue.
- Move helper to common.c so it can be compiled in all KASAN modes.
Fixes: 1d96320f8d53 ("kasan, vmalloc: add vmalloc tagging for SW_TAGS")
Cc: stable@vger.kernel.org # 6.1+
Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
---
Changelog v2:
- Redo the whole patch so it's an actual refactor.
 include/linux/kasan.h | 16 +++++++++++++---
 mm/kasan/common.c     | 17 +++++++++++++++++
 mm/kasan/hw_tags.c    | 15 +++++++++++++--
 mm/kasan/shadow.c     | 16 ++++++++++++++--
 mm/vmalloc.c          |  4 +---
 5 files changed, 58 insertions(+), 10 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d12e1a5f5a9a..4a3d3dba9764 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -595,14 +595,14 @@ static inline void kasan_release_vmalloc(unsigned long start,
 
 #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
-void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
-			       kasan_vmalloc_flags_t flags);
+void *__kasan_random_unpoison_vmalloc(const void *start, unsigned long size,
+				      kasan_vmalloc_flags_t flags);
 static __always_inline void *kasan_unpoison_vmalloc(const void *start,
 						    unsigned long size,
 						    kasan_vmalloc_flags_t flags)
 {
 	if (kasan_enabled())
-		return __kasan_unpoison_vmalloc(start, size, flags);
+		return __kasan_random_unpoison_vmalloc(start, size, flags);
 	return (void *)start;
 }
 
@@ -614,6 +614,11 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
 		__kasan_poison_vmalloc(start, size);
 }
 
+void *__kasan_unpoison_vmap_areas(void *addr, unsigned long size,
+				  kasan_vmalloc_flags_t flags, u8 tag);
+void kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+			       kasan_vmalloc_flags_t flags);
+
 #else /* CONFIG_KASAN_VMALLOC */
 
 static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -638,6 +643,11 @@ static inline void *kasan_unpoison_vmalloc(const void *start,
 static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
 { }
 
+static __always_inline void
+kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+			  kasan_vmalloc_flags_t flags)
+{ }
+
 #endif /* CONFIG_KASAN_VMALLOC */
 
 #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d4c14359feaf..7884ea7d13f9 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -28,6 +28,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <linux/vmalloc.h>
 
 #include "kasan.h"
 #include "../slab.h"
@@ -582,3 +583,19 @@ bool __kasan_check_byte(const void *address, unsigned long ip)
 	}
 	return true;
 }
+
+#ifdef CONFIG_KASAN_VMALLOC
+void kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+			       kasan_vmalloc_flags_t flags)
+{
+	unsigned long size;
+	void *addr;
+	int area;
+
+	for (area = 0 ; area < nr_vms ; area++) {
+		size = vms[area]->size;
+		addr = vms[area]->addr;
+		vms[area]->addr = __kasan_unpoison_vmap_areas(addr, size, flags);
+	}
+}
+#endif
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 1c373cc4b3fa..4b7936a2bd6f 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -316,8 +316,8 @@ static void init_vmalloc_pages(const void *start, unsigned long size)
 	}
 }
 
-void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
-			       kasan_vmalloc_flags_t flags)
+static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+				      kasan_vmalloc_flags_t flags)
 {
 	u8 tag;
 	unsigned long redzone_start, redzone_size;
@@ -387,6 +387,12 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 	return (void *)start;
 }
 
+void *__kasan_random_unpoison_vmalloc(const void *start, unsigned long size,
+				      kasan_vmalloc_flags_t flags)
+{
+	return __kasan_unpoison_vmalloc(start, size, flags);
+}
+
 void __kasan_poison_vmalloc(const void *start, unsigned long size)
 {
 	/*
@@ -396,6 +402,11 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
 	 */
 }
 
+void *__kasan_unpoison_vmap_areas(void *addr, unsigned long size,
+				  kasan_vmalloc_flags_t flags, u8 tag)
+{
+	return __kasan_unpoison_vmalloc(addr, size, flags);
+}
 #endif
 void kasan_enable_hw_tags(void)
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 5d2a876035d6..0a8d8bf6e9cf 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -624,8 +624,8 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 	}
 }
 
-void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
-			       kasan_vmalloc_flags_t flags)
+static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+				      kasan_vmalloc_flags_t flags)
 {
 	/*
 	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
@@ -653,6 +653,18 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 	return (void *)start;
 }
 
+void *__kasan_random_unpoison_vmalloc(const void *start, unsigned long size,
+				      kasan_vmalloc_flags_t flags)
+{
+	return __kasan_unpoison_vmalloc(start, size, flags);
+}
+
+void *__kasan_unpoison_vmap_areas(void *addr, unsigned long size,
+				  kasan_vmalloc_flags_t flags, u8 tag)
+{
+	return __kasan_unpoison_vmalloc(addr, size, flags);
+}
+
 /*
  * Poison the shadow for a vmalloc region. Called as part of the
  * freeing process at the time the region is freed.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 798b2ed21e46..32ecdb8cd4b8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4870,9 +4870,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	 * With hardware tag-based KASAN, marking is skipped for
 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
 	 */
-	for (area = 0; area < nr_vms; area++)
-		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
-				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
+	kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL);
 
 	kfree(vas);
 	return vms;