When syncing the VMALLOC area to other CPUs, make sure to also sync
the KASAN shadow memory for the VMALLOC area, so that we don't get
stale entries for the shadow memory in the top level PGD.
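For reference, kasan_mem_to_shadow() translates a kernel address into
its shadow address by shifting it down and adding an offset, which is
why the shadow of the VMALLOC area occupies its own, separate range of
top-level PGD entries that also needs copying. A minimal sketch of the
generic mapping follows; the offset value below is an illustrative
placeholder, not ARM's real KASAN_SHADOW_OFFSET:

	#include <stdint.h>

	/*
	 * Placeholder constants: generic KASAN uses a scale shift of 3
	 * (one shadow byte covers 8 bytes of memory); the offset is
	 * platform-specific and the value here is made up.
	 */
	#define KASAN_SHADOW_SCALE_SHIFT 3
	#define KASAN_SHADOW_OFFSET      0x1f000000UL

	/*
	 * Mirrors the kernel's kasan_mem_to_shadow(): shift the address
	 * down by the scale, then relocate it into the shadow region.
	 */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)(((uintptr_t)addr >> KASAN_SHADOW_SCALE_SHIFT)
				+ KASAN_SHADOW_OFFSET);
	}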
Cc: stable@vger.kernel.org
Fixes: 565cbaad83d8 ("ARM: 9202/1: kasan: support CONFIG_KASAN_VMALLOC")
Link: https://lore.kernel.org/linux-arm-kernel/a1a1d062-f3a2-4d05-9836-3b098de9db6...
Reported-by: Clement LE GOFFIC <clement.legoffic@foss.st.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
 arch/arm/mm/ioremap.c | 7 +++++++
 1 file changed, 7 insertions(+)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 794cfea9f9d4..449f1f04814c 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -23,6 +23,7 @@
  */
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/io.h>
@@ -125,6 +126,12 @@ void __check_vmalloc_seq(struct mm_struct *mm)
 		       pgd_offset_k(VMALLOC_START),
 		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
 					pgd_index(VMALLOC_START)));
+		if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+			memcpy(pgd_offset(mm, (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START)),
+			       pgd_offset_k((unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START)),
+			       sizeof(pgd_t) * (pgd_index((unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END)) -
+						pgd_index((unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START))));
+		}
 		/*
 		 * Use a store-release so that other CPUs that observe the
 		 * counter's new value are guaranteed to see the results of the