This is a patchset for backporting KASAN to LSK 4.1. Any comments are appreciated.
Hi Linus,
Could you give some suggestions for testing?
Thanks,
Alex
From: Ard Biesheuvel ard.biesheuvel@linaro.org
For more control over which functions are called with the MMU off or with the UEFI 1:1 mapping active, annotate some assembler routines as position independent. This is done by introducing ENDPIPROC(), which replaces the ENDPROC() declaration of those routines.
Signed-off-by: Ard Biesheuvel ard.biesheuvel@linaro.org Signed-off-by: Catalin Marinas catalin.marinas@arm.com (cherry picked from commit 207918461eb0aca720fddec5da79bc71c133b9f1) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/arm64/include/asm/assembler.h | 11 +++++++++++ arch/arm64/lib/memchr.S | 2 +- arch/arm64/lib/memcmp.S | 2 +- arch/arm64/lib/memcpy.S | 2 +- arch/arm64/lib/memmove.S | 2 +- arch/arm64/lib/memset.S | 2 +- arch/arm64/lib/strcmp.S | 2 +- arch/arm64/lib/strlen.S | 2 +- arch/arm64/lib/strncmp.S | 2 +- arch/arm64/mm/cache.S | 10 +++++----- 10 files changed, 24 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 144b64a..3579988 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -207,4 +207,15 @@ lr .req x30 // link register str \src, [\tmp, :lo12:\sym] .endm
+/* + * Annotate a function as position independent, i.e., safe to be called before + * the kernel virtual mapping is activated. + */ +#define ENDPIPROC(x) \ + .globl __pi_##x; \ + .type __pi_##x, %function; \ + .set __pi_##x, x; \ + .size __pi_##x, . - x; \ + ENDPROC(x) + #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S index 8636b75..4444c1d 100644 --- a/arch/arm64/lib/memchr.S +++ b/arch/arm64/lib/memchr.S @@ -41,4 +41,4 @@ ENTRY(memchr) ret 2: mov x0, #0 ret -ENDPROC(memchr) +ENDPIPROC(memchr) diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S index 6ea0776..ffbdec0 100644 --- a/arch/arm64/lib/memcmp.S +++ b/arch/arm64/lib/memcmp.S @@ -255,4 +255,4 @@ CPU_LE( rev data2, data2 ) .Lret0: mov result, #0 ret -ENDPROC(memcmp) +ENDPIPROC(memcmp) diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index 8a9a96d..5d6ddb7 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -198,4 +198,4 @@ ENTRY(memcpy) tst count, #0x3f b.ne .Ltail63 ret -ENDPROC(memcpy) +ENDPIPROC(memcpy) diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S index 57b19ea2..68e2f20 100644 --- a/arch/arm64/lib/memmove.S +++ b/arch/arm64/lib/memmove.S @@ -194,4 +194,4 @@ ENTRY(memmove) tst count, #0x3f b.ne .Ltail63 ret -ENDPROC(memmove) +ENDPIPROC(memmove) diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S index 7c72dfd3..29f405f 100644 --- a/arch/arm64/lib/memset.S +++ b/arch/arm64/lib/memset.S @@ -213,4 +213,4 @@ ENTRY(memset) ands count, count, zva_bits_x b.ne .Ltail_maybe_long ret -ENDPROC(memset) +ENDPIPROC(memset) diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S index 42f828b..471fe61 100644 --- a/arch/arm64/lib/strcmp.S +++ b/arch/arm64/lib/strcmp.S @@ -231,4 +231,4 @@ CPU_BE( orr syndrome, diff, has_nul ) lsr data1, data1, #56 sub result, data1, data2, lsr #56 ret -ENDPROC(strcmp) +ENDPIPROC(strcmp) diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S index 987b68b..55ccc8e 100644 --- a/arch/arm64/lib/strlen.S +++ b/arch/arm64/lib/strlen.S @@ -123,4 +123,4 @@ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */ csinv data1, data1, xzr, le csel data2, data2, data2a, le b .Lrealigned -ENDPROC(strlen) +ENDPIPROC(strlen) diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S index 0224cf5..e267044 100644 --- a/arch/arm64/lib/strncmp.S +++ b/arch/arm64/lib/strncmp.S @@ -307,4 +307,4 @@ CPU_BE( orr syndrome, diff, has_nul ) .Lret0: mov result, #0 ret -ENDPROC(strncmp) +ENDPIPROC(strncmp) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 2560e1e..4c7150b 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -171,7 +171,7 @@ ENTRY(__flush_dcache_area) b.lo 1b dsb sy ret -ENDPROC(__flush_dcache_area) +ENDPIPROC(__flush_dcache_area)
/* * __inval_cache_range(start, end) @@ -204,7 +204,7 @@ __dma_inv_range: b.lo 2b dsb sy ret -ENDPROC(__inval_cache_range) +ENDPIPROC(__inval_cache_range) ENDPROC(__dma_inv_range)
/* @@ -239,7 +239,7 @@ ENTRY(__dma_flush_range) b.lo 1b dsb sy ret -ENDPROC(__dma_flush_range) +ENDPIPROC(__dma_flush_range)
/* * __dma_map_area(start, size, dir) @@ -252,7 +252,7 @@ ENTRY(__dma_map_area) cmp w2, #DMA_FROM_DEVICE b.eq __dma_inv_range b __dma_clean_range -ENDPROC(__dma_map_area) +ENDPIPROC(__dma_map_area)
/* * __dma_unmap_area(start, size, dir) @@ -265,4 +265,4 @@ ENTRY(__dma_unmap_area) cmp w2, #DMA_TO_DEVICE b.ne __dma_inv_range ret -ENDPROC(__dma_unmap_area) +ENDPIPROC(__dma_unmap_area)
From: Andrey Ryabinin ryabinin.a.a@gmail.com
This will be used by KASAN later.
Signed-off-by: Andrey Ryabinin ryabinin.a.a@gmail.com Acked-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Catalin Marinas catalin.marinas@arm.com (cherry picked from commit fd2203dd3556f6553231fa026060793e67a25ce6) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/arm64/include/asm/pgalloc.h | 1 + arch/arm64/mm/pgd.c | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 7642056..c150539 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -27,6 +27,7 @@ #define check_pgt_cache() do { } while (0)
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) +#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
#if CONFIG_PGTABLE_LEVELS > 2
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c index 71ca104..cb3ba1b 100644 --- a/arch/arm64/mm/pgd.c +++ b/arch/arm64/mm/pgd.c @@ -28,8 +28,6 @@
#include "mm.h"
-#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) - static struct kmem_cache *pgd_cache;
pgd_t *pgd_alloc(struct mm_struct *mm)
From: Feng Kan fkan@apm.com
This converts memcpy.S to use the copy template file. The copy template file was originally based on memcpy.S.
Signed-off-by: Feng Kan fkan@apm.com Signed-off-by: Balamurugan Shanmugam bshanmugam@apm.com [catalin.marinas@arm.com: removed tmp3(w) .req statements as they are not used] Signed-off-by: Catalin Marinas catalin.marinas@arm.com
(cherry picked from commit e5c88e3f2fb35dca5f3e46d65095bf5d008595b7) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/arm64/lib/copy_template.S | 193 +++++++++++++++++++++++++++++++++++++++++ arch/arm64/lib/memcpy.S | 179 ++++++-------------------------------- 2 files changed, 219 insertions(+), 153 deletions(-) create mode 100644 arch/arm64/lib/copy_template.S
diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S new file mode 100644 index 0000000..410fbdb --- /dev/null +++ b/arch/arm64/lib/copy_template.S @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * Copyright (C) 2013 Linaro. + * + * This code is based on glibc cortex strings work originally authored by Linaro + * and re-licensed under GPLv2 for the Linux kernel. The original code can + * be found @ + * + * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ + * files/head:/src/aarch64/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + + +/* + * Copy a buffer from src to dest (alignment handled by the hardware) + * + * Parameters: + * x0 - dest + * x1 - src + * x2 - n + * Returns: + * x0 - dest + */ +dstin .req x0 +src .req x1 +count .req x2 +tmp1 .req x3 +tmp1w .req w3 +tmp2 .req x4 +tmp2w .req w4 +dst .req x6 + +A_l .req x7 +A_h .req x8 +B_l .req x9 +B_h .req x10 +C_l .req x11 +C_h .req x12 +D_l .req x13 +D_h .req x14 + + mov dst, dstin + cmp count, #16 + /*When memory length is less than 16, the accessed are not aligned.*/ + b.lo .Ltiny15 + + neg tmp2, src + ands tmp2, tmp2, #15/* Bytes to reach alignment. */ + b.eq .LSrcAligned + sub count, count, tmp2 + /* + * Copy the leading memory data from src to dst in an increasing + * address order.By this way,the risk of overwritting the source + * memory data is eliminated when the distance between src and + * dst is less than 16. The memory accesses here are alignment. + */ + tbz tmp2, #0, 1f + ldrb1 tmp1w, src, #1 + strb1 tmp1w, dst, #1 +1: + tbz tmp2, #1, 2f + ldrh1 tmp1w, src, #2 + strh1 tmp1w, dst, #2 +2: + tbz tmp2, #2, 3f + ldr1 tmp1w, src, #4 + str1 tmp1w, dst, #4 +3: + tbz tmp2, #3, .LSrcAligned + ldr1 tmp1, src, #8 + str1 tmp1, dst, #8 + +.LSrcAligned: + cmp count, #64 + b.ge .Lcpy_over64 + /* + * Deal with small copies quickly by dropping straight into the + * exit block. + */ +.Ltail63: + /* + * Copy up to 48 bytes of data. At this point we only need the + * bottom 6 bits of count to be accurate. + */ + ands tmp1, count, #0x30 + b.eq .Ltiny15 + cmp tmp1w, #0x20 + b.eq 1f + b.lt 2f + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 +1: + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 +2: + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 +.Ltiny15: + /* + * Prefer to break one ldp/stp into several load/store to access + * memory in an increasing address order,rather than to load/store 16 + * bytes from (src-16) to (dst-16) and to backward the src to aligned + * address,which way is used in original cortex memcpy. If keeping + * the original memcpy process here, memmove need to satisfy the + * precondition that src address is at least 16 bytes bigger than dst + * address,otherwise some source data will be overwritten when memove + * call memcpy directly. To make memmove simpler and decouple the + * memcpy's dependency on memmove, withdrew the original process. 
+ */ + tbz count, #3, 1f + ldr1 tmp1, src, #8 + str1 tmp1, dst, #8 +1: + tbz count, #2, 2f + ldr1 tmp1w, src, #4 + str1 tmp1w, dst, #4 +2: + tbz count, #1, 3f + ldrh1 tmp1w, src, #2 + strh1 tmp1w, dst, #2 +3: + tbz count, #0, .Lexitfunc + ldrb1 tmp1w, src, #1 + strb1 tmp1w, dst, #1 + + b .Lexitfunc + +.Lcpy_over64: + subs count, count, #128 + b.ge .Lcpy_body_large + /* + * Less than 128 bytes to copy, so handle 64 here and then jump + * to the tail. + */ + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 + ldp1 B_l, B_h, src, #16 + ldp1 C_l, C_h, src, #16 + stp1 B_l, B_h, dst, #16 + stp1 C_l, C_h, dst, #16 + ldp1 D_l, D_h, src, #16 + stp1 D_l, D_h, dst, #16 + + tst count, #0x3f + b.ne .Ltail63 + b .Lexitfunc + + /* + * Critical loop. Start at a new cache line boundary. Assuming + * 64 bytes per line this ensures the entire loop is in one line. + */ + .p2align L1_CACHE_SHIFT +.Lcpy_body_large: + /* pre-get 64 bytes data. */ + ldp1 A_l, A_h, src, #16 + ldp1 B_l, B_h, src, #16 + ldp1 C_l, C_h, src, #16 + ldp1 D_l, D_h, src, #16 +1: + /* + * interlace the load of next 64 bytes data block with store of the last + * loaded 64 bytes data. + */ + stp1 A_l, A_h, dst, #16 + ldp1 A_l, A_h, src, #16 + stp1 B_l, B_h, dst, #16 + ldp1 B_l, B_h, src, #16 + stp1 C_l, C_h, dst, #16 + ldp1 C_l, C_h, src, #16 + stp1 D_l, D_h, dst, #16 + ldp1 D_l, D_h, src, #16 + subs count, count, #64 + b.ge 1b + stp1 A_l, A_h, dst, #16 + stp1 B_l, B_h, dst, #16 + stp1 C_l, C_h, dst, #16 + stp1 D_l, D_h, dst, #16 + + tst count, #0x3f + b.ne .Ltail63 +.Lexitfunc: diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index 5d6ddb7..36a6a62 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -36,166 +36,39 @@ * Returns: * x0 - dest */ -dstin .req x0 -src .req x1 -count .req x2 -tmp1 .req x3 -tmp1w .req w3 -tmp2 .req x4 -tmp2w .req w4 -tmp3 .req x5 -tmp3w .req w5 -dst .req x6 + .macro ldrb1 ptr, regB, val + ldrb \ptr, [\regB], \val + .endm
-A_l .req x7 -A_h .req x8 -B_l .req x9 -B_h .req x10 -C_l .req x11 -C_h .req x12 -D_l .req x13 -D_h .req x14 + .macro strb1 ptr, regB, val + strb \ptr, [\regB], \val + .endm
-ENTRY(memcpy) - mov dst, dstin - cmp count, #16 - /*When memory length is less than 16, the accessed are not aligned.*/ - b.lo .Ltiny15 + .macro ldrh1 ptr, regB, val + ldrh \ptr, [\regB], \val + .endm
- neg tmp2, src - ands tmp2, tmp2, #15/* Bytes to reach alignment. */ - b.eq .LSrcAligned - sub count, count, tmp2 - /* - * Copy the leading memory data from src to dst in an increasing - * address order.By this way,the risk of overwritting the source - * memory data is eliminated when the distance between src and - * dst is less than 16. The memory accesses here are alignment. - */ - tbz tmp2, #0, 1f - ldrb tmp1w, [src], #1 - strb tmp1w, [dst], #1 -1: - tbz tmp2, #1, 2f - ldrh tmp1w, [src], #2 - strh tmp1w, [dst], #2 -2: - tbz tmp2, #2, 3f - ldr tmp1w, [src], #4 - str tmp1w, [dst], #4 -3: - tbz tmp2, #3, .LSrcAligned - ldr tmp1, [src],#8 - str tmp1, [dst],#8 + .macro strh1 ptr, regB, val + strh \ptr, [\regB], \val + .endm
-.LSrcAligned: - cmp count, #64 - b.ge .Lcpy_over64 - /* - * Deal with small copies quickly by dropping straight into the - * exit block. - */ -.Ltail63: - /* - * Copy up to 48 bytes of data. At this point we only need the - * bottom 6 bits of count to be accurate. - */ - ands tmp1, count, #0x30 - b.eq .Ltiny15 - cmp tmp1w, #0x20 - b.eq 1f - b.lt 2f - ldp A_l, A_h, [src], #16 - stp A_l, A_h, [dst], #16 -1: - ldp A_l, A_h, [src], #16 - stp A_l, A_h, [dst], #16 -2: - ldp A_l, A_h, [src], #16 - stp A_l, A_h, [dst], #16 -.Ltiny15: - /* - * Prefer to break one ldp/stp into several load/store to access - * memory in an increasing address order,rather than to load/store 16 - * bytes from (src-16) to (dst-16) and to backward the src to aligned - * address,which way is used in original cortex memcpy. If keeping - * the original memcpy process here, memmove need to satisfy the - * precondition that src address is at least 16 bytes bigger than dst - * address,otherwise some source data will be overwritten when memove - * call memcpy directly. To make memmove simpler and decouple the - * memcpy's dependency on memmove, withdrew the original process. - */ - tbz count, #3, 1f - ldr tmp1, [src], #8 - str tmp1, [dst], #8 -1: - tbz count, #2, 2f - ldr tmp1w, [src], #4 - str tmp1w, [dst], #4 -2: - tbz count, #1, 3f - ldrh tmp1w, [src], #2 - strh tmp1w, [dst], #2 -3: - tbz count, #0, .Lexitfunc - ldrb tmp1w, [src] - strb tmp1w, [dst] + .macro ldr1 ptr, regB, val + ldr \ptr, [\regB], \val + .endm
-.Lexitfunc: - ret + .macro str1 ptr, regB, val + str \ptr, [\regB], \val + .endm
-.Lcpy_over64: - subs count, count, #128 - b.ge .Lcpy_body_large - /* - * Less than 128 bytes to copy, so handle 64 here and then jump - * to the tail. - */ - ldp A_l, A_h, [src],#16 - stp A_l, A_h, [dst],#16 - ldp B_l, B_h, [src],#16 - ldp C_l, C_h, [src],#16 - stp B_l, B_h, [dst],#16 - stp C_l, C_h, [dst],#16 - ldp D_l, D_h, [src],#16 - stp D_l, D_h, [dst],#16 + .macro ldp1 ptr, regB, regC, val + ldp \ptr, \regB, [\regC], \val + .endm
- tst count, #0x3f - b.ne .Ltail63 - ret + .macro stp1 ptr, regB, regC, val + stp \ptr, \regB, [\regC], \val + .endm
- /* - * Critical loop. Start at a new cache line boundary. Assuming - * 64 bytes per line this ensures the entire loop is in one line. - */ - .p2align L1_CACHE_SHIFT -.Lcpy_body_large: - /* pre-get 64 bytes data. */ - ldp A_l, A_h, [src],#16 - ldp B_l, B_h, [src],#16 - ldp C_l, C_h, [src],#16 - ldp D_l, D_h, [src],#16 -1: - /* - * interlace the load of next 64 bytes data block with store of the last - * loaded 64 bytes data. - */ - stp A_l, A_h, [dst],#16 - ldp A_l, A_h, [src],#16 - stp B_l, B_h, [dst],#16 - ldp B_l, B_h, [src],#16 - stp C_l, C_h, [dst],#16 - ldp C_l, C_h, [src],#16 - stp D_l, D_h, [dst],#16 - ldp D_l, D_h, [src],#16 - subs count, count, #64 - b.ge 1b - stp A_l, A_h, [dst],#16 - stp B_l, B_h, [dst],#16 - stp C_l, C_h, [dst],#16 - stp D_l, D_h, [dst],#16 - - tst count, #0x3f - b.ne .Ltail63 +ENTRY(memcpy) +#include "copy_template.S" ret ENDPIPROC(memcpy)
From: Mark Salter msalter@redhat.com
When booting an arm64 kernel w/initrd using UEFI/grub, use of mem= will likely cut off part or all of the initrd. This leaves it outside the kernel linear map which leads to failure when unpacking. The x86 code has a similar need to relocate an initrd outside of mapped memory in some cases.
The current x86 code uses early_memremap() to copy the original initrd from unmapped to mapped RAM. This patchset creates a generic copy_from_early_mem() utility based on that x86 code and has arm64 and x86 share it in their respective initrd relocation code.
This patch (of 3):
In some early boot circumstances, it may be necessary to copy from RAM outside the kernel linear mapping to mapped RAM. The need to relocate an initrd is one example in the x86 code. This patch creates a helper function based on current x86 code.
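As a rough, hedged illustration of the approach (a user-space sketch, not the patch's code): only a bounded window of the source can be made accessible at a time, so the copy walks the source in window-sized chunks. The window size and names below are invented for the example, and the page-offset ("slop") handling of the real helper is omitted.

#include <stdio.h>
#include <string.h>

#define MAP_CHUNK 64	/* stand-in for MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) */

/* Pretend only MAP_CHUNK bytes of 'src' can be made accessible at once. */
static void copy_in_chunks(char *dest, const char *src, size_t size)
{
	while (size) {
		size_t clen = size > MAP_CHUNK ? MAP_CHUNK : size;

		/* in the kernel, early_memremap() would map this window ... */
		memcpy(dest, src, clen);
		/* ... and early_memunmap() would tear it down again */

		dest += clen;
		src += clen;
		size -= clen;
	}
}

int main(void)
{
	char src[300], dst[300];
	int i;

	for (i = 0; i < 300; i++)
		src[i] = (char)i;
	copy_in_chunks(dst, src, sizeof(src));
	printf("copy intact: %d\n", memcmp(dst, src, sizeof(src)) == 0);
	return 0;
}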
Signed-off-by: Mark Salter msalter@redhat.com Cc: Catalin Marinas catalin.marinas@arm.com Cc: Will Deacon will.deacon@arm.com Cc: Arnd Bergmann arnd@arndb.de Cc: Ard Biesheuvel ard.biesheuvel@linaro.org Cc: Mark Rutland mark.rutland@arm.com Cc: Russell King rmk@arm.linux.org.uk Cc: Ingo Molnar mingo@elte.hu Cc: Thomas Gleixner tglx@linutronix.de Cc: "H. Peter Anvin" hpa@zytor.com Cc: Yinghai Lu yinghai@kernel.org Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit 6b0f68e32ea8749ff7d4a66cd5761e915e48e59d) Signed-off-by: Alex Shi alex.shi@linaro.org --- include/asm-generic/early_ioremap.h | 6 ++++++ mm/early_ioremap.c | 22 ++++++++++++++++++++++ 2 files changed, 28 insertions(+)
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index a5de55c..e539f27 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -33,6 +33,12 @@ extern void early_ioremap_setup(void); */ extern void early_ioremap_reset(void);
+/* + * Early copy from unmapped memory to kernel mapped memory. + */ +extern void copy_from_early_mem(void *dest, phys_addr_t src, + unsigned long size); + #else static inline void early_ioremap_init(void) { } static inline void early_ioremap_setup(void) { } diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index e10ccd2..a0baeb4 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -217,6 +217,28 @@ early_memremap(resource_size_t phys_addr, unsigned long size) return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_NORMAL); } + +#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) + +void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) +{ + unsigned long slop, clen; + char *p; + + while (size) { + slop = src & ~PAGE_MASK; + clen = size; + if (clen > MAX_MAP_CHUNK - slop) + clen = MAX_MAP_CHUNK - slop; + p = early_memremap(src & PAGE_MASK, clen + slop); + memcpy(dest, p + slop, clen); + early_memunmap(p, clen + slop); + dest += clen; + src += clen; + size -= clen; + } +} + #else /* CONFIG_MMU */
void __init __iomem *
From: Mark Salter msalter@redhat.com
The use of mem= could leave part or all of the initrd outside of the kernel linear map. This will lead to an error when unpacking the initrd and a probable failure to boot. This patch catches that situation and relocates the initrd to be fully within the linear map.
Signed-off-by: Mark Salter msalter@redhat.com Acked-by: Will Deacon will.deacon@arm.com Cc: Catalin Marinas catalin.marinas@arm.com Cc: Arnd Bergmann arnd@arndb.de Cc: Ard Biesheuvel ard.biesheuvel@linaro.org Cc: Mark Rutland mark.rutland@arm.com Cc: Russell King rmk@arm.linux.org.uk Cc: Ingo Molnar mingo@elte.hu Cc: Thomas Gleixner tglx@linutronix.de Cc: "H. Peter Anvin" hpa@zytor.com Cc: Yinghai Lu yinghai@kernel.org Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit 1570f0d7ab425c1e0905715bf9cc98b2a82e723f) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/arm64/kernel/setup.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index bbdb53b..1fd9b4d 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -368,6 +368,67 @@ static void __init request_standard_resources(void) } }
+#ifdef CONFIG_BLK_DEV_INITRD +/* + * Relocate initrd if it is not completely within the linear mapping. + * This would be the case if mem= cuts out all or part of it. + */ +static void __init relocate_initrd(void) +{ + phys_addr_t orig_start = __virt_to_phys(initrd_start); + phys_addr_t orig_end = __virt_to_phys(initrd_end); + phys_addr_t ram_end = memblock_end_of_DRAM(); + phys_addr_t new_start; + unsigned long size, to_free = 0; + void *dest; + + if (orig_end <= ram_end) + return; + + /* + * Any of the original initrd which overlaps the linear map should + * be freed after relocating. + */ + if (orig_start < ram_end) + to_free = ram_end - orig_start; + + size = orig_end - orig_start; + + /* initrd needs to be relocated completely inside linear mapping */ + new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn), + size, PAGE_SIZE); + if (!new_start) + panic("Cannot relocate initrd of size %ld\n", size); + memblock_reserve(new_start, size); + + initrd_start = __phys_to_virt(new_start); + initrd_end = initrd_start + size; + + pr_info("Moving initrd from [%llx-%llx] to [%llx-%llx]\n", + orig_start, orig_start + size - 1, + new_start, new_start + size - 1); + + dest = (void *)initrd_start; + + if (to_free) { + memcpy(dest, (void *)__phys_to_virt(orig_start), to_free); + dest += to_free; + } + + copy_from_early_mem(dest, orig_start + to_free, size - to_free); + + if (to_free) { + pr_info("Freeing original RAMDISK from [%llx-%llx]\n", + orig_start, orig_start + to_free - 1); + memblock_free(orig_start, to_free); + } +} +#else +static inline void __init relocate_initrd(void) +{ +} +#endif + u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p) @@ -401,6 +462,7 @@ void __init setup_arch(char **cmdline_p) acpi_boot_table_init();
paging_init(); + relocate_initrd(); request_standard_resources();
early_ioremap_reset();
From: Andrey Ryabinin ryabinin.a.a@gmail.com
To avoid writing the lengthy (UL(0xffffffffffffffff) << VA_BITS) everywhere, replace it with VA_START.
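For example (a standalone sketch; the 39-bit VA configuration is assumed here for illustration and is not stated in this patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int va_bits = 39;	/* CONFIG_ARM64_VA_BITS=39 assumed */
	uint64_t va_start    = 0xffffffffffffffffULL << va_bits;
	uint64_t page_offset = 0xffffffffffffffffULL << (va_bits - 1);

	printf("VA_START    = 0x%016llx\n", (unsigned long long)va_start);    /* 0xffffff8000000000 */
	printf("PAGE_OFFSET = 0x%016llx\n", (unsigned long long)page_offset); /* 0xffffffc000000000 */
	return 0;
}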
Signed-off-by: Andrey Ryabinin ryabinin.a.a@gmail.com Reviewed-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Catalin Marinas catalin.marinas@arm.com (cherry picked from commit 127db024a7baee9874014dac33628253f438b4da) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/arm64/include/asm/memory.h | 2 ++ arch/arm64/include/asm/pgtable.h | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 44a59c2..53d3fdc 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -42,12 +42,14 @@ * PAGE_OFFSET - the virtual address of the start of the kernel image (top * (VA_BITS - 1)) * VA_BITS - the maximum number of bits for virtual addresses. + * VA_START - the first kernel virtual address. * TASK_SIZE - the maximum size of a user space task. * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. * The module space lives between the addresses given by TASK_SIZE * and PAGE_OFFSET - it must be within 128MB of the kernel text. */ #define VA_BITS (CONFIG_ARM64_VA_BITS) +#define VA_START (UL(0xffffffffffffffff) << VA_BITS) #define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) #define MODULES_END (PAGE_OFFSET) #define MODULES_VADDR (MODULES_END - SZ_64M) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index cf73194..ca7ce49 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -40,7 +40,7 @@ * fixed mappings and modules */ #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) -#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS) +#define VMALLOC_START (VA_START) #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
This patch adds the arch-specific code for the kernel address sanitizer (see Documentation/kasan.txt).
1/8 of the kernel address space is reserved for shadow memory. There was no hole big enough for this, so the virtual addresses for the shadow were stolen from the vmalloc area.
At the early boot stage the whole shadow region is populated with just one physical page (kasan_zero_page). Later, this page is reused as a read-only zero shadow for memory that KASan currently doesn't track (vmalloc). After the physical memory has been mapped, pages for the shadow memory are allocated and mapped.
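To make the layout concrete, here is a hedged, standalone calculation for a 39-bit VA configuration (the VA_BITS value is an assumption for illustration). The resulting 64 GB shadow range matches the "kasan" line in the boot log shown later in this series, and the computed offset is what the 32-bit arithmetic added to arch/arm64/Makefile below evaluates to:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int va_bits = 39;				/* assumed configuration */
	uint64_t va_start      = 0xffffffffffffffffULL << va_bits;
	uint64_t shadow_start  = va_start;			/* KASAN_SHADOW_START */
	uint64_t shadow_end    = shadow_start + (1ULL << (va_bits - 3));
	uint64_t shadow_offset = shadow_end - (1ULL << (64 - 3));

	/* shadow_addr = (addr >> 3) + KASAN_SHADOW_OFFSET */
	uint64_t sample = (va_start >> 3) + shadow_offset;

	printf("KASAN_SHADOW_START  = 0x%016llx\n", (unsigned long long)shadow_start);  /* 0xffffff8000000000 */
	printf("KASAN_SHADOW_END    = 0x%016llx\n", (unsigned long long)shadow_end);    /* 0xffffff9000000000 */
	printf("KASAN_SHADOW_OFFSET = 0x%016llx\n", (unsigned long long)shadow_offset); /* 0xdfffff9000000000 */
	printf("shadow(VA_START)    = 0x%016llx\n", (unsigned long long)sample);        /* == KASAN_SHADOW_START */
	return 0;
}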
Functions like memset/memmove/memcpy do a lot of memory accesses. If a bad pointer is passed to one of these functions, it is important to catch it. The compiler's instrumentation cannot do this since these functions are written in assembly, so KASan replaces them with manually instrumented variants. The original functions are declared as weak symbols so that the strong definitions in mm/kasan/kasan.c can replace them. The original functions also have aliases with a '__' prefix in the name, so the non-instrumented variants can be called when needed. Some files are built without KASan instrumentation (e.g. mm/slub.c); in those files the mem* functions are replaced (via #define) with the prefixed variants to disable memory access checks.
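The weak/strong symbol scheme can be shown with a small, hedged user-space sketch (the 'my_' prefixes only avoid clashing with libc; the kernel's real checked definitions live in mm/kasan/kasan.c):

#include <stdio.h>
#include <string.h>

/* The always-available, non-instrumented implementation ("__memcpy"). */
void *my___memcpy(void *dst, const void *src, size_t n)
{
	return memcpy(dst, src, n);	/* stand-in for the assembler routine */
}

/* Weak default "memcpy": just an alias of the non-instrumented variant. */
void *my_memcpy(void *dst, const void *src, size_t n)
	__attribute__((weak, alias("my___memcpy")));

/*
 * A strong definition like the one in mm/kasan/kasan.c overrides the weak
 * alias at link time, checks the accessed ranges and then calls the
 * '__'-prefixed variant. Files built without instrumentation instead
 * #define memcpy to __memcpy and keep using the unchecked routine.
 */

int main(void)
{
	char a[8] = "abcdefg", b[8];

	my_memcpy(b, a, sizeof(a));
	printf("%s\n", b);
	return 0;
}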
Signed-off-by: Andrey Ryabinin ryabinin.a.a@gmail.com Tested-by: Linus Walleij linus.walleij@linaro.org Reviewed-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Catalin Marinas catalin.marinas@arm.com (cherry picked from commit 39d114ddc68223022c12ae3a1573912bc4b585e5) Signed-off-by: Alex Shi alex.shi@linaro.org
Conflicts: remove efitstub from arch/arm64/kernel/image.h, since we don't have it. --- arch/arm64/Kconfig | 1 + arch/arm64/Makefile | 7 ++ arch/arm64/include/asm/kasan.h | 36 +++++++++ arch/arm64/include/asm/pgtable.h | 7 ++ arch/arm64/include/asm/string.h | 16 ++++ arch/arm64/kernel/Makefile | 2 + arch/arm64/kernel/arm64ksyms.c | 3 + arch/arm64/kernel/head.S | 3 + arch/arm64/kernel/module.c | 16 +++- arch/arm64/kernel/setup.c | 4 + arch/arm64/lib/memcpy.S | 3 + arch/arm64/lib/memmove.S | 7 +- arch/arm64/lib/memset.S | 3 + arch/arm64/mm/Makefile | 3 + arch/arm64/mm/kasan_init.c | 165 +++++++++++++++++++++++++++++++++++++++ drivers/firmware/efi/Makefile | 8 ++ scripts/Makefile.kasan | 4 +- 17 files changed, 282 insertions(+), 6 deletions(-) create mode 100644 arch/arm64/include/asm/kasan.h create mode 100644 arch/arm64/mm/kasan_init.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6f0a3b4..6adba13 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -44,6 +44,7 @@ config ARM64 select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_BITREVERSE select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP select HAVE_ARCH_KGDB select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 3258174..a240765 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -44,6 +44,13 @@ else TEXT_OFFSET := 0x00080000 endif
+# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61) +# in 32-bit arithmetic +KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ + (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \ + + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \ + - (1 << (64 - 32 - 3)) )) ) + export TEXT_OFFSET GZFLAGS
core-y += arch/arm64/kernel/ arch/arm64/mm/ diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h new file mode 100644 index 0000000..71dfe14 --- /dev/null +++ b/arch/arm64/include/asm/kasan.h @@ -0,0 +1,36 @@ +#ifndef __ASM_KASAN_H +#define __ASM_KASAN_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_KASAN + +#include <asm/memory.h> + +/* + * KASAN_SHADOW_START: beginning of the kernel virtual addresses. + * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses. + */ +#define KASAN_SHADOW_START (VA_START) +#define KASAN_SHADOW_END (KASAN_SHADOW_START + (1UL << (VA_BITS - 3))) + +/* + * This value is used to map an address to the corresponding shadow + * address by the following formula: + * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET; + * + * (1 << 61) shadow addresses - [KASAN_SHADOW_OFFSET,KASAN_SHADOW_END] + * cover all 64-bits of virtual addresses. So KASAN_SHADOW_OFFSET + * should satisfy the following equation: + * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61) + */ +#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3))) + +void kasan_init(void); + +#else +static inline void kasan_init(void) { } +#endif + +#endif +#endif diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index ca7ce49..8dedad5 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -40,7 +40,14 @@ * fixed mappings and modules */ #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) + +#ifndef CONFIG_KASAN #define VMALLOC_START (VA_START) +#else +#include <asm/kasan.h> +#define VMALLOC_START (KASAN_SHADOW_END + SZ_64K) +#endif + #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index 64d2d48..2eb714c 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -36,17 +36,33 @@ extern __kernel_size_t strnlen(const char *, __kernel_size_t);
#define __HAVE_ARCH_MEMCPY extern void *memcpy(void *, const void *, __kernel_size_t); +extern void *__memcpy(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMMOVE extern void *memmove(void *, const void *, __kernel_size_t); +extern void *__memmove(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMCHR extern void *memchr(const void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMSET extern void *memset(void *, int, __kernel_size_t); +extern void *__memset(void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMCMP extern int memcmp(const void *, const void *, size_t);
+ +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) + +/* + * For files that are not instrumented (e.g. mm/slub.c) we + * should use not instrumented version of mem* functions. + */ + +#define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memmove(dst, src, len) __memmove(dst, src, len) +#define memset(s, c, n) __memset(s, c, n) +#endif + #endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 426d076..086b122 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -7,6 +7,8 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) CFLAGS_armv8_deprecated.o := -I$(src)
+KASAN_SANITIZE_efi-stub.o := n + CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_insn.o = -pg CFLAGS_REMOVE_return_address.o = -pg diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index a85843d..3b6d8cc 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -51,6 +51,9 @@ EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(__memset); +EXPORT_SYMBOL(__memcpy); +EXPORT_SYMBOL(__memmove); EXPORT_SYMBOL(memchr); EXPORT_SYMBOL(memcmp);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index cc7435c..6aeb0cb 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -486,6 +486,9 @@ __mmap_switched: str_l x21, __fdt_pointer, x5 // Save FDT pointer str_l x24, memstart_addr, x6 // Save PHYS_OFFSET mov x29, #0 +#ifdef CONFIG_KASAN + bl kasan_early_init +#endif b start_kernel ENDPROC(__mmap_switched)
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 876eb8d..f4bc779 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -21,6 +21,7 @@ #include <linux/bitops.h> #include <linux/elf.h> #include <linux/gfp.h> +#include <linux/kasan.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/moduleloader.h> @@ -34,9 +35,18 @@
void *module_alloc(unsigned long size) { - return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, - GFP_KERNEL, PAGE_KERNEL_EXEC, 0, - NUMA_NO_NODE, __builtin_return_address(0)); + void *p; + + p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, 0, + NUMA_NO_NODE, __builtin_return_address(0)); + + if (p && (kasan_module_alloc(p, size) < 0)) { + vfree(p); + return NULL; + } + + return p; }
enum aarch64_reloc_op { diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 1fd9b4d..389959a 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -54,6 +54,7 @@ #include <asm/elf.h> #include <asm/cpufeature.h> #include <asm/cpu_ops.h> +#include <asm/kasan.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp_plat.h> @@ -463,6 +464,9 @@ void __init setup_arch(char **cmdline_p)
paging_init(); relocate_initrd(); + + kasan_init(); + request_standard_resources();
early_ioremap_reset(); diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index 36a6a62..6761393 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -68,7 +68,10 @@ stp \ptr, \regB, [\regC], \val .endm
+ .weak memcpy +ENTRY(__memcpy) ENTRY(memcpy) #include "copy_template.S" ret ENDPIPROC(memcpy) +ENDPROC(__memcpy) diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S index 68e2f20..a5a4459 100644 --- a/arch/arm64/lib/memmove.S +++ b/arch/arm64/lib/memmove.S @@ -57,12 +57,14 @@ C_h .req x12 D_l .req x13 D_h .req x14
+ .weak memmove +ENTRY(__memmove) ENTRY(memmove) cmp dstin, src - b.lo memcpy + b.lo __memcpy add tmp1, src, count cmp dstin, tmp1 - b.hs memcpy /* No overlap. */ + b.hs __memcpy /* No overlap. */
add dst, dstin, count add src, src, count @@ -195,3 +197,4 @@ ENTRY(memmove) b.ne .Ltail63 ret ENDPIPROC(memmove) +ENDPROC(__memmove) diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S index 29f405f..f2670a9 100644 --- a/arch/arm64/lib/memset.S +++ b/arch/arm64/lib/memset.S @@ -54,6 +54,8 @@ dst .req x8 tmp3w .req w9 tmp3 .req x9
+ .weak memset +ENTRY(__memset) ENTRY(memset) mov dst, dstin /* Preserve return value. */ and A_lw, val, #255 @@ -214,3 +216,4 @@ ENTRY(memset) b.ne .Ltail_maybe_long ret ENDPIPROC(memset) +ENDPROC(__memset) diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 773d37a..57f57fd 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -4,3 +4,6 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ context.o proc.o pageattr.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_ARM64_PTDUMP) += dump.o + +obj-$(CONFIG_KASAN) += kasan_init.o +KASAN_SANITIZE_kasan_init.o := n diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c new file mode 100644 index 0000000..b6a92f5 --- /dev/null +++ b/arch/arm64/mm/kasan_init.c @@ -0,0 +1,165 @@ +/* + * This file contains kasan initialization code for ARM64. + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Author: Andrey Ryabinin ryabinin.a.a@gmail.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#define pr_fmt(fmt) "kasan: " fmt +#include <linux/kasan.h> +#include <linux/kernel.h> +#include <linux/memblock.h> +#include <linux/start_kernel.h> + +#include <asm/page.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE); + +static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr, + unsigned long end) +{ + pte_t *pte; + unsigned long next; + + if (pmd_none(*pmd)) + pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); + + pte = pte_offset_kernel(pmd, addr); + do { + next = addr + PAGE_SIZE; + set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page), + PAGE_KERNEL)); + } while (pte++, addr = next, addr != end && pte_none(*pte)); +} + +static void __init kasan_early_pmd_populate(pud_t *pud, + unsigned long addr, + unsigned long end) +{ + pmd_t *pmd; + unsigned long next; + + if (pud_none(*pud)) + pud_populate(&init_mm, pud, kasan_zero_pmd); + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + kasan_early_pte_populate(pmd, addr, next); + } while (pmd++, addr = next, addr != end && pmd_none(*pmd)); +} + +static void __init kasan_early_pud_populate(pgd_t *pgd, + unsigned long addr, + unsigned long end) +{ + pud_t *pud; + unsigned long next; + + if (pgd_none(*pgd)) + pgd_populate(&init_mm, pgd, kasan_zero_pud); + + pud = pud_offset(pgd, addr); + do { + next = pud_addr_end(addr, end); + kasan_early_pmd_populate(pud, addr, next); + } while (pud++, addr = next, addr != end && pud_none(*pud)); +} + +static void __init kasan_map_early_shadow(void) +{ + unsigned long addr = KASAN_SHADOW_START; + unsigned long end = KASAN_SHADOW_END; + unsigned long next; + pgd_t *pgd; + + pgd = pgd_offset_k(addr); + do { + next = pgd_addr_end(addr, end); + kasan_early_pud_populate(pgd, addr, next); + } while (pgd++, addr = next, addr != end); +} + +void __init kasan_early_init(void) +{ + BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61)); + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE)); + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)); + kasan_map_early_shadow(); +} + +static void __init clear_pgds(unsigned long start, + unsigned long end) +{ + /* + * Remove references to kasan page tables from + * swapper_pg_dir. 
pgd_clear() can't be used + * here because it's nop on 2,3-level pagetable setups + */ + for (; start < end; start += PGDIR_SIZE) + set_pgd(pgd_offset_k(start), __pgd(0)); +} + +static void __init cpu_set_ttbr1(unsigned long ttbr1) +{ + asm( + " msr ttbr1_el1, %0\n" + " isb" + : + : "r" (ttbr1)); +} + +void __init kasan_init(void) +{ + struct memblock_region *reg; + + /* + * We are going to perform proper setup of shadow memory. + * At first we should unmap early shadow (clear_pgds() call bellow). + * However, instrumented code couldn't execute without shadow memory. + * tmp_pg_dir used to keep early shadow mapped until full shadow + * setup will be finished. + */ + memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir)); + cpu_set_ttbr1(__pa(tmp_pg_dir)); + flush_tlb_all(); + + clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); + + kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, + kasan_mem_to_shadow((void *)MODULES_VADDR)); + + for_each_memblock(memory, reg) { + void *start = (void *)__phys_to_virt(reg->base); + void *end = (void *)__phys_to_virt(reg->base + reg->size); + + if (start >= end) + break; + + /* + * end + 1 here is intentional. We check several shadow bytes in + * advance to slightly speed up fastpath. In some rare cases + * we could cross boundary of mapped shadow, so we just map + * some more here. + */ + vmemmap_populate((unsigned long)kasan_mem_to_shadow(start), + (unsigned long)kasan_mem_to_shadow(end) + 1, + pfn_to_nid(virt_to_pfn(start))); + } + + memset(kasan_zero_page, 0, PAGE_SIZE); + cpu_set_ttbr1(__pa(swapper_pg_dir)); + flush_tlb_all(); + + /* At this point kasan is fully initialized. Enable error messages */ + init_task.kasan_depth = 0; + pr_info("KernelAddressSanitizer initialized\n"); +} diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index d8be608..513ae7ce 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -1,6 +1,14 @@ # # Makefile for linux kernel # + +# +# ARM64 maps efi runtime services in userspace addresses +# which don't have KASAN shadow. So dereference of these addresses +# in efi_call_virt() will cause crash if this code instrumented. +# +KASAN_SANITIZE_runtime-wrappers.o := n + obj-$(CONFIG_EFI) += efi.o vars.o reboot.o obj-$(CONFIG_EFI_VARS) += efivars.o obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 3f874d2..37323b0 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan @@ -5,10 +5,12 @@ else call_threshold := 0 endif
+KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET) + CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \ - -fasan-shadow-offset=$(CONFIG_KASAN_SHADOW_OFFSET) \ + -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET) \ --param asan-stack=1 --param asan-globals=1 \ --param asan-instrumentation-with-call-threshold=$(call_threshold))
From: Linus Walleij linus.walleij@linaro.org
This prints out the virtual memory assigned to KASan in the boot crawl along with other memory assignments, if and only if KASan is activated.
Example dmesg from the Juno Development board:
Memory: 1691156K/2080768K available (5465K kernel code, 444K rwdata, 2160K rodata, 340K init, 217K bss, 373228K reserved, 16384K cma-reserved)
Virtual kernel memory layout:
    kasan   : 0xffffff8000000000 - 0xffffff9000000000   (    64 GB)
    vmalloc : 0xffffff9000000000 - 0xffffffbdbfff0000   (   182 GB)
    vmemmap : 0xffffffbdc0000000 - 0xffffffbfc0000000   (     8 GB maximum)
              0xffffffbdc2000000 - 0xffffffbdc3fc0000   (    31 MB actual)
    fixed   : 0xffffffbffabfd000 - 0xffffffbffac00000   (    12 KB)
    PCI I/O : 0xffffffbffae00000 - 0xffffffbffbe00000   (    16 MB)
    modules : 0xffffffbffc000000 - 0xffffffc000000000   (    64 MB)
    memory  : 0xffffffc000000000 - 0xffffffc07f000000   (  2032 MB)
      .init : 0xffffffc0007f5000 - 0xffffffc00084a000   (   340 KB)
      .text : 0xffffffc000080000 - 0xffffffc0007f45b4   (  7634 KB)
      .data : 0xffffffc000850000 - 0xffffffc0008bf200   (   445 KB)
Signed-off-by: Linus Walleij linus.walleij@linaro.org Signed-off-by: Andrey Ryabinin ryabinin.a.a@gmail.com Acked-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Catalin Marinas catalin.marinas@arm.com (cherry picked from commit ee7f881b59de4e0e0be250fd0c5d4ade3e30ec34) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/arm64/mm/init.c | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index ad87ce8..3930692 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -298,6 +298,9 @@ void __init mem_init(void) #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
pr_notice("Virtual kernel memory layout:\n" +#ifdef CONFIG_KASAN + " kasan : 0x%16lx - 0x%16lx (%6ld GB)\n" +#endif " vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n" #ifdef CONFIG_SPARSEMEM_VMEMMAP " vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n" @@ -310,6 +313,9 @@ void __init mem_init(void) " .init : 0x%p" " - 0x%p" " (%6ld KB)\n" " .text : 0x%p" " - 0x%p" " (%6ld KB)\n" " .data : 0x%p" " - 0x%p" " (%6ld KB)\n", +#ifdef CONFIG_KASAN + MLG(KASAN_SHADOW_START, KASAN_SHADOW_END), +#endif MLG(VMALLOC_START, VMALLOC_END), #ifdef CONFIG_SPARSEMEM_VMEMMAP MLG((unsigned long)vmemmap,
From: Will Deacon will.deacon@arm.com
Sparse reports some new issues introduced by the kasan patches:
arch/arm64/mm/kasan_init.c:91:13: warning: no previous prototype for 'kasan_early_init' [-Wmissing-prototypes]
 void __init kasan_early_init(void)
             ^
arch/arm64/mm/kasan_init.c:91:13: warning: symbol 'kasan_early_init' was not declared. Should it be static? [sparse]
This patch resolves the problem by adding a prototype for kasan_early_init and marking the function as asmlinkage, since it's only called from head.S.
Signed-off-by: Will Deacon will.deacon@arm.com Acked-by: Andrey Ryabinin ryabinin.a.a@gmail.com Signed-off-by: Catalin Marinas catalin.marinas@arm.com (cherry picked from commit 83040123fde42ec532d3b632efb5f7f84024e61d) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/arm64/include/asm/kasan.h | 2 ++ arch/arm64/mm/kasan_init.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h index 71dfe14..2774fa3 100644 --- a/arch/arm64/include/asm/kasan.h +++ b/arch/arm64/include/asm/kasan.h @@ -5,6 +5,7 @@
#ifdef CONFIG_KASAN
+#include <linux/linkage.h> #include <asm/memory.h>
/* @@ -27,6 +28,7 @@ #define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3)))
void kasan_init(void); +asmlinkage void kasan_early_init(void);
#else static inline void kasan_init(void) { } diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index b6a92f5..cf038c7 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -88,7 +88,7 @@ static void __init kasan_map_early_shadow(void) } while (pgd++, addr = next, addr != end); }
-void __init kasan_early_init(void) +asmlinkage void __init kasan_early_init(void) { BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61)); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
From: Andrey Ryabinin aryabinin@virtuozzo.com
On KASAN + 16K_PAGES + 48BIT_VA:

arch/arm64/mm/kasan_init.c: In function ‘kasan_early_init’:
include/linux/compiler.h:484:38: error: call to ‘__compiletime_assert_95’ declared with attribute error: BUILD_BUG_ON failed: !IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)
  _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
Currently KASAN does not work with 16K_PAGES and 48BIT_VA, so forbid that configuration to avoid the above build failure.
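A standalone back-of-the-envelope check of why the assertion fires (the shift values below are derived from the 16K-page/48-bit configuration, not taken from kernel headers):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int page_shift = 14, va_bits = 48;		/* 16K pages, 48-bit VA */
	unsigned int level_shift = page_shift - 3;		/* 11 bits resolved per level */
	unsigned int pgdir_shift = page_shift + 3 * level_shift; /* 4 levels -> 47 */
	uint64_t pgdir_size = 1ULL << pgdir_shift;

	uint64_t shadow_start = 0xffffffffffffffffULL << va_bits;	/* VA_START */
	uint64_t shadow_end   = shadow_start + (1ULL << (va_bits - 3));

	printf("PGDIR_SIZE = 2^%u\n", pgdir_shift);
	printf("KASAN_SHADOW_END %% PGDIR_SIZE = 0x%llx (non-zero, so BUILD_BUG_ON fires)\n",
	       (unsigned long long)(shadow_end & (pgdir_size - 1)));
	return 0;
}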
Signed-off-by: Andrey Ryabinin aryabinin@virtuozzo.com Reported-by: Suzuki K. Poulose Suzuki.Poulose@arm.com Acked-by: Mark Rutland mark.rutland@arm.com Signed-off-by: Catalin Marinas catalin.marinas@arm.com (cherry picked from commit f1b9032f61c0412082a240cb7245f8b79e09ae8d) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/arm64/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6adba13..eaf47b9 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -44,7 +44,7 @@ config ARM64 select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_BITREVERSE select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP + select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48) select HAVE_ARCH_KGDB select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK
From: Wang Long long.wanglong@huawei.com
Remove duplicate definition of the macro KASAN_FREE_PAGE in mm/kasan/kasan.h
Signed-off-by: Wang Long long.wanglong@huawei.com Acked-by: Andrey Ryabinin a.ryabinin@samsung.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit bffacb9132a306b7e22bb6366e5b277f20f67465) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/kasan.h | 1 - 1 file changed, 1 deletion(-)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 4986b0a..c242adf 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -7,7 +7,6 @@ #define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
#define KASAN_FREE_PAGE 0xFF /* page was freed */ -#define KASAN_FREE_PAGE 0xFF /* page was freed */ #define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */ #define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */ #define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */
From: Andrey Ryabinin ryabinin.a.a@gmail.com
The current definition of KASAN_SHADOW_OFFSET in include/linux/kasan.h will not work for the upcoming arm64 port, so move it to the arch header.
Signed-off-by: Andrey Ryabinin ryabinin.a.a@gmail.com Cc: Alexander Potapenko glider@google.com Cc: Alexey Klimov klimov.linux@gmail.com Cc: Andrew Morton akpm@linux-foundation.org Cc: Aneesh Kumar K.V aneesh.kumar@linux.vnet.ibm.com Cc: Arnd Bergmann arnd@arndb.de Cc: Catalin Marinas catalin.marinas@arm.com Cc: David Keitel dkeitel@codeaurora.org Cc: Dmitry Vyukov dvyukov@google.com Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Linus Walleij linus.walleij@linaro.org Cc: Peter Zijlstra peterz@infradead.org Cc: Rik van Riel riel@redhat.com Cc: Thomas Gleixner tglx@linutronix.de Cc: Will Deacon will.deacon@arm.com Cc: Yury yury.norov@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/1439444244-26057-2-git-send-email-ryabinin.a.a@gmai... Signed-off-by: Ingo Molnar mingo@kernel.org (cherry picked from commit 920e277e17f12870188f4564887a95ae9ac03e31) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/x86/include/asm/kasan.h | 3 +++ include/linux/kasan.h | 1 - 2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h index 74a2a8d..1410b56 100644 --- a/arch/x86/include/asm/kasan.h +++ b/arch/x86/include/asm/kasan.h @@ -1,6 +1,9 @@ #ifndef _ASM_X86_KASAN_H #define _ASM_X86_KASAN_H
+#include <linux/const.h> +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) + /* * Compiler uses shadow offset assuming that addresses start * from 0. Kernel addresses don't start from 0, so shadow diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 5486d77..6fb1c7d 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -10,7 +10,6 @@ struct vm_struct; #ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3 -#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#include <asm/kasan.h> #include <linux/sched.h>
From: Andrey Ryabinin ryabinin.a.a@gmail.com
Introduce a generic kasan_populate_zero_shadow(shadow_start, shadow_end). This function maps kasan_zero_page over the [shadow_start, shadow_end] address range.
This replaces the x86_64-specific populate_zero_shadow() and will be used for ARM64 in follow-on patches.
The main changes from original version are:
 * Use p?d_populate*() instead of set_p?d()
 * Use memblock allocator directly instead of vmemmap_alloc_block()
 * __pa() instead of __pa_nodebug(). __pa() causes trouble only if we use it
   before kasan_early_init(); kasan_populate_zero_shadow() will be used
   later, so we are OK with __pa() here.
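A toy, user-space model of what kasan_populate_zero_shadow() achieves (an illustration only: a flat array stands in for a pte table, and the cost of the page tables themselves is ignored). A large shadow range ends up backed by a single shared read-only page:

#include <stdio.h>

#define PAGE_SIZE 4096
#define NR_PTES   1024				/* pretend the range needs 1024 ptes */

static unsigned char zero_page[PAGE_SIZE];	/* stands in for kasan_zero_page */
static unsigned char *pte[NR_PTES];		/* stands in for one pte table */

int main(void)
{
	int i;

	for (i = 0; i < NR_PTES; i++)
		pte[i] = zero_page;		/* every entry points at the same page */

	printf("%d KB of shadow backed by %d KB of real memory\n",
	       NR_PTES * PAGE_SIZE / 1024, PAGE_SIZE / 1024);
	return 0;
}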
Signed-off-by: Andrey Ryabinin ryabinin.a.a@gmail.com Acked-by: Catalin Marinas catalin.marinas@arm.com Cc: Alexander Potapenko glider@google.com Cc: Alexey Klimov klimov.linux@gmail.com Cc: Andrew Morton akpm@linux-foundation.org Cc: Aneesh Kumar K.V aneesh.kumar@linux.vnet.ibm.com Cc: Arnd Bergmann arnd@arndb.de Cc: David Keitel dkeitel@codeaurora.org Cc: Dmitry Vyukov dvyukov@google.com Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Linus Walleij linus.walleij@linaro.org Cc: Peter Zijlstra peterz@infradead.org Cc: Rik van Riel riel@redhat.com Cc: Thomas Gleixner tglx@linutronix.de Cc: Will Deacon will.deacon@arm.com Cc: Yury yury.norov@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/1439444244-26057-3-git-send-email-ryabinin.a.a@gmai... Signed-off-by: Ingo Molnar mingo@kernel.org (cherry picked from commit 69786cdb379bbc6eab14cf2393c1abd879316e85) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/x86/mm/kasan_init_64.c | 123 ++--------------------------------- include/linux/kasan.h | 9 +++ mm/kasan/Makefile | 2 +- mm/kasan/kasan_init.c | 152 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 167 insertions(+), 119 deletions(-) create mode 100644 mm/kasan/kasan_init.c
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 9a54dbe..e753a16 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -11,20 +11,6 @@ extern pgd_t early_level4_pgt[PTRS_PER_PGD]; extern struct range pfn_mapped[E820_X_MAX];
-static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; -static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss; -static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss; - -/* - * This page used as early shadow. We don't use empty_zero_page - * at early stages, stack instrumentation could write some garbage - * to this page. - * Latter we reuse it as zero shadow for large ranges of memory - * that allowed to access, but not instrumented by kasan - * (vmalloc/vmemmap ...). - */ -static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; - static int __init map_range(struct range *range) { unsigned long start; @@ -61,106 +47,6 @@ static void __init kasan_map_early_shadow(pgd_t *pgd) } }
-static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr, - unsigned long end) -{ - pte_t *pte = pte_offset_kernel(pmd, addr); - - while (addr + PAGE_SIZE <= end) { - WARN_ON(!pte_none(*pte)); - set_pte(pte, __pte(__pa_nodebug(kasan_zero_page) - | __PAGE_KERNEL_RO)); - addr += PAGE_SIZE; - pte = pte_offset_kernel(pmd, addr); - } - return 0; -} - -static int __init zero_pmd_populate(pud_t *pud, unsigned long addr, - unsigned long end) -{ - int ret = 0; - pmd_t *pmd = pmd_offset(pud, addr); - - while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) { - WARN_ON(!pmd_none(*pmd)); - set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte) - | _KERNPG_TABLE)); - addr += PMD_SIZE; - pmd = pmd_offset(pud, addr); - } - if (addr < end) { - if (pmd_none(*pmd)) { - void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); - if (!p) - return -ENOMEM; - set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE)); - } - ret = zero_pte_populate(pmd, addr, end); - } - return ret; -} - - -static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr, - unsigned long end) -{ - int ret = 0; - pud_t *pud = pud_offset(pgd, addr); - - while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) { - WARN_ON(!pud_none(*pud)); - set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd) - | _KERNPG_TABLE)); - addr += PUD_SIZE; - pud = pud_offset(pgd, addr); - } - - if (addr < end) { - if (pud_none(*pud)) { - void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); - if (!p) - return -ENOMEM; - set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE)); - } - ret = zero_pmd_populate(pud, addr, end); - } - return ret; -} - -static int __init zero_pgd_populate(unsigned long addr, unsigned long end) -{ - int ret = 0; - pgd_t *pgd = pgd_offset_k(addr); - - while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) { - WARN_ON(!pgd_none(*pgd)); - set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud) - | _KERNPG_TABLE)); - addr += PGDIR_SIZE; - pgd = pgd_offset_k(addr); - } - - if (addr < end) { - if (pgd_none(*pgd)) { - void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); - if (!p) - return -ENOMEM; - set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE)); - } - ret = zero_pud_populate(pgd, addr, end); - } - return ret; -} - - -static void __init populate_zero_shadow(const void *start, const void *end) -{ - if (zero_pgd_populate((unsigned long)start, (unsigned long)end)) - panic("kasan: unable to map zero shadow!"); -} - - #ifdef CONFIG_KASAN_INLINE static int kasan_die_handler(struct notifier_block *self, unsigned long val, @@ -212,7 +98,7 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
- populate_zero_shadow((void *)KASAN_SHADOW_START, + kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, kasan_mem_to_shadow((void *)PAGE_OFFSET));
for (i = 0; i < E820_X_MAX; i++) { @@ -222,14 +108,15 @@ void __init kasan_init(void) if (map_range(&pfn_mapped[i])) panic("kasan: unable to allocate shadow!"); } - populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), - kasan_mem_to_shadow((void *)__START_KERNEL_map)); + kasan_populate_zero_shadow( + kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), + kasan_mem_to_shadow((void *)__START_KERNEL_map));
vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext), (unsigned long)kasan_mem_to_shadow(_end), NUMA_NO_NODE);
- populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), + kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), (void *)KASAN_SHADOW_END);
memset(kasan_zero_page, 0, PAGE_SIZE); diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 6fb1c7d..4b9f85c 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -12,8 +12,17 @@ struct vm_struct; #define KASAN_SHADOW_SCALE_SHIFT 3
#include <asm/kasan.h> +#include <asm/pgtable.h> #include <linux/sched.h>
+extern unsigned char kasan_zero_page[PAGE_SIZE]; +extern pte_t kasan_zero_pte[PTRS_PER_PTE]; +extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; +extern pud_t kasan_zero_pud[PTRS_PER_PUD]; + +void kasan_populate_zero_shadow(const void *shadow_start, + const void *shadow_end); + static inline void *kasan_mem_to_shadow(const void *addr) { return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT) diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index bd837b8..6471014 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -5,4 +5,4 @@ CFLAGS_REMOVE_kasan.o = -pg # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
-obj-y := kasan.o report.o +obj-y := kasan.o report.o kasan_init.o diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c new file mode 100644 index 0000000..3f9a41c --- /dev/null +++ b/mm/kasan/kasan_init.c @@ -0,0 +1,152 @@ +/* + * This file contains some kasan initialization code. + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Author: Andrey Ryabinin ryabinin.a.a@gmail.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/bootmem.h> +#include <linux/init.h> +#include <linux/kasan.h> +#include <linux/kernel.h> +#include <linux/memblock.h> +#include <linux/pfn.h> + +#include <asm/page.h> +#include <asm/pgalloc.h> + +/* + * This page serves two purposes: + * - It used as early shadow memory. The entire shadow region populated + * with this page, before we will be able to setup normal shadow memory. + * - Latter it reused it as zero shadow to cover large ranges of memory + * that allowed to access, but not handled by kasan (vmalloc/vmemmap ...). + */ +unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; + +#if CONFIG_PGTABLE_LEVELS > 3 +pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; +#endif +#if CONFIG_PGTABLE_LEVELS > 2 +pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss; +#endif +pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss; + +static __init void *early_alloc(size_t size, int node) +{ + return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), + BOOTMEM_ALLOC_ACCESSIBLE, node); +} + +static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr, + unsigned long end) +{ + pte_t *pte = pte_offset_kernel(pmd, addr); + pte_t zero_pte; + + zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL); + zero_pte = pte_wrprotect(zero_pte); + + while (addr + PAGE_SIZE <= end) { + set_pte_at(&init_mm, addr, pte, zero_pte); + addr += PAGE_SIZE; + pte = pte_offset_kernel(pmd, addr); + } +} + +static void __init zero_pmd_populate(pud_t *pud, unsigned long addr, + unsigned long end) +{ + pmd_t *pmd = pmd_offset(pud, addr); + unsigned long next; + + do { + next = pmd_addr_end(addr, end); + + if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { + pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); + continue; + } + + if (pmd_none(*pmd)) { + pmd_populate_kernel(&init_mm, pmd, + early_alloc(PAGE_SIZE, NUMA_NO_NODE)); + } + zero_pte_populate(pmd, addr, next); + } while (pmd++, addr = next, addr != end); +} + +static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr, + unsigned long end) +{ + pud_t *pud = pud_offset(pgd, addr); + unsigned long next; + + do { + next = pud_addr_end(addr, end); + if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { + pmd_t *pmd; + + pud_populate(&init_mm, pud, kasan_zero_pmd); + pmd = pmd_offset(pud, addr); + pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); + continue; + } + + if (pud_none(*pud)) { + pud_populate(&init_mm, pud, + early_alloc(PAGE_SIZE, NUMA_NO_NODE)); + } + zero_pmd_populate(pud, addr, next); + } while (pud++, addr = next, addr != end); +} + +/** + * kasan_populate_zero_shadow - populate shadow memory region with + * kasan_zero_page + * @shadow_start - start of the memory range to populate + * @shadow_end - end of the memory range to populate + */ +void __init kasan_populate_zero_shadow(const void *shadow_start, + const void *shadow_end) +{ + unsigned long addr = (unsigned long)shadow_start; + 
unsigned long end = (unsigned long)shadow_end; + pgd_t *pgd = pgd_offset_k(addr); + unsigned long next; + + do { + next = pgd_addr_end(addr, end); + + if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) { + pud_t *pud; + pmd_t *pmd; + + /* + * kasan_zero_pud should be populated with pmds + * at this moment. + * [pud,pmd]_populate*() below needed only for + * 3,2 - level page tables where we don't have + * puds,pmds, so pgd_populate(), pud_populate() + * is noops. + */ + pgd_populate(&init_mm, pgd, kasan_zero_pud); + pud = pud_offset(pgd, addr); + pud_populate(&init_mm, pud, kasan_zero_pmd); + pmd = pmd_offset(pud, addr); + pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); + continue; + } + + if (pgd_none(*pgd)) { + pgd_populate(&init_mm, pgd, + early_alloc(PAGE_SIZE, NUMA_NO_NODE)); + } + zero_pud_populate(pgd, addr, next); + } while (pgd++, addr = next, addr != end); +}
From: "Aneesh Kumar K.V" aneesh.kumar@linux.vnet.ibm.com
The function only reflects whether reporting is enabled or disabled. In a later patch we will add an early KASAN enable/disable, so rename kasan_enabled() to kasan_report_enabled() to properly reflect its function.
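For context, the renamed helper is just a test of the per-task depth counter that later patches in this series pair with kasan_disable_current()/kasan_enable_current(). A minimal sketch of that relationship follows; only the kasan_report_enabled() body is taken from the hunk below, while the increment/decrement bodies of the enable/disable helpers are an assumption about their usual definitions, not part of this patch:

        static inline bool kasan_report_enabled(void)
        {
                return !current->kasan_depth;   /* depth == 0: reports allowed */
        }

        /* Assumed shape of the helpers used by later patches in this series: */
        static inline void kasan_disable_current(void)
        {
                current->kasan_depth++;         /* suppress reports while nested */
        }

        static inline void kasan_enable_current(void)
        {
                current->kasan_depth--;
        }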
Signed-off-by: Aneesh Kumar K.V aneesh.kumar@linux.vnet.ibm.com Reviewed-by: Andrey Ryabinin ryabinin.a.a@gmail.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit 0ba8663cbfae066fc504b858db7cbb7d03c2b872) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/kasan.h | 2 +- mm/kasan/report.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index c242adf..a6b46cc 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -63,7 +63,7 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr) << KASAN_SHADOW_SCALE_SHIFT); }
-static inline bool kasan_enabled(void) +static inline bool kasan_report_enabled(void) { return !current->kasan_depth; } diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 680ceed..ba916b1b 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -220,7 +220,7 @@ void kasan_report(unsigned long addr, size_t size, { struct kasan_access_info info;
- if (likely(!kasan_enabled())) + if (likely(!kasan_report_enabled())) return;
info.access_addr = (void *)addr;
From: "Aneesh Kumar K.V" aneesh.kumar@linux.vnet.ibm.com
Use is_module_address() instead of the open-coded MODULES_VADDR/MODULES_END range check.
Signed-off-by: Aneesh Kumar K.V aneesh.kumar@linux.vnet.ibm.com Reviewed-by: Andrey Ryabinin ryabinin.a.a@gmail.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit 527f215b78976e94995dce7163b07539b576d519) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/report.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c index ba916b1b..9474364 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -22,6 +22,7 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/kasan.h> +#include <linux/module.h>
#include <asm/sections.h>
@@ -85,9 +86,11 @@ static void print_error_description(struct kasan_access_info *info)
static inline bool kernel_or_module_addr(const void *addr) { - return (addr >= (void *)_stext && addr < (void *)_end) - || (addr >= (void *)MODULES_VADDR - && addr < (void *)MODULES_END); + if (addr >= (void *)_stext && addr < (void *)_end) + return true; + if (is_module_address((unsigned long)addr)) + return true; + return false; }
static inline bool init_task_stack_addr(const void *addr)
From: "Aneesh Kumar K.V" aneesh.kumar@linux.vnet.ibm.com
We can't use generic functions like print_hex_dump() to access the KASAN shadow region directly. That would require us to set up yet another shadow region for the address passed (which is itself a KASAN shadow address), and some architectures won't be able to do that. Hence make a copy of the shadow row and pass that to the generic functions.
Signed-off-by: Aneesh Kumar K.V aneesh.kumar@linux.vnet.ibm.com Reviewed-by: Andrey Ryabinin ryabinin.a.a@gmail.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit f2377d4eaab2aabe1938b3974b5b94f5ba4c7ead) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/report.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 9474364..2b30762 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -164,14 +164,20 @@ static void print_shadow_for_address(const void *addr) for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) { const void *kaddr = kasan_shadow_to_mem(shadow_row); char buffer[4 + (BITS_PER_LONG/8)*2]; + char shadow_buf[SHADOW_BYTES_PER_ROW];
snprintf(buffer, sizeof(buffer), (i == 0) ? ">%p: " : " %p: ", kaddr); - + /* + * We should not pass a shadow pointer to generic + * function, because generic functions may try to + * access kasan mapping for the passed address. + */ kasan_disable_current(); + memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW); print_hex_dump(KERN_ERR, buffer, DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1, - shadow_row, SHADOW_BYTES_PER_ROW, 0); + shadow_buf, SHADOW_BYTES_PER_ROW, 0); kasan_enable_current();
if (row_is_guilty(shadow_row, shadow))
From: "Aneesh Kumar K.V" aneesh.kumar@linux.vnet.ibm.com
When we end up calling kasan_report() in real mode, the shadow mapping for the spinlock variable will show up as poisoned. This results in us calling kasan_report_error() again with the report_lock spinlock already held. To prevent this, disable KASAN reporting while we are printing a KASAN error.
Signed-off-by: Aneesh Kumar K.V aneesh.kumar@linux.vnet.ibm.com Reviewed-by: Andrey Ryabinin ryabinin.a.a@gmail.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit fc5aeeaf593278f07ffa4d97296e27423ecae867) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/report.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 2b30762..a42dade 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -173,12 +173,10 @@ static void print_shadow_for_address(const void *addr) * function, because generic functions may try to * access kasan mapping for the passed address. */ - kasan_disable_current(); memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW); print_hex_dump(KERN_ERR, buffer, DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1, shadow_buf, SHADOW_BYTES_PER_ROW, 0); - kasan_enable_current();
if (row_is_guilty(shadow_row, shadow)) pr_err("%*c\n", @@ -195,6 +193,10 @@ void kasan_report_error(struct kasan_access_info *info) { unsigned long flags;
+ /* + * Make sure we don't end up in loop. + */ + kasan_disable_current(); spin_lock_irqsave(&report_lock, flags); pr_err("=================================" "=================================\n"); @@ -204,12 +206,17 @@ void kasan_report_error(struct kasan_access_info *info) pr_err("=================================" "=================================\n"); spin_unlock_irqrestore(&report_lock, flags); + kasan_enable_current(); }
void kasan_report_user_access(struct kasan_access_info *info) { unsigned long flags;
+ /* + * Make sure we don't end up in loop. + */ + kasan_disable_current(); spin_lock_irqsave(&report_lock, flags); pr_err("=================================" "=================================\n"); @@ -222,6 +229,7 @@ void kasan_report_user_access(struct kasan_access_info *info) pr_err("=================================" "=================================\n"); spin_unlock_irqrestore(&report_lock, flags); + kasan_enable_current(); }
void kasan_report(unsigned long addr, size_t size,
From: Andrey Konovalov andreyknvl@google.com
Each access to an address lower than kasan_shadow_to_mem(KASAN_SHADOW_START) is currently reported as a user-memory-access. This is not always accurate: the accessed address might not be in user space. Fix this by reporting such accesses as null-ptr-derefs or wild-memory-accesses where appropriate.
There's another reason for this change. For userspace ASan we have a bunch of systems that analyze error types for the purpose of classification and deduplication. Sooner or later we will write such systems for KASAN as well, and then clearly and explicitly stated error types will bring value.
Signed-off-by: Andrey Konovalov andreyknvl@google.com Cc: Andrey Ryabinin ryabinin.a.a@gmail.com Cc: Dmitry Vyukov dvyukov@google.com Cc: Alexander Potapenko glider@google.com Cc: Konstantin Serebryany kcc@google.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit e91210766341cb356ead7fd39f07493a3d00b80f) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/kasan.c | 8 +------- mm/kasan/kasan.h | 3 --- mm/kasan/report.c | 50 +++++++++++++++++++++++--------------------------- 3 files changed, 24 insertions(+), 37 deletions(-)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 6c513a6..d6798bc 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -236,18 +236,12 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) static __always_inline void check_memory_region(unsigned long addr, size_t size, bool write) { - struct kasan_access_info info; - if (unlikely(size == 0)) return;
if (unlikely((void *)addr < kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { - info.access_addr = (void *)addr; - info.access_size = size; - info.is_write = write; - info.ip = _RET_IP_; - kasan_report_user_access(&info); + kasan_report(addr, size, write, _RET_IP_); return; }
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index a6b46cc..4f6c62e 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -54,9 +54,6 @@ struct kasan_global { #endif };
-void kasan_report_error(struct kasan_access_info *info); -void kasan_report_user_access(struct kasan_access_info *info); - static inline const void *kasan_shadow_to_mem(const void *shadow_addr) { return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET) diff --git a/mm/kasan/report.c b/mm/kasan/report.c index a42dade..8a695bb 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -189,9 +189,10 @@ static void print_shadow_for_address(const void *addr)
static DEFINE_SPINLOCK(report_lock);
-void kasan_report_error(struct kasan_access_info *info) +static void kasan_report_error(struct kasan_access_info *info) { unsigned long flags; + const char *bug_type;
/* * Make sure we don't end up in loop. @@ -200,32 +201,26 @@ void kasan_report_error(struct kasan_access_info *info) spin_lock_irqsave(&report_lock, flags); pr_err("=================================" "=================================\n"); - print_error_description(info); - print_address_description(info); - print_shadow_for_address(info->first_bad_addr); - pr_err("=================================" - "=================================\n"); - spin_unlock_irqrestore(&report_lock, flags); - kasan_enable_current(); -} - -void kasan_report_user_access(struct kasan_access_info *info) -{ - unsigned long flags; - - /* - * Make sure we don't end up in loop. - */ - kasan_disable_current(); - spin_lock_irqsave(&report_lock, flags); - pr_err("=================================" - "=================================\n"); - pr_err("BUG: KASan: user-memory-access on address %p\n", - info->access_addr); - pr_err("%s of size %zu by task %s/%d\n", - info->is_write ? "Write" : "Read", - info->access_size, current->comm, task_pid_nr(current)); - dump_stack(); + if (info->access_addr < + kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) { + if ((unsigned long)info->access_addr < PAGE_SIZE) + bug_type = "null-ptr-deref"; + else if ((unsigned long)info->access_addr < TASK_SIZE) + bug_type = "user-memory-access"; + else + bug_type = "wild-memory-access"; + pr_err("BUG: KASan: %s on address %p\n", + bug_type, info->access_addr); + pr_err("%s of size %zu by task %s/%d\n", + info->is_write ? "Write" : "Read", + info->access_size, current->comm, + task_pid_nr(current)); + dump_stack(); + } else { + print_error_description(info); + print_address_description(info); + print_shadow_for_address(info->first_bad_addr); + } pr_err("=================================" "=================================\n"); spin_unlock_irqrestore(&report_lock, flags); @@ -244,6 +239,7 @@ void kasan_report(unsigned long addr, size_t size, info.access_size = size; info.is_write = is_write; info.ip = ip; + kasan_report_error(&info); }
From: Andrey Konovalov andreyknvl@google.com
Update the names of the bad access types to better reflect the type of access that happened, and make these error types "literals" that can be used for classification and deduplication in scripts.
Signed-off-by: Andrey Konovalov andreyknvl@google.com Cc: Andrey Ryabinin ryabinin.a.a@gmail.com Cc: Dmitry Vyukov dvyukov@google.com Cc: Alexander Potapenko glider@google.com Cc: Konstantin Serebryany kcc@google.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit 0952d87fd6a6211ac51b2abdc5c066b49c651fd8) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/report.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 8a695bb..69d9315c 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -49,7 +49,7 @@ static const void *find_first_bad_addr(const void *addr, size_t size)
static void print_error_description(struct kasan_access_info *info) { - const char *bug_type = "unknown crash"; + const char *bug_type = "unknown-crash"; u8 shadow_val;
info->first_bad_addr = find_first_bad_addr(info->access_addr, @@ -58,21 +58,25 @@ static void print_error_description(struct kasan_access_info *info) shadow_val = *(u8 *)kasan_mem_to_shadow(info->first_bad_addr);
switch (shadow_val) { - case KASAN_FREE_PAGE: - case KASAN_KMALLOC_FREE: - bug_type = "use after free"; + case 0 ... KASAN_SHADOW_SCALE_SIZE - 1: + bug_type = "out-of-bounds"; break; case KASAN_PAGE_REDZONE: case KASAN_KMALLOC_REDZONE: + bug_type = "slab-out-of-bounds"; + break; case KASAN_GLOBAL_REDZONE: - case 0 ... KASAN_SHADOW_SCALE_SIZE - 1: - bug_type = "out of bounds access"; + bug_type = "global-out-of-bounds"; break; case KASAN_STACK_LEFT: case KASAN_STACK_MID: case KASAN_STACK_RIGHT: case KASAN_STACK_PARTIAL: - bug_type = "out of bounds on stack"; + bug_type = "stack-out-of-bounds"; + break; + case KASAN_FREE_PAGE: + case KASAN_KMALLOC_FREE: + bug_type = "use-after-free"; break; }
From: Andrey Konovalov andreyknvl@google.com
Makes KASAN accurately determine the type of the bad access. If the shadow byte value is in the [0, KASAN_SHADOW_SCALE_SIZE) range we can look at the next shadow byte to determine the type of the access.
Signed-off-by: Andrey Konovalov andreyknvl@google.com Cc: Andrey Ryabinin ryabinin.a.a@gmail.com Cc: Dmitry Vyukov dvyukov@google.com Cc: Alexander Potapenko glider@google.com Cc: Konstantin Serebryany kcc@google.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit cdf6a273dc4346277ab9d148ef29f6e058624a8c) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/report.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 69d9315c..63e039f 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -50,15 +50,26 @@ static const void *find_first_bad_addr(const void *addr, size_t size) static void print_error_description(struct kasan_access_info *info) { const char *bug_type = "unknown-crash"; - u8 shadow_val; + u8 *shadow_addr;
info->first_bad_addr = find_first_bad_addr(info->access_addr, info->access_size);
- shadow_val = *(u8 *)kasan_mem_to_shadow(info->first_bad_addr); + shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
- switch (shadow_val) { + /* + * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look + * at the next shadow byte to determine the type of the bad access. + */ + if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1) + shadow_addr++; + + switch (*shadow_addr) { case 0 ... KASAN_SHADOW_SCALE_SIZE - 1: + /* + * In theory it's still possible to see these shadow values + * due to a data race in the kernel code. + */ bug_type = "out-of-bounds"; break; case KASAN_PAGE_REDZONE:
From: Andrey Konovalov andreyknvl@google.com
We decided to use KASAN as the short name of the tool and KernelAddressSanitizer as the full one. Update log messages according to that.
Signed-off-by: Andrey Konovalov andreyknvl@google.com Cc: Andrey Ryabinin ryabinin.a.a@gmail.com Cc: Dmitry Vyukov dvyukov@google.com Cc: Alexander Potapenko glider@google.com Cc: Konstantin Serebryany kcc@google.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit 25add7ec708170e4eaef1f9793a07803b2fb5c71) Signed-off-by: Alex Shi alex.shi@linaro.org --- arch/x86/mm/kasan_init_64.c | 2 ++ mm/kasan/kasan.c | 2 +- mm/kasan/report.c | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index e753a16..caf1757 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -124,4 +124,6 @@ void __init kasan_init(void) load_cr3(init_level4_pgt); __flush_tlb_all(); init_task.kasan_depth = 0; + + pr_info("KernelAddressSanitizer initialized\n"); } diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index d6798bc..b9b83f9 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -519,7 +519,7 @@ static int kasan_mem_notifier(struct notifier_block *nb,
static int __init kasan_memhotplug_init(void) { - pr_err("WARNING: KASan doesn't support memory hot-add\n"); + pr_err("WARNING: KASAN doesn't support memory hot-add\n"); pr_err("Memory hot-add will be disabled\n");
hotplug_memory_notifier(kasan_mem_notifier, 0); diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 63e039f..c8cc63f 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -91,7 +91,7 @@ static void print_error_description(struct kasan_access_info *info) break; }
- pr_err("BUG: KASan: %s in %pS at addr %p\n", + pr_err("BUG: KASAN: %s in %pS at addr %p\n", bug_type, (void *)info->ip, info->access_addr); pr_err("%s of size %zu by task %s/%d\n", @@ -224,7 +224,7 @@ static void kasan_report_error(struct kasan_access_info *info) bug_type = "user-memory-access"; else bug_type = "wild-memory-access"; - pr_err("BUG: KASan: %s on address %p\n", + pr_err("BUG: KASAN: %s on address %p\n", bug_type, info->access_addr); pr_err("%s of size %zu by task %s/%d\n", info->is_write ? "Write" : "Read",
From: Andrey Konovalov andreyknvl@google.com
Update the reference to the kasan prototype repository on github, since it was renamed.
Signed-off-by: Andrey Konovalov andreyknvl@google.com Cc: Andrey Ryabinin ryabinin.a.a@gmail.com Cc: Dmitry Vyukov dvyukov@google.com Cc: Alexander Potapenko glider@google.com Cc: Konstantin Serebryany kcc@google.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit 5d0926efe728e00afbd81a1e3c498222cf908d23) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/kasan.c | 2 +- mm/kasan/report.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index b9b83f9..879a33f 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -4,7 +4,7 @@ * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Andrey Ryabinin a.ryabinin@samsung.com * - * Some of code borrowed from https://github.com/xairy/linux by + * Some code borrowed from https://github.com/xairy/kasan-prototype by * Andrey Konovalov adech.fo@gmail.com * * This program is free software; you can redistribute it and/or modify diff --git a/mm/kasan/report.c b/mm/kasan/report.c index c8cc63f..c59fc0e 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -4,7 +4,7 @@ * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Andrey Ryabinin a.ryabinin@samsung.com * - * Some of code borrowed from https://github.com/xairy/linux by + * Some code borrowed from https://github.com/xairy/kasan-prototype by * Andrey Konovalov adech.fo@gmail.com * * This program is free software; you can redistribute it and/or modify
From: Wang Long long.wanglong@huawei.com
The current KASAN code cannot detect out-of-bounds bugs of the following kind:
        char *ptr;
        ptr = kmalloc(8, GFP_KERNEL);
        memset(ptr+7, 0, 2);
The cause of the problem is a type conversion error in the *memory_is_poisoned_n* function, so this patch fixes that.
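To make the conversion error concrete, here is a small stand-alone sketch (not kernel code; the numeric values are made up for illustration) of the C promotion rule that hides the poisoning. A compiler may warn about the signed/unsigned comparison in the first printf, which is exactly the point:

        #include <stdio.h>

        int main(void)
        {
                unsigned long masked = 7;       /* plays the role of last_byte & KASAN_SHADOW_MASK */
                signed char shadow = -4;        /* plays the role of *last_shadow (poisoned granule) */

                /* Buggy form: shadow is promoted to unsigned long, so -4 becomes
                 * a huge value and the ">= means poisoned" check never fires. */
                printf("unsigned compare: %d\n", masked >= shadow);

                /* Patched form: the (long) cast keeps the comparison signed,
                 * so the poisoned shadow value is correctly detected. */
                printf("signed compare:   %d\n", (long)masked >= shadow);
                return 0;
        }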
Signed-off-by: Wang Long long.wanglong@huawei.com Acked-by: Andrey Ryabinin aryabinin@virtuozzo.com Cc: Vladimir Murzin vladimir.murzin@arm.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit e0d57714394f5e2ce4e2f9bbebf48e3c7a7fd3be) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/kasan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 879a33f..d2dca93 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -204,7 +204,7 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr, s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
if (unlikely(ret != (unsigned long)last_shadow || - ((last_byte & KASAN_SHADOW_MASK) >= *last_shadow))) + ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow))) return true; } return false;
From: Xishi Qiu qiuxishi@huawei.com
The shadow that corresponds to 16 bytes of memory may span 2 or 3 shadow bytes. If the memory is aligned on 8, the shadow takes only 2 bytes, so checking "shadow_first_bytes" is enough and there is no need to call memory_is_poisoned_1(addr + 15). But the test "if (likely(!last_byte))" is the wrong way to make that judgement.
For example, with addr = 0 we get last_byte = 15 & KASAN_SHADOW_MASK = 7, so the code still goes on to call memory_is_poisoned_1(addr + 15).
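A quick way to see the 2-versus-3 shadow byte cases is to compute the span directly. The stand-alone sketch below uses the KASAN_SHADOW_SCALE_SHIFT of 3 from include/linux/kasan.h and drops the shadow offset, since only the number of shadow bytes matters here:

        #include <stdio.h>

        #define SHIFT 3 /* KASAN_SHADOW_SCALE_SHIFT: 8 bytes of memory per shadow byte */

        static void span16(unsigned long addr)
        {
                unsigned long first = addr >> SHIFT;
                unsigned long last = (addr + 15) >> SHIFT;

                printf("addr %2lu: shadow bytes %lu..%lu (%lu total)\n",
                       addr, first, last, last - first + 1);
        }

        int main(void)
        {
                span16(0);      /* 8-byte aligned: the 16-byte access spans 2 shadow bytes */
                span16(4);      /* unaligned: it spans 3 shadow bytes */
                return 0;
        }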
Signed-off-by: Xishi Qiu qiuxishi@huawei.com Acked-by: Andrey Ryabinin aryabinin@virtuozzo.com Cc: Andrey Konovalov adech.fo@gmail.com Cc: Rusty Russell rusty@rustcorp.com.au Cc: Michal Marek mmarek@suse.cz Cc: zhongjiang@huawei.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit 8d77a6d18ae9ccfd5eee1cc551ee4ac27fd41464) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/kasan.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index d2dca93..1a66507 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -135,12 +135,11 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr)
if (unlikely(*shadow_addr)) { u16 shadow_first_bytes = *(u16 *)shadow_addr; - s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK;
if (unlikely(shadow_first_bytes)) return true;
- if (likely(!last_byte)) + if (likely(IS_ALIGNED(addr, 8))) return false;
return memory_is_poisoned_1(addr + 15);
From: Xishi Qiu qiuxishi@huawei.com
Use IS_ALIGNED() to determine whether the shadow spans two bytes. It generates less code and is more readable. Also add some comments to the shadow check functions.
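For reference, IS_ALIGNED() reduces to a simple mask test; the definition below is reconstructed from memory of include/linux/kernel.h and should be treated as an assumption rather than a quote:

        #include <stdio.h>

        /* Assumed definition, reconstructed from memory: */
        #define IS_ALIGNED(x, a)        (((x) & ((typeof(x))(a) - 1)) == 0)

        int main(void)
        {
                /* As used in memory_is_poisoned_8()/_16() above, with
                 * KASAN_SHADOW_SCALE_SIZE == 8: */
                printf("%d\n", IS_ALIGNED(16UL, 8));    /* 1: the shadow byte(s) already checked cover the access */
                printf("%d\n", IS_ALIGNED(20UL, 8));    /* 0: the access spills into the next shadow byte */
                return 0;
        }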
Signed-off-by: Xishi Qiu qiuxishi@huawei.com Acked-by: Andrey Ryabinin aryabinin@virtuozzo.com Cc: Andrey Konovalov adech.fo@gmail.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit 10f702627e139e21465f4c9d44f63527bbca163c) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/kasan.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 1a66507..7f97c85 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -86,6 +86,11 @@ static __always_inline bool memory_is_poisoned_2(unsigned long addr) if (memory_is_poisoned_1(addr + 1)) return true;
+ /* + * If single shadow byte covers 2-byte access, we don't + * need to do anything more. Otherwise, test the first + * shadow byte. + */ if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0)) return false;
@@ -103,6 +108,11 @@ static __always_inline bool memory_is_poisoned_4(unsigned long addr) if (memory_is_poisoned_1(addr + 3)) return true;
+ /* + * If single shadow byte covers 4-byte access, we don't + * need to do anything more. Otherwise, test the first + * shadow byte. + */ if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3)) return false;
@@ -120,7 +130,12 @@ static __always_inline bool memory_is_poisoned_8(unsigned long addr) if (memory_is_poisoned_1(addr + 7)) return true;
- if (likely(((addr + 7) & KASAN_SHADOW_MASK) >= 7)) + /* + * If single shadow byte covers 8-byte access, we don't + * need to do anything more. Otherwise, test the first + * shadow byte. + */ + if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) return false;
return unlikely(*(u8 *)shadow_addr); @@ -139,7 +154,12 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr) if (unlikely(shadow_first_bytes)) return true;
- if (likely(IS_ALIGNED(addr, 8))) + /* + * If two shadow bytes covers 16-byte access, we don't + * need to do anything more. Otherwise, test the last + * shadow byte. + */ + if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) return false;
return memory_is_poisoned_1(addr + 15);
From: Andrey Ryabinin aryabinin@virtuozzo.com
Currently we already taint the kernel in some cases. E.g. if we hit some bug in SLUB memory we call object_err(), which taints the kernel with the TAINT_BAD_PAGE flag. But for other kinds of bugs the kernel is left untainted.
Always taint with TAINT_BAD_PAGE if KASAN finds a bug. This is useful for automated testing.
Signed-off-by: Andrey Ryabinin aryabinin@virtuozzo.com Cc: Alexander Potapenko glider@google.com Reviewed-by: Dmitry Vyukov dvyukov@google.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit eb06f43f1c94d502b7867b0998e92cdabbc060bc) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/report.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c index c59fc0e..e0b3e94 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -238,6 +238,7 @@ static void kasan_report_error(struct kasan_access_info *info) } pr_err("=================================" "=================================\n"); + add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irqrestore(&report_lock, flags); kasan_enable_current(); }
From: Andrey Ryabinin aryabinin@virtuozzo.com
Kmemleak reports the following leak:
unreferenced object 0xfffffbfff41ea000 (size 20480):
  comm "modprobe", pid 65199, jiffies 4298875551 (age 542.568s)
  hex dump (first 32 bytes):
    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
  backtrace:
    [<ffffffff82354f5e>] kmemleak_alloc+0x4e/0xc0
    [<ffffffff8152e718>] __vmalloc_node_range+0x4b8/0x740
    [<ffffffff81574072>] kasan_module_alloc+0x72/0xc0
    [<ffffffff810efe68>] module_alloc+0x78/0xb0
    [<ffffffff812f6a24>] module_alloc_update_bounds+0x14/0x70
    [<ffffffff812f8184>] layout_and_allocate+0x16f4/0x3c90
    [<ffffffff812faa1f>] load_module+0x2ff/0x6690
    [<ffffffff813010b6>] SyS_finit_module+0x136/0x170
    [<ffffffff8239bbc9>] system_call_fastpath+0x16/0x1b
    [<ffffffffffffffff>] 0xffffffffffffffff
kasan_module_alloc() allocates shadow memory for a module and frees it on module unloading. It doesn't store the pointer to the allocated shadow memory because that pointer can be calculated from the shadowed address, i.e. kasan_mem_to_shadow(addr).
Since kmemleak cannot find a pointer to the allocated shadow, it thinks that the memory has leaked.
Use kmemleak_ignore() to tell kmemleak that this is not a leak and that the shadow memory doesn't contain any pointers.
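The reason no pointer needs to be stored is that the shadow address is a pure function of the shadowed address. A hedged sketch of that relationship: the kasan_mem_to_shadow() body is based on the include/linux/kasan.h hunk earlier in this series, with the KASAN_SHADOW_OFFSET term completed from memory, and the vfree()-at-unload function is an assumed shape of the freeing side, not a quote of it:

        /* Reconstructed; treat the exact form as an assumption. */
        static inline void *kasan_mem_to_shadow(const void *addr)
        {
                return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                        + KASAN_SHADOW_OFFSET;
        }

        /* Assumed shape of the unload path: the shadow is simply recomputed
         * from the module address and released, so nothing needs to be
         * remembered at allocation time. */
        static void free_module_shadow_sketch(void *module_addr)
        {
                vfree(kasan_mem_to_shadow(module_addr));
        }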
Signed-off-by: Andrey Ryabinin aryabinin@virtuozzo.com Acked-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org (cherry picked from commit 459372545c9c0d6f491e280dccc8a54a61b60e56) Signed-off-by: Alex Shi alex.shi@linaro.org --- mm/kasan/kasan.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 7f97c85..81a2f45 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -19,6 +19,7 @@ #include <linux/export.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/kmemleak.h> #include <linux/memblock.h> #include <linux/memory.h> #include <linux/mm.h> @@ -444,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size)
if (ret) { find_vm_area(addr)->flags |= VM_KASAN; + kmemleak_ignore(ret); return 0; }
On Tue, Apr 12, 2016 at 11:06 AM, Alex Shi alex.shi@linaro.org wrote:
This is patchset for KASAN backporting to LSK 4.1. Any comments are appreciated.
Hi Linus,
Could you like to give some suggestion for testing?
You just enable it in Kconfig and boot. I have this config snippet in my aarch64.mak Makefile:
config-kasan: config-base
	$(CURDIR)/scripts/config --file $(config_file) \
		--enable KASAN \
		--enable KASAN_OUTLINE \
		--enable STACKTRACE \
		--enable SLUB_DEBUG_ON \
		--enable TEST_KASAN
The TEST_KASAN will show you that instrumentation works by triggering a console dump for each test.
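What TEST_KASAN exercises boils down to deliberately touching memory just outside a valid allocation. A minimal sketch in that spirit (the function name is made up here, and this is not a copy of lib/test_kasan.c):

        #include <linux/kernel.h>
        #include <linux/slab.h>

        /* Sketch only: a deliberate slab out-of-bounds write that an enabled
         * KASAN build should report on the console, much like the TEST_KASAN
         * cases do. */
        static noinline void kasan_smoke_test(void)
        {
                char *ptr = kmalloc(8, GFP_KERNEL);

                if (!ptr)
                        return;
                ptr[8] = 'x';   /* one byte past the 8-byte allocation */
                kfree(ptr);
        }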
It is worth noting that a new GCC (4.9+, preferably 5+) is needed for KASan to work, so if people are as conservative with upgrading their toolchain as they are with upgrading their kernels, they may see absolutely nothing: it will just be deactivated at compile time.
Yours, Linus Walleij
Thanks a lot for this! :)