[ Sasha's backport helper bot ]
Hi,
✅ All tests passed successfully, but please review the warnings below (author mismatch and differences from the upstream commit) before merging.
The upstream commit SHA1 provided is correct: f754f27e98f88428aaf6be6e00f5cbce97f62d4b
WARNING: Author mismatch between patch and upstream commit: Backport author: Zhaoyang Li <lizy04@hust.edu.cn> Commit author: Xu Lu <luxu.kernel@bytedance.com>
Status in newer kernel trees:
6.14.y | Present (exact SHA1)
6.12.y | Present (different SHA1: d2bd51954ac8)
6.6.y  | Present (different SHA1: a4a7ac3d2660)
Note: The patch differs from the upstream commit: --- 1: f754f27e98f88 ! 1: 0281b720e72e4 riscv: mm: Fix the out of bound issue of vmemmap address @@ Metadata ## Commit message ## riscv: mm: Fix the out of bound issue of vmemmap address
+ [ Upstream commit f754f27e98f88428aaf6be6e00f5cbce97f62d4b ] + In sparse vmemmap model, the virtual address of vmemmap is calculated as: ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT)). And the struct page's va can be calculated with an offset: @@ Commit message Reviewed-by: Björn Töpel <bjorn@rivosinc.com> Link: https://lore.kernel.org/r/20241209122617.53341-1-luxu.kernel@bytedance.com Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com> + Signed-off-by: Zhaoyang Li <lizy04@hust.edu.cn>
## arch/riscv/include/asm/page.h ## @@ arch/riscv/include/asm/page.h: struct kernel_mapping { @@ arch/riscv/include/asm/pgtable.h
## arch/riscv/mm/init.c ## @@ - #include <asm/pgtable.h> - #include <asm/sections.h> - #include <asm/soc.h> + #include <linux/hugetlb.h> + + #include <asm/fixmap.h> +#include <asm/sparsemem.h> #include <asm/tlbflush.h> - - #include "../kernel/head.h" + #include <asm/sections.h> + #include <asm/soc.h> @@ arch/riscv/mm/init.c: EXPORT_SYMBOL(pgtable_l5_enabled); phys_addr_t phys_ram_base __ro_after_init; EXPORT_SYMBOL(phys_ram_base); @@ arch/riscv/mm/init.c: EXPORT_SYMBOL(pgtable_l5_enabled); __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); @@ arch/riscv/mm/init.c: static void __init setup_bootmem(void) - * Make sure we align the start of the memory on a PMD boundary so that - * at worst, we map the linear mapping with PMD mappings. - */ + memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); + + phys_ram_end = memblock_end_of_DRAM(); - if (!IS_ENABLED(CONFIG_XIP_KERNEL)) + if (!IS_ENABLED(CONFIG_XIP_KERNEL)) { - phys_ram_base = memblock_start_of_DRAM() & PMD_MASK; + phys_ram_base = memblock_start_of_DRAM(); +#ifdef CONFIG_SPARSEMEM_VMEMMAP + vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT; +#endif -+ } - ++} /* - * In 64-bit, any use of __va/__pa before this point is wrong as we + * Reserve physical address space that would be mapped to virtual + * addresses greater than (void *)(-PAGE_SIZE) because: @@ arch/riscv/mm/init.c: asmlinkage void __init setup_vm(uintptr_t dtb_pa) kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
---
Results of testing on various branches:
| Branch                    | Patch Apply | Build Test |
|---------------------------|-------------|------------|
| stable/linux-6.1.y        | Success     | Success    |