For UEFI, we need to install the memory mappings used for Runtime Services
in a dedicated set of page tables. Add create_pgd_mapping(), which allows
us to allocate and install the page table entries early.
---
 arch/arm64/include/asm/mmu.h | 14 ++++++++++++--
 arch/arm64/kernel/efi.c      |  4 ++--
 arch/arm64/mm/mmu.c          | 41 ++++++++++++++++++++++++++++++-----------
 3 files changed, 44 insertions(+), 15 deletions(-)
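As a rough sketch of how the new helper is intended to be used (not part of
this patch): a UEFI caller could map each runtime region into a private mm,
choosing a map type from the region's EFI memory attributes. The
efi_map_runtime_region() helper, its mm argument, and the attribute-to-type
policy below are illustrative assumptions only:

#include <linux/efi.h>
#include <asm/mmu.h>

/*
 * Illustrative only: map one EFI runtime region into a dedicated mm,
 * picking a map type from the region's EFI memory attributes.
 */
static void __init efi_map_runtime_region(struct mm_struct *mm,
					  efi_memory_desc_t *md)
{
	phys_addr_t paddr = md->phys_addr;
	phys_addr_t size = md->num_pages << EFI_PAGE_SHIFT;
	enum mmu_map_type type;

	if (md->type == EFI_MEMORY_MAPPED_IO)
		type = MMU_MAP_TYPE_MMIO;
	else if (md->attribute & EFI_MEMORY_WP)
		type = MMU_MAP_TYPE_WRITE_PROTECT;
	else if (md->attribute & EFI_MEMORY_XP)
		type = MMU_MAP_TYPE_EXECUTE_PROTECT;
	else
		type = MMU_MAP_TYPE_DEFAULT;

	create_pgd_mapping(mm, paddr, md->virt_addr, size, type);
}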
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index c2f006c48bdb..c23aa4d87be7 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -31,7 +31,17 @@
 extern void paging_init(void);
 extern void setup_mm_for_reboot(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
-/* create an identity mapping for memory (or io if map_io is true) */
-extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
+
+enum mmu_map_type {
+	MMU_MAP_TYPE_DEFAULT,
+	MMU_MAP_TYPE_WRITE_PROTECT,
+	MMU_MAP_TYPE_EXECUTE_PROTECT,
+	MMU_MAP_TYPE_MMIO
+};
+extern void create_id_mapping(phys_addr_t addr, phys_addr_t size,
+			      enum mmu_map_type map_type);
+extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+			       unsigned long virt, phys_addr_t size,
+			       enum mmu_map_type map_type);
 #endif
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 00b693212b23..4c6c9f0319dc 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -54,7 +54,7 @@ static void __init efi_setup_idmap(void)
 	u64 paddr, npages, size;
 
 	for_each_memblock(memory, r)
-		create_id_mapping(r->base, r->size, 0);
+		create_id_mapping(r->base, r->size, MMU_MAP_TYPE_DEFAULT);
 
 	/* map runtime io spaces */
 	for_each_efi_memory_desc(&memmap, md) {
@@ -64,7 +64,7 @@ static void __init efi_setup_idmap(void)
 		npages = md->num_pages;
 		memrange_efi_to_native(&paddr, &npages);
 		size = npages << PAGE_SHIFT;
-		create_id_mapping(paddr, size, 1);
+		create_id_mapping(paddr, size, MMU_MAP_TYPE_MMIO);
 	}
 }
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f7d01c9816f0..f661fc44fab2 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -157,19 +157,30 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 				  unsigned long addr, unsigned long end,
-				  phys_addr_t phys, int map_io)
+				  phys_addr_t phys, enum mmu_map_type type)
 {
 	pmd_t *pmd;
 	unsigned long next;
 	pmdval_t prot_sect;
 	pgprot_t prot_pte;
 
-	if (map_io) {
+	switch (type) {
+	case MMU_MAP_TYPE_WRITE_PROTECT:
+		prot_sect = PROT_SECT_NORMAL_EXEC | PMD_SECT_RDONLY;
+		prot_pte = __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY);
+		break;
+	case MMU_MAP_TYPE_EXECUTE_PROTECT:
+		prot_sect = PROT_SECT_NORMAL;
+		prot_pte = PAGE_KERNEL;
+		break;
+	case MMU_MAP_TYPE_MMIO:
 		prot_sect = PROT_SECT_DEVICE_nGnRE;
 		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
-	} else {
+		break;
+	default:
 		prot_sect = PROT_SECT_NORMAL_EXEC;
 		prot_pte = PAGE_KERNEL_EXEC;
+		break;
 	}
 
 	/*
@@ -203,7 +214,7 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 
 static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 				  unsigned long addr, unsigned long end,
-				  unsigned long phys, int map_io)
+				  unsigned long phys, enum mmu_map_type type)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -221,7 +232,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 	/*
 	 * For 4K granule only, attempt to put down a 1GB block
 	 */
-	if (!map_io && (PAGE_SHIFT == 12) &&
+	if (type == MMU_MAP_TYPE_DEFAULT && (PAGE_SHIFT == 12) &&
 	    ((addr | next | phys) & ~PUD_MASK) == 0) {
 		pud_t old_pud = *pud;
 		set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
@@ -239,7 +250,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 			flush_tlb_all();
 		}
 	} else {
-		alloc_init_pmd(mm, pud, addr, next, phys, map_io);
+		alloc_init_pmd(mm, pud, addr, next, phys, type);
 	}
 	phys += next - addr;
 	} while (pud++, addr = next, addr != end);
@@ -251,7 +262,7 @@
  */
 static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 				    phys_addr_t phys, unsigned long virt,
-				    phys_addr_t size, int map_io)
+				    phys_addr_t size, enum mmu_map_type type)
 {
 	unsigned long addr, length, end, next;
 
@@ -261,7 +272,7 @@ static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(mm, pgd, addr, next, phys, map_io);
+		alloc_init_pud(mm, pgd, addr, next, phys, type);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
@@ -275,17 +286,25 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 		return;
 	}
 	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
-			 size, 0);
+			 size, MMU_MAP_TYPE_DEFAULT);
 }
 
-void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
+void __init create_id_mapping(phys_addr_t addr, phys_addr_t size,
+			      enum mmu_map_type map_type)
 {
 	if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
 		pr_warn("BUG: not creating id mapping for %pa\n", &addr);
 		return;
 	}
 	__create_mapping(&init_mm, &idmap_pg_dir[pgd_index(addr)],
-			 addr, addr, size, map_io);
+			 addr, addr, size, map_type);
+}
+
+void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+			       unsigned long virt, phys_addr_t size,
+			       enum mmu_map_type map_type)
+{
+	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, map_type);
 }
 
 static void __init map_mem(void)
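A note on the interface: the enum replaces the old 'int map_io' flag, so
call sites now state the intended attributes explicitly, and the two new
types cover the write-protect (read-only) and execute-protect
(non-executable) cases that UEFI memory attributes can request. Note also
that only MMU_MAP_TYPE_DEFAULT remains eligible for the 1GB PUD block
optimisation in alloc_init_pud(); the other types always map at PMD or PTE
granularity. As a hypothetical example of how the new call sites read (the
mm, paddr, vaddr, and size values are placeholders):

	create_pgd_mapping(mm, paddr, vaddr, size, MMU_MAP_TYPE_EXECUTE_PROTECT);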