x86 and ia64 have the early_ioremap()/early_iounmap() functions, which are useful for supporting things like UEFI, ACPI and SMBIOS, where configuration tables need to be parsed before proper memory management is available, regardless of highmem status.
This patchset implements a restricted form of early_ioremap(), available before paging_init() only. Like the x86 code on which it is based, it (p)re-uses the fixmap regions for its virtual mapping range. Up to 7 simultaneous mappings of up to 128KB can be accommodated in the available fixmap space.
Leif Lindholm (2): Documentation: arm: early_ioremap arm: add early_ioremap support
Documentation/arm/00-INDEX | 2 + Documentation/arm/early_ioremap.txt | 12 ++ arch/arm/Kconfig | 20 +++ arch/arm/include/asm/fixmap.h | 31 +++- arch/arm/include/asm/io.h | 13 ++ arch/arm/kernel/setup.c | 3 + arch/arm/mm/Makefile | 1 + arch/arm/mm/early_ioremap.c | 273 +++++++++++++++++++++++++++++++++++ arch/arm/mm/mmu.c | 2 + 9 files changed, 355 insertions(+), 2 deletions(-) create mode 100644 Documentation/arm/early_ioremap.txt create mode 100644 arch/arm/mm/early_ioremap.c
This patch provides documentation of the early_ioremap() functionality, including its implementation and usage instructions.
Signed-off-by: Leif Lindholm leif.lindholm@linaro.org --- Documentation/arm/00-INDEX | 2 ++ Documentation/arm/early_ioremap.txt | 12 ++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 Documentation/arm/early_ioremap.txt
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX index 36420e1..4978456 100644 --- a/Documentation/arm/00-INDEX +++ b/Documentation/arm/00-INDEX @@ -24,6 +24,8 @@ SPEAr - ST SPEAr platform Linux Overview VFP/ - Release notes for Linux Kernel Vector Floating Point support code +early_ioremap.txt + - documentation of the early_ioremap() functionality empeg/ - Ltd's Empeg MP3 Car Audio Player mem_alignment diff --git a/Documentation/arm/early_ioremap.txt b/Documentation/arm/early_ioremap.txt new file mode 100644 index 0000000..178f791 --- /dev/null +++ b/Documentation/arm/early_ioremap.txt @@ -0,0 +1,12 @@ +early_ioremap() and early_iounmap() provide a mechanism for temporarily mapping +in small blocks of memory, identified by their physical address, into the +fixmap virtual address block before paging_init() has been called and more +flexible mapping functions are available. + +Due to its direct method, it also gets around potential need for special +handling of regions that end up in highmem. + +It supports up to 7 simultaneously mapped regions of up to 128KB each. +All regions are mapped as non-shareable device memory. + +Specify 'early_ioremap_debug' on the kernel commandline for verbose output.
This patch adds support for early_ioremap, based on the existing mechanism in x86. Up to 7 regions of up to 128KB each can be temporarily mapped in before paging_init, regardless of later highmem status.
Signed-off-by: Leif Lindholm leif.lindholm@linaro.org --- arch/arm/Kconfig | 20 +++ arch/arm/include/asm/fixmap.h | 31 ++++- arch/arm/include/asm/io.h | 13 ++ arch/arm/kernel/setup.c | 3 + arch/arm/mm/Makefile | 1 + arch/arm/mm/early_ioremap.c | 273 +++++++++++++++++++++++++++++++++++++++++ arch/arm/mm/mmu.c | 2 + 7 files changed, 341 insertions(+), 2 deletions(-) create mode 100644 arch/arm/mm/early_ioremap.c
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 49d993c..d9fe195 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1756,6 +1756,26 @@ config UACCESS_WITH_MEMCPY However, if the CPU data cache is using a write-allocate mode, this option is unlikely to provide any performance gain.
+config EARLY_IOREMAP + depends on MMU + bool "Provide early_ioremap() support for kernel initialization." + help + Provides a mechanism for kernel initialisation code to temporarily + map, in a highmem-agnostic way, memory pages in before paging_init(). + +config EFI + bool "UEFI runtime service support" + depends on OF + select UCS2_STRING + select EARLY_IOREMAP + ---help--- + This enables the kernel to use UEFI runtime services that are + available (such as the UEFI variable services). + + This option is only useful on systems that have UEFI firmware. + However, even with this option, the resultant kernel should + continue to boot on existing non-UEFI platforms. + config SECCOMP bool prompt "Enable seccomp to safely compute untrusted bytecode" diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index bbae919..a2a5f50 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -1,6 +1,8 @@ #ifndef _ASM_FIXMAP_H #define _ASM_FIXMAP_H
+#include <linux/bug.h> + /* * Nothing too fancy for now. * @@ -20,13 +22,38 @@ #define FIX_KMAP_BEGIN 0 #define FIX_KMAP_END (FIXADDR_SIZE >> PAGE_SHIFT)
+/* + * 224 temporary boot-time mappings, used by early_ioremap(), + * before ioremap() is functional. + * + * (P)re-using the FIXADDR region, which is used for highmem + * later on, and statically aligned to 1MB. + */ +#define NR_FIX_BTMAPS 32 +#define FIX_BTMAPS_SLOTS 7 +#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) +#define FIX_BTMAP_BEGIN FIX_KMAP_BEGIN +#define FIX_BTMAP_END (FIX_KMAP_END - 1) + +#define clear_fixmap(idx) \ + __set_fixmap(idx, 0, __pgprot(0)) + #define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
-static inline unsigned long fix_to_virt(const unsigned int idx) +static __always_inline unsigned long fix_to_virt(const unsigned int idx) { + /* + * this branch gets completely eliminated after inlining, + * except when someone tries to use fixaddr indices in an + * illegal way. (such as mixing up address types or using + * out-of-range indices). + * + * If it doesn't get removed, the linker will complain + * loudly with a reasonably clear error message.. + */ if (idx >= FIX_KMAP_END) __this_fixmap_does_not_exist(); return __fix_to_virt(idx); @@ -38,4 +65,4 @@ static inline unsigned int virt_to_fix(const unsigned long vaddr) return __virt_to_fix(vaddr); }
-#endif +#endif /* _ASM_FIXMAP_H */ diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 652b560..c8866e3 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -397,5 +397,18 @@ extern int devmem_is_allowed(unsigned long pfn); extern void register_isa_ports(unsigned int mmio, unsigned int io, unsigned int io_shift);
+/* + * early_ioremap() and early_iounmap() are for temporary early boot-time + * mappings, before the real ioremap() is functional. + * A boot-time mapping is currently limited to at most 32 pages (128KB). + * + * This is all squashed by paging_init(). + */ +extern void early_ioremap_init(void); +extern void early_ioremap_reset(void); +extern void __iomem *early_ioremap(resource_size_t phys_addr, + unsigned long size); +extern void early_iounmap(void __iomem *addr, unsigned long size); + #endif /* __KERNEL__ */ #endif /* __ASM_ARM_IO_H */ diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1522c7a..290c561 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -36,6 +36,7 @@ #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/elf.h> +#include <asm/io.h> #include <asm/procinfo.h> #include <asm/sections.h> #include <asm/setup.h> @@ -783,6 +784,8 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
+ early_ioremap_init(); + sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); sanity_check_meminfo(); arm_memblock_init(&meminfo, mdesc); diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 9e51be9..ae2c477 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -15,6 +15,7 @@ endif obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o +obj-$(CONFIG_EARLY_IOREMAP) += early_ioremap.o obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o diff --git a/arch/arm/mm/early_ioremap.c b/arch/arm/mm/early_ioremap.c new file mode 100644 index 0000000..b14f58b --- /dev/null +++ b/arch/arm/mm/early_ioremap.c @@ -0,0 +1,273 @@ +/* + * early_ioremap() support for ARM + * + * Based on existing support in arch/x86/mm/ioremap.c + * + * Restrictions: currently only functional before paging_init() + */ + +#include <linux/init.h> +#include <linux/io.h> + +#include <asm/fixmap.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +#include <asm/mach/map.h> + +static int __initdata early_ioremap_debug; + +static int __init early_ioremap_debug_setup(char *str) +{ + early_ioremap_debug = 1; + + return 0; +} +early_param("early_ioremap_debug", early_ioremap_debug_setup); + +static pte_t __initdata bm_pte[PTRS_PER_PTE] __aligned(PTRS_PER_PTE * sizeof(pte_t)); +static __initdata int after_paging_init; + +static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) +{ + unsigned int index = pgd_index(addr); + pgd_t *pgd = cpu_get_pgd() + index; + pud_t *pud = pud_offset(pgd, addr); + pmd_t *pmd = pmd_offset(pud, addr); + + return pmd; +} + +static inline pte_t * __init early_ioremap_pte(unsigned long addr) +{ + return &bm_pte[pte_index(addr)]; +} + +static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata; + +void __init early_ioremap_init(void) +{ + pmd_t *pmd; + int i; + u64 desc; + + if (early_ioremap_debug) + pr_info("early_ioremap_init()\n"); + + for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { + slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN + NR_FIX_BTMAPS*i); + if (early_ioremap_debug) + pr_info(" %lu byte slot @ 0x%08x\n", + NR_FIX_BTMAPS * PAGE_SIZE, (u32)slot_virt[i]); + } + + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); + desc = *pmd; + memset(bm_pte, 0, sizeof(bm_pte)); + + pmd_populate_kernel(NULL, pmd, bm_pte); + desc = *pmd; + + BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) + != (__fix_to_virt(FIX_BTMAP_END) >> 
PMD_SHIFT)); + + if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { + WARN_ON(1); + pr_warn("pmd %p != %p\n", + pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); + pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", + fix_to_virt(FIX_BTMAP_BEGIN)); + pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", + fix_to_virt(FIX_BTMAP_END)); + pr_warn("FIX_BTMAP_END: %lu\n", FIX_BTMAP_END); + pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN); + } +} + +void __init early_ioremap_reset(void) +{ + after_paging_init = 1; +} + +static void __init __early_set_fixmap(unsigned long idx, + phys_addr_t phys, pgprot_t flags) +{ + unsigned long addr = __fix_to_virt(idx); + pte_t *pte; + u64 desc; + + if (idx >= FIX_KMAP_END) { + BUG(); + return; + } + pte = early_ioremap_pte(addr); + + if (pgprot_val(flags)) + set_pte_at(NULL, 0xfff00000, pte, + pfn_pte(phys >> PAGE_SHIFT, flags)); + else + pte_clear(NULL, addr, pte); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + desc = *pte; +} + +static inline void __init early_set_fixmap(unsigned long idx, + phys_addr_t phys, pgprot_t prot) +{ + __early_set_fixmap(idx, phys, prot); +} + +static inline void __init early_clear_fixmap(unsigned long idx) +{ + __early_set_fixmap(idx, 0, __pgprot(0)); +} + +static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata; +static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; + +static void __init __iomem * +__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot) +{ + unsigned long offset; + resource_size_t last_addr; + unsigned int nrpages; + unsigned long idx; + int i, slot; + + slot = -1; + for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { + if (!prev_map[i]) { + slot = i; + break; + } + } + + if (slot < 0) { + pr_info("early_iomap(%08llx, %08lx) not found slot\n", + (u64)phys_addr, size); + WARN_ON(1); + return NULL; + } + + if (early_ioremap_debug) { + pr_info("early_ioremap(%08llx, %08lx) [%d] => ", + (u64)phys_addr, size, slot); + } + + /* Don't allow wraparound or zero size 
*/ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr) { + WARN_ON(1); + return NULL; + } + + prev_size[slot] = size; + /* + * Mappings have to be page-aligned + */ + offset = phys_addr & ~PAGE_MASK; + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(last_addr + 1) - phys_addr; + + /* + * Mappings have to fit in the FIX_BTMAP area. + */ + nrpages = size >> PAGE_SHIFT; + if (nrpages > NR_FIX_BTMAPS) { + WARN_ON(1); + return NULL; + } + + /* + * Ok, go for it.. + */ + idx = FIX_BTMAP_BEGIN + slot * NR_FIX_BTMAPS; + while (nrpages > 0) { + early_set_fixmap(idx, phys_addr, prot); + phys_addr += PAGE_SIZE; + idx++; + --nrpages; + } + if (early_ioremap_debug) + pr_cont("%08lx + %08lx\n", offset, slot_virt[slot]); + + prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]); + return prev_map[slot]; +} + +/* Remap an IO device */ +void __init __iomem * +early_ioremap(resource_size_t phys_addr, unsigned long size) +{ + unsigned long prot; + + if (after_paging_init) { + WARN_ON(1); + return NULL; + } + + /* + * PAGE_KERNEL depends on not-yet-initialised variables. + * We don't care about coherency or executability of early_ioremap + * pages anyway. 
+ */ + prot = L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_MT_DEV_NONSHARED; + return __early_ioremap(phys_addr, size, prot); +} + + +void __init early_iounmap(void __iomem *addr, unsigned long size) +{ + unsigned long virt_addr; + unsigned long offset; + unsigned int nrpages; + unsigned long idx; + int i, slot; + + if (after_paging_init) { + WARN_ON(1); + return; + } + + slot = -1; + for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { + if (prev_map[i] == addr) { + slot = i; + break; + } + } + + if (slot < 0) { + pr_info("early_iounmap(%p, %08lx) not found slot\n", + addr, size); + WARN_ON(1); + return; + } + + if (prev_size[slot] != size) { + pr_info("early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n", + addr, size, slot, prev_size[slot]); + WARN_ON(1); + return; + } + + if (early_ioremap_debug) + pr_info("early_iounmap(%p, %08lx) [%d]\n", addr, size, slot); + + virt_addr = (unsigned long)addr; + if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) { + WARN_ON(1); + return; + } + offset = virt_addr & ~PAGE_MASK; + nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT; + + idx = FIX_BTMAP_BEGIN + slot * NR_FIX_BTMAPS; + while (nrpages > 0) { + early_clear_fixmap(idx); + idx++; + --nrpages; + } + prev_map[slot] = NULL; +} diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e0d8565..c953b20 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -28,6 +28,7 @@ #include <asm/highmem.h> #include <asm/system_info.h> #include <asm/traps.h> +#include <asm/io.h>
#include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -1306,4 +1307,5 @@ void __init paging_init(struct machine_desc *mdesc)
empty_zero_page = virt_to_page(zero_page); __flush_dcache_page(NULL, empty_zero_page); + early_ioremap_reset(); }
boot-architecture@lists.linaro.org