The memory pinning code in uaccess_with_memcpy.c does not check
for HugeTLB or THP pmds, and will enter an infinite loop should
a __copy_to_user or __clear_user occur against a huge page.
This patch adds detection code for huge pages to pin_page_for_write.
As this code can be executed in a fast path it refers to the actual
pmds rather than the vma. If a HugeTLB or THP is found (they have
the same pmd representation on ARM), the page table spinlock is
taken to prevent modification whilst the page is pinned.
On ARM, huge pages are only represented as pmds, thus no huge pud
checks are performed. (For huge puds, one would lock the page table
in a similar manner to the pmd case.)
Two helper functions are introduced: pmd_thp_or_huge checks whether
a page is a HugeTLB or transparent huge page (these share the same
pmd layout on ARM), and pmd_hugewillfault detects whether a write
to the page will fault.
Running the following test (with the PAGE_SIZE chunking removed from
read_zero, so that a single __clear_user call covers the whole
buffer):
$ dd if=/dev/zero of=/dev/null bs=10M count=1024
Gave: 2.3 GB/s backed by normal pages,
2.9 GB/s backed by huge pages,
5.1 GB/s backed by huge pages, with page mask=HPAGE_MASK.
After some discussion, it was decided not to adopt the HPAGE_MASK,
as this would have a significant detrimental effect on the overall
system latency due to page_table_lock being held for too long.
This could be revisited if split huge page locks are adopted.
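For illustration, a minimal user-space reproducer (hypothetical, not
part of this patch): reading /dev/zero into a MAP_HUGETLB buffer
drives __clear_user, and hence pin_page_for_write, against a huge
page, which would previously spin forever in the kernel.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#define LEN (2UL * 1024 * 1024)	/* one 2MB huge page on ARM LPAE */

int main(void)
{
	/* Back the destination buffer with a HugeTLB page. */
	void *buf = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (buf == MAP_FAILED)
		return EXIT_FAILURE;

	int fd = open("/dev/zero", O_RDONLY);
	if (fd < 0)
		return EXIT_FAILURE;

	/* read_zero clears the user buffer with __clear_user; without
	 * the fix this loops forever in pin_page_for_write. */
	if (read(fd, buf, LEN) < 0)
		return EXIT_FAILURE;

	close(fd);
	munmap(buf, LEN);
	return EXIT_SUCCESS;
}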
Signed-off-by: Steve Capper <steve.capper@linaro.org>
Reviewed-by: Nicolas Pitre <nico@linaro.org>
---
arch/arm/include/asm/pgtable-3level.h | 3 +++
arch/arm/lib/uaccess_with_memcpy.c | 41 ++++++++++++++++++++++++++++++++---
2 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5689c18..39c54cf 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -206,6 +206,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 025f742..3e58d71 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -18,6 +18,7 @@
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
+#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>
@@ -40,7 +41,35 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
return 0;
pmd = pmd_offset(pud, addr);
- if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+ if (unlikely(pmd_none(*pmd)))
+ return 0;
+
+ /*
+ * A pmd can be bad if it refers to a HugeTLB or THP page.
+ *
+ * Both THP and HugeTLB pages have the same pmd layout
+ * and should not be manipulated by the pte functions.
+ *
+ * Lock the page table for the destination and check
+ * to see that it's still huge and whether or not we will
+ * need to fault on write, or if we have a splitting THP.
+ */
+ if (unlikely(pmd_thp_or_huge(*pmd))) {
+ ptl = &current->mm->page_table_lock;
+ spin_lock(ptl);
+ if (unlikely(!pmd_thp_or_huge(*pmd)
+ || pmd_hugewillfault(*pmd)
+ || pmd_trans_splitting(*pmd))) {
+ spin_unlock(ptl);
+ return 0;
+ }
+
+ *ptep = NULL;
+ *ptlp = ptl;
+ return 1;
+ }
+
+ if (unlikely(pmd_bad(*pmd)))
return 0;
pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
@@ -94,7 +123,10 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
from += tocopy;
n -= tocopy;
- pte_unmap_unlock(pte, ptl);
+ if (pte)
+ pte_unmap_unlock(pte, ptl);
+ else
+ spin_unlock(ptl);
}
if (!atomic)
up_read(&current->mm->mmap_sem);
@@ -147,7 +179,10 @@ __clear_user_memset(void __user *addr, unsigned long n)
addr += tocopy;
n -= tocopy;
- pte_unmap_unlock(pte, ptl);
+ if (pte)
+ pte_unmap_unlock(pte, ptl);
+ else
+ spin_unlock(ptl);
}
up_read(&current->mm->mmap_sem);
--
1.8.1.4
In a Thumb2 kernel (CONFIG_THUMB2_KERNEL), kexec's relocation code is
assembled in Thumb2 mode, but cpu_v7_reset() jumps to this code in ARM
state, because its address is page aligned and so has 0 in the LSB.
Assemble this code in ARM mode to fix the issue.
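For background, the CPU selects the instruction set from bit 0 of a
bx/blx target address; a minimal sketch of the interworking rule
(illustration only, the labels are hypothetical):

	ldr	r0, =some_thumb_func + 1	@ bit 0 set: bx enters Thumb state
	bx	r0
	ldr	r0, =some_arm_func	@ bit 0 clear: bx enters ARM state
	bx	r0

Since relocate_new_kernel is page aligned, bit 0 of its address is
always clear and the jump always lands in ARM state, so the code must
be assembled as ARM; hence the .arm directive below.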
Signed-off-by: Taras Kondratiuk <taras.kondratiuk@linaro.org>
---
Based on v3.12-rc4
Cc: Dave Martin <dave.martin@linaro.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: linaro-kernel@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
---
arch/arm/kernel/relocate_kernel.S | 1 +
1 file changed, 1 insertion(+)
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index d0cdedf..a3af323 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -5,6 +5,7 @@
#include <asm/kexec.h>
.globl relocate_new_kernel
+ .arm
relocate_new_kernel:
ldr r0,kexec_indirection_page
--
1.7.9.5
The coherent DMA allocator code contained a compile-time warning
when HugeTLB support was enabled, stating that huge pages were
not supported by the DMA allocator.
Apart from memory pressure, HugeTLB should not affect (or be
affected by) the higher-order pages operated on by the DMA
allocator. Also, the user space mappings returned by arm_dma_mmap
are created via remap_pfn_range, so the Transparent Huge Page
daemon (khugepaged) will leave them alone too.
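The khugepaged point follows from remap_pfn_range() marking the VMA
as special; a rough sketch (an assumption paraphrasing the core mm
behaviour of this era, not part of this patch):

#include <linux/mm.h>

/* Paraphrase of what remap_pfn_range() does to the VMA before
 * installing the PTEs: */
static void mark_as_pfn_mapping(struct vm_area_struct *vma)
{
	/* VM_PFNMAP falls under VM_SPECIAL, which khugepaged's VMA
	 * check refuses to collapse, so regions returned by
	 * arm_dma_mmap are never turned into huge pages. */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
}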
This patch removes the huge page warning from dma-mapping.c.
Signed-off-by: Steve Capper <steve.capper@linaro.org>
---
Hi, I'm resending this patch as it appears to have slipped through the
cracks. Without this patch we will get spurious compiler warnings when
building kernels with huge page support.
Cheers,
--
Steve
---
arch/arm/mm/dma-mapping.c | 3 ---
1 file changed, 3 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7f9b179..9486048 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -249,9 +249,6 @@ static void __dma_free_buffer(struct page *page, size_t size)
}
#ifdef CONFIG_MMU
-#ifdef CONFIG_HUGETLB_PAGE
-#warning ARM Coherent DMA allocator does not (yet) support huge TLB
-#endif
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
--
1.8.1.4