On Thursday 29 January 2015 11:23:42 Mark Brown wrote:
> On Thu, Jan 29, 2015 at 09:27:30AM +0000, Build bot for Mark Brown wrote:
> For a little while now (about a week IIRC) fs/dax.c has been breaking the ARM allmodconfig build due to a missing declaration of copy_user_page():
> 	arm-allmodconfig
> ../fs/dax.c:266:2: error: implicit declaration of function 'copy_user_page' [-Werror=implicit-function-declaration]
> I have to confess I'm not 100% sure this isn't an ARM bug, as it looks like include/linux/mm.h ought to pull it in from asm/page.h, but I'm not sure where it *should* be coming from.
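For reference, on most architectures copy_user_page() does come straight from asm/page.h; the asm-generic fallback simply maps it to copy_page(), roughly like this (quoted from memory, so treat the exact lines as approximate):

	/* include/asm-generic/page.h (approximate) */
	#define clear_user_page(page, vaddr, pg)	clear_page(page)
	#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

ARM cannot take that shortcut on VIPT-aliasing caches, because picking the right cache colour needs the user-space address and the destination page, which is why the patch below routes copy_user_page() through cpu_user_fns.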
Russell has already created a patch, and I have made sure that it builds fine with random configurations. It could use some more runtime testing.
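The patch adds a cpu_copy_user_page() method to struct cpu_user_fns next to the existing cpu_copy_user_highpage() one, so copy_user_page() dispatches to a per-CPU implementation the same way copy_user_highpage() already does. A caller that already holds kernel mappings of both pages then just uses the generic helper; a minimal sketch (the names below are made up for illustration, this is not the fs/dax.c code):

	#include <linux/mm.h>

	/*
	 * dst_kaddr/src_kaddr: kernel mappings of the two pages,
	 * user_vaddr: user address the destination will be mapped at,
	 * dst_page: the destination struct page (needed for cache colouring).
	 */
	static void example_copy(void *dst_kaddr, const void *src_kaddr,
				 unsigned long user_vaddr, struct page *dst_page)
	{
		copy_user_page(dst_kaddr, src_kaddr, user_vaddr, dst_page);
	}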
Arnd
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 4355f0ec44d6..7b972e2c61c2 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -110,8 +110,12 @@
 struct page;
 struct vm_area_struct;
 
+typedef void copy_user_page_t(void *, const void *, unsigned long,
+		struct page *);
+
 struct cpu_user_fns {
 	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+	copy_user_page_t *cpu_copy_user_page;
 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
 			unsigned long vaddr, struct vm_area_struct *vma);
 };
 
@@ -120,20 +124,26 @@ struct cpu_user_fns {
 extern struct cpu_user_fns cpu_user;
 
 #define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
+#define __cpu_copy_user_page		cpu_user.cpu_copy_user_page
 #define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
 
 #else
 
 #define __cpu_clear_user_highpage	__glue(_USER,_clear_user_highpage)
+#define __cpu_copy_user_page		__glue(_USER,_copy_user_page)
 #define __cpu_copy_user_highpage	__glue(_USER,_copy_user_highpage)
 
 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
+extern void __cpu_copy_user_page(void *to, const void *from,
+		unsigned long user, struct page *);
 extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 			unsigned long vaddr, struct vm_area_struct *vma);
 #endif
 
 #define clear_user_highpage(page,vaddr)		\
-	 __cpu_clear_user_highpage(page, vaddr)
+	__cpu_clear_user_highpage(page, vaddr)
+#define copy_user_page(to,from,vaddr,pg)	\
+	__cpu_copy_user_page(to, from, vaddr, pg)
 
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 #define copy_user_highpage(to,from,vaddr,vma)	\
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index d130a5ece5d5..f68befd19e1c 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -17,8 +17,7 @@
 /*
  * Faraday optimised copy_user_page
  */
-static void __naked
-fa_copy_user_page(void *kto, const void *kfrom)
+static void __naked __fa_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, lr}			@ 2\n\
@@ -39,6 +38,12 @@ fa_copy_user_page(void *kto, const void *kfrom)
 	: "I" (PAGE_SIZE / 32));
 }
 
+void fa_copy_user_page(void *to, const void *from, unsigned long vaddr,
+		struct page *pto)
+{
+	__fa_copy_user_page(to, from);
+}
+
 void fa_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -46,7 +51,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
 	kto = kmap_atomic(to);
 	kfrom = kmap_atomic(from);
-	fa_copy_user_page(kto, kfrom);
+	__fa_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom);
 	kunmap_atomic(kto);
 }
@@ -82,5 +87,6 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 struct cpu_user_fns fa_user_fns __initdata = {
 	.cpu_clear_user_highpage = fa_clear_user_highpage,
+	.cpu_copy_user_page	= fa_copy_user_page,
 	.cpu_copy_user_highpage	= fa_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index 49ee0c1a7209..f4388216fc09 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -13,8 +13,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 
-static void __naked
-feroceon_copy_user_page(void *kto, const void *kfrom)
+static void __naked __feroceon_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4-r9, lr}		\n\
@@ -67,6 +66,12 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
 	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
 }
 
+void feroceon_copy_user_page(void *to, const void *from, unsigned long vaddr,
+		struct page *pto)
+{
+	__feroceon_copy_user_page(to, from);
+}
+
 void feroceon_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -75,7 +80,7 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
 	kto = kmap_atomic(to);
 	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
-	feroceon_copy_user_page(kto, kfrom);
+	__feroceon_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom);
 	kunmap_atomic(kto);
 }
@@ -107,6 +112,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 struct cpu_user_fns feroceon_user_fns __initdata = {
 	.cpu_clear_user_highpage = feroceon_clear_user_highpage,
+	.cpu_copy_user_page	= feroceon_copy_user_page,
 	.cpu_copy_user_highpage	= feroceon_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 1267e64133b9..fdce7efcf04f 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -40,8 +40,7 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
  * instruction.  If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __naked
-mc_copy_user_page(void *from, void *to)
+static void __naked mc_copy_user_page(const void *from, void *to)
 {
 	asm volatile(
 	"stmfd	sp!, {r4, lr}			@ 2\n\
@@ -64,6 +63,12 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
 }
 
+void v4_mc_copy_user_page(void *to, const void *from, unsigned long vaddr,
+		struct page *pto)
+{
+	mc_copy_user_page(from, to);
+}
+
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -111,5 +116,6 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 struct cpu_user_fns v4_mc_user_fns __initdata = {
 	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
+	.cpu_copy_user_page	= v4_mc_copy_user_page,
 	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 067d0fdd630c..445cb4e74d6a 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -22,8 +22,7 @@
  * instruction.  If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __naked
-v4wb_copy_user_page(void *kto, const void *kfrom)
+static void __naked __v4wb_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, lr}			@ 2\n\
@@ -47,6 +46,12 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
 	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
 }
 
+void v4wb_copy_user_page(void *to, const void *from, unsigned long vaddr,
+		struct page *pto)
+{
+	__v4wb_copy_user_page(to, from);
+}
+
 void v4wb_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -55,7 +60,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
 	kto = kmap_atomic(to);
 	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
-	v4wb_copy_user_page(kto, kfrom);
+	__v4wb_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom);
 	kunmap_atomic(kto);
 }
@@ -91,5 +96,6 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 struct cpu_user_fns v4wb_user_fns __initdata = {
 	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
+	.cpu_copy_user_page	= v4wb_copy_user_page,
 	.cpu_copy_user_highpage	= v4wb_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index b85c5da2e510..9aaa5d03c915 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -20,8 +20,7 @@
  * dirty data in the cache.  However, we do have to ensure that
  * subsequent reads are up to date.
  */
-static void __naked
-v4wt_copy_user_page(void *kto, const void *kfrom)
+static void __naked __v4wt_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, lr}			@ 2\n\
@@ -43,6 +42,12 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
 	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
 }
 
+void v4wt_copy_user_page(void *to, const void *from, unsigned long vaddr,
+		struct page *pto)
+{
+	__v4wt_copy_user_page(to, from);
+}
+
 void v4wt_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -50,7 +55,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
 	kto = kmap_atomic(to);
 	kfrom = kmap_atomic(from);
-	v4wt_copy_user_page(kto, kfrom);
+	__v4wt_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom);
 	kunmap_atomic(kto);
 }
@@ -84,5 +89,6 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 struct cpu_user_fns v4wt_user_fns __initdata = {
 	.cpu_clear_user_highpage = v4wt_clear_user_highpage,
+	.cpu_copy_user_page	= v4wt_copy_user_page,
 	.cpu_copy_user_highpage	= v4wt_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 70423345da26..9820d0efa2a1 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -98,6 +98,21 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	raw_spin_unlock(&v6_lock);
 }
 
+static void v6_copy_user_page_aliasing(void *kto_unused, const void *kfrom,
+		unsigned long vaddr, struct page *page_to)
+{
+	unsigned int offset = CACHE_COLOUR(vaddr);
+	unsigned long kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
+
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(page_to));
+
+	raw_spin_lock(&v6_lock);
+	set_top_pte(kto, mk_pte(page_to, PAGE_KERNEL));
+	copy_page((void *)kto, kfrom);
+	raw_spin_unlock(&v6_lock);
+}
+
 /*
  * Clear the user page.  We need to deal with the aliasing issues,
  * so remap the kernel page into the same cache colour as the user
@@ -124,6 +139,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
 struct cpu_user_fns v6_user_fns __initdata = {
 	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
+	.cpu_copy_user_page	= (copy_user_page_t *)copy_page,
 	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
 };
 
@@ -131,6 +147,7 @@ static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
 		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
+		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing,
 		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
 	}
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 03a2042aced5..fc67fc653ea3 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -29,8 +29,7 @@
  * if we eventually end up using our copied page.
  *
  */
-static void __naked
-xsc3_mc_copy_user_page(void *kto, const void *kfrom)
+static void __naked __xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, r5, lr}		\n\
@@ -70,6 +69,12 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
 }
 
+void xsc3_mc_copy_user_page(void *to, const void *from, unsigned long vaddr,
+		struct page *pto)
+{
+	__xsc3_mc_copy_user_page(to, from);
+}
+
 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -78,7 +83,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 	kto = kmap_atomic(to);
 	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
-	xsc3_mc_copy_user_page(kto, kfrom);
+	__xsc3_mc_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom);
 	kunmap_atomic(kto);
 }
@@ -110,5 +115,6 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 struct cpu_user_fns xsc3_mc_user_fns __initdata = {
 	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
+	.cpu_copy_user_page	= xsc3_mc_copy_user_page,
 	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 0fb85025344d..6ac25571a868 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -36,8 +36,7 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
  * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
  * and merged as appropriate.
  */
-static void __naked
-mc_copy_user_page(void *from, void *to)
+static void __naked __xscale_mc_copy_user_page(const void *from, void *to)
 {
 	/*
 	 * Strangely enough, best performance is achieved
@@ -84,6 +83,12 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
 }
 
+void xscale_mc_copy_user_page(void *to, const void *from,
+		unsigned long vaddr, struct page *pto)
+{
+	__xscale_mc_copy_user_page(from, to);
+}
+
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -96,7 +101,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
 
-	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
+	__xscale_mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 	raw_spin_unlock(&minicache_lock);
 
@@ -131,5 +136,6 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
 	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
+	.cpu_copy_user_page	= xscale_mc_copy_user_page,
 	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
 };