On Fri, Nov 24, 2023 at 04:34:54PM +0000, Joey Gouly wrote:
 arch/arm64/include/asm/mman.h   |  8 +++++++-
 arch/arm64/include/asm/page.h   | 10 ++++++++++
 arch/arm64/mm/mmap.c            |  9 +++++++++
 arch/powerpc/include/asm/page.h | 11 +++++++++++
 arch/x86/include/asm/page.h     | 10 ++++++++++
 fs/proc/task_mmu.c              |  2 ++
 include/linux/mm.h              | 13 -------------
 7 files changed, 49 insertions(+), 14 deletions(-)
It might be worth splitting out the powerpc/x86/generic parts into a separate patch.
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 5966ee4a6154..ecb2d18dc4d7 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -7,7 +7,7 @@
 #include <uapi/asm/mman.h>

 static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
-        unsigned long pkey __always_unused)
+        unsigned long pkey)
 {
         unsigned long ret = 0;

@@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
         if (system_supports_mte() && (prot & PROT_MTE))
                 ret |= VM_MTE;

+#if defined(CONFIG_ARCH_HAS_PKEYS)
+        ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0;
+        ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0;
+        ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0;
+#endif
Is there anywhere that rejects pkey & 8 on arm64? With 128-bit PTEs, if we ever support them, we could have 4-bit pkeys.
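For reference, x86 bounds the key space via arch_max_pkey(), so pkey_alloc() never hands out a key beyond it. If arm64 grew an equivalent, that would answer the question. An untested sketch, with system_supports_poe() standing in for whatever helper this series ends up introducing:

/* untested sketch: bound the key space so pkey_alloc() never returns >= 8 */
static inline int arch_max_pkey(void)
{
        /* 3-bit POIndex today; bump to 16 if 128-bit PTEs grow a 4th bit */
        return system_supports_poe() ? 8 : 1;
}

With that in place, arch_calc_vm_prot_bits() silently dropping bit 3 would at least be unreachable rather than a latent truncation.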
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)

diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 2312e6ee595f..aabfda2516d2 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -49,6 +49,16 @@ int pfn_is_map_memory(unsigned long pfn);

 #define VM_DATA_DEFAULT_FLAGS (VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED)

+#if defined(CONFIG_ARCH_HAS_PKEYS)
+/* A protection key is a 3-bit value */
+# define VM_PKEY_SHIFT  VM_HIGH_ARCH_BIT_2
+# define VM_PKEY_BIT0   VM_HIGH_ARCH_2
+# define VM_PKEY_BIT1   VM_HIGH_ARCH_3
+# define VM_PKEY_BIT2   VM_HIGH_ARCH_4
+# define VM_PKEY_BIT3   0
+# define VM_PKEY_BIT4   0
+#endif
I think we should start from VM_HIGH_ARCH_BIT_0 and just move VM_MTE and VM_MTE_ALLOWED up to VM_HIGH_ARCH_BIT_{4,5}.
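Something like this (untested; assumes a VM_HIGH_ARCH_BIT_5/VM_HIGH_ARCH_5 pair is added alongside the existing ones, and the CONFIG_ARM64_MTE block stays where it currently lives in include/linux/mm.h):

/* untested sketch of the reshuffled layout */
#if defined(CONFIG_ARCH_HAS_PKEYS)
/* A protection key is a 3-bit value */
# define VM_PKEY_SHIFT  VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0   VM_HIGH_ARCH_0
# define VM_PKEY_BIT1   VM_HIGH_ARCH_1
# define VM_PKEY_BIT2   VM_HIGH_ARCH_2
# define VM_PKEY_BIT3   0
# define VM_PKEY_BIT4   0
#endif

#if defined(CONFIG_ARM64_MTE)
# define VM_MTE         VM_HIGH_ARCH_4  /* moved from VM_HIGH_ARCH_0 */
# define VM_MTE_ALLOWED VM_HIGH_ARCH_5  /* assumes VM_HIGH_ARCH_5 is added */
#endif

That keeps VM_PKEY_BIT0..2 at the same bit positions on all three architectures.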
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index e5fcc79b5bfb..a5e75ec333ad 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -330,6 +330,17 @@ static inline unsigned long kaslr_offset(void)
 }

 #include <asm-generic/memory_model.h>
+#if defined(CONFIG_ARCH_HAS_PKEYS)
+# define VM_PKEY_SHIFT  VM_HIGH_ARCH_BIT_0
+/* A protection key is a 5-bit value */
+# define VM_PKEY_BIT0   VM_HIGH_ARCH_0
+# define VM_PKEY_BIT1   VM_HIGH_ARCH_1
+# define VM_PKEY_BIT2   VM_HIGH_ARCH_2
+# define VM_PKEY_BIT3   VM_HIGH_ARCH_3
+# define VM_PKEY_BIT4   VM_HIGH_ARCH_4
+#endif /* CONFIG_ARCH_HAS_PKEYS */
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_PAGE_H */

diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index d18e5c332cb9..b770db1a21e7 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -87,5 +87,15 @@ static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)

 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

+#if defined(CONFIG_ARCH_HAS_PKEYS)
+# define VM_PKEY_SHIFT  VM_HIGH_ARCH_BIT_0
+/* A protection key is a 4-bit value */
+# define VM_PKEY_BIT0   VM_HIGH_ARCH_0
+# define VM_PKEY_BIT1   VM_HIGH_ARCH_1
+# define VM_PKEY_BIT2   VM_HIGH_ARCH_2
+# define VM_PKEY_BIT3   VM_HIGH_ARCH_3
+# define VM_PKEY_BIT4   0
+#endif /* CONFIG_ARCH_HAS_PKEYS */
Rather than moving these to arch code, we could instead keep the generic definitions with some additional CONFIG_ARCH_HAS_PKEYS_{3,4,5}BIT selected from the arch code.
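Roughly this in include/linux/mm.h (untested, and the Kconfig symbol names are only illustrative):

#if defined(CONFIG_ARCH_HAS_PKEYS)
# define VM_PKEY_SHIFT  VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0   VM_HIGH_ARCH_0
# define VM_PKEY_BIT1   VM_HIGH_ARCH_1
# define VM_PKEY_BIT2   VM_HIGH_ARCH_2
# if defined(CONFIG_ARCH_HAS_PKEYS_4BIT) || defined(CONFIG_ARCH_HAS_PKEYS_5BIT)
#  define VM_PKEY_BIT3  VM_HIGH_ARCH_3
# else
#  define VM_PKEY_BIT3  0
# endif
# if defined(CONFIG_ARCH_HAS_PKEYS_5BIT)
#  define VM_PKEY_BIT4  VM_HIGH_ARCH_4
# else
#  define VM_PKEY_BIT4  0
# endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

3-bit keys fall out as the default, with x86 selecting the 4-bit option and powerpc the 5-bit one, and the hunk removing these definitions from include/linux/mm.h could then be dropped entirely.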