Map pages flagged as being part of a GCS as such rather than using the full set of generic VM flags.
This is done using a conditional rather than extending the size of protection_map since that would make for a very sparse array.
Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/mman.h |  6 ++++++
 arch/arm64/mm/mmap.c          | 13 ++++++++++++-
 2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index c21849ffdd88..4c4615867713 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -61,6 +61,12 @@ static inline bool arch_validate_flags(unsigned long vm_flags)
 			return false;
 	}
 
+	if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) {
+		/* An executable GCS isn't a good idea */
+		if (vm_flags & (VM_EXEC | VM_ARM64_BTI))
+			return false;
+	}
+
 	return true;
 }
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 8f5b7ce857ed..e6fc7ef83ea1 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -79,9 +79,20 @@ arch_initcall(adjust_protection_map);
 
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
-	pteval_t prot = pgprot_val(protection_map[vm_flags &
+	pteval_t prot;
+
+	/* If this is a GCS then only interpret VM_WRITE. */
+	if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) {
+		if (vm_flags & VM_WRITE)
+			prot = _PAGE_GCS;
+		else
+			prot = _PAGE_GCS_RO;
+	} else {
+		prot = pgprot_val(protection_map[vm_flags &
 			   (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
+	}
 
+	/* VM_ARM64_BTI on a GCS is rejected in arch_validate_flags() */
 	if (vm_flags & VM_ARM64_BTI)
 		prot |= PTE_GP;
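
For illustration only, not part of the patch: a minimal userspace sketch of the
behaviour the checks above enforce. It assumes the map_shadow_stack() syscall
added elsewhere in this series (with __NR_map_shadow_stack taken to be 453, as
in the generic syscall table) and a GCS-capable kernel and CPU. It maps a GCS
and expects an attempt to make it executable to fail, since
arch_validate_flags() rejects VM_EXEC on a GCS mapping.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_map_shadow_stack
#define __NR_map_shadow_stack 453	/* assumed syscall number */
#endif

int main(void)
{
	unsigned long size = 4096;
	void *gcs;

	/* Let the kernel choose the address; no optional flags. */
	gcs = (void *)syscall(__NR_map_shadow_stack, 0UL, size, 0U);
	if (gcs == MAP_FAILED) {
		fprintf(stderr, "map_shadow_stack: %s\n", strerror(errno));
		return 1;
	}

	/* An executable GCS should be refused by arch_validate_flags(). */
	if (mprotect(gcs, size, PROT_READ | PROT_EXEC) == 0) {
		fprintf(stderr, "unexpected: GCS became executable\n");
		return 1;
	}

	printf("mprotect(PROT_EXEC) on GCS rejected: %s\n", strerror(errno));
	return 0;
}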