From: "Mike Rapoport (Microsoft)" rppt@kernel.org
Hi,
Jürgen Groß reported some bugs in interaction of ITS mitigation with execmem [1] when running on a Xen PV guest.
These patches fix the issue by moving all the permissions management of ITS memory allocated from execmem into ITS code.
I didn't test on a real Xen PV guest, but I emulated !PSE variant by force-disabling the ROX cache in x86::execmem_arch_setup().
Peter, I took liberty to put your SoB in the patch that actually implements the execmem permissions management in ITS, please let me know if I need to update something about the authorship.
The patches are against v6.15. They are also available in git: https://web.git.kernel.org/pub/scm/linux/kernel/git/rppt/linux.git/log/?h=it...
[1] https://lore.kernel.org/all/20250528123557.12847-2-jgross@suse.com/
Juergen Gross (1): x86/mm/pat: don't collapse pages without PSE set
Mike Rapoport (Microsoft) (3): x86/Kconfig: only enable ROX cache in execmem when STRICT_MODULE_RWX is set x86/its: move its_pages array to struct mod_arch_specific Revert "mm/execmem: Unify early execmem_cache behaviour"
Peter Zijlstra (Intel) (1): x86/its: explicitly manage permissions for ITS pages
arch/x86/Kconfig | 2 +- arch/x86/include/asm/module.h | 8 ++++ arch/x86/kernel/alternative.c | 89 ++++++++++++++++++++++++++--------- arch/x86/mm/init_32.c | 3 -- arch/x86/mm/init_64.c | 3 -- arch/x86/mm/pat/set_memory.c | 3 ++ include/linux/execmem.h | 8 +--- include/linux/module.h | 5 -- mm/execmem.c | 40 ++-------------- 9 files changed, 82 insertions(+), 79 deletions(-)
base-commit: 0ff41df1cb268fc69e703a08a57ee14ae967d0ca
From: Juergen Gross jgross@suse.com
Collapsing pages to a leaf PMD or PUD should be done only if X86_FEATURE_PSE is available, which is not the case when running e.g. as a Xen PV guest.
Cc: stable@vger.kernel.org Fixes: 41d88484c71c ("x86/mm/pat: restore large ROX pages after fragmentation") Signed-off-by: Juergen Gross jgross@suse.com Link: https://lore.kernel.org/r/20250528123557.12847-3-jgross@suse.com Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org --- arch/x86/mm/pat/set_memory.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index def3d9284254..9292f835cf5a 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -1257,6 +1257,9 @@ static int collapse_pmd_page(pmd_t *pmd, unsigned long addr, pgprot_t pgprot; int i = 0;
+ if (!cpu_feature_enabled(X86_FEATURE_PSE)) + return 0; + addr &= PMD_MASK; pte = pte_offset_kernel(pmd, addr); first = *pte;
From: "Mike Rapoport (Microsoft)" rppt@kernel.org
Currently ROX cache in execmem is enabled regardless of STRICT_MODULE_RWX setting. This breaks an assumption that module memory is writable when STRICT_MODULE_RWX is disabled, for instance for kernel debugging.
Only enable ROX cache in execmem when STRICT_MODULE_RWX is set to restore the original behaviour of module text permissions.
Fixes: 64f6a4e10c05 ("x86: re-enable EXECMEM_ROX support") Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e21cca404943..47932d5f4499 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -88,7 +88,7 @@ config X86 select ARCH_HAS_DMA_OPS if GART_IOMMU || XEN select ARCH_HAS_EARLY_DEBUG if KGDB select ARCH_HAS_ELF_RANDOMIZE - select ARCH_HAS_EXECMEM_ROX if X86_64 + select ARCH_HAS_EXECMEM_ROX if X86_64 && STRICT_MODULE_RWX select ARCH_HAS_FAST_MULTIPLIER select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL
From: "Mike Rapoport (Microsoft)" rppt@kernel.org
The pages with ITS thunks allocated for modules are tracked by an array in 'struct module'.
Since this is very architecture specific data structure, move it to 'struct mod_arch_specific'.
No functional changes.
Fixes: 872df34d7c51 ("x86/its: Use dynamic thunks for indirect branches") Suggested-by: Peter Zijlstra (Intel) peterz@infradead.org Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org --- arch/x86/include/asm/module.h | 8 ++++++++ arch/x86/kernel/alternative.c | 19 ++++++++++--------- include/linux/module.h | 5 ----- 3 files changed, 18 insertions(+), 14 deletions(-)
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index e988bac0a4a1..3c2de4ce3b10 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h @@ -5,12 +5,20 @@ #include <asm-generic/module.h> #include <asm/orc_types.h>
+struct its_array { +#ifdef CONFIG_MITIGATION_ITS + void **pages; + int num; +#endif +}; + struct mod_arch_specific { #ifdef CONFIG_UNWINDER_ORC unsigned int num_orcs; int *orc_unwind_ip; struct orc_entry *orc_unwind; #endif + struct its_array its_pages; };
#endif /* _ASM_X86_MODULE_H */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 45bcff181cba..372ef5dff631 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -195,8 +195,8 @@ void its_fini_mod(struct module *mod) its_page = NULL; mutex_unlock(&text_mutex);
- for (int i = 0; i < mod->its_num_pages; i++) { - void *page = mod->its_page_array[i]; + for (int i = 0; i < mod->arch.its_pages.num; i++) { + void *page = mod->arch.its_pages.pages[i]; execmem_restore_rox(page, PAGE_SIZE); } } @@ -206,11 +206,11 @@ void its_free_mod(struct module *mod) if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) return;
- for (int i = 0; i < mod->its_num_pages; i++) { - void *page = mod->its_page_array[i]; + for (int i = 0; i < mod->arch.its_pages.num; i++) { + void *page = mod->arch.its_pages.pages[i]; execmem_free(page); } - kfree(mod->its_page_array); + kfree(mod->arch.its_pages.pages); } #endif /* CONFIG_MODULES */
@@ -223,14 +223,15 @@ static void *its_alloc(void)
#ifdef CONFIG_MODULES if (its_mod) { - void *tmp = krealloc(its_mod->its_page_array, - (its_mod->its_num_pages+1) * sizeof(void *), + struct its_array *pages = &its_mod->arch.its_pages; + void *tmp = krealloc(pages->pages, + (pages->num+1) * sizeof(void *), GFP_KERNEL); if (!tmp) return NULL;
- its_mod->its_page_array = tmp; - its_mod->its_page_array[its_mod->its_num_pages++] = page; + pages->pages = tmp; + pages->pages[pages->num++] = page;
execmem_make_temp_rw(page, PAGE_SIZE); } diff --git a/include/linux/module.h b/include/linux/module.h index 8050f77c3b64..b3329110d668 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -586,11 +586,6 @@ struct module { atomic_t refcnt; #endif
-#ifdef CONFIG_MITIGATION_ITS - int its_num_pages; - void **its_page_array; -#endif - #ifdef CONFIG_CONSTRUCTORS /* Constructor functions. */ ctor_fn_t *ctors;
Hi,
Thanks for your patch.
FYI: kernel test robot notices the stable kernel rule is not satisfied.
The check is based on https://www.kernel.org/doc/html/latest/process/stable-kernel-rules.html#opti...
Rule: add the tag "Cc: stable@vger.kernel.org" in the sign-off area to have the patch automatically included in the stable tree. Subject: [PATCH 3/5] x86/its: move its_pages array to struct mod_arch_specific Link: https://lore.kernel.org/stable/20250603111446.2609381-4-rppt%40kernel.org
From: "Peter Zijlstra (Intel)" peterz@infradead.org
execmem_alloc() sets permissions differently depending on the kernel configuration, CPU support for PSE and whether a page is allocated before or after mark_rodata_ro().
Add tracking for pages allocated for ITS when patching the core kernel and make sure the permissions for ITS pages are explicitly managed for both kernel and module allocations.
Fixes: 872df34d7c51 ("x86/its: Use dynamic thunks for indirect branches") Signed-off-by: Peter Zijlstra (Intel) peterz@infradead.org Co-developed-by: Mike Rapoport (Microsoft) rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org --- arch/x86/kernel/alternative.c | 84 ++++++++++++++++++++++++++--------- 1 file changed, 63 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 372ef5dff631..8289e9e1f954 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -138,6 +138,25 @@ static struct module *its_mod; #endif static void *its_page; static unsigned int its_offset; +struct its_array its_pages; + +static void *__its_alloc(struct its_array *pages) +{ + void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE); + + if (!page) + return NULL; + + void *tmp = krealloc(pages->pages, (pages->num+1) * sizeof(void *), + GFP_KERNEL); + if (!tmp) + return NULL; + + pages->pages = tmp; + pages->pages[pages->num++] = page; + + return no_free_ptr(page); +}
/* Initialize a thunk with the "jmp *reg; int3" instructions. */ static void *its_init_thunk(void *thunk, int reg) @@ -173,6 +192,21 @@ static void *its_init_thunk(void *thunk, int reg) return thunk + offset; }
+static void its_pages_protect(struct its_array *pages) +{ + for (int i = 0; i < pages->num; i++) { + void *page = pages->pages[i]; + execmem_restore_rox(page, PAGE_SIZE); + } +} + +static void its_fini_core(void) +{ + if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) + its_pages_protect(&its_pages); + kfree(its_pages.pages); +} + #ifdef CONFIG_MODULES void its_init_mod(struct module *mod) { @@ -195,10 +229,8 @@ void its_fini_mod(struct module *mod) its_page = NULL; mutex_unlock(&text_mutex);
- for (int i = 0; i < mod->arch.its_pages.num; i++) { - void *page = mod->arch.its_pages.pages[i]; - execmem_restore_rox(page, PAGE_SIZE); - } + if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + its_pages_protect(&mod->arch.its_pages); }
void its_free_mod(struct module *mod) @@ -212,32 +244,38 @@ void its_free_mod(struct module *mod) } kfree(mod->arch.its_pages.pages); } -#endif /* CONFIG_MODULES */
-static void *its_alloc(void) +static void *its_alloc_mod(void) { - void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE); + void *page = __its_alloc(&its_mod->arch.its_pages);
- if (!page) - return NULL; + if (page) + execmem_make_temp_rw(page, PAGE_SIZE);
-#ifdef CONFIG_MODULES - if (its_mod) { - struct its_array *pages = &its_mod->arch.its_pages; - void *tmp = krealloc(pages->pages, - (pages->num+1) * sizeof(void *), - GFP_KERNEL); - if (!tmp) - return NULL; + return page; +} +#endif /* CONFIG_MODULES */
- pages->pages = tmp; - pages->pages[pages->num++] = page; +static void *its_alloc_core(void) +{ + void *page = __its_alloc(&its_pages);
+ if (page) { execmem_make_temp_rw(page, PAGE_SIZE); + set_memory_x((unsigned long)page, 1); } + + return page; +} + +static void *its_alloc(void) +{ +#ifdef CONFIG_MODULES + if (its_mod) + return its_alloc_mod(); #endif /* CONFIG_MODULES */
- return no_free_ptr(page); + return its_alloc_core(); }
static void *its_allocate_thunk(int reg) @@ -291,7 +329,9 @@ u8 *its_static_thunk(int reg) return thunk; }
-#endif +#else +static inline void its_fini_core(void) {} +#endif /* CONFIG_MITIGATION_ITS */
/* * Nomenclature for variable names to simplify and clarify this code and ease @@ -2368,6 +2408,8 @@ void __init alternative_instructions(void) apply_retpolines(__retpoline_sites, __retpoline_sites_end); apply_returns(__return_sites, __return_sites_end);
+ its_fini_core(); + /* * Adjust all CALL instructions to point to func()-10, including * those in .altinstr_replacement.
On Tue, Jun 03, 2025 at 02:14:44PM +0300, Mike Rapoport wrote:
From: "Peter Zijlstra (Intel)" peterz@infradead.org
execmem_alloc() sets permissions differently depending on the kernel configuration, CPU support for PSE and whether a page is allocated before or after mark_rodata_ro().
Add tracking for pages allocated for ITS when patching the core kernel and make sure the permissions for ITS pages are explicitly managed for both kernel and module allocations.
Fixes: 872df34d7c51 ("x86/its: Use dynamic thunks for indirect branches") Signed-off-by: Peter Zijlstra (Intel) peterz@infradead.org Co-developed-by: Mike Rapoport (Microsoft) rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org
How about something like this on top?
--- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -121,7 +121,6 @@ struct its_array its_pages; static void *__its_alloc(struct its_array *pages) { void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE); - if (!page) return NULL;
@@ -172,6 +171,9 @@ static void *its_init_thunk(void *thunk,
static void its_pages_protect(struct its_array *pages) { + if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) + return; + for (int i = 0; i < pages->num; i++) { void *page = pages->pages[i]; execmem_restore_rox(page, PAGE_SIZE); @@ -180,8 +182,7 @@ static void its_pages_protect(struct its
static void its_fini_core(void) { - if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) - its_pages_protect(&its_pages); + its_pages_protect(&its_pages); kfree(its_pages.pages); }
@@ -207,8 +208,7 @@ void its_fini_mod(struct module *mod) its_page = NULL; mutex_unlock(&text_mutex);
- if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) - its_pages_protect(&mod->arch.its_pages); + its_pages_protect(&mod->arch.its_pages); }
void its_free_mod(struct module *mod) @@ -222,40 +222,29 @@ void its_free_mod(struct module *mod) } kfree(mod->arch.its_pages.pages); } +#endif /* CONFIG_MODULES */
-static void *its_alloc_mod(void) +static void *its_alloc(void) { - void *page = __its_alloc(&its_mod->arch.its_pages); - - if (page) - execmem_make_temp_rw(page, PAGE_SIZE); + struct its_array *pages = &its_pages; + void *page;
- return page; -} -#endif /* CONFIG_MODULES */ +#ifdef CONFIG_MODULES + if (its_mod) + pages = &its_mod->arch.its_pages; +#endif
-static void *its_alloc_core(void) -{ - void *page = __its_alloc(&its_pages); + page = __its_alloc(pages); + if (!page) + return NULL;
- if (page) { - execmem_make_temp_rw(page, PAGE_SIZE); + execmem_make_temp_rw(page, PAGE_SIZE); + if (pages == &its_pages) set_memory_x((unsigned long)page, 1); - }
return page; }
-static void *its_alloc(void) -{ -#ifdef CONFIG_MODULES - if (its_mod) - return its_alloc_mod(); -#endif /* CONFIG_MODULES */ - - return its_alloc_core(); -} - static void *its_allocate_thunk(int reg) { int size = 3 + (reg / 8);
On Tue, Jun 03, 2025 at 03:58:45PM +0200, Peter Zijlstra wrote:
On Tue, Jun 03, 2025 at 02:14:44PM +0300, Mike Rapoport wrote:
From: "Peter Zijlstra (Intel)" peterz@infradead.org
execmem_alloc() sets permissions differently depending on the kernel configuration, CPU support for PSE and whether a page is allocated before or after mark_rodata_ro().
Add tracking for pages allocated for ITS when patching the core kernel and make sure the permissions for ITS pages are explicitly managed for both kernel and module allocations.
Fixes: 872df34d7c51 ("x86/its: Use dynamic thunks for indirect branches") Signed-off-by: Peter Zijlstra (Intel) peterz@infradead.org Co-developed-by: Mike Rapoport (Microsoft) rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org
How about something like this on top?
Works for me :)
--- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -121,7 +121,6 @@ struct its_array its_pages; static void *__its_alloc(struct its_array *pages) { void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);
- if (!page) return NULL;
@@ -172,6 +171,9 @@ static void *its_init_thunk(void *thunk, static void its_pages_protect(struct its_array *pages) {
- if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
return;
But modules generally use STRICT_MODULE_RWX. Do you want to make the its pages stricter than normal module text?
for (int i = 0; i < pages->num; i++) { void *page = pages->pages[i]; execmem_restore_rox(page, PAGE_SIZE); @@ -180,8 +182,7 @@ static void its_pages_protect(struct its static void its_fini_core(void) {
- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
its_pages_protect(&its_pages);
- its_pages_protect(&its_pages); kfree(its_pages.pages);
} @@ -207,8 +208,7 @@ void its_fini_mod(struct module *mod) its_page = NULL; mutex_unlock(&text_mutex);
- if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
its_pages_protect(&mod->arch.its_pages);
- its_pages_protect(&mod->arch.its_pages);
}
On Tue, Jun 03, 2025 at 05:36:41PM +0300, Mike Rapoport wrote:
static void its_pages_protect(struct its_array *pages) {
- if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
return;
But modules generally use STRICT_MODULE_RWX.
Oh, I can't read anymore :-( I'll undo that one.
On 6/3/25 14:14, Mike Rapoport wrote:
From: "Peter Zijlstra (Intel)" peterz@infradead.org
execmem_alloc() sets permissions differently depending on the kernel configuration, CPU support for PSE and whether a page is allocated before or after mark_rodata_ro().
Add tracking for pages allocated for ITS when patching the core kernel and make sure the permissions for ITS pages are explicitly managed for both kernel and module allocations.
Fixes: 872df34d7c51 ("x86/its: Use dynamic thunks for indirect branches") Signed-off-by: Peter Zijlstra (Intel) peterz@infradead.org Co-developed-by: Mike Rapoport (Microsoft) rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org
Reviewed-by: Nikolay Borisov nik.borisov@suse.com
From: "Mike Rapoport (Microsoft)" rppt@kernel.org
The commit d6d1e3e6580c ("mm/execmem: Unify early execmem_cache behaviour") changed early behaviour of execmem ROX cache to allow its usage in early x86 code that allocates text pages when CONFIG_MITIGATION_ITS is enabled.
The permission management of the pages allocated from execmem for ITS mitigation is now completely contained in arch/x86/kernel/alternative.c and therefore there is no need to special case early allocations in execmem.
This reverts commit d6d1e3e6580ca35071ad474381f053cbf1fb6414.
Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org --- arch/x86/mm/init_32.c | 3 --- arch/x86/mm/init_64.c | 3 --- include/linux/execmem.h | 8 +------- mm/execmem.c | 40 +++------------------------------------- 4 files changed, 4 insertions(+), 50 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index bb8d99e717b9..148eba50265a 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -30,7 +30,6 @@ #include <linux/initrd.h> #include <linux/cpumask.h> #include <linux/gfp.h> -#include <linux/execmem.h>
#include <asm/asm.h> #include <asm/bios_ebda.h> @@ -756,8 +755,6 @@ void mark_rodata_ro(void) pr_info("Write protecting kernel text and read-only data: %luk\n", size >> 10);
- execmem_cache_make_ro(); - kernel_set_to_readonly = 1;
#ifdef CONFIG_CPA_DEBUG diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 949a447f75ec..7c4f6f591f2b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -34,7 +34,6 @@ #include <linux/gfp.h> #include <linux/kcore.h> #include <linux/bootmem_info.h> -#include <linux/execmem.h>
#include <asm/processor.h> #include <asm/bios_ebda.h> @@ -1392,8 +1391,6 @@ void mark_rodata_ro(void) (end - start) >> 10); set_memory_ro(start, (end - start) >> PAGE_SHIFT);
- execmem_cache_make_ro(); - kernel_set_to_readonly = 1;
/* diff --git a/include/linux/execmem.h b/include/linux/execmem.h index ca42d5e46ccc..3be35680a54f 100644 --- a/include/linux/execmem.h +++ b/include/linux/execmem.h @@ -54,7 +54,7 @@ enum execmem_range_flags { EXECMEM_ROX_CACHE = (1 << 1), };
-#if defined(CONFIG_ARCH_HAS_EXECMEM_ROX) && defined(CONFIG_EXECMEM) +#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX /** * execmem_fill_trapping_insns - set memory to contain instructions that * will trap @@ -94,15 +94,9 @@ int execmem_make_temp_rw(void *ptr, size_t size); * Return: 0 on success or negative error code on failure. */ int execmem_restore_rox(void *ptr, size_t size); - -/* - * Called from mark_readonly(), where the system transitions to ROX. - */ -void execmem_cache_make_ro(void); #else static inline int execmem_make_temp_rw(void *ptr, size_t size) { return 0; } static inline int execmem_restore_rox(void *ptr, size_t size) { return 0; } -static inline void execmem_cache_make_ro(void) { } #endif
/** diff --git a/mm/execmem.c b/mm/execmem.c index 6f7a2653b280..e6c4f5076ca8 100644 --- a/mm/execmem.c +++ b/mm/execmem.c @@ -254,34 +254,6 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size) return ptr; }
-static bool execmem_cache_rox = false; - -void execmem_cache_make_ro(void) -{ - struct maple_tree *free_areas = &execmem_cache.free_areas; - struct maple_tree *busy_areas = &execmem_cache.busy_areas; - MA_STATE(mas_free, free_areas, 0, ULONG_MAX); - MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX); - struct mutex *mutex = &execmem_cache.mutex; - void *area; - - execmem_cache_rox = true; - - mutex_lock(mutex); - - mas_for_each(&mas_free, area, ULONG_MAX) { - unsigned long pages = mas_range_len(&mas_free) >> PAGE_SHIFT; - set_memory_ro(mas_free.index, pages); - } - - mas_for_each(&mas_busy, area, ULONG_MAX) { - unsigned long pages = mas_range_len(&mas_busy) >> PAGE_SHIFT; - set_memory_ro(mas_busy.index, pages); - } - - mutex_unlock(mutex); -} - static int execmem_cache_populate(struct execmem_range *range, size_t size) { unsigned long vm_flags = VM_ALLOW_HUGE_VMAP; @@ -302,15 +274,9 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size) /* fill memory with instructions that will trap */ execmem_fill_trapping_insns(p, alloc_size, /* writable = */ true);
- if (execmem_cache_rox) { - err = set_memory_rox((unsigned long)p, vm->nr_pages); - if (err) - goto err_free_mem; - } else { - err = set_memory_x((unsigned long)p, vm->nr_pages); - if (err) - goto err_free_mem; - } + err = set_memory_rox((unsigned long)p, vm->nr_pages); + if (err) + goto err_free_mem;
err = execmem_cache_add(p, alloc_size); if (err)
linux-stable-mirror@lists.linaro.org