The envcfg CSR defines enabling bits for the cache management instructions and will soon also control enabling of the control flow integrity and pointer masking features.
Forward cfi and backward cfi enabling is controlled via envcfg and thus needs to happen on a per-thread basis.
This patch creates a placeholder for the envcfg CSR in `thread_info` and adds logic to save and restore it on task switching.
Signed-off-by: Deepak Gupta <debug@rivosinc.com>
---
 arch/riscv/include/asm/switch_to.h   | 10 ++++++++++
 arch/riscv/include/asm/thread_info.h |  1 +
 2 files changed, 11 insertions(+)
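Note (illustration only, not part of the diff below): the new thread_info.envcfg
field is presumably meant to be set by later enabling patches, which would let
__switch_to_envcfg() program the CSR on the next switch-in and write the CSR
directly when the target is the current task. A minimal sketch of such an
enabling path, assuming a hypothetical ENVCFG_SSE shadow-stack-enable bit that
this patch does not define:

	/* hypothetical helper, not introduced by this patch */
	static void task_envcfg_set(struct task_struct *task, unsigned long bits)
	{
		/* record the per-thread enable, e.g. an assumed ENVCFG_SSE */
		task->thread_info.envcfg |= bits;

		/* the CSR is only live for the running task; update it now if needed */
		if (task == current)
			csr_write(CSR_ENVCFG, task->thread_info.envcfg);
	}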
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 7efdb0584d47..2d9a00a30394 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -69,6 +69,15 @@ static __always_inline bool has_fpu(void) { return false; }
 #define __switch_to_fpu(__prev, __next) do { } while (0)
 #endif
 
+static inline void __switch_to_envcfg(struct task_struct *next)
+{
+	register unsigned long envcfg = next->thread_info.envcfg;
+
+	asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0", 0,
+				  RISCV_ISA_EXT_XLINUXENVCFG, 1)
+			:: "r" (envcfg) : "memory");
+}
+
 extern struct task_struct *__switch_to(struct task_struct *,
 				       struct task_struct *);
 
@@ -80,6 +89,7 @@ do {							\
 		__switch_to_fpu(__prev, __next);	\
 	if (has_vector())				\
 		__switch_to_vector(__prev, __next);	\
+	__switch_to_envcfg(__next);			\
 	((last) = __switch_to(__prev, __next));		\
 } while (0)
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 5d473343634b..a503bdc2f6dd 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -56,6 +56,7 @@ struct thread_info {
 	long			user_sp;	/* User stack pointer */
 	int			cpu;
 	unsigned long		syscall_work;	/* SYSCALL_WORK_ flags */
+	unsigned long		envcfg;
 #ifdef CONFIG_SHADOW_CALL_STACK
 	void			*scs_base;
 	void			*scs_sp;