The prctls implemented are PR_SET_INDIR_BR_LP_STATUS, PR_GET_INDIR_BR_LP_STATUS and PR_LOCK_INDIR_BR_LP_STATUS. They let userspace query, enable/disable, and lock the indirect branch tracking (landing pad) enforcement state of a task.
Signed-off-by: Deepak Gupta <debug@rivosinc.com>
---
 arch/riscv/include/asm/usercfi.h | 22 ++++++++-
 arch/riscv/kernel/process.c      |  5 +++
 arch/riscv/kernel/usercfi.c      | 76 ++++++++++++++++++++++++++++++++
 3 files changed, 102 insertions(+), 1 deletion(-)
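
As a reviewer aid (not part of the patch), here is a minimal userspace sketch of how a runtime such as libc might drive these prctls after exec: enable landing-pad enforcement, read the status back, then lock it. It assumes the PR_SET/GET/LOCK_INDIR_BR_LP_STATUS numbers and the PR_INDIR_BR_LP_ENABLE flag come from the uapi prctl header changes earlier in this series; the helper name is made up for illustration.

#include <sys/prctl.h>	/* prctl(); PR_* values assumed to come from the patched uapi headers */

/* Hypothetical helper: enable indirect branch tracking for this task, then lock it. */
static int enable_and_lock_indir_br_lp(void)
{
	unsigned long status = 0;

	/* Turn landing-pad enforcement on; fails if unsupported or already locked. */
	if (prctl(PR_SET_INDIR_BR_LP_STATUS, PR_INDIR_BR_LP_ENABLE, 0, 0, 0))
		return -1;

	/* Read the status back; the kernel fills it in via copy_to_user(). */
	if (prctl(PR_GET_INDIR_BR_LP_STATUS, &status, 0, 0, 0))
		return -1;

	if (!(status & PR_INDIR_BR_LP_ENABLE))
		return -1;

	/* Lock the state so a later prctl() cannot disable it again. */
	return prctl(PR_LOCK_INDIR_BR_LP_STATUS, 0, 0, 0, 0) ? -1 : 0;
}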
diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h
index a168ae0fa5d8..8accdc8ec164 100644
--- a/arch/riscv/include/asm/usercfi.h
+++ b/arch/riscv/include/asm/usercfi.h
@@ -16,7 +16,9 @@ struct kernel_clone_args;
 struct cfi_status {
 	unsigned long ubcfi_en : 1; /* Enable for backward cfi. */
 	unsigned long ubcfi_locked : 1;
-	unsigned long rsvd : ((sizeof(unsigned long)*8) - 2);
+	unsigned long ufcfi_en : 1; /* Enable for forward cfi. Note that ELP goes in sstatus */
+	unsigned long ufcfi_locked : 1;
+	unsigned long rsvd : ((sizeof(unsigned long)*8) - 4);
 	unsigned long user_shdw_stk; /* Current user shadow stack pointer */
 	unsigned long shdw_stk_base; /* Base address of shadow stack */
 	unsigned long shdw_stk_size; /* size of shadow stack */
@@ -30,6 +32,9 @@ void set_active_shstk(struct task_struct *task, unsigned long shstk_addr);
 bool is_shstk_enabled(struct task_struct *task);
 bool is_shstk_locked(struct task_struct *task);
 void set_shstk_status(struct task_struct *task, bool enable);
+bool is_indir_lp_enabled(struct task_struct *task);
+bool is_indir_lp_locked(struct task_struct *task);
+void set_indir_lp_status(struct task_struct *task, bool enable);
 
 #define PR_SHADOW_STACK_SUPPORTED_STATUS_MASK (PR_SHADOW_STACK_ENABLE)
 
@@ -72,6 +77,21 @@ static inline void set_shstk_status(struct task_struct *task, bool enable)
 }
 
+static inline bool is_indir_lp_enabled(struct task_struct *task)
+{
+	return false;
+}
+
+static inline bool is_indir_lp_locked(struct task_struct *task)
+{
+	return false;
+}
+
+static inline void set_indir_lp_status(struct task_struct *task, bool enable)
+{
+
+}
+
 #endif /* CONFIG_RISCV_USER_CFI */
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 3fb8b23f629b..ebed7589c51a 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -152,6 +152,11 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
 	set_shstk_status(current, false);
 	set_shstk_base(current, 0, 0);
 	set_active_shstk(current, 0);
+	/*
+	 * disable indirect branch tracking on exec.
+	 * libc will enable it later via prctl.
+	 */
+	set_indir_lp_status(current, false);
 #ifdef CONFIG_64BIT
 	regs->status &= ~SR_UXL;
 
diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c
index cdedf1f78b3e..13920b9d86f3 100644
--- a/arch/riscv/kernel/usercfi.c
+++ b/arch/riscv/kernel/usercfi.c
@@ -69,6 +69,32 @@ void set_shstk_lock(struct task_struct *task)
 	task->thread_info.user_cfi_state.ubcfi_locked = 1;
 }
 
+bool is_indir_lp_enabled(struct task_struct *task)
+{
+	return task->thread_info.user_cfi_state.ufcfi_en ? true : false;
+}
+
+bool is_indir_lp_locked(struct task_struct *task)
+{
+	return task->thread_info.user_cfi_state.ufcfi_locked ? true : false;
+}
+
+void set_indir_lp_status(struct task_struct *task, bool enable)
+{
+	task->thread_info.user_cfi_state.ufcfi_en = enable ? 1 : 0;
+
+	if (enable)
+		task->thread_info.envcfg |= ENVCFG_LPE;
+	else
+		task->thread_info.envcfg &= ~ENVCFG_LPE;
+
+	csr_write(CSR_ENVCFG, task->thread_info.envcfg);
+}
+
+void set_indir_lp_lock(struct task_struct *task)
+{
+	task->thread_info.user_cfi_state.ufcfi_locked = 1;
+}
 /*
  * If size is 0, then to be compatible with regular stack we want it to be as big as
  * regular stack. Else PAGE_ALIGN it and return back
@@ -375,3 +401,53 @@ int arch_lock_shadow_stack_status(struct task_struct *task,
 
 	return 0;
 }
+
+int arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *status)
+{
+	unsigned long fcfi_status = 0;
+
+	if (!cpu_supports_indirect_br_lp_instr())
+		return -EINVAL;
+
+	/* indirect branch tracking is enabled on the task or not */
+	fcfi_status |= (is_indir_lp_enabled(t) ? PR_INDIR_BR_LP_ENABLE : 0);
+
+	return copy_to_user(status, &fcfi_status, sizeof(fcfi_status)) ? -EFAULT : 0;
+}
+
+int arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status)
+{
+	bool enable_indir_lp = false;
+
+	if (!cpu_supports_indirect_br_lp_instr())
+		return -EINVAL;
+
+	/* indirect branch tracking is locked and further can't be modified by user */
+	if (is_indir_lp_locked(t))
+		return -EINVAL;
+
+	/* Reject unknown flags */
+	if (status & ~PR_INDIR_BR_LP_ENABLE)
+		return -EINVAL;
+
+	enable_indir_lp = (status & PR_INDIR_BR_LP_ENABLE) ? true : false;
+	set_indir_lp_status(t, enable_indir_lp);
+
+	return 0;
+}
+
+int arch_lock_indir_br_lp_status(struct task_struct *task,
+				 unsigned long arg)
+{
+	/*
+	 * If indirect branch tracking is not supported or not enabled on task,
+	 * nothing to lock here
+	 */
+	if (!cpu_supports_indirect_br_lp_instr() ||
+	    !is_indir_lp_enabled(task))
+		return -EINVAL;
+
+	set_indir_lp_lock(task);
+
+	return 0;
+}