Add strict validation for vector CSRs when setting them via ptrace:

- reject attempts to set reserved bits or invalid field combinations
- enforce strict VL checks against the calculated VLMAX values
The vector spec 1.0 allows normal applications to request candidate VL values and read back the hardware-adjusted result; see section 6 of the spec for details. Do not allow such flexibility in vector ptrace operations: strictly enforce that the supplied VL is valid.
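For illustration (the numbers are hypothetical, not taken from this patch): on a hart with VLEN=128, a vtype selecting SEW=32 and integer LMUL=2 gives

  VLMAX = LMUL * VLEN / SEW = 2 * 128 / 32 = 8

so a vsetvli requesting AVL=100 simply returns vl=8, while a ptrace write of vl=100 with that vtype is rejected with -EINVAL by the checks below.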
The traced process may not update its saved vector context if no vector instructions execute between breakpoints. The strict ptrace checks therefore make sure that debuggers maintain an accurate view of the tracee's vector context across multiple halt/resume debug cycles.
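For reference, a minimal debugger-side sketch of the read-modify-write cycle that these checks constrain. It is illustrative only and not part of the patch: the helper name set_tracee_vl is hypothetical, while NT_RISCV_VECTOR and struct __riscv_v_regset_state are assumed to come from the kernel uapi headers, with the tracee already ptrace-stopped:

  #include <elf.h>            /* NT_RISCV_VECTOR, if provided by libc */
  #include <stdlib.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/uio.h>
  #include <asm/ptrace.h>     /* struct __riscv_v_regset_state */

  /* Read the tracee's vector state, patch vl, and write it back. */
  static long set_tracee_vl(pid_t pid, unsigned long new_vl)
  {
          struct __riscv_v_regset_state hdr, *vr;
          struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
          long ret;

          /* Partial read of the fixed header to learn the tracee's VLENB. */
          if (ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov) < 0)
                  return -1;

          /* Full read: header plus the 32 vector registers, VLENB bytes each. */
          iov.iov_len = sizeof(*vr) + 32 * hdr.vlenb;
          iov.iov_base = vr = calloc(1, iov.iov_len);
          if (!vr)
                  return -1;

          ret = ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov);
          if (!ret) {
                  /*
                   * With strict validation, the write below fails with EINVAL
                   * unless new_vl, vtype and vcsr are mutually consistent
                   * (vl <= VLMAX, no reserved bits set).
                   */
                  vr->vl = new_vl;
                  ret = ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov);
          }

          free(vr);
          return ret;
  }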
Signed-off-by: Sergey Matyukevich <geomatsi@gmail.com>
---
 arch/riscv/kernel/ptrace.c | 62 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 61 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 906cf1197edc..a567e558e746 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -124,6 +124,66 @@ static int riscv_vr_get(struct task_struct *target,
 	return membuf_write(&to, vstate->datap, riscv_v_vsize);
 }
 
+static int invalid_ptrace_v_csr(struct __riscv_v_ext_state *vstate,
+				struct __riscv_v_regset_state *ptrace)
+{
+	unsigned long vsew, vlmul, vfrac, vl;
+	unsigned long elen, vlen;
+	unsigned long sew, lmul;
+	unsigned long reserved;
+
+	if (!has_vector())
+		return 1;
+
+	vlen = vstate->vlenb * 8;
+	if (vstate->vlenb != ptrace->vlenb)
+		return 1;
+
+	reserved = ~(CSR_VXSAT_MASK | (CSR_VXRM_MASK << CSR_VXRM_SHIFT));
+	if (ptrace->vcsr & reserved)
+		return 1;
+
+	/* do not allow to set vill */
+	reserved = ~(VTYPE_VSEW | VTYPE_VLMUL | VTYPE_VMA | VTYPE_VTA);
+	if (ptrace->vtype & reserved)
+		return 1;
+
+	elen = riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE64X) ? 64 : 32;
+	vsew = (ptrace->vtype & VTYPE_VSEW) >> VTYPE_VSEW_SHIFT;
+	sew = 8 << vsew;
+
+	if (sew > elen)
+		return 1;
+
+	vfrac = (ptrace->vtype & VTYPE_VLMUL_FRAC);
+	vlmul = (ptrace->vtype & VTYPE_VLMUL);
+
+	/* RVV 1.0 spec 3.4.2: VLMUL(0x4) reserved */
+	if (vlmul == 4)
+		return 1;
+
+	/* RVV 1.0 spec 3.4.2: (LMUL < SEW_min / ELEN) reserved */
+	if (vlmul == 5 && elen == 32)
+		return 1;
+
+	/* for zero vl verify that at least one element is possible */
+	vl = ptrace->vl ? ptrace->vl : 1;
+
+	if (vfrac) {
+		/* fractional 1/LMUL: VL <= VLMAX = VLEN / SEW / LMUL */
+		lmul = 2 << (3 - (vlmul - vfrac));
+		if (vlen < vl * sew * lmul)
+			return 1;
+	} else {
+		/* integer LMUL: VL <= VLMAX = LMUL * VLEN / SEW */
+		lmul = 1 << vlmul;
+		if (vl * sew > lmul * vlen)
+			return 1;
+	}
+
+	return 0;
+}
+
 static int riscv_vr_set(struct task_struct *target,
 			const struct user_regset *regset,
 			unsigned int pos, unsigned int count,
@@ -145,7 +205,7 @@ static int riscv_vr_set(struct task_struct *target,
 	if (unlikely(ret))
 		return ret;
 
-	if (vstate->vlenb != ptrace_vstate.vlenb)
+	if (invalid_ptrace_v_csr(vstate, &ptrace_vstate))
 		return -EINVAL;
 
 	vstate->vstart = ptrace_vstate.vstart;
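Not part of the patch, but for completeness: a debugger can avoid EINVAL round trips by mirroring the VL <= VLMAX check in userspace before issuing PTRACE_SETREGSET. A simplified sketch, assuming the RVV 1.0 vtype layout (vlmul in bits [2:0], vsew in bits [5:3]) and taking VLEN and ELEN in bits as parameters; the helper name vl_fits_vlmax is hypothetical:

  #include <stdbool.h>

  /* Simplified userspace mirror of the kernel's VL <= VLMAX check. */
  static bool vl_fits_vlmax(unsigned long vl, unsigned long vtype,
                            unsigned long vlen, unsigned long elen)
  {
          unsigned long vlmul = vtype & 0x7;         /* vtype[2:0] */
          unsigned long vsew = (vtype >> 3) & 0x7;   /* vtype[5:3] */
          unsigned long sew = 8UL << vsew;
          unsigned long vlmax;

          /* Reserved per RVV 1.0 3.4.2: SEW > ELEN, LMUL encoding 4, LMUL < SEW_min / ELEN. */
          if (sew > elen || vlmul == 4 || (vlmul == 5 && elen == 32))
                  return false;

          if (vlmul & 0x4)
                  /* fractional LMUL: 5 -> 1/8, 6 -> 1/4, 7 -> 1/2 */
                  vlmax = vlen / sew / (1UL << (8 - vlmul));
          else
                  /* integer LMUL: 0 -> 1, 1 -> 2, 2 -> 4, 3 -> 8 */
                  vlmax = (1UL << vlmul) * vlen / sew;

          /* Require at least one element, matching the kernel's zero-vl handling. */
          return vlmax && vl <= vlmax;
  }

A debugger would run this on the vl/vtype pair it is about to place into __riscv_v_regset_state before the SETREGSET call; the kernel-side check above remains authoritative.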