For historical reasons, the non-KPTI exception return path is duplicated for EL1 and EL0, with the structure:
	.if	\el == 0
	[ KPTI handling ]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	[ EL0 exception return workaround ]
	eret
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	[ EL1 exception return workaround ]
	eret
	.endif
	sb
This would be simpler and clearer with the common portions factored out, e.g.
	.if	\el == 0
	[ KPTI handling ]
	.endif

	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	.if	\el == 0
	[ EL0 exception return workaround ]
	.else
	[ EL1 exception return workaround ]
	.endif

	eret
	sb
This expands to the same code, but is simpler for a human to follow as it avoids duplicating the restore of LR+SP, and makes it clear that the ERET is associated with the SB.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/kernel/entry.S | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
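Note: the snippet below is only an illustrative sketch, not taken from entry.S; the toy_exit macro, the immediate offsets, and the NOP stand-ins are made up for this note. It shows why the rework is purely textual: the assembler resolves ".if \el == 0" when the macro is expanded, so each instantiation still emits a single straight-line sequence. It should assemble with GNU as using -march=armv8.5-a (needed for the SB barrier):

	.macro	toy_exit, el
	.if	\el == 0
	nop				// stand-in for the KPTI handling
	.endif

	ldr	lr, [sp, #16]		// stand-in for the S_LR restore
	add	sp, sp, #32		// stand-in for the PT_REGS_SIZE restore

	.if	\el == 0
	nop				// stand-in for the EL0 exception return workaround
	.else
	nop				// stand-in for the EL1 exception return workaround
	.endif

	eret
	sb
	.endm

	toy_exit 0			// expands with only the EL0 pieces
	toy_exit 1			// expands with only the EL1 pieces

Disassembling the two expansions shows the same instruction streams as the duplicated layout would produce.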
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 7fcbee0f6c0e4..7ef0e127b149f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -442,24 +442,23 @@ alternative_else_nop_endif
 .L_skip_tramp_exit_\@:
 #endif
+	.endif
+
 	ldr	lr, [sp, #S_LR]
 	add	sp, sp, #PT_REGS_SIZE		// restore sp
 
+	.if	\el == 0
 	/* This must be after the last explicit memory access */
 alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
 	tlbi	vale1, xzr
 	dsb	nsh
 alternative_else_nop_endif
-	eret
 	.else
-	ldr	lr, [sp, #S_LR]
-	add	sp, sp, #PT_REGS_SIZE		// restore sp
-
 	/* Ensure any device/NC reads complete */
 	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+	.endif
 
 	eret
-	.endif
 	sb
 .endm