David,
On 31/10/18 13:57, David Long wrote:
> From: Marc Zyngier <marc.zyngier@arm.com>
>
> Commit 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f upstream.
>
> In order to avoid aliasing attacks against the branch predictor,
> let's invalidate the BTB on guest exit. This is made complicated by
> the fact that we cannot take a branch before invalidating the BTB.
>
> We only apply this to A12 and A17, which are the only two ARM cores
> on which this is useful.
>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
> Boot-tested-by: Tony Lindgren <tony@atomide.com>
> Reviewed-by: Tony Lindgren <tony@atomide.com>
> Signed-off-by: David A. Long <dave.long@linaro.org>
> ---
>  arch/arm/include/asm/kvm_asm.h |  2 +-
>  arch/arm/include/asm/kvm_mmu.h | 17 ++++++++-
>  arch/arm/kvm/hyp/hyp-entry.S   | 69 ++++++++++++++++++++++++++++++++++
>  3 files changed, 85 insertions(+), 3 deletions(-)
[...]
> diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
> index 96beb53934c9..de242d9598c6 100644
> --- a/arch/arm/kvm/hyp/hyp-entry.S
> +++ b/arch/arm/kvm/hyp/hyp-entry.S
> @@ -71,6 +71,66 @@ __kvm_hyp_vector:
>  	W(b)	hyp_irq
>  	W(b)	hyp_fiq
>  
> +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> +	.align 5
> +__kvm_hyp_vector_bp_inv:
> +	.global __kvm_hyp_vector_bp_inv
> +
> +	/*
> +	 * We encode the exception entry in the bottom 3 bits of
> +	 * SP, and we have to guarantee to be 8 bytes aligned.
> +	 */
> +	W(add)	sp, sp, #1	/* Reset          7 */
> +	W(add)	sp, sp, #1	/* Undef          6 */
> +	W(add)	sp, sp, #1	/* Syscall        5 */
> +	W(add)	sp, sp, #1	/* Prefetch abort 4 */
> +	W(add)	sp, sp, #1	/* Data abort     3 */
> +	W(add)	sp, sp, #1	/* HVC            2 */
> +	W(add)	sp, sp, #1	/* IRQ            1 */
> +	W(nop)			/* FIQ            0 */
> +
> +	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
> +	isb
> +
> +#ifdef CONFIG_THUMB2_KERNEL
> +	/*
> +	 * Yet another silly hack: Use VPIDR as a temp register.
> +	 * Thumb2 is really a pain, as SP cannot be used with most
> +	 * of the bitwise instructions. The vect_br macro ensures
> +	 * things get cleaned-up.
> +	 */
> +	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR */
> +	mov	r0, sp
> +	and	r0, r0, #7
> +	sub	sp, sp, r0
> +	push	{r1, r2}
> +
> +	mov	r1, r0
> +	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR */
> +	mrc	p15, 0, r2, c0, c0, 0	/* MIDR  */
> +	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR */
> +#endif
> +
> +.macro vect_br val, targ
> +ARM(	eor	sp, sp, #\val	)
> +ARM(	tst	sp, #7		)
> +ARM(	eorne	sp, sp, #\val	)
> +
> +THUMB(	cmp	r1, #\val	)
> +THUMB(	popeq	{r1, r2}	)
> +
> +	beq	\targ
> +.endm
> +
> +	vect_br	0, hyp_fiq
> +	vect_br	1, hyp_irq
> +	vect_br	2, hyp_hvc
> +	vect_br	3, hyp_dabt
> +	vect_br	4, hyp_pabt
> +	vect_br	5, hyp_svc
> +	vect_br	6, hyp_undef
> +	vect_br	7, hyp_reset
> +#endif
> +
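As an aside, for anyone trying to follow the decode trick above: here is a
rough userspace C model of it. This is my own sketch, not anything from the
patch -- encode() stands in for falling through the W(add)s on exception
entry, vect_br() for the ARM eor/tst/eorne sequence, and the Thumb2 VPIDR
dance plus all the real register handling are elided. The names and the
sample SP value are made up.

	#include <assert.h>
	#include <stdio.h>

	/* "Vector entry": falling through the W(add)s leaves the
	 * exception number in the bottom 3 bits of an SP that must
	 * be 8-byte aligned for the trick to work. */
	static unsigned long encode(unsigned long sp, unsigned int vect)
	{
		assert((sp & 7) == 0);	/* the whole scheme relies on this */
		return sp + vect;
	}

	/* vect_br, ARM flavour: eor sp, #val / tst sp, #7 / eorne sp, #val
	 * / beq targ. A match both selects the handler and, as a side
	 * effect of the matching eor, restores the aligned SP. */
	static int vect_br(unsigned long *sp, unsigned int val)
	{
		unsigned long t = *sp ^ val;

		if ((t & 7) == 0) {	/* tst sp, #7 ; beq \targ */
			*sp = t;	/* SP is clean again on the taken path */
			return 1;
		}
		return 0;		/* eorne undid the eor; t is discarded */
	}

	int main(void)
	{
		static const char *handler[] = {
			"hyp_fiq", "hyp_irq", "hyp_hvc", "hyp_dabt",
			"hyp_pabt", "hyp_svc", "hyp_undef", "hyp_reset",
		};
		unsigned long sp = encode(0x1000, 2);	/* guest exit via HVC */

		for (unsigned int v = 0; v < 8; v++)
			if (vect_br(&sp, v))
				printf("decoded as %s, sp back to %#lx\n",
				       handler[v], sp);
		return 0;
	}

With a properly aligned SP this prints "decoded as hyp_hvc, sp back to
0x1000"; the assert is exactly the invariant the comment in the patch
spells out.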
>  .macro invalid_vector label, cause
>  	.align
>  \label:	mov	r0, #\cause
> @@ -132,6 +192,14 @@ hyp_hvc:
>  	beq	1f
>  
>  	push	{lr}
> +	/*
> +	 * Pushing r2 here is just a way of keeping the stack aligned to
> +	 * 8 bytes on any path that can trigger a HYP exception. Here,
> +	 * we may well be about to jump into the guest, and the guest
> +	 * exit would otherwise be badly decoded by our fancy
> +	 * "decode-exception-without-a-branch" code...
> +	 */
> +	push	{r2, lr}
>  
>  	mov	lr, r0
>  	mov	r0, r1
> @@ -142,6 +210,7 @@
>  THUMB(	orr	lr, #1)
>  	blx	lr			@ Call the HYP function
>  	pop	{lr}
> +	pop	{r2, lr}
I don't see how this can work. This clearly isn't the right resolution for merging 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f, as it contradicts the very comment you are merging here.
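For reference, the upstream version of this hunk *replaces* the push/pop
rather than stacking a second one next to it (quoting
3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f from memory):

-	push	{lr}
+	/*
+	 * Pushing r2 here is just a way of keeping the stack aligned to
+	 * 8 bytes on any path that can trigger a HYP exception. Here,
+	 * we may well be about to jump into the guest, and the guest
+	 * exit would otherwise be badly decoded by our fancy
+	 * "decode-exception-without-a-branch" code...
+	 */
+	push	{r2, lr}

and, further down:

-	pop	{lr}
+	pop	{r2, lr}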
I wouldn't be surprised if the crash you're observing is due to this problem (unaligned stack, bad decoding of the vector, branch to the wrong handler, HYP on fire).
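To make the arithmetic concrete (my own back-of-the-envelope, assuming SP
is 8-byte aligned on entry to hyp_hvc, as the comment requires): push {lr}
drops SP by 4 and push {r2, lr} by a further 8, so we enter the guest with
SP == 4 (mod 8). On the next exit, the vector W(add)s encode the exception
number on top of an already-set bit 2, and the vect_br eor/tst sequence
matches the wrong value: an HVC exit (4 + 2 = 6 in the low bits) satisfies
vect_br 6 and lands in hyp_undef.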
M.