From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
From: Marc Zyngier <maz@kernel.org>
Commit 01dc9262ff5797b675c32c0c6bc682777d23de05 upstream.
It recently became apparent that the ARMv8 architecture has interesting rules regarding attributes being used when fetching instructions if the MMU is off at Stage-1.
In this situation, the CPU is allowed to fetch from the PoC and allocate into the I-cache (unless the memory is mapped with the XN attribute at Stage-2).
If we transpose this to vcpus sharing a single physical CPU, it is possible for a vcpu running with its MMU off to influence another vcpu running with its MMU on, as the latter is expected to fetch from the PoU (and self-patching code doesn't flush below that level).
In order to solve this, reuse the vcpu-private TLB invalidation code to apply the same policy to the I-cache, nuking it every time the vcpu runs on a physical CPU that ran another vcpu of the same VM in the past.
This involves renaming __kvm_tlb_flush_local_vmid() to __kvm_flush_cpu_context(), and inserting a local i-cache invalidation there.
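For reference, a rough sketch of what the renamed helper ends up looking like on the nVHE side, reconstructed from the hunks below (the __tlb_switch_to_guest()/__tlb_switch_to_host() calls are the existing vcpu-private context-switch helpers in tlb.c, assumed otherwise unchanged):

	void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
	{
		struct tlb_inv_context cxt;

		/* Switch to the vcpu's stage-2 context (VMID) */
		__tlb_switch_to_guest(mmu, &cxt);

		/* Local TLB invalidation, as before... */
		__tlbi(vmalle1);
		/* ...now paired with a local I-cache invalidation */
		asm volatile("ic iallu");
		dsb(nsh);
		isb();

		__tlb_switch_to_host(&cxt);
	}

The VHE copy gets the same treatment.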
Cc: stable@vger.kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210303164505.68492-1-maz@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/arm64/include/asm/kvm_asm.h   | 4 ++--
 arch/arm64/kvm/arm.c               | 7 ++++++-
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 4 ++--
 arch/arm64/kvm/hyp/nvhe/tlb.c      | 3 ++-
 arch/arm64/kvm/hyp/vhe/tlb.c       | 3 ++-
 5 files changed, 14 insertions(+), 7 deletions(-)
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -49,7 +49,7 @@
 #define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
 #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
 #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid	5
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context		5
 #define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
 #define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2		8
@@ -180,10 +180,10 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
 
 extern void __kvm_flush_vm_context(void);
+extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
 
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -352,11 +352,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
 
 	/*
+	 * We guarantee that both TLBs and I-cache are private to each
+	 * vcpu. If detecting that a vcpu from the same VM has
+	 * previously run on the same physical CPU, call into the
+	 * hypervisor code to nuke the relevant contexts.
+	 *
 	 * We might get preempted before the vCPU actually runs, but
 	 * over-invalidation doesn't affect correctness.
 	 */
 	if (*last_ran != vcpu->vcpu_id) {
-		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
+		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
 		*last_ran = vcpu->vcpu_id;
 	}
 
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -46,11 +46,11 @@ static void handle_host_hcall(unsigned l
 		__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
 		break;
 	}
-	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): {
+	case KVM_HOST_SMCCC_FUNC(__kvm_flush_cpu_context): {
 		unsigned long r1 = host_ctxt->regs.regs[1];
 		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
 
-		__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
+		__kvm_flush_cpu_context(kern_hyp_va(mmu));
 		break;
 	}
 	case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): {
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -123,7 +123,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_
 	__tlb_switch_to_host(&cxt);
 }
 
-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;
 
@@ -131,6 +131,7 @@ void __kvm_tlb_flush_local_vmid(struct k
 	__tlb_switch_to_guest(mmu, &cxt);
 
 	__tlbi(vmalle1);
+	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
 
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -127,7 +127,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_
 	__tlb_switch_to_host(&cxt);
 }
 
-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;
 
@@ -135,6 +135,7 @@ void __kvm_tlb_flush_local_vmid(struct k
 	__tlb_switch_to_guest(mmu, &cxt);
 
 	__tlbi(vmalle1);
+	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
 