6.12-stable review patch. If anyone has any objections, please let me know.
------------------
From: Sean Christopherson <seanjc@google.com>
[ Upstream commit c9be85dabb376299504e0d391d15662c0edf8273 ]
Mark the underlying page as dirty in kvmppc_e500_ref_setup()'s sole caller, kvmppc_e500_shadow_map(), which will allow converting e500 to __kvm_faultin_pfn() + kvm_release_faultin_page() without having to do a weird dance between ref_setup() and shadow_map().
Opportunistically drop the redundant kvm_set_pfn_accessed(), as shadow_map() puts the page via kvm_release_pfn_clean().
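The net effect, reconstructed as plain code from the diff below for readability (the unchanged start of the helper is elided), is that ref_setup() only records the mapping attributes and reports writability, while shadow_map() decides whether to dirty the page:

	static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
						 struct kvm_book3e_206_tlb_entry *gtlbe,
						 kvm_pfn_t pfn, unsigned int wimg)
	{
		/* ... record pfn and attribute flags in ref ... */

		/* Report writability; dirtying is now the caller's job. */
		return tlbe_is_writable(gtlbe);
	}

and in kvmppc_e500_shadow_map():

	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
	if (writable)
		kvm_set_pfn_dirty(pfn);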
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-53-seanjc@google.com>
Stable-dep-of: 87ecfdbc699c ("KVM: e500: always restore irqs")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/kvm/e500_mmu_host.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index c664fdec75b1a..5c2adfd19e123 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -242,7 +242,7 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
 	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
 }
 
-static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 struct kvm_book3e_206_tlb_entry *gtlbe,
 					 kvm_pfn_t pfn, unsigned int wimg)
 {
@@ -252,11 +252,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 	/* Use guest supplied MAS2_G and MAS2_E */
 	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
 
-	/* Mark the page accessed */
-	kvm_set_pfn_accessed(pfn);
-
-	if (tlbe_is_writable(gtlbe))
-		kvm_set_pfn_dirty(pfn);
+	return tlbe_is_writable(gtlbe);
 }
 
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
@@ -337,6 +333,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	unsigned int wimg = 0;
 	pgd_t *pgdir;
 	unsigned long flags;
+	bool writable = false;
 
 	/* used to check for invalidations in progress */
 	mmu_seq = kvm->mmu_invalidate_seq;
@@ -490,7 +487,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 			goto out;
 		}
 	}
-	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
+	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
+	if (writable)
+		kvm_set_pfn_dirty(pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
 				ref, gvaddr, stlbe);
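
For reference, the eventual conversion this enables pairs fault-in and release in shadow_map() itself. A rough sketch of that shape, assuming the __kvm_faultin_pfn()/kvm_release_faultin_page() API introduced elsewhere in this series (the exact e500 conversion may differ):

	struct page *page;
	bool writable;
	kvm_pfn_t pfn;

	/* Fault in the pfn and take a reference on its page. */
	pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);

	/* ... build the shadow TLB entry ... */
	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);

	/* One release point: dirty the page iff mapped writable. */
	kvm_release_faultin_page(kvm, page, false, writable);

With writability returned from ref_setup(), the dirty decision folds naturally into the release call instead of being split across the two helpers.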