commit 19a23da53932bc8011220bd8c410cb76012de004 upstream.
Grab kvm->lock before pinning memory when registering an encrypted region; sev_pin_memory() relies on kvm->lock being held to ensure correctness when checking and updating the number of pinned pages.
Add a lockdep assertion to help prevent future regressions.
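To make the requirement concrete, here is a condensed sketch of the pinned-page accounting in sev_pin_memory(), paraphrased and simplified rather than copied from the tree; the sev accessor and the elided pinning steps are placeholders:

static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info; /* however this tree reaches its SEV info */
	unsigned long npages, locked, lock_limit;
	struct page **pages = NULL;
	unsigned long first, last;

	/* Added by this patch: document and enforce the locking rule. */
	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return NULL;

	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = last - first + 1;

	/*
	 * Check-then-update of sev->pages_locked: without kvm->lock, two
	 * concurrent ioctls can both pass the RLIMIT_MEMLOCK check and
	 * together push the pinned-page count past the limit.
	 */
	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		return NULL;

	/* ... allocate the array and get_user_pages_fast() the range ... */

	*n = npages;
	sev->pages_locked = locked;	/* only safe while kvm->lock is held */
	return pages;
}

The limit check and the counter update are a read-modify-write pair, which is exactly what kvm->lock serializes.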
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Cc: stable@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Fixes: 1e80fdc09d12 ("KVM: SVM: Pin guest memory when SEV is active")
Signed-off-by: Peter Gonda <pgonda@google.com>
V2
- Fix up patch description
- Correct file paths svm.c -> sev.c
- Add unlock of kvm->lock on sev_pin_memory error
V1
- https://lore.kernel.org/kvm/20210126185431.1824530-1-pgonda@google.com/
Message-Id: <20210127161524.2832400-1-pgonda@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2b506904be02..93c89f1ffc5d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1830,6 +1830,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 	struct page **pages;
 	unsigned long first, last;
 
+	lockdep_assert_held(&kvm->lock);
+
 	if (ulen == 0 || uaddr + ulen < uaddr)
 		return NULL;
 
@@ -7086,12 +7088,21 @@ static int svm_register_enc_region(struct kvm *kvm,
 	if (!region)
 		return -ENOMEM;
 
+	mutex_lock(&kvm->lock);
 	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
 	if (!region->pages) {
 		ret = -ENOMEM;
+		mutex_unlock(&kvm->lock);
 		goto e_free;
 	}
 
+	region->uaddr = range->addr;
+	region->size = range->size;
+
+	mutex_lock(&kvm->lock);
+	list_add_tail(&region->list, &sev->regions_list);
+	mutex_unlock(&kvm->lock);
+
 	/*
 	 * The guest may change the memory encryption attribute from C=0 -> C=1
 	 * or vice versa for this memory range. Lets make sure caches are
@@ -7100,13 +7111,6 @@ static int svm_register_enc_region(struct kvm *kvm,
 	 */
 	sev_clflush_pages(region->pages, region->npages);
 
-	region->uaddr = range->addr;
-	region->size = range->size;
-
-	mutex_lock(&kvm->lock);
-	list_add_tail(&region->list, &sev->regions_list);
-	mutex_unlock(&kvm->lock);
-
 	return ret;
 
 e_free:
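For reference outside the kernel tree, a minimal user-space analogue of the race this serialization closes; the names (pin_pages, pages_locked, LOCK_LIMIT) are hypothetical stand-ins and the pthread mutex plays the role of kvm->lock:

/* race_demo.c - build with: cc -pthread race_demo.c */
#include <pthread.h>
#include <stdio.h>

#define LOCK_LIMIT 8	/* stand-in for the RLIMIT_MEMLOCK budget, in pages */

static unsigned long pages_locked;	/* stand-in for sev->pages_locked */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for kvm->lock */

/* Stand-in for sev_pin_memory(): check the limit, then bump the counter. */
static int pin_pages(unsigned long npages, int use_lock)
{
	int ret = -1;

	if (use_lock)
		pthread_mutex_lock(&lock);

	if (pages_locked + npages <= LOCK_LIMIT) {
		/* Without the lock, both threads can reach this point together. */
		pages_locked += npages;
		ret = 0;
	}

	if (use_lock)
		pthread_mutex_unlock(&lock);
	return ret;
}

static void *worker(void *arg)
{
	pin_pages(6, *(int *)arg);
	return NULL;
}

int main(void)
{
	int use_lock = 1;	/* set to 0 to make the over-commit possible */
	pthread_t a, b;

	pthread_create(&a, NULL, worker, &use_lock);
	pthread_create(&b, NULL, worker, &use_lock);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("pages_locked=%lu (limit %d)\n", pages_locked, LOCK_LIMIT);
	return 0;
}

With use_lock set, the second pin fails once the first has consumed most of the budget; without it, both threads can pass the check before either updates the counter.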
On 08/02/21 17:48, Peter Gonda wrote:
Acked-by: Paolo Bonzini <pbonzini@redhat.com>