From: Dave Martin <Dave.Martin@arm.com>
[ Upstream commit df205b5c63281e4f32caac22adda18fd68795e80 ]
Since commit d26c25a9d19b ("arm64: KVM: Tighten guest core register access from userspace"), KVM_{GET,SET}_ONE_REG rejects register IDs that do not correspond to a single underlying architectural register.
KVM_GET_REG_LIST was not changed to match, however: instead, it simply yields a list of register IDs that cover the whole kvm_regs struct in 32-bit slices. This means that if userspace tries to use the resulting list of IDs directly to drive calls to KVM_*_ONE_REG, some of those calls will now fail.
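For illustration only (not part of the patch): using the arm64 KVM uapi macros, compare a fully qualified ID for the 128-bit FP/SIMD register V0 with the kind of ID the old list handed out for the same storage. The variable names below are made up for the example.

#include <linux/kvm.h>  /* KVM_REG_ARM64, KVM_REG_SIZE_* */
#include <asm/kvm.h>    /* KVM_REG_ARM_CORE, KVM_REG_ARM_CORE_REG(), struct kvm_regs */

/* Accepted by KVM_{GET,SET}_ONE_REG since d26c25a9d19b: the size field
 * matches the real width of V0. */
__u64 v0_id = KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE |
              KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);

/* What the old KVM_GET_REG_LIST reported for the same offset: a fixed
 * 64-bit size tag, which the ONE_REG ioctls now reject. */
__u64 stale_v0_id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
                    KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);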
This was not the intention. Instead, iterating KVM_*_ONE_REG over the list of IDs returned by KVM_GET_REG_LIST should be guaranteed to work.
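As a sanity check of that contract, something like the sketch below can be run against a vcpu fd from userspace. This is not from the patch: read_all_regs() is an illustrative name, the vcpu is assumed to be already created, and most error handling is trimmed.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdlib.h>

/* Walk KVM_GET_REG_LIST and read every listed register back with
 * KVM_GET_ONE_REG; with this fix, no listed ID should be rejected. */
static int read_all_regs(int vcpu_fd)
{
        struct kvm_reg_list probe = { .n = 0 };
        struct kvm_reg_list *list;
        uint64_t i;
        int ret = 0;

        /* The first call fails with E2BIG but reports how many IDs exist. */
        ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

        list = calloc(1, sizeof(*list) + probe.n * sizeof(uint64_t));
        if (!list)
                return -1;
        list->n = probe.n;

        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
                free(list);
                return -1;
        }

        for (i = 0; i < list->n; i++) {
                /* 256 bytes covers every register size this sketch expects;
                 * anything larger is simply skipped below. */
                unsigned char buf[256] = { 0 };
                struct kvm_one_reg reg = {
                        .id   = list->reg[i],
                        .addr = (uint64_t)(uintptr_t)buf,
                };
                uint64_t size = 1ULL << ((reg.id & KVM_REG_SIZE_MASK) >>
                                         KVM_REG_SIZE_SHIFT);

                if (size > sizeof(buf))
                        continue;

                if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
                        ret = -1;       /* should not happen for listed IDs */
                        break;
                }
        }

        free(list);
        return ret;
}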
This patch fixes the problem by splitting validate_core_offset() into a backend, core_reg_size_from_offset(), which does all of the work except for checking that the size field in the register ID matches. kvm_arm_copy_reg_indices() and num_core_regs() are then converted to use this backend to enumerate the valid offsets.
kvm_arm_copy_reg_indices() now also sets the register ID size field appropriately based on the value returned, so the register ID supplied to userspace is fully qualified for use with the register access ioctls.
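Userspace can therefore take both the transfer size and the kvm_regs offset straight from the ID, rather than assuming 64 bits. A hypothetical helper (the name decode_core_id is made up; the masks are the uapi ones, and the offset extraction mirrors core_reg_offset_from_id() in the diff below):

#include <stddef.h>
#include <stdint.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

/* Only meaningful for IDs in the KVM_REG_ARM_CORE group: returns the
 * number of bytes KVM will copy (4, 8 or 16 for core registers) and
 * the 32-bit-word offset into struct kvm_regs. */
static void decode_core_id(uint64_t id, size_t *bytes, uint64_t *off)
{
        *bytes = (size_t)1 << ((id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT);
        *off   = id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}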
Cc: stable@vger.kernel.org
Fixes: d26c25a9d19b ("arm64: KVM: Tighten guest core register access from userspace")
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Tested-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Takahiro Itazuri <itazur@amazon.com>
---
 arch/arm64/kvm/guest.c | 51 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 43 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index d29cb34a828f..80715affc108 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -57,9 +57,8 @@ static u64 core_reg_offset_from_id(u64 id)
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
-static int validate_core_offset(const struct kvm_one_reg *reg)
+static int core_reg_size_from_offset(u64 off)
 {
-	u64 off = core_reg_offset_from_id(reg->id);
 	int size;
 
 	switch (off) {
@@ -89,11 +88,24 @@ static int validate_core_offset(const struct kvm_one_reg *reg)
 		return -EINVAL;
 	}
 
-	if (KVM_REG_SIZE(reg->id) == size &&
-	    IS_ALIGNED(off, size / sizeof(__u32)))
-		return 0;
+	if (!IS_ALIGNED(off, size / sizeof(__u32)))
+		return -EINVAL;
 
-	return -EINVAL;
+	return size;
+}
+
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size = core_reg_size_from_offset(off);
+
+	if (size < 0)
+		return -EINVAL;
+
+	if (KVM_REG_SIZE(reg->id) != size)
+		return -EINVAL;
+
+	return 0;
 }
 
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -197,11 +209,34 @@ static int kvm_arm_copy_core_reg_indices(u64 __user *uindices)
 {
 	unsigned int i;
 	int n = 0;
-	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
 
 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
+		int size = core_reg_size_from_offset(i);
+
+		if (size < 0)
+			continue;
+
+		switch (size) {
+		case sizeof(__u32):
+			reg |= KVM_REG_SIZE_U32;
+			break;
+
+		case sizeof(__u64):
+			reg |= KVM_REG_SIZE_U64;
+			break;
+
+		case sizeof(__uint128_t):
+			reg |= KVM_REG_SIZE_U128;
+			break;
+
+		default:
+			WARN_ON(1);
+			continue;
+		}
+
 		if (uindices) {
-			if (put_user(core_reg | i, uindices))
+			if (put_user(reg, uindices))
 				return -EFAULT;
 			uindices++;
 		}