On Tue, 23 Dec 2025 at 01:22, Mark Brown <broonie@kernel.org> wrote:
Due to the overlap between SVE and SME vector length configuration created by streaming mode SVE, we will finalize both at once. Rename the existing finalization to use _VEC (vector) naming to avoid confusion.
Since this includes the userspace API, we create an alias KVM_ARM_VCPU_VEC for the existing KVM_ARM_VCPU_SVE capability; existing code which does not enable SME is unaffected, and SME-only code will not need to use SVE constants.
No functional change.
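As an illustration (not part of this patch), userspace finalizes vector state with the new alias exactly as it does with KVM_ARM_VCPU_SVE today. A minimal sketch, assuming vcpu_fd came from KVM_CREATE_VCPU, vector lengths have already been configured, and finalize_vec() is just an illustrative helper name:

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int finalize_vec(int vcpu_fd)
  {
          /* KVM_ARM_VCPU_VEC aliases KVM_ARM_VCPU_SVE, so either name works */
          int feature = KVM_ARM_VCPU_VEC;

          /* Must happen after VL configuration and before the vCPU first runs */
          return ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
  }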
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Fuad Tabba <tabba@google.com>
Cheers,
/fuad
---
 arch/arm64/include/asm/kvm_host.h | 8 +++++---
 arch/arm64/include/uapi/asm/kvm.h | 6 ++++++
 arch/arm64/kvm/guest.c            | 10 +++++-----
 arch/arm64/kvm/hyp/nvhe/pkvm.c    | 2 +-
 arch/arm64/kvm/reset.c            | 20 ++++++++++----------
 5 files changed, 27 insertions(+), 19 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e6d25db10a6b..0f3d26467bf0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -988,8 +988,8 @@ struct kvm_vcpu_arch {
 /* KVM_ARM_VCPU_INIT completed */
 #define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(0))
-/* SVE config completed */
-#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
+/* Vector config completed */
+#define VCPU_VEC_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
 /* pKVM VCPU setup completed */
 #define VCPU_PKVM_FINALIZED	__vcpu_single_flag(cflags, BIT(2))
@@ -1062,6 +1062,8 @@ struct kvm_vcpu_arch {
 #define vcpu_has_sve(vcpu) kvm_has_sve((vcpu)->kvm)
 #endif
 
+#define vcpu_has_vec(vcpu) vcpu_has_sve(vcpu)
+
 #ifdef CONFIG_ARM64_PTR_AUTH
 #define vcpu_has_ptrauth(vcpu)					\
 	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||	\
@@ -1458,7 +1460,7 @@ struct kvm *kvm_arch_alloc_vm(void);
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 
-#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
+#define kvm_arm_vcpu_vec_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_VEC_FINALIZED)
 #define kvm_has_mte(kvm)				\
 	(system_supports_mte() &&			\
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index a792a599b9d6..c67564f02981 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -107,6 +107,12 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_HAS_EL2		7 /* Support nested virtualization */
 #define KVM_ARM_VCPU_HAS_EL2_E2H0	8 /* Limit NV support to E2H RES0 */
+/*
+ * An alias for _SVE since we finalize VL configuration for both SVE and SME
+ * simultaneously.
+ */
+#define KVM_ARM_VCPU_VEC	KVM_ARM_VCPU_SVE
+
 struct kvm_vcpu_init {
 	__u32 target;
 	__u32 features[7];
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 1c87699fd886..d15aa2da1891 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -342,7 +342,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if (!vcpu_has_sve(vcpu))
 		return -ENOENT;
 
-	if (kvm_arm_vcpu_sve_finalized(vcpu))
+	if (kvm_arm_vcpu_vec_finalized(vcpu))
 		return -EPERM; /* too late! */
 
 	if (WARN_ON(vcpu->arch.sve_state))
@@ -497,7 +497,7 @@ static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if (ret)
 		return ret;
 
-	if (!kvm_arm_vcpu_sve_finalized(vcpu))
+	if (!kvm_arm_vcpu_vec_finalized(vcpu))
 		return -EPERM;
 
 	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
@@ -523,7 +523,7 @@ static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if (ret)
 		return ret;
 
-	if (!kvm_arm_vcpu_sve_finalized(vcpu))
+	if (!kvm_arm_vcpu_vec_finalized(vcpu))
 		return -EPERM;
 
 	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
@@ -599,7 +599,7 @@ static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
 		return 0;
 
 	/* Policed by KVM_GET_REG_LIST: */
-	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+	WARN_ON(!kvm_arm_vcpu_vec_finalized(vcpu));
 
 	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
 		+ 1; /* KVM_REG_ARM64_SVE_VLS */
@@ -617,7 +617,7 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
 		return 0;
 
 	/* Policed by KVM_GET_REG_LIST: */
-	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+	WARN_ON(!kvm_arm_vcpu_vec_finalized(vcpu));
 
 	/*
 	 * Enumerate this first, so that userspace can save/restore in
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 8911338961c5..b402dcb7691e 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -445,7 +445,7 @@ static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *h
 	int ret = 0;
 
 	if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
-		vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
+		vcpu_clear_flag(vcpu, VCPU_VEC_FINALIZED);
 		return 0;
 	}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 959532422d3a..f7c63e145d54 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -92,7 +92,7 @@ static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
 /*
  * Finalize vcpu's maximum SVE vector length, allocating
  * vcpu->arch.sve_state as necessary.
  */
-static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
+static int kvm_vcpu_finalize_vec(struct kvm_vcpu *vcpu)
 {
 	void *buf;
 	unsigned int vl;
@@ -122,21 +122,21 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
 	}
 
 	vcpu->arch.sve_state = buf;
-	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
+	vcpu_set_flag(vcpu, VCPU_VEC_FINALIZED);
 	return 0;
 }
 
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
 {
 	switch (feature) {
-	case KVM_ARM_VCPU_SVE:
-		if (!vcpu_has_sve(vcpu))
+	case KVM_ARM_VCPU_VEC:
+		if (!vcpu_has_vec(vcpu))
 			return -EINVAL;
 
-		if (kvm_arm_vcpu_sve_finalized(vcpu))
+		if (kvm_arm_vcpu_vec_finalized(vcpu))
 			return -EPERM;
 
-		return kvm_vcpu_finalize_sve(vcpu);
+		return kvm_vcpu_finalize_vec(vcpu);
 	}
 
 	return -EINVAL;
@@ -144,7 +144,7 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 {
-	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
+	if (vcpu_has_vec(vcpu) && !kvm_arm_vcpu_vec_finalized(vcpu))
 		return false;
 
 	return true;
@@ -163,7 +163,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kfree(vcpu->arch.ccsidr);
 }
 
-static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_reset_vec(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_has_sve(vcpu))
 		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
@@ -203,11 +203,11 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	if (loaded)
 		kvm_arch_vcpu_put(vcpu);
 
-	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
+	if (!kvm_arm_vcpu_vec_finalized(vcpu)) {
 		if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
 			kvm_vcpu_enable_sve(vcpu);
 	} else {
-		kvm_vcpu_reset_sve(vcpu);
+		kvm_vcpu_reset_vec(vcpu);
 	}
 
 	if (vcpu_el1_is_32bit(vcpu))

-- 
2.47.3