Rename the vcpu_* flag helpers to kvm_* so that the same flags mechanism can be used in places other than the vcpu without being confusing. Wherever a macro is still vcpu specific, like vcpu_get_flag() with its hard-coded v->arch access, keep the vcpu_* name; otherwise rename it.

Also move the "v->arch" access up one macro level for the same reason.
This will be used for moving flags to host_data in a later commit.
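To illustrate the split: the double-underscore helpers now operate on the flag-containing structure itself, and only the vcpu_* wrappers supply "(v)->arch", so a non-vcpu flag-set can reuse the same machinery. A minimal sketch of such a future user follows; the host_get_flag() wrapper and the get_host_flags() accessor are hypothetical and not part of this patch:

  /* vcpu wrappers keep their names and pass vcpu->arch themselves */
  #define vcpu_get_flag(v, ...)	__kvm_get_flag(((v)->arch), __VA_ARGS__)

  /*
   * Hypothetical host-side wrapper: any structure with a matching
   * flag-set member can be handed to the renamed helpers.
   */
  #define host_get_flag(...)	__kvm_get_flag((*get_host_flags()), __VA_ARGS__)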
Signed-off-by: James Clark <james.clark@linaro.org>
---
 arch/arm64/include/asm/kvm_host.h | 88 +++++++++++++++----------------
 arch/arm64/kvm/hyp/exception.c    | 12 ++---
 arch/arm64/kvm/inject_fault.c     |  4 +-
 arch/arm64/kvm/mmio.c             | 10 ++--
 4 files changed, 57 insertions(+), 57 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f333b189fb43..34aa59f498c4 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -790,22 +790,22 @@ struct kvm_vcpu_arch {
 /*
  * Each 'flag' is composed of a comma-separated triplet:
  *
- * - the flag-set it belongs to in the vcpu->arch structure
+ * - the flag-set it belongs to in the structure pointed to by 'v'
  * - the value for that flag
  * - the mask for that flag
  *
- * __vcpu_single_flag() builds such a triplet for a single-bit flag.
- * unpack_vcpu_flag() extract the flag value from the triplet for
+ * __kvm_single_flag() builds such a triplet for a single-bit flag.
+ * unpack_kvm_flag() extract the flag value from the triplet for
  * direct use outside of the flag accessors.
  */
-#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)
+#define __kvm_single_flag(_set, _f)	_set, (_f), (_f)
 
 #define __unpack_flag(_set, _f, _m)	_f
-#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)
+#define unpack_kvm_flag(...)		__unpack_flag(__VA_ARGS__)
 
 #define __build_check_flag(v, flagset, f, m)			\
 	do {							\
-		typeof(v->arch.flagset) *_fset;			\
+		typeof(v.flagset) *_fset;			\
 								\
 		/* Check that the flags fit in the mask */	\
 		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
@@ -813,11 +813,11 @@ struct kvm_vcpu_arch {
 		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
 	} while (0)
 
-#define __vcpu_get_flag(v, flagset, f, m)			\
+#define __kvm_get_flag(v, flagset, f, m)			\
 	({							\
 		__build_check_flag(v, flagset, f, m);		\
 								\
-		READ_ONCE(v->arch.flagset) & (m);		\
+		READ_ONCE(v.flagset) & (m);			\
 	})
 
 /*
@@ -826,64 +826,64 @@ struct kvm_vcpu_arch {
  */
 #ifdef __KVM_NVHE_HYPERVISOR__
 /* the nVHE hypervisor is always non-preemptible */
-#define __vcpu_flags_preempt_disable()
-#define __vcpu_flags_preempt_enable()
+#define __kvm_flags_preempt_disable()
+#define __kvm_flags_preempt_enable()
 #else
-#define __vcpu_flags_preempt_disable()	preempt_disable()
-#define __vcpu_flags_preempt_enable()	preempt_enable()
+#define __kvm_flags_preempt_disable()	preempt_disable()
+#define __kvm_flags_preempt_enable()	preempt_enable()
 #endif
 
-#define __vcpu_set_flag(v, flagset, f, m)			\
+#define __kvm_set_flag(v, flagset, f, m)			\
 	do {							\
-		typeof(v->arch.flagset) *fset;			\
+		typeof(v.flagset) *fset;			\
 								\
 		__build_check_flag(v, flagset, f, m);		\
 								\
-		fset = &v->arch.flagset;			\
-		__vcpu_flags_preempt_disable();			\
+		fset = &v.flagset;				\
+		__kvm_flags_preempt_disable();			\
 		if (HWEIGHT(m) > 1)				\
 			*fset &= ~(m);				\
 		*fset |= (f);					\
-		__vcpu_flags_preempt_enable();			\
+		__kvm_flags_preempt_enable();			\
 	} while (0)
 
-#define __vcpu_clear_flag(v, flagset, f, m)			\
+#define __kvm_clear_flag(v, flagset, f, m)			\
 	do {							\
-		typeof(v->arch.flagset) *fset;			\
+		typeof(v.flagset) *fset;			\
 								\
 		__build_check_flag(v, flagset, f, m);		\
 								\
-		fset = &v->arch.flagset;			\
-		__vcpu_flags_preempt_disable();			\
+		fset = &v.flagset;				\
+		__kvm_flags_preempt_disable();			\
 		*fset &= ~(m);					\
-		__vcpu_flags_preempt_enable();			\
+		__kvm_flags_preempt_enable();			\
 	} while (0)
 
-#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
-#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
-#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
+#define vcpu_get_flag(v, ...)	__kvm_get_flag(((v)->arch), __VA_ARGS__)
+#define vcpu_set_flag(v, ...)	__kvm_set_flag(((v)->arch), __VA_ARGS__)
+#define vcpu_clear_flag(v, ...)	__kvm_clear_flag(((v)->arch), __VA_ARGS__)
 
 /* SVE exposed to guest */
-#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
+#define GUEST_HAS_SVE		__kvm_single_flag(cflags, BIT(0))
 /* SVE config completed */
-#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
+#define VCPU_SVE_FINALIZED	__kvm_single_flag(cflags, BIT(1))
 /* PTRAUTH exposed to guest */
-#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
+#define GUEST_HAS_PTRAUTH	__kvm_single_flag(cflags, BIT(2))
 /* KVM_ARM_VCPU_INIT completed */
-#define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(3))
+#define VCPU_INITIALIZED	__kvm_single_flag(cflags, BIT(3))
 
 /* Exception pending */
-#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
+#define PENDING_EXCEPTION	__kvm_single_flag(iflags, BIT(0))
 /*
  * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
  * be set together with an exception...
  */
-#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
+#define INCREMENT_PC		__kvm_single_flag(iflags, BIT(1))
 /* Target EL/MODE (not a single flag, but let's abuse the macro) */
-#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))
+#define EXCEPT_MASK		__kvm_single_flag(iflags, GENMASK(3, 1))
 
 /* Helpers to encode exceptions with minimum fuss */
-#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
+#define __EXCEPT_MASK_VAL	unpack_kvm_flag(EXCEPT_MASK)
 #define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
 #define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
 
@@ -907,28 +907,28 @@ struct kvm_vcpu_arch {
 #define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
 #define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
 /* Guest debug is live */
-#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
+#define DEBUG_DIRTY		__kvm_single_flag(iflags, BIT(4))
 /* Save SPE context if active */
-#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
+#define DEBUG_STATE_SAVE_SPE	__kvm_single_flag(iflags, BIT(5))
 /* Save TRBE context if active */
-#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
+#define DEBUG_STATE_SAVE_TRBE	__kvm_single_flag(iflags, BIT(6))
 
 /* SVE enabled for host EL0 */
-#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
+#define HOST_SVE_ENABLED	__kvm_single_flag(sflags, BIT(0))
 /* SME enabled for EL0 */
-#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
+#define HOST_SME_ENABLED	__kvm_single_flag(sflags, BIT(1))
 /* Physical CPU not in supported_cpus */
-#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
+#define ON_UNSUPPORTED_CPU	__kvm_single_flag(sflags, BIT(2))
 /* WFIT instruction trapped */
-#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
+#define IN_WFIT			__kvm_single_flag(sflags, BIT(3))
 /* vcpu system registers loaded on physical CPU */
-#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
+#define SYSREGS_ON_CPU		__kvm_single_flag(sflags, BIT(4))
 /* Software step state is Active-pending */
-#define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
+#define DBG_SS_ACTIVE_PENDING	__kvm_single_flag(sflags, BIT(5))
 /* PMUSERENR for the guest EL0 is on physical CPU */
-#define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(6))
+#define PMUSERENR_ON_CPU	__kvm_single_flag(sflags, BIT(6))
 /* WFI instruction trapped */
-#define IN_WFI			__vcpu_single_flag(sflags, BIT(7))
+#define IN_WFI			__kvm_single_flag(sflags, BIT(7))
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index 424a5107cddb..6bb61e933644 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -320,13 +320,13 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_el1_is_32bit(vcpu)) {
 		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
-		case unpack_vcpu_flag(EXCEPT_AA32_UND):
+		case unpack_kvm_flag(EXCEPT_AA32_UND):
 			enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);
 			break;
-		case unpack_vcpu_flag(EXCEPT_AA32_IABT):
+		case unpack_kvm_flag(EXCEPT_AA32_IABT):
 			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12);
 			break;
-		case unpack_vcpu_flag(EXCEPT_AA32_DABT):
+		case unpack_kvm_flag(EXCEPT_AA32_DABT):
 			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
 			break;
 		default:
@@ -335,15 +335,15 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 		}
 	} else {
 		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
-		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
+		case unpack_kvm_flag(EXCEPT_AA64_EL1_SYNC):
 			enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
 			break;
 
-		case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
+		case unpack_kvm_flag(EXCEPT_AA64_EL2_SYNC):
 			enter_exception64(vcpu, PSR_MODE_EL2h, except_type_sync);
 			break;
 
-		case unpack_vcpu_flag(EXCEPT_AA64_EL2_IRQ):
+		case unpack_kvm_flag(EXCEPT_AA64_EL2_IRQ):
 			enter_exception64(vcpu, PSR_MODE_EL2h, except_type_irq);
 			break;
 
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index a640e839848e..a7a2540cc507 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -83,7 +83,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 
 	esr |= ESR_ELx_FSC_EXTABT;
 
-	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) {
+	if (match_target_el(vcpu, unpack_kvm_flag(EXCEPT_AA64_EL1_SYNC))) {
 		vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
 		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
 	} else {
@@ -105,7 +105,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	if (kvm_vcpu_trap_il_is32bit(vcpu))
 		esr |= ESR_ELx_IL;
 
-	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC)))
+	if (match_target_el(vcpu, unpack_kvm_flag(EXCEPT_AA64_EL1_SYNC)))
 		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
 	else
 		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index ab365e839874..1728e37739fe 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -79,17 +79,17 @@ static bool kvm_pending_sync_exception(struct kvm_vcpu *vcpu)
 
 	if (vcpu_el1_is_32bit(vcpu)) {
 		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
-		case unpack_vcpu_flag(EXCEPT_AA32_UND):
-		case unpack_vcpu_flag(EXCEPT_AA32_IABT):
-		case unpack_vcpu_flag(EXCEPT_AA32_DABT):
+		case unpack_kvm_flag(EXCEPT_AA32_UND):
+		case unpack_kvm_flag(EXCEPT_AA32_IABT):
+		case unpack_kvm_flag(EXCEPT_AA32_DABT):
 			return true;
 		default:
 			return false;
 		}
 	} else {
 		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
-		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
-		case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
+		case unpack_kvm_flag(EXCEPT_AA64_EL1_SYNC):
+		case unpack_kvm_flag(EXCEPT_AA64_EL2_SYNC):
 			return true;
 		default:
 			return false;