On Thu, Sep 15, 2022 at 12:04:48AM +0000, Vishal Annapurve wrote:
> Update the Xen-specific hypercall invocations to execute the CPU-specific
> vmcall/vmmcall instruction.
>
> Suggested-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Vishal Annapurve <vannapurve@google.com>
> ---
>  .../selftests/kvm/x86_64/xen_shinfo_test.c | 64 +++++++------------
>  .../selftests/kvm/x86_64/xen_vmcall_test.c | 14 ++--
>  2 files changed, 34 insertions(+), 44 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
> index 8a5cb800f50e..92ed07f1c772 100644
> --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
> @@ -145,6 +145,23 @@ static void guest_wait_for_irq(void)
>  	guest_saw_irq = false;
>  }
>
> +static unsigned long vmcall_helper(unsigned long reg_a, unsigned long reg_di,
> +	unsigned long reg_si)
> +{
> +	unsigned long ret;
> +
> +	if (is_amd_cpu())
> +		__asm__ __volatile__ ("vmmcall" :
> +				      "=a" (ret) :
> +				      "a" (reg_a), "D" (reg_di), "S" (reg_si));
> +	else
> +		__asm__ __volatile__ ("vmcall" :
> +				      "=a" (ret) :
> +				      "a" (reg_a), "D" (reg_di), "S" (reg_si));
> +
> +	return ret;
> +}
> +
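
One nit while we're here: several of the callers below pass pointers (&s, &p)
whose pointees the hypervisor reads, but neither asm statement has a "memory"
clobber, so the compiler is in theory free to sink the stores that initialize
those structs past the hypercall. Probably worth adding, e.g.:

		__asm__ __volatile__ ("vmmcall" :
				      "=a" (ret) :
				      "a" (reg_a), "D" (reg_di), "S" (reg_si) :
				      "memory");
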
>  static void guest_code(void)
>  {
>  	struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
> @@ -217,12 +234,7 @@ static void guest_code(void)
>  	 * EVTCHNOP_send hypercall. */
>  	unsigned long rax;
>  	struct evtchn_send s = { .port = 127 };
> -	__asm__ __volatile__ ("vmcall" :
> -			      "=a" (rax) :
> -			      "a" (__HYPERVISOR_event_channel_op),
> -			      "D" (EVTCHNOP_send),
> -			      "S" (&s));
> +	rax = vmcall_helper(__HYPERVISOR_event_channel_op, EVTCHNOP_send, (unsigned long)&s);
>  	GUEST_ASSERT(rax == 0);
>
>  	guest_wait_for_irq();
> @@ -232,12 +244,7 @@ static void guest_code(void)
>  	/* Deliver "outbound" event channel to an eventfd which
>  	 * happens to be one of our own irqfds. */
>  	s.port = 197;
> -	__asm__ __volatile__ ("vmcall" :
> -			      "=a" (rax) :
> -			      "a" (__HYPERVISOR_event_channel_op),
> -			      "D" (EVTCHNOP_send),
> -			      "S" (&s));
> +	rax = vmcall_helper(__HYPERVISOR_event_channel_op, EVTCHNOP_send, (unsigned long)&s);
>  	GUEST_ASSERT(rax == 0);
>
>  	guest_wait_for_irq();
> @@ -245,10 +252,7 @@ static void guest_code(void)
>  	GUEST_SYNC(13);
>
>  	/* Set a timer 100ms in the future. */
> -	__asm__ __volatile__ ("vmcall" :
> -			      "=a" (rax) :
> -			      "a" (__HYPERVISOR_set_timer_op),
> -			      "D" (rs->state_entry_time + 100000000));
> +	rax = vmcall_helper(__HYPERVISOR_set_timer_op, (rs->state_entry_time + 100000000), 0);
>  	GUEST_ASSERT(rax == 0);
>
>  	GUEST_SYNC(14);
> @@ -271,36 +275,21 @@ static void guest_code(void)
>  		.timeout = 0,
>  	};
> -	__asm__ __volatile__ ("vmcall" :
> -			      "=a" (rax) :
> -			      "a" (__HYPERVISOR_sched_op),
> -			      "D" (SCHEDOP_poll),
> -			      "S" (&p));
> +	rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
>  	GUEST_ASSERT(rax == 0);
>
>  	GUEST_SYNC(17);
>
>  	/* Poll for an unset port and wait for the timeout. */
>  	p.timeout = 100000000;
> -	__asm__ __volatile__ ("vmcall" :
> -			      "=a" (rax) :
> -			      "a" (__HYPERVISOR_sched_op),
> -			      "D" (SCHEDOP_poll),
> -			      "S" (&p));
> +	rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
>  	GUEST_ASSERT(rax == 0);
>
>  	GUEST_SYNC(18);
>
>  	/* A timer will wake the masked port we're waiting on, while we poll */
>  	p.timeout = 0;
> -	__asm__ __volatile__ ("vmcall" :
> -			      "=a" (rax) :
> -			      "a" (__HYPERVISOR_sched_op),
> -			      "D" (SCHEDOP_poll),
> -			      "S" (&p));
> +	rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
>  	GUEST_ASSERT(rax == 0);
>
>  	GUEST_SYNC(19);
> @@ -309,12 +298,7 @@ static void guest_code(void)
>  	 * actual interrupt, while we're polling on a different port. */
>  	ports[0]++;
>  	p.timeout = 0;
> -	__asm__ __volatile__ ("vmcall" :
> -			      "=a" (rax) :
> -			      "a" (__HYPERVISOR_sched_op),
> -			      "D" (SCHEDOP_poll),
> -			      "S" (&p));
> +	rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
>  	GUEST_ASSERT(rax == 0);
>
>  	guest_wait_for_irq();
>
> diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
> index 88914d48c65e..e78f1b5d3af8 100644
> --- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
> @@ -37,10 +37,16 @@ static void guest_code(void)
>  	register unsigned long r9 __asm__("r9") = ARGVALUE(6);
>
>  	/* First a direct invocation of 'vmcall' */
> -	__asm__ __volatile__("vmcall" :
> -			     "=a"(rax) :
> -			     "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
> -			     "r"(r10), "r"(r8), "r"(r9));
> +	if (is_amd_cpu())
> +		__asm__ __volatile__("vmmcall" :
> +				     "=a"(rax) :
> +				     "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
> +				     "r"(r10), "r"(r8), "r"(r9));
> +	else
> +		__asm__ __volatile__("vmcall" :
> +				     "=a"(rax) :
> +				     "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
> +				     "r"(r10), "r"(r8), "r"(r9));

Can we create common helper functions or macros for doing hypercalls to reduce the amount of duplicated inline assembly?
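
Something like the below, as a completely untested sketch (the helper name and
the idea of putting it in processor.h are strawmen, not an existing selftests
API), would let both tests share one implementation:

	/*
	 * Hypothetical common helper, e.g. somewhere like
	 * tools/testing/selftests/kvm/include/x86_64/processor.h.
	 */
	static inline unsigned long xen_hypercall(unsigned long nr,
						  unsigned long a0,
						  unsigned long a1)
	{
		unsigned long ret;

		/* AMD CPUs trap on VMMCALL, Intel CPUs on VMCALL. */
		if (is_amd_cpu())
			__asm__ __volatile__("vmmcall"
					     : "=a" (ret)
					     : "a" (nr), "D" (a0), "S" (a1)
					     : "memory");
		else
			__asm__ __volatile__("vmcall"
					     : "=a" (ret)
					     : "a" (nr), "D" (a0), "S" (a1)
					     : "memory");
		return ret;
	}

xen_shinfo_test.c could then drop its local vmcall_helper() entirely;
xen_vmcall_test.c's six-register invocation would need either extra parameters
or its own variant, but the is_amd_cpu() check would live in exactly one place.
(The "memory" clobber is my addition, per the note above.)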

>  	GUEST_ASSERT(rax == RETVALUE);
>
>  	/* Fill in the Xen hypercall page */
>
> --
> 2.37.2.789.g6183377224-goog