Refactor the memop selftest and add new tests. Add storage key tests, covering both success and failure cases, and exercise both the vcpu and the vm ioctls.
v1 -> v2
 * restructure commits
 * get rid of test_* wrapper functions that hid vm.vm
 * minor changes

v0 -> v2
 * complete rewrite
v1: https://lore.kernel.org/kvm/20220217145336.1794778-1-scgl@linux.ibm.com/
v0: https://lore.kernel.org/kvm/20220211182215.2730017-11-scgl@linux.ibm.com/
Janis Schoetterl-Glausch (5):
  KVM: s390: selftests: Split memop tests
  KVM: s390: selftests: Add macro as abstraction for MEM_OP
  KVM: s390: selftests: Add named stages for memop test
  KVM: s390: selftests: Add more copy memop tests
  KVM: s390: selftests: Add error memop tests
 tools/testing/selftests/kvm/s390x/memop.c | 735 ++++++++++++++++++----
 1 file changed, 617 insertions(+), 118 deletions(-)
base-commit: ee6a569d3bf64c9676eee3eecb861fb01cc11311
Split the success case/copy test from the error test, making them independent. This means they do not share state and are easier to understand. Also, new tests can be added in the same manner without affecting the old ones. In order to make that simpler, introduce functionality for the setup of commonly used variables.
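As a rough sketch of the resulting shape (using the helpers introduced in the diff below), each test case now creates and frees its own VM:

	static void test_copy(void)
	{
		struct test_default t = test_default_init(guest_copy);

		/* ... drive the guest and issue MEM_OP ioctls against t.kvm_vm ... */

		kvm_vm_free(t.kvm_vm);
	}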
Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
---
 tools/testing/selftests/kvm/s390x/memop.c | 137 +++++++++++++---------
 1 file changed, 82 insertions(+), 55 deletions(-)
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index d19c3ffdea3f..b9b673acb766 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -18,71 +18,82 @@ static uint8_t mem1[65536]; static uint8_t mem2[65536];
-static void guest_code(void) +struct test_default { + struct kvm_vm *kvm_vm; + struct kvm_run *run; + int size; +}; + +static struct test_default test_default_init(void *guest_code) { - int i; + struct test_default t;
- for (;;) { - for (i = 0; i < sizeof(mem2); i++) - mem2[i] = mem1[i]; - GUEST_SYNC(0); - } + t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1)); + t.kvm_vm = vm_create_default(VCPU_ID, 0, guest_code); + t.run = vcpu_state(t.kvm_vm, VCPU_ID); + return t; }
-int main(int argc, char *argv[]) +static void guest_copy(void) { - struct kvm_vm *vm; - struct kvm_run *run; - struct kvm_s390_mem_op ksmo; - int rv, i, maxsize; - - setbuf(stdout, NULL); /* Tell stdout not to buffer its content */ - - maxsize = kvm_check_cap(KVM_CAP_S390_MEM_OP); - if (!maxsize) { - print_skip("CAP_S390_MEM_OP not supported"); - exit(KSFT_SKIP); - } - if (maxsize > sizeof(mem1)) - maxsize = sizeof(mem1); + memcpy(&mem2, &mem1, sizeof(mem2)); + GUEST_SYNC(0); +}
- /* Create VM */ - vm = vm_create_default(VCPU_ID, 0, guest_code); - run = vcpu_state(vm, VCPU_ID); +static void test_copy(void) +{ + struct test_default t = test_default_init(guest_copy); + struct kvm_s390_mem_op ksmo; + int i;
for (i = 0; i < sizeof(mem1); i++) mem1[i] = i * i + i;
/* Set the first array */ - ksmo.gaddr = addr_gva2gpa(vm, (uintptr_t)mem1); + ksmo.gaddr = addr_gva2gpa(t.kvm_vm, (uintptr_t)mem1); ksmo.flags = 0; - ksmo.size = maxsize; + ksmo.size = t.size; ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; ksmo.buf = (uintptr_t)mem1; ksmo.ar = 0; - vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
/* Let the guest code copy the first array to the second */ - vcpu_run(vm, VCPU_ID); - TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC, + vcpu_run(t.kvm_vm, VCPU_ID); + TEST_ASSERT(t.run->exit_reason == KVM_EXIT_S390_SIEIC, "Unexpected exit reason: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + t.run->exit_reason, + exit_reason_str(t.run->exit_reason));
memset(mem2, 0xaa, sizeof(mem2));
/* Get the second array */ ksmo.gaddr = (uintptr_t)mem2; ksmo.flags = 0; - ksmo.size = maxsize; + ksmo.size = t.size; ksmo.op = KVM_S390_MEMOP_LOGICAL_READ; ksmo.buf = (uintptr_t)mem2; ksmo.ar = 0; - vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
- TEST_ASSERT(!memcmp(mem1, mem2, maxsize), + TEST_ASSERT(!memcmp(mem1, mem2, t.size), "Memory contents do not match!");
+ kvm_vm_free(t.kvm_vm); +} + +static void guest_idle(void) +{ + for (;;) + GUEST_SYNC(0); +} + +static void test_errors(void) +{ + struct test_default t = test_default_init(guest_idle); + struct kvm_s390_mem_op ksmo; + int rv; + /* Check error conditions - first bad size: */ ksmo.gaddr = (uintptr_t)mem1; ksmo.flags = 0; @@ -90,7 +101,7 @@ int main(int argc, char *argv[]) ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; ksmo.buf = (uintptr_t)mem1; ksmo.ar = 0; - rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");
/* Zero size: */ @@ -100,65 +111,65 @@ int main(int argc, char *argv[]) ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; ksmo.buf = (uintptr_t)mem1; ksmo.ar = 0; - rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM), "ioctl allows 0 as size");
/* Bad flags: */ ksmo.gaddr = (uintptr_t)mem1; ksmo.flags = -1; - ksmo.size = maxsize; + ksmo.size = t.size; ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; ksmo.buf = (uintptr_t)mem1; ksmo.ar = 0; - rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");
/* Bad operation: */ ksmo.gaddr = (uintptr_t)mem1; ksmo.flags = 0; - ksmo.size = maxsize; + ksmo.size = t.size; ksmo.op = -1; ksmo.buf = (uintptr_t)mem1; ksmo.ar = 0; - rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
/* Bad guest address: */ ksmo.gaddr = ~0xfffUL; ksmo.flags = KVM_S390_MEMOP_F_CHECK_ONLY; - ksmo.size = maxsize; + ksmo.size = t.size; ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; ksmo.buf = (uintptr_t)mem1; ksmo.ar = 0; - rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");
/* Bad host address: */ ksmo.gaddr = (uintptr_t)mem1; ksmo.flags = 0; - ksmo.size = maxsize; + ksmo.size = t.size; ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; ksmo.buf = 0; ksmo.ar = 0; - rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); TEST_ASSERT(rv == -1 && errno == EFAULT, "ioctl does not report bad host memory address");
/* Bad access register: */ - run->psw_mask &= ~(3UL << (63 - 17)); - run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */ - vcpu_run(vm, VCPU_ID); /* To sync new state to SIE block */ + t.run->psw_mask &= ~(3UL << (63 - 17)); + t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */ + vcpu_run(t.kvm_vm, VCPU_ID); /* To sync new state to SIE block */ ksmo.gaddr = (uintptr_t)mem1; ksmo.flags = 0; - ksmo.size = maxsize; + ksmo.size = t.size; ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; ksmo.buf = (uintptr_t)mem1; ksmo.ar = 17; - rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15"); - run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */ - vcpu_run(vm, VCPU_ID); /* Run to sync new state */ + t.run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */ + vcpu_run(t.kvm_vm, VCPU_ID); /* Run to sync new state */
/* Check that the SIDA calls are rejected for non-protected guests */ ksmo.gaddr = 0; @@ -167,15 +178,31 @@ int main(int argc, char *argv[]) ksmo.op = KVM_S390_MEMOP_SIDA_READ; ksmo.buf = (uintptr_t)mem1; ksmo.sida_offset = 0x1c0; - rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl does not reject SIDA_READ in non-protected mode"); ksmo.op = KVM_S390_MEMOP_SIDA_WRITE; - rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl does not reject SIDA_WRITE in non-protected mode");
- kvm_vm_free(vm); + kvm_vm_free(t.kvm_vm); +} + +int main(int argc, char *argv[]) +{ + int memop_cap; + + setbuf(stdout, NULL); /* Tell stdout not to buffer its content */ + + memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP); + if (!memop_cap) { + print_skip("CAP_S390_MEM_OP not supported"); + exit(KSFT_SKIP); + } + + test_copy(); + test_errors();
return 0; }
In order to achieve good test coverage we need to be able to invoke the MEM_OP ioctl with all possible parametrizations. However, for a given test we want to be concise and not specify a long list of default values for parameters not relevant to the test, so the reader's attention is not needlessly diverted. Add a macro that enables this and convert the existing tests to use it. The macro emulates named arguments and hides some of the ioctl's redundancy, e.g. it sets the key flag if an access key is specified.
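To illustrate, converted call sites read roughly like this (both lines are taken from the diff below); only the parameters a test actually cares about show up:

	MOP(t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2));
	rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));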
Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
---
 tools/testing/selftests/kvm/s390x/memop.c | 272 ++++++++++++++++------
 1 file changed, 197 insertions(+), 75 deletions(-)
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index b9b673acb766..e2ad3d70bae4 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -13,6 +13,188 @@ #include "test_util.h" #include "kvm_util.h"
+enum mop_target { + LOGICAL, + SIDA, + ABSOLUTE, + INVALID, +}; + +enum mop_access_mode { + READ, + WRITE, +}; + +struct mop_desc { + uintptr_t gaddr; + uintptr_t gaddr_v; + uint64_t set_flags; + unsigned int f_check : 1; + unsigned int f_inject : 1; + unsigned int f_key : 1; + unsigned int _gaddr_v : 1; + unsigned int _set_flags : 1; + unsigned int _sida_offset : 1; + unsigned int _ar : 1; + uint32_t size; + enum mop_target target; + enum mop_access_mode mode; + void *buf; + uint32_t sida_offset; + uint8_t ar; + uint8_t key; +}; + +static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc desc) +{ + struct kvm_s390_mem_op ksmo = { + .gaddr = (uintptr_t)desc.gaddr, + .size = desc.size, + .buf = ((uintptr_t)desc.buf), + .reserved = "ignored_ignored_ignored_ignored" + }; + + switch (desc.target) { + case LOGICAL: + if (desc.mode == READ) + ksmo.op = KVM_S390_MEMOP_LOGICAL_READ; + if (desc.mode == WRITE) + ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; + break; + case SIDA: + if (desc.mode == READ) + ksmo.op = KVM_S390_MEMOP_SIDA_READ; + if (desc.mode == WRITE) + ksmo.op = KVM_S390_MEMOP_SIDA_WRITE; + break; + case ABSOLUTE: + if (desc.mode == READ) + ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ; + if (desc.mode == WRITE) + ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE; + break; + case INVALID: + ksmo.op = -1; + } + if (desc.f_check) + ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY; + if (desc.f_inject) + ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION; + if (desc._set_flags) + ksmo.flags = desc.set_flags; + if (desc.f_key) { + ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION; + ksmo.key = desc.key; + } + if (desc._ar) + ksmo.ar = desc.ar; + else + ksmo.ar = 0; + if (desc._sida_offset) + ksmo.sida_offset = desc.sida_offset; + + return ksmo; +} + +/* vcpu dummy id signifying that vm instead of vcpu ioctl is to occur */ +const uint32_t VM_VCPU_ID = (uint32_t)-1; + +struct test_vcpu { + struct kvm_vm *vm; + uint32_t id; +}; + +#define PRINT_MEMOP false +static void print_memop(uint32_t vcpu_id, const struct kvm_s390_mem_op *ksmo) +{ + if (!PRINT_MEMOP) + return; + + if (vcpu_id == VM_VCPU_ID) + printf("vm memop("); + else + printf("vcpu memop("); + switch (ksmo->op) { + case KVM_S390_MEMOP_LOGICAL_READ: + printf("LOGICAL, READ, "); + break; + case KVM_S390_MEMOP_LOGICAL_WRITE: + printf("LOGICAL, WRITE, "); + break; + case KVM_S390_MEMOP_SIDA_READ: + printf("SIDA, READ, "); + break; + case KVM_S390_MEMOP_SIDA_WRITE: + printf("SIDA, WRITE, "); + break; + case KVM_S390_MEMOP_ABSOLUTE_READ: + printf("ABSOLUTE, READ, "); + break; + case KVM_S390_MEMOP_ABSOLUTE_WRITE: + printf("ABSOLUTE, WRITE, "); + break; + } + printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u", + ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key); + if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY) + printf(", CHECK_ONLY"); + if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) + printf(", INJECT_EXCEPTION"); + if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) + printf(", SKEY_PROTECTION"); + puts(")"); +} + +static void memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo) +{ + if (vcpu.id == VM_VCPU_ID) + vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo); + else + vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo); +} + +static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo) +{ + if (vcpu.id == VM_VCPU_ID) + return _vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo); + else + return _vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo); +} + +#define MEMOP(err, vcpu_p, mop_target_p, access_mode_p, buf_p, size_p, ...) 
\ +({ \ + struct test_vcpu __vcpu = (vcpu_p); \ + struct mop_desc __desc = { \ + .target = (mop_target_p), \ + .mode = (access_mode_p), \ + .buf = (buf_p), \ + .size = (size_p), \ + __VA_ARGS__ \ + }; \ + struct kvm_s390_mem_op __ksmo; \ + \ + if (__desc._gaddr_v) { \ + if (__desc.target == ABSOLUTE) \ + __desc.gaddr = addr_gva2gpa(__vcpu.vm, __desc.gaddr_v); \ + else \ + __desc.gaddr = __desc.gaddr_v; \ + } \ + __ksmo = ksmo_from_desc(__desc); \ + print_memop(__vcpu.id, &__ksmo); \ + err##memop_ioctl(__vcpu, &__ksmo); \ +}) + +#define MOP(...) MEMOP(, __VA_ARGS__) +#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__) + +#define GADDR(a) .gaddr = ((uintptr_t)a) +#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v) +#define CHECK_ONLY .f_check = 1 +#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f) +#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o) +#define AR(a) ._ar = 1, .ar = (a) +#define KEY(a) .f_key = 1, .key = (a) + #define VCPU_ID 1
static uint8_t mem1[65536]; @@ -20,6 +202,7 @@ static uint8_t mem2[65536];
struct test_default { struct kvm_vm *kvm_vm; + struct test_vcpu vcpu; struct kvm_run *run; int size; }; @@ -30,6 +213,7 @@ static struct test_default test_default_init(void *guest_code)
t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1)); t.kvm_vm = vm_create_default(VCPU_ID, 0, guest_code); + t.vcpu = (struct test_vcpu) { t.kvm_vm, VCPU_ID }; t.run = vcpu_state(t.kvm_vm, VCPU_ID); return t; } @@ -43,20 +227,14 @@ static void guest_copy(void) static void test_copy(void) { struct test_default t = test_default_init(guest_copy); - struct kvm_s390_mem_op ksmo; int i;
for (i = 0; i < sizeof(mem1); i++) mem1[i] = i * i + i;
/* Set the first array */ - ksmo.gaddr = addr_gva2gpa(t.kvm_vm, (uintptr_t)mem1); - ksmo.flags = 0; - ksmo.size = t.size; - ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; - ksmo.buf = (uintptr_t)mem1; - ksmo.ar = 0; - vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, + GADDR(addr_gva2gpa(t.kvm_vm, (uintptr_t)mem1)));
/* Let the guest code copy the first array to the second */ vcpu_run(t.kvm_vm, VCPU_ID); @@ -68,13 +246,7 @@ static void test_copy(void) memset(mem2, 0xaa, sizeof(mem2));
/* Get the second array */ - ksmo.gaddr = (uintptr_t)mem2; - ksmo.flags = 0; - ksmo.size = t.size; - ksmo.op = KVM_S390_MEMOP_LOGICAL_READ; - ksmo.buf = (uintptr_t)mem2; - ksmo.ar = 0; - vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + MOP(t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2));
TEST_ASSERT(!memcmp(mem1, mem2, t.size), "Memory contents do not match!"); @@ -91,68 +263,31 @@ static void guest_idle(void) static void test_errors(void) { struct test_default t = test_default_init(guest_idle); - struct kvm_s390_mem_op ksmo; int rv;
- /* Check error conditions - first bad size: */ - ksmo.gaddr = (uintptr_t)mem1; - ksmo.flags = 0; - ksmo.size = -1; - ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; - ksmo.buf = (uintptr_t)mem1; - ksmo.ar = 0; - rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + /* Bad size: */ + rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, -1, GADDR_V(mem1)); TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");
/* Zero size: */ - ksmo.gaddr = (uintptr_t)mem1; - ksmo.flags = 0; - ksmo.size = 0; - ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; - ksmo.buf = (uintptr_t)mem1; - ksmo.ar = 0; - rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, 0, GADDR_V(mem1)); TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM), "ioctl allows 0 as size");
/* Bad flags: */ - ksmo.gaddr = (uintptr_t)mem1; - ksmo.flags = -1; - ksmo.size = t.size; - ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; - ksmo.buf = (uintptr_t)mem1; - ksmo.ar = 0; - rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), SET_FLAGS(-1)); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");
/* Bad operation: */ - ksmo.gaddr = (uintptr_t)mem1; - ksmo.flags = 0; - ksmo.size = t.size; - ksmo.op = -1; - ksmo.buf = (uintptr_t)mem1; - ksmo.ar = 0; - rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1)); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
/* Bad guest address: */ - ksmo.gaddr = ~0xfffUL; - ksmo.flags = KVM_S390_MEMOP_F_CHECK_ONLY; - ksmo.size = t.size; - ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; - ksmo.buf = (uintptr_t)mem1; - ksmo.ar = 0; - rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR((void *)~0xfffUL), CHECK_ONLY); TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");
/* Bad host address: */ - ksmo.gaddr = (uintptr_t)mem1; - ksmo.flags = 0; - ksmo.size = t.size; - ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; - ksmo.buf = 0; - ksmo.ar = 0; - rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, 0, t.size, GADDR_V(mem1)); TEST_ASSERT(rv == -1 && errno == EFAULT, "ioctl does not report bad host memory address");
@@ -160,29 +295,16 @@ static void test_errors(void) t.run->psw_mask &= ~(3UL << (63 - 17)); t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */ vcpu_run(t.kvm_vm, VCPU_ID); /* To sync new state to SIE block */ - ksmo.gaddr = (uintptr_t)mem1; - ksmo.flags = 0; - ksmo.size = t.size; - ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; - ksmo.buf = (uintptr_t)mem1; - ksmo.ar = 17; - rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17)); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15"); t.run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */ vcpu_run(t.kvm_vm, VCPU_ID); /* Run to sync new state */
/* Check that the SIDA calls are rejected for non-protected guests */ - ksmo.gaddr = 0; - ksmo.flags = 0; - ksmo.size = 8; - ksmo.op = KVM_S390_MEMOP_SIDA_READ; - ksmo.buf = (uintptr_t)mem1; - ksmo.sida_offset = 0x1c0; - rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0)); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl does not reject SIDA_READ in non-protected mode"); - ksmo.op = KVM_S390_MEMOP_SIDA_WRITE; - rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); + rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0)); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl does not reject SIDA_WRITE in non-protected mode");
The stages synchronize guest and host execution. This helps the reader and constrains the execution of the test -- if the observed stage differs from the expected one, the test fails.
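As a sketch, guest and host now rendezvous on named stages instead of a bare GUEST_SYNC(0) (stage names as introduced in the diff below):

	/* guest */
	GUEST_SYNC(STAGE_INITED);
	memcpy(&mem2, &mem1, sizeof(mem2));
	GUEST_SYNC(STAGE_COPIED);

	/* host */
	HOST_SYNC(t.vcpu, STAGE_INITED);
	/* ... issue memops ... */
	HOST_SYNC(t.vcpu, STAGE_COPIED);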
Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
---
 tools/testing/selftests/kvm/s390x/memop.c | 44 +++++++++++++++++------
 1 file changed, 33 insertions(+), 11 deletions(-)
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index e2ad3d70bae4..88ce2d670ed5 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -218,10 +218,32 @@ static struct test_default test_default_init(void *guest_code) return t; }
+enum stage { + /* Synced state set by host, e.g. DAT */ + STAGE_INITED, + /* Guest did nothing */ + STAGE_IDLED, + /* Guest copied memory (locations up to test case) */ + STAGE_COPIED, +}; + +#define HOST_SYNC(vcpu_p, stage) \ +({ \ + struct test_vcpu __vcpu = (vcpu_p); \ + struct ucall uc; \ + int __stage = (stage); \ + \ + vcpu_run(__vcpu.vm, __vcpu.id); \ + get_ucall(__vcpu.vm, __vcpu.id, &uc); \ + ASSERT_EQ(uc.cmd, UCALL_SYNC); \ + ASSERT_EQ(uc.args[1], __stage); \ +}) \ + static void guest_copy(void) { + GUEST_SYNC(STAGE_INITED); memcpy(&mem2, &mem1, sizeof(mem2)); - GUEST_SYNC(0); + GUEST_SYNC(STAGE_COPIED); }
static void test_copy(void) @@ -232,16 +254,13 @@ static void test_copy(void) for (i = 0; i < sizeof(mem1); i++) mem1[i] = i * i + i;
+ HOST_SYNC(t.vcpu, STAGE_INITED); + /* Set the first array */ - MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, - GADDR(addr_gva2gpa(t.kvm_vm, (uintptr_t)mem1))); + MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1));
/* Let the guest code copy the first array to the second */ - vcpu_run(t.kvm_vm, VCPU_ID); - TEST_ASSERT(t.run->exit_reason == KVM_EXIT_S390_SIEIC, - "Unexpected exit reason: %u (%s)\n", - t.run->exit_reason, - exit_reason_str(t.run->exit_reason)); + HOST_SYNC(t.vcpu, STAGE_COPIED);
memset(mem2, 0xaa, sizeof(mem2));
@@ -256,8 +275,9 @@ static void test_copy(void)
static void guest_idle(void) { + GUEST_SYNC(STAGE_INITED); /* for consistency's sake */ for (;;) - GUEST_SYNC(0); + GUEST_SYNC(STAGE_IDLED); }
static void test_errors(void) @@ -265,6 +285,8 @@ static void test_errors(void) struct test_default t = test_default_init(guest_idle); int rv;
+ HOST_SYNC(t.vcpu, STAGE_INITED); + /* Bad size: */ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, -1, GADDR_V(mem1)); TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes"); @@ -294,11 +316,11 @@ static void test_errors(void) /* Bad access register: */ t.run->psw_mask &= ~(3UL << (63 - 17)); t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */ - vcpu_run(t.kvm_vm, VCPU_ID); /* To sync new state to SIE block */ + HOST_SYNC(t.vcpu, STAGE_IDLED); /* To sync new state to SIE block */ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17)); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15"); t.run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */ - vcpu_run(t.kvm_vm, VCPU_ID); /* Run to sync new state */ + HOST_SYNC(t.vcpu, STAGE_IDLED); /* Run to sync new state */
/* Check that the SIDA calls are rejected for non-protected guests */ rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
Do not just test the actual copy, but also that success is indicated when using the check-only flag. Add copy tests with storage key checking enabled, including tests for storage and fetch protection override. These tests cover both the logical vcpu ioctl and the absolute vm ioctl.
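For example, a key-checked copy through the absolute vm ioctl is exercised as below (taken from the diff); DEFAULT_WRITE_READ writes mem1 into the guest, lets the guest copy it, reads the result back into mem2 and compares, performing each access once with CHECK_ONLY and once for real:

	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9));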
Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
---
 tools/testing/selftests/kvm/s390x/memop.c | 243 ++++++++++++++++++++--
 1 file changed, 230 insertions(+), 13 deletions(-)
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index 88ce2d670ed5..c46d47672e2a 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -195,13 +195,21 @@ static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo) #define AR(a) ._ar = 1, .ar = (a) #define KEY(a) .f_key = 1, .key = (a)
+#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); }) + #define VCPU_ID 1 +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1ULL << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38)) +#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
static uint8_t mem1[65536]; static uint8_t mem2[65536];
struct test_default { struct kvm_vm *kvm_vm; + struct test_vcpu vm; struct test_vcpu vcpu; struct kvm_run *run; int size; @@ -213,6 +221,7 @@ static struct test_default test_default_init(void *guest_code)
t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1)); t.kvm_vm = vm_create_default(VCPU_ID, 0, guest_code); + t.vm = (struct test_vcpu) { t.kvm_vm, VM_VCPU_ID }; t.vcpu = (struct test_vcpu) { t.kvm_vm, VCPU_ID }; t.run = vcpu_state(t.kvm_vm, VCPU_ID); return t; @@ -223,6 +232,8 @@ enum stage { STAGE_INITED, /* Guest did nothing */ STAGE_IDLED, + /* Guest set storage keys (specifics up to test case) */ + STAGE_SKEYS_SET, /* Guest copied memory (locations up to test case) */ STAGE_COPIED, }; @@ -239,6 +250,47 @@ enum stage { ASSERT_EQ(uc.args[1], __stage); \ }) \
+static void prepare_mem12(void) +{ + int i; + + for (i = 0; i < sizeof(mem1); i++) + mem1[i] = rand(); + memset(mem2, 0xaa, sizeof(mem2)); +} + +#define ASSERT_MEM_EQ(p1, p2, size) \ + TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!") + +#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \ +({ \ + struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \ + enum mop_target __target = (mop_target_p); \ + uint32_t __size = (size); \ + \ + prepare_mem12(); \ + CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \ + GADDR_V(mem1), ##__VA_ARGS__); \ + HOST_SYNC(__copy_cpu, STAGE_COPIED); \ + CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, \ + GADDR_V(mem2), ##__VA_ARGS__); \ + ASSERT_MEM_EQ(mem1, mem2, __size); \ +}) + +#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \ +({ \ + struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \ + enum mop_target __target = (mop_target_p); \ + uint32_t __size = (size); \ + \ + prepare_mem12(); \ + CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \ + GADDR_V(mem1)); \ + HOST_SYNC(__copy_cpu, STAGE_COPIED); \ + CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__);\ + ASSERT_MEM_EQ(mem1, mem2, __size); \ +}) + static void guest_copy(void) { GUEST_SYNC(STAGE_INITED); @@ -249,30 +301,186 @@ static void guest_copy(void) static void test_copy(void) { struct test_default t = test_default_init(guest_copy); - int i;
- for (i = 0; i < sizeof(mem1); i++) - mem1[i] = i * i + i; + HOST_SYNC(t.vcpu, STAGE_INITED); + + DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size); + + kvm_vm_free(t.kvm_vm); +} + +static void set_storage_key_range(void *addr, size_t len, uint8_t key) +{ + uintptr_t _addr, abs, i; + int not_mapped = 0; + + _addr = (uintptr_t)addr; + for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) { + abs = i; + asm volatile ( + "lra %[abs], 0(0,%[abs])\n" + " jz 0f\n" + " llill %[not_mapped],1\n" + " j 1f\n" + "0: sske %[key], %[abs]\n" + "1:" + : [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped) + : [key] "r" (key) + : "cc" + ); + GUEST_ASSERT_EQ(not_mapped, 0); + } +} + +static void guest_copy_key(void) +{ + set_storage_key_range(mem1, sizeof(mem1), 0x90); + set_storage_key_range(mem2, sizeof(mem2), 0x90); + GUEST_SYNC(STAGE_SKEYS_SET); + + for (;;) { + memcpy(&mem2, &mem1, sizeof(mem2)); + GUEST_SYNC(STAGE_COPIED); + } +} + +static void test_copy_key(void) +{ + struct test_default t = test_default_init(guest_copy_key); + + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); + + /* vm, no key */ + DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size); + + /* vm/vcpu, machting key or key 0 */ + DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0)); + DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9)); + DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0)); + DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9)); + /* + * There used to be different code paths for key handling depending on + * if the region crossed a page boundary. + * There currently are not, but the more tests the merrier. + */ + DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0)); + DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9)); + DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0)); + DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9)); + + /* vm/vcpu, mismatching keys on read, but no fetch protection */ + DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2)); + DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2)); + + kvm_vm_free(t.kvm_vm); +} + +static void guest_copy_key_fetch_prot(void) +{ + /* + * For some reason combining the first sync with override enablement + * results in an exception when calling HOST_SYNC. + */ + GUEST_SYNC(STAGE_INITED); + /* Storage protection override applies to both store and fetch. */ + set_storage_key_range(mem1, sizeof(mem1), 0x98); + set_storage_key_range(mem2, sizeof(mem2), 0x98); + GUEST_SYNC(STAGE_SKEYS_SET); + + for (;;) { + memcpy(&mem2, &mem1, sizeof(mem2)); + GUEST_SYNC(STAGE_COPIED); + } +} + +static void test_copy_key_storage_prot_override(void) +{ + struct test_default t = test_default_init(guest_copy_key_fetch_prot);
HOST_SYNC(t.vcpu, STAGE_INITED); + t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE; + t.run->kvm_dirty_regs = KVM_SYNC_CRS; + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
- /* Set the first array */ - MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1)); + /* vcpu, mismatching keys, storage protection override in effect */ + DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(2));
- /* Let the guest code copy the first array to the second */ - HOST_SYNC(t.vcpu, STAGE_COPIED); + kvm_vm_free(t.kvm_vm); +}
- memset(mem2, 0xaa, sizeof(mem2)); +static void test_copy_key_fetch_prot(void) +{ + struct test_default t = test_default_init(guest_copy_key_fetch_prot);
- /* Get the second array */ - MOP(t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2)); + HOST_SYNC(t.vcpu, STAGE_INITED); + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
- TEST_ASSERT(!memcmp(mem1, mem2, t.size), - "Memory contents do not match!"); + /* vm/vcpu, matching key, fetch protection in effect */ + DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(9)); + DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem2), KEY(9));
kvm_vm_free(t.kvm_vm); }
+const uint64_t last_page_addr = -PAGE_SIZE; + +static void guest_copy_key_fetch_prot_override(void) +{ + int i; + char *page_0 = 0; + + GUEST_SYNC(STAGE_INITED); + set_storage_key_range(0, PAGE_SIZE, 0x18); + set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0); + asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0), [key] "r"(0x18) : "cc"); + GUEST_SYNC(STAGE_SKEYS_SET); + + for (;;) { + for (i = 0; i < PAGE_SIZE; i++) + page_0[i] = mem1[i]; + GUEST_SYNC(STAGE_COPIED); + } +} + +static void test_copy_key_fetch_prot_override(void) +{ + struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); + vm_vaddr_t guest_0_page, guest_last_page; + + guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); + guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); + if (guest_0_page != 0 || guest_last_page != last_page_addr) { + print_skip("did not allocate guest pages at required positions"); + goto out; + } + + HOST_SYNC(t.vcpu, STAGE_INITED); + t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE; + t.run->kvm_dirty_regs = KVM_SYNC_CRS; + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); + + /* vcpu, mismatching keys on fetch, fetch protection override applies */ + prepare_mem12(); + MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1)); + HOST_SYNC(t.vcpu, STAGE_COPIED); + CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2)); + ASSERT_MEM_EQ(mem1, mem2, 2048); + + /* + * vcpu, mismatching keys on fetch, fetch protection override applies, + * wraparound + */ + prepare_mem12(); + MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page)); + HOST_SYNC(t.vcpu, STAGE_COPIED); + CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048, + GADDR_V(guest_last_page), KEY(2)); + ASSERT_MEM_EQ(mem1, mem2, 2048); + +out: + kvm_vm_free(t.kvm_vm); +} + static void guest_idle(void) { GUEST_SYNC(STAGE_INITED); /* for consistency's sake */ @@ -335,17 +543,26 @@ static void test_errors(void)
int main(int argc, char *argv[]) { - int memop_cap; + int memop_cap, extension_cap;
setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP); + extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION); if (!memop_cap) { print_skip("CAP_S390_MEM_OP not supported"); exit(KSFT_SKIP); }
test_copy(); + if (extension_cap > 0) { + test_copy_key(); + test_copy_key_storage_prot_override(); + test_copy_key_fetch_prot(); + test_copy_key_fetch_prot_override(); + } else { + print_skip("storage key memop extension not supported"); + } test_errors();
return 0;
Test that errors occur if key protection disallows access, including tests for storage and fetch protection override. Perform the tests for both the logical vcpu and the absolute vm ioctls. Also extend the existing error tests to the vm ioctl.
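For example, an access with a mismatching key against fetch-protected memory is expected to fail with a protection exception, both as a check-only and as a real access (taken from the diff below):

	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));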
Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
---
 tools/testing/selftests/kvm/s390x/memop.c | 137 ++++++++++++++++++++--
 1 file changed, 124 insertions(+), 13 deletions(-)
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index c46d47672e2a..638a0609426a 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -422,6 +422,46 @@ static void test_copy_key_fetch_prot(void) kvm_vm_free(t.kvm_vm); }
+#define ERR_PROT_MOP(...) \ +({ \ + int rv; \ + \ + rv = ERR_MOP(__VA_ARGS__); \ + TEST_ASSERT(rv == 4, "Should result in protection exception"); \ +}) + +static void test_errors_key(void) +{ + struct test_default t = test_default_init(guest_copy_key_fetch_prot); + + HOST_SYNC(t.vcpu, STAGE_INITED); + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); + + /* vm/vcpu, mismatching keys, fetch protection in effect */ + CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2)); + CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2), KEY(2)); + CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2)); + CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2)); + + kvm_vm_free(t.kvm_vm); +} + +static void test_errors_key_storage_prot_override(void) +{ + struct test_default t = test_default_init(guest_copy_key_fetch_prot); + + HOST_SYNC(t.vcpu, STAGE_INITED); + t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE; + t.run->kvm_dirty_regs = KVM_SYNC_CRS; + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); + + /* vm, mismatching keys, storage protection override not applicable to vm */ + CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2)); + CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2)); + + kvm_vm_free(t.kvm_vm); +} + const uint64_t last_page_addr = -PAGE_SIZE;
static void guest_copy_key_fetch_prot_override(void) @@ -481,6 +521,58 @@ static void test_copy_key_fetch_prot_override(void) kvm_vm_free(t.kvm_vm); }
+static void test_errors_key_fetch_prot_override_not_enabled(void) +{ + struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); + vm_vaddr_t guest_0_page, guest_last_page; + + guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); + guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); + if (guest_0_page != 0 || guest_last_page != last_page_addr) { + print_skip("did not allocate guest pages at required positions"); + goto out; + } + HOST_SYNC(t.vcpu, STAGE_INITED); + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); + + /* vcpu, mismatching keys on fetch, fetch protection override not enabled */ + CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2)); + +out: + kvm_vm_free(t.kvm_vm); +} + +static void test_errors_key_fetch_prot_override_enabled(void) +{ + struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); + vm_vaddr_t guest_0_page, guest_last_page; + + guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); + guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); + if (guest_0_page != 0 || guest_last_page != last_page_addr) { + print_skip("did not allocate guest pages at required positions"); + goto out; + } + HOST_SYNC(t.vcpu, STAGE_INITED); + t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE; + t.run->kvm_dirty_regs = KVM_SYNC_CRS; + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); + + /* + * vcpu, mismatching keys on fetch, + * fetch protection override does not apply because memory range acceeded + */ + CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2)); + CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1, + GADDR_V(guest_last_page), KEY(2)); + /* vm, fetch protected override does not apply */ + CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2)); + CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2)); + +out: + kvm_vm_free(t.kvm_vm); +} + static void guest_idle(void) { GUEST_SYNC(STAGE_INITED); /* for consistency's sake */ @@ -488,39 +580,54 @@ static void guest_idle(void) GUEST_SYNC(STAGE_IDLED); }
-static void test_errors(void) +static void _test_errors_common(struct test_vcpu vcpu, enum mop_target target, int size) { - struct test_default t = test_default_init(guest_idle); int rv;
- HOST_SYNC(t.vcpu, STAGE_INITED); - /* Bad size: */ - rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, -1, GADDR_V(mem1)); + rv = ERR_MOP(vcpu, target, WRITE, mem1, -1, GADDR_V(mem1)); TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");
/* Zero size: */ - rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, 0, GADDR_V(mem1)); + rv = ERR_MOP(vcpu, target, WRITE, mem1, 0, GADDR_V(mem1)); TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM), "ioctl allows 0 as size");
/* Bad flags: */ - rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), SET_FLAGS(-1)); + rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1)); TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");
- /* Bad operation: */ - rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1)); - TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations"); - /* Bad guest address: */ - rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR((void *)~0xfffUL), CHECK_ONLY); + rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY); TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");
/* Bad host address: */ - rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, 0, t.size, GADDR_V(mem1)); + rv = ERR_MOP(vcpu, target, WRITE, 0, size, GADDR_V(mem1)); TEST_ASSERT(rv == -1 && errno == EFAULT, "ioctl does not report bad host memory address");
+ /* Bad key: */ + rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17)); + TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key"); +} + +static void test_errors(void) +{ + struct test_default t = test_default_init(guest_idle); + int rv; + + HOST_SYNC(t.vcpu, STAGE_INITED); + + _test_errors_common(t.vcpu, LOGICAL, t.size); + _test_errors_common(t.vm, ABSOLUTE, t.size); + + /* Bad operation: */ + rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1)); + TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations"); + /* virtual addresses are not translated when passing INVALID */ + rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0)); + TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations"); + /* Bad access register: */ t.run->psw_mask &= ~(3UL << (63 - 17)); t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */ @@ -560,6 +667,10 @@ int main(int argc, char *argv[]) test_copy_key_storage_prot_override(); test_copy_key_fetch_prot(); test_copy_key_fetch_prot_override(); + test_errors_key(); + test_errors_key_storage_prot_override(); + test_errors_key_fetch_prot_override_not_enabled(); + test_errors_key_fetch_prot_override_enabled(); } else { print_skip("storage key memop extension not supported"); }
Shuah,
I am planning to queue this for the kvm tree. Any concerns?
applied (with minor whitespace fixes). Will queue for kvms390/next.
On 3/9/22 10:05, Christian Borntraeger wrote:
applied (with minor whitespace fixes). Will queue for kvms390/next.
Not sure if it's a good idea, but I broke style rules here intentionally:
+	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
+			GADDR_V(guest_last_page), KEY(2));
in order to emphasize that the arguments are ultimately arguments to MOP. I did the same in the DEFAULT(_WRITE)?_READ macros, which checkpatch might not warn about because of the line break escapes.