Verify that a guest with MTE has access to the MTE registers. Since MTE
is enabled as a VM-wide capability, we need to add support for doing
that in the process.
Signed-off-by: Mark Brown <broonie@kernel.org>
---
 tools/testing/selftests/kvm/aarch64/get-reg-list.c | 33 ++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index d287dd2cac0a..63d6a9046702 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -42,6 +42,7 @@ struct reg_sublist {
 	long capability;
 	int feature;
 	bool finalize;
+	bool enable_capability;
 	__u64 *regs;
 	__u64 regs_n;
 	__u64 *rejects_set;
@@ -404,6 +405,18 @@ static void check_supported(struct vcpu_config *c)
 	}
 }

+static void enable_capabilities(struct kvm_vm *vm, struct vcpu_config *c)
+{
+	struct reg_sublist *s;
+
+	for_each_sublist(c, s) {
+		if (!s->enable_capability)
+			continue;
+
+		vm_enable_cap(vm, s->capability, 1);
+	}
+}
+
 static bool print_list;
 static bool print_filtered;
 static bool fixup_core_regs;
@@ -420,6 +433,7 @@ static void run_test(struct vcpu_config *c)
 	check_supported(c);

 	vm = vm_create_barebones();
+	enable_capabilities(vm, c);
 	prepare_vcpu_init(c, &init);
 	vcpu = __vm_vcpu_add(vm, 0);
 	aarch64_vcpu_setup(vcpu, &init);
@@ -1049,6 +1063,13 @@ static __u64 pauth_generic_regs[] = {
 	ARM64_SYS_REG(3, 0, 2, 3, 1),	/* APGAKEYHI_EL1 */
 };

+static __u64 mte_regs[] = {
+	ARM64_SYS_REG(3, 0, 1, 0, 5),	/* RGSR_EL1 */
+	ARM64_SYS_REG(3, 0, 1, 0, 6),	/* GCR_EL1 */
+	ARM64_SYS_REG(3, 0, 5, 6, 0),	/* TFSR_EL1 */
+	ARM64_SYS_REG(3, 0, 5, 6, 1),	/* TFSRE0_EL1 */
+};
+
 #define BASE_SUBLIST \
 	{ "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
 #define VREGS_SUBLIST \
@@ -1075,6 +1096,9 @@ static __u64 pauth_generic_regs[] = {
 	.regs = pauth_generic_regs, \
 	.regs_n = ARRAY_SIZE(pauth_generic_regs), \
 	}
+#define MTE_SUBLIST \
+	{ "mte", .capability = KVM_CAP_ARM_MTE, .enable_capability = true, \
+	  .regs = mte_regs, .regs_n = ARRAY_SIZE(mte_regs), }

 static struct vcpu_config vregs_config = {
 	.sublists = {
@@ -1123,6 +1147,14 @@ static struct vcpu_config pauth_pmu_config = {
 	{0},
 	},
 };
+static struct vcpu_config mte_config = {
+	.sublists = {
+	BASE_SUBLIST,
+	VREGS_SUBLIST,
+	MTE_SUBLIST,
+	{0},
+	},
+};

 static struct vcpu_config *vcpu_configs[] = {
 	&vregs_config,
@@ -1131,5 +1163,6 @@ static struct vcpu_config *vcpu_configs[] = {
 	&sve_pmu_config,
 	&pauth_config,
 	&pauth_pmu_config,
+	&mte_config,
 };
 static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);

---
base-commit: fe15c26ee26efa11741a7b632e9f23b01aca4cc6
change-id: 20230308-kvm-arm64-test-mte-regs-1b19bab1a49a
Best regards,
On Wed, Mar 08 2023, Mark Brown <broonie@kernel.org> wrote:

> Verify that a guest with MTE has access to the MTE registers. Since
> MTE is enabled as a VM-wide capability, we need to add support for
> doing that in the process.
>
> Signed-off-by: Mark Brown <broonie@kernel.org>
> ---
>  tools/testing/selftests/kvm/aarch64/get-reg-list.c | 33 ++++++++++++++++++++++
>  1 file changed, 33 insertions(+)

Reviewed-by: Cornelia Huck <cohuck@redhat.com>
On Wed, 08 Mar 2023 17:12:26 +0000,
Mark Brown <broonie@kernel.org> wrote:

> Verify that a guest with MTE has access to the MTE registers. Since
> MTE is enabled as a VM-wide capability, we need to add support for
> doing that in the process.
>
> Signed-off-by: Mark Brown <broonie@kernel.org>

[...]

> +static struct vcpu_config mte_config = {
> +	.sublists = {
> +	BASE_SUBLIST,
> +	VREGS_SUBLIST,
> +	MTE_SUBLIST,
> +	{0},
> +	},
> +};
>
>  static struct vcpu_config *vcpu_configs[] = {
>  	&vregs_config,
> @@ -1131,5 +1163,6 @@ static struct vcpu_config *vcpu_configs[] = {
>  	&sve_pmu_config,
>  	&pauth_config,
>  	&pauth_pmu_config,
> +	&mte_config,
>  };
>  static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
Is there any reason why we sidestep the combinations of MTE with PAuth and PMU? I know this leads to an exponential set growth, but this is the very purpose of this test, and we found bugs related to this in the past.
A good first step would be to be able to build these combinations dynamically, and only then add new sublists to the mix.
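To illustrate what I have in mind, here is a completely untested
sketch (build_configs(), MAX_SUBLISTS and the optional_sublists[]
table are names invented for the example, and it assumes the existing
sublist macros can be used standalone in this context):

/*
 * Untested sketch: generate one vcpu_config per subset of the
 * optional sublists, always on top of the base registers. PAuth
 * would need its two sublists (address + generic) treated as a
 * single entry.
 */
#define MAX_SUBLISTS	8

static struct reg_sublist optional_sublists[] = {
	SVE_SUBLIST,
	PMU_SUBLIST,
	MTE_SUBLIST,
};

static void build_configs(struct vcpu_config **configs)
{
	int n = ARRAY_SIZE(optional_sublists);

	for (int mask = 0; mask < (1 << n); mask++) {
		struct vcpu_config *c;
		int idx = 0;

		/* calloc() leaves the {0} terminator in place */
		c = calloc(1, sizeof(*c) +
			   MAX_SUBLISTS * sizeof(struct reg_sublist));
		c->sublists[idx++] = (struct reg_sublist)BASE_SUBLIST;

		for (int i = 0; i < n; i++)
			if (mask & (1 << i))
				c->sublists[idx++] = optional_sublists[i];

		configs[mask] = c;
	}
}

run_test() could then iterate over the 1 << n generated configs; the
real thing would need to special-case VREGS and SVE being mutually
exclusive.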
Thanks,
M.
On Sun, Mar 12, 2023 at 10:29:11AM +0000, Marc Zyngier wrote:
> Mark Brown <broonie@kernel.org> wrote:

[...]

> >  static struct vcpu_config *vcpu_configs[] = {
> >  	&vregs_config,
> > @@ -1131,5 +1163,6 @@ static struct vcpu_config *vcpu_configs[] = {
> >  	&sve_pmu_config,
> >  	&pauth_config,
> >  	&pauth_pmu_config,
> > +	&mte_config,
> >  };
> >  static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);

> Is there any reason why we sidestep the combinations of MTE with PAuth
> and PMU? I know this leads to an exponential set growth, but this is
> the very purpose of this test, and we found bugs related to this in
> the past.
The test is already not bothering with the combinations of SVE and
pointer auth, so it appeared that the intent was only to cover specific
combinations. From what's there it looks more like there's something
with the PMU interacting specially with things (it's all X and X+PMU)
that needs coverage. I couldn't see anything between it and MTE, though
I nearly added an MTE+PMU combination just for the sake of it. It's one
of those areas where it's hard to determine whether there's an intent
behind the implementation choices or they're just whatever someone
happened to write and not particularly important.
> A good first step would be to be able to build these combinations
> dynamically, and only then add new sublists to the mix.
That would certainly be a good idea. If we were heading in that
direction I'd also expect negative tests checking that, for example,
pointer authentication registers don't appear when that's not enabled.
I'm not sure it's worth blocking all new coverage on that, though;
there is still value in having a bit of basic coverage even if not all
the combinations are covered yet.
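For illustration, such a negative check could look roughly like the
below (untested; check_regs_absent() is an invented name, and it
assumes the kvm_reg_list has already been fetched for the vCPU). It
would be run for every sublist that a given config doesn't enable:

/*
 * Untested sketch: assert that none of the registers belonging to a
 * sublist whose capability/feature was not enabled for this config
 * show up in the KVM_GET_REG_LIST output.
 */
static void check_regs_absent(struct kvm_reg_list *list, __u64 *regs,
			      __u64 regs_n)
{
	for (__u64 i = 0; i < regs_n; i++)
		for (__u64 j = 0; j < list->n; j++)
			TEST_ASSERT(list->reg[j] != regs[i],
				    "reg 0x%llx present without its capability",
				    (unsigned long long)regs[i]);
}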
The test is also going to want extension to more gracefully handle
registers that appear based on architecture extensions with no control
over their exposure to the guest, potentially with a tie-in to handling
based on configurable ID registers when that goes in. The way this test
is written was also part of why I was wondering if PIE should be
configurable.
On Sun, 12 Mar 2023 14:35:13 +0000,
Mark Brown <broonie@kernel.org> wrote:
> On Sun, Mar 12, 2023 at 10:29:11AM +0000, Marc Zyngier wrote:

[...]

> > Is there any reason why we sidestep the combinations of MTE with
> > PAuth and PMU? I know this leads to an exponential set growth, but
> > this is the very purpose of this test, and we found bugs related to
> > this in the past.

> The test is already not bothering with the combinations of SVE and
> pointer auth, so it appeared that the intent was only to cover
> specific combinations. From what's there it looks more like there's
> something with the PMU interacting specially with things (it's all X
> and X+PMU) that needs coverage. I couldn't see anything between it
> and MTE, though I nearly added an MTE+PMU combination just for the
> sake of it.
It *is* desired. We've had cases of flags being reset at the wrong time and leading to issues that would be detected by this test. The PMU stuff is indeed one example, but similar things could happen between SVE+MTE, for example.
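To make it concrete, the kind of combination currently missing would
look like this (hypothetical, just following the existing config
style; whether base+SVE+MTE is the right mix is part of the question):

/* Hypothetical example following the existing style, not in the patch */
static struct vcpu_config sve_mte_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	MTE_SUBLIST,
	{0},
	},
};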
> > A good first step would be to be able to build these combinations
> > dynamically, and only then add new sublists to the mix.
> That would certainly be a good idea. If we were heading in that
> direction I'd also expect negative tests checking that, for example,
> pointer authentication registers don't appear when that's not
> enabled. I'm not sure it's worth blocking all new coverage on that,
> though; there is still value in having a bit of basic coverage even
> if not all the combinations are covered yet.
Then where is the incentive to get it fixed? People will just keep
piling stuff on top, and the coverage will get increasingly worse.
We have to do it at some point, and now is as good a time as any.
M.
On Sun, Mar 12, 2023 at 03:37:39PM +0000, Marc Zyngier wrote:
> On Sun, 12 Mar 2023 14:35:13 +0000, Mark Brown <broonie@kernel.org> wrote:

> > combination just for the sake of it. It's one of those areas where
> > it's hard to determine whether there's an intent behind the
> > implementation choices or they're just whatever someone happened to
> > write and not particularly important.

> It *is* desired. We've had cases of flags being reset at the wrong
> time and leading to issues that would be detected by this test. The
> PMU stuff is indeed one example, but similar things could happen
> between SVE+MTE, for example.
I take it you mean that the current situation where it's only covering X and X+PMU cases is not desired and wasn't intentional?
> > > A good first step would be to be able to build these combinations
> > > dynamically, and only then add new sublists to the mix.

> > That would certainly be a good idea. If we were heading in that
> > direction I'd also expect negative tests checking that, for
> > example, pointer authentication registers don't appear when that's
> > not enabled. I'm not sure it's worth blocking all new coverage on
> > that, though; there is still value in having a bit of basic
> > coverage even if not all the combinations are covered yet.

> Then where is the incentive to get it fixed? People will just keep
> piling stuff on top, and the coverage will get increasingly worse.
It's always possible someone will be interested and keep plugging away at improving things over the longer term even without having other work blocked. Sometimes someone will come along explicitly trying to improve whatever the thing is, or someone other than the person submitting a given patch might see the idea being mentioned and be inspired to implement it (that process is how we ended up with the ALSA pcm-test program).
The flip side of this approach is that it encourages people to do the
minimum possible in order to reduce the chances that out-of-scope
cleanup work gets added on to whatever they were originally trying to
do, and to avoid doing smaller cleanups if they notice anything else
that could be improved (especially if those cleanups might result in
something that'd tie up something more urgent). It's not a big deal if
it's a small bit of extra work, but the more work it is relative to the
original thing the more of an impact it can have.
It's a balancing act - sometimes things do need a push to get done,
but if that's the only approach taken then it can become a bit of a
self-fulfilling prophecy.
> We have to do it at some point, and now is as good a time as any.
Well, I was just doing a drive-by patch here because I noticed that MTE
wasn't covered; it's not like I'm even looking at MTE. Realistically
I'm not sure how long it'll be before I have the bandwidth to rework
this. There is some other work where I'd get blocked on it, but that's
not going to be this release cycle.