Hi Sagi,
On 10/28/25 2:20 PM, Sagi Shahar wrote:
diff --git a/tools/testing/selftests/kvm/include/x86/tdx/tdx_util.h b/tools/testing/selftests/kvm/include/x86/tdx/tdx_util.h
index dafdc7e46abe..a2509959c7ce 100644
--- a/tools/testing/selftests/kvm/include/x86/tdx/tdx_util.h
+++ b/tools/testing/selftests/kvm/include/x86/tdx/tdx_util.h
@@ -11,6 +11,60 @@ static inline bool is_tdx_vm(struct kvm_vm *vm)
 	return vm->type == KVM_X86_TDX_VM;
 }
 
+/*
+ * TDX ioctls
+ */
+#define __vm_tdx_vm_ioctl(vm, cmd, metadata, arg)			\
+({									\
+	int r;								\
+									\
+	union {								\
+		struct kvm_tdx_cmd c;					\
+		unsigned long raw;					\
+	} tdx_cmd = { .c = {						\
+		.id = (cmd),						\
+		.flags = (uint32_t)(metadata),				\
+		.data = (uint64_t)(arg),				\
+	} };								\
+									\
+	r = __vm_ioctl(vm, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd.raw);	\
+									\
+	r ?: tdx_cmd.c.hw_error;					\
+})
+
+#define vm_tdx_vm_ioctl(vm, cmd, flags, arg)				\
+({									\
+	int ret = __vm_tdx_vm_ioctl(vm, cmd, flags, arg);		\
+									\
+	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);		\
+})
+
+#define __vm_tdx_vcpu_ioctl(vcpu, cmd, metadata, arg)			\
+({									\
+	int r;								\
+									\
+	union {								\
+		struct kvm_tdx_cmd c;					\
+		unsigned long raw;					\
+	} tdx_cmd = { .c = {						\
+		.id = (cmd),						\
+		.flags = (uint32_t)(metadata),				\
+		.data = (uint64_t)(arg),				\
+	} };								\
+									\
+	r = __vcpu_ioctl(vcpu, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd.raw);	\
+									\
+	r ?: tdx_cmd.c.hw_error;					\
+})
+
+#define vm_tdx_vcpu_ioctl(vcpu, cmd, flags, arg)			\
+({									\
+	int ret = __vm_tdx_vcpu_ioctl(vcpu, cmd, flags, arg);		\
+									\
+	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
+})
+
+void vm_tdx_init_vm(struct kvm_vm *vm, uint64_t attributes);
 void vm_tdx_setup_boot_code_region(struct kvm_vm *vm);
 void vm_tdx_setup_boot_parameters_region(struct kvm_vm *vm, uint32_t nr_runnable_vcpus);
 void vm_tdx_load_common_boot_parameters(struct kvm_vm *vm);
For completeness, to help with the discussion below, other patches add:

void vm_tdx_load_vcpu_boot_parameters(struct kvm_vm *vm, struct kvm_vcpu *vcpu);
void vm_tdx_set_vcpu_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
void vm_tdx_finalize(struct kvm_vm *vm);
When considering the TDX functions in tdx_util.h visible above, the namespace of TDX-related functions is not clear to me. I believe an intuitive namespace makes the code easier to understand and build upon.
Almost all tdx_util.h functions appear to have the "vm_tdx" prefix even when they operate at vCPU scope, for example:

	void vm_tdx_set_vcpu_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

and vm_tdx_vcpu_ioctl().
Also, when operating on a VM an extra "vm" may be added, creating a function like vm_tdx_vm_ioctl() with "vm" appearing twice in its name.
Compare with similar functions for normal VMs:
	vm_ioctl()   -> vm_tdx_vm_ioctl()
	vcpu_ioctl() -> vm_tdx_vcpu_ioctl()
Could it perhaps instead be:
	vm_ioctl()   -> tdx_vm_ioctl()
	vcpu_ioctl() -> tdx_vcpu_ioctl()
The functions could still have "vm"/"vcpu" in their name to designate the scope, for example:

	void tdx_vm_setup_boot_code_region(struct kvm_vm *vm);
	void tdx_vm_setup_boot_parameters_region(struct kvm_vm *vm, uint32_t nr_runnable_vcpus);
	void tdx_vm_load_common_boot_parameters(struct kvm_vm *vm);
	void tdx_vcpu_load_boot_parameters(struct kvm_vm *vm, struct kvm_vcpu *vcpu);
	void tdx_vcpu_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
	void tdx_vm_finalize(struct kvm_vm *vm);
With a namespace like the above it is clear that (a) it is a TDX call and (b) what the scope of the call is. This helps readers understand the code and makes it obvious how to name new functions when adding new features.
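For example, a quick sketch of what the asserting wrappers could look like after such a rename (bodies unchanged from this patch, assuming __vm_tdx_vm_ioctl()/__vm_tdx_vcpu_ioctl() are renamed to __tdx_vm_ioctl()/__tdx_vcpu_ioctl() in the same way):

	#define tdx_vm_ioctl(vm, cmd, flags, arg)			\
	({								\
		int ret = __tdx_vm_ioctl(vm, cmd, flags, arg);		\
									\
		__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);	\
	})

	#define tdx_vcpu_ioctl(vcpu, cmd, flags, arg)			\
	({								\
		int ret = __tdx_vcpu_ioctl(vcpu, cmd, flags, arg);	\
									\
		__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm); \
	})

Callers would then read tdx_vm_ioctl(vm, KVM_TDX_INIT_VM, 0, init_vm), mirroring the existing vm_ioctl()/vcpu_ioctl() pattern.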
...
+/*
+ * Filter CPUID based on TDX supported capabilities
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   cpuid_data - CPUID fileds to filter
fileds -> fields?
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * For each CPUID leaf, filter out non-supported bits based on the capabilities reported
+ * by the TDX module
+ */
+static void vm_tdx_filter_cpuid(struct kvm_vm *vm,
+				struct kvm_cpuid2 *cpuid_data)
+{
+	struct kvm_tdx_capabilities *tdx_cap;
+	struct kvm_cpuid_entry2 *config;
+	struct kvm_cpuid_entry2 *e;
+	int i;
+
+	tdx_cap = tdx_read_capabilities(vm);
+
+	i = 0;
+	while (i < cpuid_data->nent) {
+		e = cpuid_data->entries + i;
+		config = tdx_find_cpuid_config(tdx_cap, e->function, e->index);
+		if (!config) {
+			int left = cpuid_data->nent - i - 1;
+
+			if (left > 0)
+				memmove(cpuid_data->entries + i,
+					cpuid_data->entries + i + 1,
+					sizeof(*cpuid_data->entries) * left);
+			cpuid_data->nent--;
+			continue;
+		}
+
+		e->eax &= config->eax;
+		e->ebx &= config->ebx;
+		e->ecx &= config->ecx;
+		e->edx &= config->edx;
+		i++;
+	}
+
+	free(tdx_cap);
+}
+void vm_tdx_init_vm(struct kvm_vm *vm, uint64_t attributes)
+{
+	struct kvm_tdx_init_vm *init_vm;
+	const struct kvm_cpuid2 *tmp;
+	struct kvm_cpuid2 *cpuid;
+
+	tmp = kvm_get_supported_cpuid();
+
+	cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
Could this allocation be limited to tmp->nent?
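If I am reading vm_tdx_filter_cpuid() correctly it only ever drops entries and clears bits, so cpuid->nent can never grow beyond tmp->nent. Something like this (untested) thus looks sufficient:

	cpuid = allocate_kvm_cpuid2(tmp->nent);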
+	memcpy(cpuid, tmp, kvm_cpuid2_size(tmp->nent));
+
+	vm_tdx_filter_cpuid(vm, cpuid);
+
+	init_vm = calloc(1, sizeof(*init_vm) +
+			 sizeof(init_vm->cpuid.entries[0]) * cpuid->nent);
+	TEST_ASSERT(init_vm, "init_vm allocation failed");
+	memcpy(&init_vm->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
+	free(cpuid);
+
+	init_vm->attributes = attributes;
+
+	vm_tdx_vm_ioctl(vm, KVM_TDX_INIT_VM, 0, init_vm);
+
+	free(init_vm);
+}
Reinette