This patch set addresses the KVM issue described in Geoff's kexec patch set [1]; see "Changes" below. Initially, I used a reboot notifier hook to shut down the cpu cores, but in this version kvm cpu hotplug is implemented instead, following Mark's comment.
I confirmed that it works with kexec under the following scenario:
- boot 1st kernel
- run a guest OS
- (stop a guest OS)
- reboot 2nd kernel by kexec
- run a guest OS
test target: MediaTek MT8173-EVB
version: kernel v4.0-rc1 + Geoff's kexec v8 + Arn's patch [2]
I have not, however, tested more complicated scenarios involving cpu hotplug.
On arm, Frediano [3] is no longer working on this issue, as he left his company; patch #4 is just a stub for arm.
Changes from v3:
* modified to use the kvm cpu hotplug framework directly instead of a reboot notifier hook

Changes from v2:
* modified the kvm_virt_to_trampoline() macro to fix a page-alignment issue [4]

Changes from v1:
* modified the kvm_cpu_reset() implementation:
  - define a macro to translate a va to an address in the trampoline
  - use __hyp_default_vectors instead of kvm_get_hyp_stub_vectors()
  - shuffle the arguments in __cpu_reset_hyp_mode()
  - optimize TLB flush operations
* changed patch #2's name
* added patch #5 to add stub code for arm
[1] http://lists.infradead.org/pipermail/kexec/2015-April/335533.html
[2] http://lists.infradead.org/pipermail/linux-arm-kernel/2015-March/334002.html
[3] http://lists.infradead.org/pipermail/linux-arm-kernel/2015-February/322231.h...
[4] http://lists.infradead.org/pipermail/linux-arm-kernel/2015-March/334910.html
AKASHI Takahiro (4):
  arm64: kvm: add a cpu tear-down function
  arm64: kvm: add kvm cpu hotplug
  arm64: kvm: remove !KEXEC dependency
  arm: kvm: add stub implementation for kvm_cpu_reset()
 arch/arm/include/asm/kvm_asm.h    |  1 +
 arch/arm/include/asm/kvm_host.h   | 13 ++++++++++-
 arch/arm/include/asm/kvm_mmu.h    |  5 +++++
 arch/arm/kvm/arm.c                | 44 ++++++++++++++++++++++++++++---------
 arch/arm/kvm/init.S               |  6 +++++
 arch/arm/kvm/mmu.c                |  5 +++++
 arch/arm64/include/asm/kvm_asm.h  |  1 +
 arch/arm64/include/asm/kvm_host.h | 12 +++++++++-
 arch/arm64/include/asm/kvm_mmu.h  |  7 ++++++
 arch/arm64/include/asm/virt.h     | 11 ++++++++++
 arch/arm64/kvm/Kconfig            |  1 -
 arch/arm64/kvm/hyp-init.S         | 32 +++++++++++++++++++++++++++
 arch/arm64/kvm/hyp.S              | 16 +++++++++++---
 13 files changed, 138 insertions(+), 16 deletions(-)
The cpu must be put back into its initial state, at least in the following cases, in order to shut down the system and/or re-initialize the cpu later on:
1) kexec/kdump
2) cpu hotplug (offline)
3) removing kvm as a module
To address those cases in later patches, this patch adds a tear-down function, kvm_cpu_reset(), which disables the D-cache and MMU and restores the vector table to the initial stub at EL2.
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
---
 arch/arm/kvm/arm.c                | 15 +++++++++++++++
 arch/arm/kvm/mmu.c                |  5 +++++
 arch/arm64/include/asm/kvm_asm.h  |  1 +
 arch/arm64/include/asm/kvm_host.h | 11 +++++++++++
 arch/arm64/include/asm/kvm_mmu.h  |  7 +++++++
 arch/arm64/include/asm/virt.h     | 11 +++++++++++
 arch/arm64/kvm/hyp-init.S         | 32 ++++++++++++++++++++++++++++++++
 arch/arm64/kvm/hyp.S              | 16 +++++++++++++---
 8 files changed, 95 insertions(+), 3 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 07e7eb1..251ab9e 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -897,6 +897,21 @@ static void cpu_init_hyp_mode(void *dummy)
 	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
 
+static void kvm_cpu_reset(void *dummy)
+{
+	phys_addr_t boot_pgd_ptr;
+	phys_addr_t phys_idmap_start;
+
+	if (__hyp_get_vectors() == hyp_default_vectors)
+		return;
+
+	boot_pgd_ptr = kvm_mmu_get_boot_httbr();
+	phys_idmap_start = kvm_get_idmap_start();
+	__cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start,
+			     hyp_default_vectors,
+			     kvm_virt_to_trampoline(__kvm_hyp_reset));
+}
+
 static int hyp_init_cpu_notify(struct notifier_block *self,
 			       unsigned long action, void *cpu)
 {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3e6859b..3631a37 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1490,6 +1490,11 @@ phys_addr_t kvm_get_idmap_vector(void)
 	return hyp_idmap_vector;
 }
 
+phys_addr_t kvm_get_idmap_start(void)
+{
+	return hyp_idmap_start;
+}
+
 int kvm_mmu_init(void)
 {
 	int err;
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 4f7310f..f1c16e2 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -118,6 +118,7 @@ struct kvm_vcpu;
 
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
+extern char __kvm_hyp_reset[];
 
 extern char __kvm_hyp_vector[];
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8ac3c70..6a8da9c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -199,6 +199,8 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
 u64 kvm_call_hyp(void *hypfn, ...);
+void kvm_call_reset(phys_addr_t boot_pgd_ptr, phys_addr_t phys_idmap_start,
+		    unsigned long stub_vector_ptr, unsigned long reset_func);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
@@ -223,6 +225,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
 		     hyp_stack_ptr, vector_ptr);
 }
 
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+					phys_addr_t phys_idmap_start,
+					unsigned long stub_vector_ptr,
+					unsigned long reset_func)
+{
+	kvm_call_reset(boot_pgd_ptr, phys_idmap_start, stub_vector_ptr,
+		       reset_func);
+}
+
 struct vgic_sr_vectors {
 	void	*save_vgic;
 	void	*restore_vgic;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6458b53..facfd6d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -96,6 +96,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_mmu_get_boot_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
@@ -305,5 +306,11 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
+extern char __hyp_idmap_text_start[];
+#define kvm_virt_to_trampoline(x)	\
+	(TRAMPOLINE_VA			\
+	 + ((unsigned long)(x)		\
+	 - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK)))
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 3070096..7fcd087 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -61,6 +61,17 @@
 #define BOOT_CPU_MODE_EL1	(0xe11)
 #define BOOT_CPU_MODE_EL2	(0xe12)
 
+/*
+ * HVC_RESET - Reset cpu in EL2 to initial state.
+ *
+ * @x0: entry address in trampoline code in va
+ * @x1: identical mapping page table in pa
+ * @x2: start address of identical mapping in pa
+ * @x3: initial stub vector in pa
+ */
+
+#define HVC_RESET 5
+
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index c319116..2614cfc 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -115,6 +115,38 @@ target: /* We're now in the trampoline code, switch page tables */
 	eret
ENDPROC(__kvm_hyp_init)
 
+	/*
+	 * x0: HYP boot pgd
+	 * x1: HYP phys_idmap_start
+	 * x2: HYP stub vectors
+	 */
+ENTRY(__kvm_hyp_reset)
+	/* We're in trampoline code in VA, switch back to boot page tables */
+	msr	ttbr0_el2, x0
+	isb
+
+	/* Invalidate the old TLBs */
+	tlbi	alle2
+	dsb	sy
+
+	/* Branch into PA space */
+	adr	x0, 1f
+	bfi	x1, x0, #0, #PAGE_SHIFT
+	br	x1
+
+	/* We're now in idmap, disable MMU */
+1:	mrs	x0, sctlr_el2
+	ldr	x1, =SCTLR_EL2_FLAGS
+	bic	x0, x0, x1		// Clear SCTL_M and etc
+	msr	sctlr_el2, x0
+	isb
+
+	/* Install stub vectors */
+	msr	vbar_el2, x2
+
+	eret
+ENDPROC(__kvm_hyp_reset)
+
 	.ltorg
 
 	.popsection
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index fd085ec..7c3bdee 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -1136,6 +1136,11 @@ ENTRY(kvm_call_hyp)
 	ret
ENDPROC(kvm_call_hyp)
 
+ENTRY(kvm_call_reset)
+	hvc	#HVC_RESET
+	ret
+ENDPROC(kvm_call_reset)
+
 .macro invalid_vector	label, target
 	.align	2
 \label:
@@ -1179,10 +1184,14 @@ el1_sync:			// Guest trapped into EL2
 	cmp	x18, #HVC_GET_VECTORS
 	b.ne	1f
 	mrs	x0, vbar_el2
-	b	2f
+	b	3f
 
-1:	/* Default to HVC_CALL_HYP. */
+	/* jump into trampoline code */
+1:	cmp	x18, #HVC_RESET
+	b.ne	2f
+	br	x3			// no return
 
+2:	/* Default to HVC_CALL_HYP. */
 	push	lr, xzr
 
 /*
@@ -1196,7 +1205,8 @@ el1_sync:			// Guest trapped into EL2
 	blr	lr
 
 	pop	lr, xzr
-2:	eret
+
+3:	eret
 
el1_trap:
 /*
Hi Takahiro,
On 08/05/15 02:18, AKASHI Takahiro wrote:
Cpu must be put back into its initial state, at least, in the following cases in order to shutdown the system and/or re-initialize cpus later on:
- kexec/kdump
- cpu hotplug (offline)
- removing kvm as a module
To address those issues in later patches, this patch adds a tear-down function, kvm_cpu_reset(), that disables D-cache & MMU and restore a vector table to the initial stub at EL2.
Signed-off-by: AKASHI Takahiro takahiro.akashi@linaro.org
arch/arm/kvm/arm.c | 15 +++++++++++++++ arch/arm/kvm/mmu.c | 5 +++++ arch/arm64/include/asm/kvm_asm.h | 1 + arch/arm64/include/asm/kvm_host.h | 11 +++++++++++ arch/arm64/include/asm/kvm_mmu.h | 7 +++++++ arch/arm64/include/asm/virt.h | 11 +++++++++++ arch/arm64/kvm/hyp-init.S | 32 ++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp.S | 16 +++++++++++++--- 8 files changed, 95 insertions(+), 3 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 07e7eb1..251ab9e 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -897,6 +897,21 @@ static void cpu_init_hyp_mode(void *dummy) __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); } +static void kvm_cpu_reset(void *dummy)
It looks like you can entirely lose the "dummy" parameter. The only reason some functions have this is that they are used from an IPI call.
+{
+	phys_addr_t boot_pgd_ptr;
+	phys_addr_t phys_idmap_start;
+
+	if (__hyp_get_vectors() == hyp_default_vectors)
+		return;
+
+	boot_pgd_ptr = kvm_mmu_get_boot_httbr();
+	phys_idmap_start = kvm_get_idmap_start();
+	__cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start,
+			     hyp_default_vectors,
+			     kvm_virt_to_trampoline(__kvm_hyp_reset));
+}
static int hyp_init_cpu_notify(struct notifier_block *self, unsigned long action, void *cpu) { diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 3e6859b..3631a37 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1490,6 +1490,11 @@ phys_addr_t kvm_get_idmap_vector(void) return hyp_idmap_vector; } +phys_addr_t kvm_get_idmap_start(void) +{
+	return hyp_idmap_start;
+}
int kvm_mmu_init(void) { int err; diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 4f7310f..f1c16e2 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -118,6 +118,7 @@ struct kvm_vcpu; extern char __kvm_hyp_init[]; extern char __kvm_hyp_init_end[]; +extern char __kvm_hyp_reset[]; extern char __kvm_hyp_vector[]; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8ac3c70..6a8da9c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -199,6 +199,8 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void); struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); u64 kvm_call_hyp(void *hypfn, ...); +void kvm_call_reset(phys_addr_t boot_pgd_ptr, phys_addr_t phys_idmap_start,
+		    unsigned long stub_vector_ptr, unsigned long reset_func);
void force_vm_exit(const cpumask_t *mask); void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); @@ -223,6 +225,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, hyp_stack_ptr, vector_ptr); } +static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+					phys_addr_t phys_idmap_start,
+					unsigned long stub_vector_ptr,
+					unsigned long reset_func)
+{
+	kvm_call_reset(boot_pgd_ptr, phys_idmap_start, stub_vector_ptr,
+		       reset_func);
+}
struct vgic_sr_vectors { void *save_vgic; void *restore_vgic; diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6458b53..facfd6d 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -96,6 +96,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); phys_addr_t kvm_mmu_get_httbr(void); phys_addr_t kvm_mmu_get_boot_httbr(void); phys_addr_t kvm_get_idmap_vector(void); +phys_addr_t kvm_get_idmap_start(void); int kvm_mmu_init(void); void kvm_clear_hyp_idmap(void); @@ -305,5 +306,11 @@ static inline void __kvm_flush_dcache_pud(pud_t pud) void kvm_set_way_flush(struct kvm_vcpu *vcpu); void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); +extern char __hyp_idmap_text_start[];
If you're defining it here, then it's worth considering removing the similar declaration from mmu.c.
+#define kvm_virt_to_trampoline(x) \
+	(TRAMPOLINE_VA			\
+	 + ((unsigned long)(x)		\
+	 - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK)))
#endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 3070096..7fcd087 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -61,6 +61,17 @@ #define BOOT_CPU_MODE_EL1 (0xe11) #define BOOT_CPU_MODE_EL2 (0xe12) +/*
+ * HVC_RESET - Reset cpu in EL2 to initial state.
+ *
+ * @x0: entry address in trampoline code in va
+ * @x1: identical mapping page table in pa
+ * @x2: start address of identical mapping in pa
+ * @x3: initial stub vector in pa
+ */
+
+#define HVC_RESET 5
#ifndef __ASSEMBLY__ /* diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index c319116..2614cfc 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -115,6 +115,38 @@ target: /* We're now in the trampoline code, switch page tables */ eret ENDPROC(__kvm_hyp_init)
+	/*
+	 * x0: HYP boot pgd
+	 * x1: HYP phys_idmap_start
+	 * x2: HYP stub vectors
+	 */
+ENTRY(__kvm_hyp_reset)
+	/* We're in trampoline code in VA, switch back to boot page tables */
+	msr	ttbr0_el2, x0
+	isb
+
+	/* Invalidate the old TLBs */
+	tlbi	alle2
+	dsb	sy
+
+	/* Branch into PA space */
+	adr	x0, 1f
+	bfi	x1, x0, #0, #PAGE_SHIFT
+	br	x1
+
+	/* We're now in idmap, disable MMU */
+1:	mrs	x0, sctlr_el2
+	ldr	x1, =SCTLR_EL2_FLAGS
+	bic	x0, x0, x1		// Clear SCTL_M and etc
+	msr	sctlr_el2, x0
+	isb
+
+	/* Install stub vectors */
+	msr	vbar_el2, x2
Instead of using a parameter, can't you just do
	adr	x2, __hyp_stub_vectors
	msr	vbar_el2, x2
? I can't imagine a case where we don't want this behaviour, and this would slightly simplify the calling convention.
+	eret
+ENDPROC(__kvm_hyp_reset)
 	.ltorg
.popsection diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index fd085ec..7c3bdee 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -1136,6 +1136,11 @@ ENTRY(kvm_call_hyp) ret ENDPROC(kvm_call_hyp) +ENTRY(kvm_call_reset)
+	hvc	#HVC_RESET
+	ret
+ENDPROC(kvm_call_reset)
.macro invalid_vector label, target .align 2 \label: @@ -1179,10 +1184,14 @@ el1_sync: // Guest trapped into EL2 cmp x18, #HVC_GET_VECTORS b.ne 1f mrs x0, vbar_el2
-	b	2f
+	b	3f
How about renaming this to something like "do_eret" once and for all so that we stop this dance each time someone adds a new entry point?
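For illustration only, a minimal sketch of the dispatch shape being suggested; the "do_eret" label name and the elided middle are assumptions, not code from this series:

	cmp	x18, #HVC_GET_VECTORS
	b.ne	1f
	mrs	x0, vbar_el2
	b	do_eret

1:	cmp	x18, #HVC_RESET
	b.ne	2f
	br	x3			// no return

2:	/* Default to HVC_CALL_HYP. */
	...				// push lr, call the function, pop lr

do_eret:
	eret

Any new HVC entry point then branches to "do_eret" instead of renumbering the local labels each time.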
-1: /* Default to HVC_CALL_HYP. */
+	/* jump into trampoline code */
+1:	cmp	x18, #HVC_RESET
+	b.ne	2f
+	br	x3			// no return
Same here. If we're always jumping to the trampoline code, why do we have to pass it as a parameter?
+2:	/* Default to HVC_CALL_HYP. */
 	push	lr, xzr
 
 /*
@@ -1196,7 +1205,8 @@ el1_sync:			// Guest trapped into EL2
 	blr	lr
 
 	pop	lr, xzr
-2:	eret
+
+3:	eret
 
el1_trap:
 /*
Thanks,
M.
Marc,
Thank you for your reviews:
On 05/26/2015 06:26 PM, Marc Zyngier wrote:
Hi Takahiro,
On 08/05/15 02:18, AKASHI Takahiro wrote:
Cpu must be put back into its initial state, at least, in the following cases in order to shutdown the system and/or re-initialize cpus later on:
- kexec/kdump
- cpu hotplug (offline)
- removing kvm as a module
To address those issues in later patches, this patch adds a tear-down function, kvm_cpu_reset(), that disables D-cache & MMU and restore a vector table to the initial stub at EL2.
Signed-off-by: AKASHI Takahiro takahiro.akashi@linaro.org
arch/arm/kvm/arm.c | 15 +++++++++++++++ arch/arm/kvm/mmu.c | 5 +++++ arch/arm64/include/asm/kvm_asm.h | 1 + arch/arm64/include/asm/kvm_host.h | 11 +++++++++++ arch/arm64/include/asm/kvm_mmu.h | 7 +++++++ arch/arm64/include/asm/virt.h | 11 +++++++++++ arch/arm64/kvm/hyp-init.S | 32 ++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp.S | 16 +++++++++++++--- 8 files changed, 95 insertions(+), 3 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 07e7eb1..251ab9e 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -897,6 +897,21 @@ static void cpu_init_hyp_mode(void *dummy) __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); }
+static void kvm_cpu_reset(void *dummy)
It looks like you can entirely lose the "dummy" parameter. The only reason some functions have this is that they are used from an IPI call.
OK. I will remove all of them.
+{
+	phys_addr_t boot_pgd_ptr;
+	phys_addr_t phys_idmap_start;
+
+	if (__hyp_get_vectors() == hyp_default_vectors)
+		return;
+
+	boot_pgd_ptr = kvm_mmu_get_boot_httbr();
+	phys_idmap_start = kvm_get_idmap_start();
+	__cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start,
+			     hyp_default_vectors,
+			     kvm_virt_to_trampoline(__kvm_hyp_reset));
+}
static int hyp_init_cpu_notify(struct notifier_block *self, unsigned long action, void *cpu) {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 3e6859b..3631a37 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1490,6 +1490,11 @@ phys_addr_t kvm_get_idmap_vector(void) return hyp_idmap_vector; }
+phys_addr_t kvm_get_idmap_start(void) +{
+	return hyp_idmap_start;
+}
int kvm_mmu_init(void) { int err;
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 4f7310f..f1c16e2 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -118,6 +118,7 @@ struct kvm_vcpu;
extern char __kvm_hyp_init[]; extern char __kvm_hyp_init_end[]; +extern char __kvm_hyp_reset[];
extern char __kvm_hyp_vector[];
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8ac3c70..6a8da9c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -199,6 +199,8 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void); struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
u64 kvm_call_hyp(void *hypfn, ...); +void kvm_call_reset(phys_addr_t boot_pgd_ptr, phys_addr_t phys_idmap_start,
+		    unsigned long stub_vector_ptr, unsigned long reset_func);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
@@ -223,6 +225,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, hyp_stack_ptr, vector_ptr); }
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+					phys_addr_t phys_idmap_start,
+					unsigned long stub_vector_ptr,
+					unsigned long reset_func)
+{
+	kvm_call_reset(boot_pgd_ptr, phys_idmap_start, stub_vector_ptr,
+		       reset_func);
+}
struct vgic_sr_vectors { void *save_vgic; void *restore_vgic;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6458b53..facfd6d 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -96,6 +96,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); phys_addr_t kvm_mmu_get_httbr(void); phys_addr_t kvm_mmu_get_boot_httbr(void); phys_addr_t kvm_get_idmap_vector(void); +phys_addr_t kvm_get_idmap_start(void); int kvm_mmu_init(void); void kvm_clear_hyp_idmap(void);
@@ -305,5 +306,11 @@ static inline void __kvm_flush_dcache_pud(pud_t pud) void kvm_set_way_flush(struct kvm_vcpu *vcpu); void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
+extern char __hyp_idmap_text_start[];
If you're defining it here, then it's worth considering removing the similar declaration from mmu.c.
Yes. I will remove them from mmu.c.
+#define kvm_virt_to_trampoline(x) \
+	(TRAMPOLINE_VA			\
+	 + ((unsigned long)(x)		\
+	 - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK)))
#endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 3070096..7fcd087 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -61,6 +61,17 @@ #define BOOT_CPU_MODE_EL1 (0xe11) #define BOOT_CPU_MODE_EL2 (0xe12)
+/*
+ * HVC_RESET - Reset cpu in EL2 to initial state.
+ *
+ * @x0: entry address in trampoline code in va
+ * @x1: identical mapping page table in pa
+ * @x2: start address of identical mapping in pa
+ * @x3: initial stub vector in pa
+ */
+
+#define HVC_RESET 5
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index c319116..2614cfc 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -115,6 +115,38 @@ target: /* We're now in the trampoline code, switch page tables */ eret ENDPROC(__kvm_hyp_init)
+	/*
+	 * x0: HYP boot pgd
+	 * x1: HYP phys_idmap_start
+	 * x2: HYP stub vectors
+	 */
+ENTRY(__kvm_hyp_reset)
+	/* We're in trampoline code in VA, switch back to boot page tables */
+	msr	ttbr0_el2, x0
+	isb
+
+	/* Invalidate the old TLBs */
+	tlbi	alle2
+	dsb	sy
+
+	/* Branch into PA space */
+	adr	x0, 1f
+	bfi	x1, x0, #0, #PAGE_SHIFT
+	br	x1
+
+	/* We're now in idmap, disable MMU */
+1:	mrs	x0, sctlr_el2
+	ldr	x1, =SCTLR_EL2_FLAGS
+	bic	x0, x0, x1		// Clear SCTL_M and etc
+	msr	sctlr_el2, x0
+	isb
+
+	/* Install stub vectors */
+	msr	vbar_el2, x2
Instead of using a parameter, can't you just do
	adr	x2, __hyp_stub_vectors
	msr	vbar_el2, x2
? I can't imagine a case where we don't want this behaviour, and this would slightly simplify the calling convention.
Yeah, but it will also force kvm to be statically linked into the kernel (not module-capable). Is that OK?
+	eret
+ENDPROC(__kvm_hyp_reset)
.ltorg
.popsection
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index fd085ec..7c3bdee 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -1136,6 +1136,11 @@ ENTRY(kvm_call_hyp) ret ENDPROC(kvm_call_hyp)
+ENTRY(kvm_call_reset)
+	hvc	#HVC_RESET
+	ret
+ENDPROC(kvm_call_reset)
.macro invalid_vector label, target .align 2 \label:
@@ -1179,10 +1184,14 @@ el1_sync: // Guest trapped into EL2 cmp x18, #HVC_GET_VECTORS b.ne 1f mrs x0, vbar_el2
-	b	2f
+	b	3f
How about renaming this to something like "do_eret" once and for all so that we stop this dance each time someone adds a new entry point?
Good point. I will take it.
-1: /* Default to HVC_CALL_HYP. */
+	/* jump into trampoline code */
+1:	cmp	x18, #HVC_RESET
+	b.ne	2f
+	br	x3			// no return
Same here. If we're always jumping to the trampoline code, why do we have to pass it as a parameter?
We need the physical address of the reset function here. It seemed easier to call kvm_virt_to_trampoline() in C code. But, yes, we can also do it in asm.
Thanks, -Takahiro AKASHI
+2:	/* Default to HVC_CALL_HYP. */
 	push	lr, xzr
 /*
@@ -1196,7 +1205,8 @@ el1_sync:			// Guest trapped into EL2
 	blr	lr
 
 	pop	lr, xzr
-2:	eret
+
+3:	eret
el1_trap: /*
Thanks,
M.
Hi Takahiro,
On 27/05/15 06:21, AKASHI Takahiro wrote:
Marc,
Thank you for your reviews:
On 05/26/2015 06:26 PM, Marc Zyngier wrote:
Hi Takahiro,
On 08/05/15 02:18, AKASHI Takahiro wrote:
Cpu must be put back into its initial state, at least, in the following cases in order to shutdown the system and/or re-initialize cpus later on:
- kexec/kdump
- cpu hotplug (offline)
- removing kvm as a module
To address those issues in later patches, this patch adds a tear-down function, kvm_cpu_reset(), that disables D-cache & MMU and restore a vector table to the initial stub at EL2.
Signed-off-by: AKASHI Takahiro takahiro.akashi@linaro.org
arch/arm/kvm/arm.c | 15 +++++++++++++++ arch/arm/kvm/mmu.c | 5 +++++ arch/arm64/include/asm/kvm_asm.h | 1 + arch/arm64/include/asm/kvm_host.h | 11 +++++++++++ arch/arm64/include/asm/kvm_mmu.h | 7 +++++++ arch/arm64/include/asm/virt.h | 11 +++++++++++ arch/arm64/kvm/hyp-init.S | 32 ++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp.S | 16 +++++++++++++--- 8 files changed, 95 insertions(+), 3 deletions(-)
[...]
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index c319116..2614cfc 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -115,6 +115,38 @@ target: /* We're now in the trampoline code, switch page tables */ eret ENDPROC(__kvm_hyp_init)
+	/*
+	 * x0: HYP boot pgd
+	 * x1: HYP phys_idmap_start
+	 * x2: HYP stub vectors
+	 */
+ENTRY(__kvm_hyp_reset)
+	/* We're in trampoline code in VA, switch back to boot page tables */
+	msr	ttbr0_el2, x0
+	isb
+
+	/* Invalidate the old TLBs */
+	tlbi	alle2
+	dsb	sy
+
+	/* Branch into PA space */
+	adr	x0, 1f
+	bfi	x1, x0, #0, #PAGE_SHIFT
+	br	x1
+
+	/* We're now in idmap, disable MMU */
+1:	mrs	x0, sctlr_el2
+	ldr	x1, =SCTLR_EL2_FLAGS
+	bic	x0, x0, x1		// Clear SCTL_M and etc
+	msr	sctlr_el2, x0
+	isb
+
+	/* Install stub vectors */
+	msr	vbar_el2, x2
Instead of using a parameter, can't you just do
	adr	x2, __hyp_stub_vectors
	msr	vbar_el2, x2
? I can't imagine a case where we don't want this behaviour, and this would slightly simplify the calling convention.
Yeah, but it will also force kvm to be statically linked into the kernel (not module-capable). Is that OK?
I don't think this enforces anything to be statically linked. You don't have to refer to the symbol from C code for module linking to work. You may have to use a combination of ADRP + ADD :lo12: to make sure you can reach the symbol (ADR is limited to +/- 1MB).
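As a hedged sketch, the ADRP + ADD form being described would look roughly like this (assuming __hyp_stub_vectors is resolvable at link/relocation time):

	adrp	x2, __hyp_stub_vectors			// page of the symbol, +/- 4GB reach
	add	x2, x2, :lo12:__hyp_stub_vectors	// add the low 12 bits of the offset
	msr	vbar_el2, x2				// install the stub vectors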
[...]
-1: /* Default to HVC_CALL_HYP. */
+	/* jump into trampoline code */
+1:	cmp	x18, #HVC_RESET
+	b.ne	2f
+	br	x3			// no return
Same here. If we're always jumping to the trampoline code, why do we have to pass it as a parameter?
We need the physical address of the reset function here. It seemed easier to call kvm_virt_to_trampoline() in C code. But, yes, we can also do it in asm.
I think that would be a lot clearer as it will be self-contained.
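A rough, non-authoritative sketch of what that asm computation could look like; the register choices, the TRAMPOLINE_VA literal, and the reachability of the symbols from this code are all assumptions, not the eventual patch:

	/* x3 = trampoline VA of __kvm_hyp_reset (illustrative only) */
	adr	x3, __kvm_hyp_reset		// runtime address of the reset code
	adr	x4, __hyp_idmap_text_start	// start of the idmap text
	bic	x4, x4, #(PAGE_SIZE - 1)	// align down, as kvm_virt_to_trampoline() does
	sub	x3, x3, x4			// offset within the trampoline page
	ldr	x4, =TRAMPOLINE_VA		// base of the trampoline mapping
	add	x3, x3, x4
	br	x3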
Thanks,
M.
This patch allows cpu cores to be brought up and down by adding kvm_arch_hardware_enable/disable(). This way, especially in the kexec case, cores are reset to their initial state so that kexec can gracefully shut down the system and reboot a new kernel from EL2.
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
---
 arch/arm/include/asm/kvm_host.h   |  1 -
 arch/arm/kvm/arm.c                | 29 +++++++++++++++++++----------
 arch/arm64/include/asm/kvm_host.h |  1 -
 3 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 41008cd..ca97764 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -237,7 +237,6 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
-static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 251ab9e..e989925 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -87,11 +87,6 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
 	return &kvm_arm_running_vcpu;
 }
 
-int kvm_arch_hardware_enable(void)
-{
-	return 0;
-}
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -885,6 +880,9 @@ static void cpu_init_hyp_mode(void *dummy)
 	unsigned long stack_page;
 	unsigned long vector_ptr;
 
+	if (__hyp_get_vectors() != hyp_default_vectors)
+		return;
+
 	/* Switch from the HYP stub to our own HYP init vector */
 	__hyp_set_vectors(kvm_get_idmap_vector());
 
@@ -921,6 +919,10 @@ static int hyp_init_cpu_notify(struct notifier_block *self,
 		if (__hyp_get_vectors() == hyp_default_vectors)
 			cpu_init_hyp_mode(NULL);
 		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		kvm_cpu_reset(NULL);
+		break;
 	}
 
 	return NOTIFY_OK;
@@ -936,6 +938,7 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 				    void *v)
 {
 	if (cmd == CPU_PM_EXIT &&
+	    kvm_arm_get_running_vcpu() &&
 	    __hyp_get_vectors() == hyp_default_vectors) {
 		cpu_init_hyp_mode(NULL);
 		return NOTIFY_OK;
@@ -1039,11 +1042,6 @@ static int init_hyp_mode(void)
 	}
 
 	/*
-	 * Execute the init code on each CPU.
-	 */
-	on_each_cpu(cpu_init_hyp_mode, NULL, 1);
-
-	/*
 	 * Init HYP view of VGIC
 	 */
 	err = kvm_vgic_hyp_init();
@@ -1144,6 +1142,17 @@ out_err:
 	return err;
 }
 
+int kvm_arch_hardware_enable(void)
+{
+	cpu_init_hyp_mode(NULL);
+	return 0;
+}
+
+void kvm_arch_hardware_disable(void)
+{
+	kvm_cpu_reset(NULL);
+}
+
 /* NOP: Compiling as a module not supported */
 void kvm_arch_exit(void)
 {
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 6a8da9c..831e6a4 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -262,7 +262,6 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
 	}
 }
 
-static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
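For context, the generic KVM core (virt/kvm/kvm_main.c around v4.0) drives these arch hooks from its own cpu notifier. A simplified sketch from memory follows; treat the exact helper names as assumptions:

/* Simplified sketch of the generic KVM cpu notifier, not the actual code */
static int kvm_cpu_hotplug(struct notifier_block *notifier,
			   unsigned long val, void *v)
{
	switch (val) {
	case CPU_DYING:
		hardware_disable();	/* ends up in kvm_arch_hardware_disable() */
		break;
	case CPU_STARTING:
		hardware_enable();	/* ends up in kvm_arch_hardware_enable() */
		break;
	}
	return NOTIFY_OK;
}

This is why implementing kvm_arch_hardware_enable/disable() is enough to get per-cpu init and tear-down on hotplug.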
On 08/05/15 02:18, AKASHI Takahiro wrote:
This patch allows cpu cores to be brought up and down by adding kvm_arch_hardware_enable/disable(). This way, especially in the kexec case, cores are reset to their initial state so that kexec can gracefully shut down the system and reboot a new kernel from EL2.
Signed-off-by: AKASHI Takahiro takahiro.akashi@linaro.org
arch/arm/include/asm/kvm_host.h | 1 - arch/arm/kvm/arm.c | 29 +++++++++++++++++++---------- arch/arm64/include/asm/kvm_host.h | 1 - 3 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 41008cd..ca97764 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -237,7 +237,6 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); -static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 251ab9e..e989925 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -87,11 +87,6 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) return &kvm_arm_running_vcpu; } -int kvm_arch_hardware_enable(void) -{
- return 0;
-}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; @@ -885,6 +880,9 @@ static void cpu_init_hyp_mode(void *dummy)
Since you removed the IPI, why do you keep the dummy argument?
unsigned long stack_page; unsigned long vector_ptr;
+	if (__hyp_get_vectors() != hyp_default_vectors)
+		return;
+
 	/* Switch from the HYP stub to our own HYP init vector */
 	__hyp_set_vectors(kvm_get_idmap_vector());
@@ -921,6 +919,10 @@ static int hyp_init_cpu_notify(struct notifier_block *self, if (__hyp_get_vectors() == hyp_default_vectors) cpu_init_hyp_mode(NULL); break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		kvm_cpu_reset(NULL);
+		break;
 	}
 	return NOTIFY_OK;
@@ -936,6 +938,7 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 				    void *v)
 {
 	if (cmd == CPU_PM_EXIT &&
+	    kvm_arm_get_running_vcpu() &&
 	    __hyp_get_vectors() == hyp_default_vectors) {
 		cpu_init_hyp_mode(NULL);
 		return NOTIFY_OK;
@@ -1039,11 +1042,6 @@ static int init_hyp_mode(void)
 	}
 
 	/*
-	 * Execute the init code on each CPU.
-	 */
-	on_each_cpu(cpu_init_hyp_mode, NULL, 1);
-
-	/*
 	 * Init HYP view of VGIC
 	 */
 	err = kvm_vgic_hyp_init();
@@ -1144,6 +1142,17 @@ out_err:
 	return err;
 }
 
+int kvm_arch_hardware_enable(void)
+{
+	cpu_init_hyp_mode(NULL);
+	return 0;
+}
+
+void kvm_arch_hardware_disable(void)
+{
+	kvm_cpu_reset(NULL);
+}
Bahhh... Just rename cpu_init_hyp_mode to kvm_arch_hardware_enable, and kvm_cpu_reset to kvm_arch_hardware_disable. I don't see the point of keeping this indirection.
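A minimal sketch of the rename being asked for; the bodies are elided, so this is the shape, not the final patch:

int kvm_arch_hardware_enable(void)
{
	if (__hyp_get_vectors() != hyp_default_vectors)
		return 0;

	/* ... former body of cpu_init_hyp_mode() ... */
	return 0;
}

void kvm_arch_hardware_disable(void)
{
	/* ... former body of kvm_cpu_reset() ... */
}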
/* NOP: Compiling as a module not supported */ void kvm_arch_exit(void) { diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 6a8da9c..831e6a4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -262,7 +262,6 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic) } } -static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
On 05/26/2015 06:35 PM, Marc Zyngier wrote:
On 08/05/15 02:18, AKASHI Takahiro wrote:
This patch allows cpu cores to be up and down by adding kvm_arch_hardware_enable/isable(). This way, especially in kexec case, cores are reset to initial states and kexec can gracefully shutdown the system and reboot a new kernel from EL2.
Signed-off-by: AKASHI Takahiro takahiro.akashi@linaro.org
arch/arm/include/asm/kvm_host.h | 1 - arch/arm/kvm/arm.c | 29 +++++++++++++++++++---------- arch/arm64/include/asm/kvm_host.h | 1 - 3 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 41008cd..ca97764 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -237,7 +237,6 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 251ab9e..e989925 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -87,11 +87,6 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) return &kvm_arm_running_vcpu; }
-int kvm_arch_hardware_enable(void) -{
- return 0;
-}
- int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -885,6 +880,9 @@ static void cpu_init_hyp_mode(void *dummy)
Since you removed the IPI, why do you keep the dummy argument?
Yeah, will remove it.
unsigned long stack_page; unsigned long vector_ptr;
+	if (__hyp_get_vectors() != hyp_default_vectors)
+		return;
+
 	/* Switch from the HYP stub to our own HYP init vector */
 	__hyp_set_vectors(kvm_get_idmap_vector());
@@ -921,6 +919,10 @@ static int hyp_init_cpu_notify(struct notifier_block *self, if (__hyp_get_vectors() == hyp_default_vectors) cpu_init_hyp_mode(NULL); break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		kvm_cpu_reset(NULL);
+		break;
 	}
return NOTIFY_OK;
@@ -936,6 +938,7 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 				    void *v)
 {
 	if (cmd == CPU_PM_EXIT &&
+	    kvm_arm_get_running_vcpu() &&
 	    __hyp_get_vectors() == hyp_default_vectors) {
 		cpu_init_hyp_mode(NULL);
 		return NOTIFY_OK;
@@ -1039,11 +1042,6 @@ static int init_hyp_mode(void)
 	}
 
 	/*
-	 * Execute the init code on each CPU.
-	 */
-	on_each_cpu(cpu_init_hyp_mode, NULL, 1);
-
-	/*
 	 * Init HYP view of VGIC
 	 */
 	err = kvm_vgic_hyp_init();
@@ -1144,6 +1142,17 @@ out_err: return err; }
+int kvm_arch_hardware_enable(void)
+{
+	cpu_init_hyp_mode(NULL);
+	return 0;
+}
+
+void kvm_arch_hardware_disable(void)
+{
+	kvm_cpu_reset(NULL);
+}
Bahhh... Just rename cpu_init_hyp_mode to kvm_arch_hardware_enable, and kvm_cpu_reset to kvm_arch_hardware_disable. I don't see the point of keeping this indirection.
Historical reason ... Anyway, if we don't have to add anything else here, yes, I will rename them.
-Takahiro AKASHI
/* NOP: Compiling as a module not supported */ void kvm_arch_exit(void) { diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 6a8da9c..831e6a4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -262,7 +262,6 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic) } }
-static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
By introducing kvm cpu hotplug, this dependency is not needed any more.
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
---
 arch/arm64/kvm/Kconfig | 1 -
 1 file changed, 1 deletion(-)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 30ae7a7..f5590c8 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -18,7 +18,6 @@ if VIRTUALIZATION
 
 config KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
-	depends on !KEXEC
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
Just to avoid compile errors on arm.
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
---
 arch/arm/include/asm/kvm_asm.h  |  1 +
 arch/arm/include/asm/kvm_host.h | 12 ++++++++++++
 arch/arm/include/asm/kvm_mmu.h  |  5 +++++
 arch/arm/kvm/init.S             |  6 ++++++
 4 files changed, 24 insertions(+)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 25410b2..462babf 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -85,6 +85,7 @@ struct kvm_vcpu;
 
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
+extern char __kvm_hyp_reset[];
 
 extern char __kvm_hyp_exit[];
 extern char __kvm_hyp_exit_end[];
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index ca97764..6d38134 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -220,6 +220,18 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
 	kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
 }
 
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+					phys_addr_t phys_idmap_start,
+					unsigned long stub_vector_ptr,
+					unsigned long reset_func)
+{
+	/*
+	 * TODO
+	 * kvm_call_reset(boot_pgd_ptr, phys_idmap_start, stub_vector_ptr,
+	 *		  reset_func);
+	 */
+}
+
 static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 {
 	return 0;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 37ca2a4..d85bcd1 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -66,6 +66,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_mmu_get_boot_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
@@ -270,6 +271,10 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
+extern char __hyp_idmap_text_start[];
+#define kvm_virt_to_trampoline(x)	\
+	(TRAMPOLINE_VA + ((x) - __hyp_idmap_text_start))
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 3988e72..9178c9a 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -156,4 +156,10 @@ target:	@ We're now in the trampoline code, switch page tables
 	.globl __kvm_hyp_init_end
__kvm_hyp_init_end:
 
+	.globl __kvm_hyp_reset
+__kvm_hyp_reset:
+	@ TODO
+
+	eret
+
 	.popsection
On 08/05/15 02:18, AKASHI Takahiro wrote:
Just to avoid compile errors on arm.
Signed-off-by: AKASHI Takahiro takahiro.akashi@linaro.org
arch/arm/include/asm/kvm_asm.h | 1 + arch/arm/include/asm/kvm_host.h | 12 ++++++++++++ arch/arm/include/asm/kvm_mmu.h | 5 +++++ arch/arm/kvm/init.S | 6 ++++++ 4 files changed, 24 insertions(+)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 25410b2..462babf 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -85,6 +85,7 @@ struct kvm_vcpu; extern char __kvm_hyp_init[]; extern char __kvm_hyp_init_end[]; +extern char __kvm_hyp_reset[]; extern char __kvm_hyp_exit[]; extern char __kvm_hyp_exit_end[]; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index ca97764..6d38134 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -220,6 +220,18 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr); } +static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+					phys_addr_t phys_idmap_start,
+					unsigned long stub_vector_ptr,
+					unsigned long reset_func)
+{
+	/*
+	 * TODO
+	 * kvm_call_reset(boot_pgd_ptr, phys_idmap_start, stub_vector_ptr,
+	 *		  reset_func);
+	 */
+}
static inline int kvm_arch_dev_ioctl_check_extension(long ext) { return 0; diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 37ca2a4..d85bcd1 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -66,6 +66,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); phys_addr_t kvm_mmu_get_httbr(void); phys_addr_t kvm_mmu_get_boot_httbr(void); phys_addr_t kvm_get_idmap_vector(void); +phys_addr_t kvm_get_idmap_start(void); int kvm_mmu_init(void); void kvm_clear_hyp_idmap(void); @@ -270,6 +271,10 @@ static inline void __kvm_flush_dcache_pud(pud_t pud) void kvm_set_way_flush(struct kvm_vcpu *vcpu); void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); +extern char __hyp_idmap_text_start[]; +#define kvm_virt_to_trampoline(x) \
+	(TRAMPOLINE_VA + ((x) - __hyp_idmap_text_start))
#endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S index 3988e72..9178c9a 100644 --- a/arch/arm/kvm/init.S +++ b/arch/arm/kvm/init.S @@ -156,4 +156,10 @@ target: @ We're now in the trampoline code, switch page tables .globl __kvm_hyp_init_end __kvm_hyp_init_end:
+	.globl __kvm_hyp_reset
+__kvm_hyp_reset:
+	@ TODO
+
+	eret
+
 	.popsection
So before this patch, KVM is broken on ARM. This is not acceptable. Please merge it with patch #1.
Thanks,
M.
On 05/26/2015 06:36 PM, Marc Zyngier wrote:
On 08/05/15 02:18, AKASHI Takahiro wrote:
Just to avoid compile errors on arm.
Signed-off-by: AKASHI Takahiro takahiro.akashi@linaro.org
arch/arm/include/asm/kvm_asm.h | 1 + arch/arm/include/asm/kvm_host.h | 12 ++++++++++++ arch/arm/include/asm/kvm_mmu.h | 5 +++++ arch/arm/kvm/init.S | 6 ++++++ 4 files changed, 24 insertions(+)
(snip)
So before this patch, KVM is broken on ARM. This is not acceptable. Please merge it with patch #1.
OK.
-Takahiro AKASHI
Thanks,
M.