From: Marc Zyngier <maz@kernel.org>
[ Upstream commit 9eb563cdabe1d583c262042d5d44cc256f644543 ]
In subsequent patches we'll allow an FIQ handler to be registered, and FIQ exceptions will need to be triaged very similarly to IRQ exceptions. So that we can reuse the existing logic, this patch factors the IRQ triage logic out into macros that can be reused for FIQ.
The macros are named to follow the elX_foo_handler scheme used by the C exception handlers. For consistency with other top-level exception handlers, the kernel_entry/kernel_exit logic is not moved into the macros. As FIQ will use a different C handler, this handler name is provided as an argument to the macros.
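As an illustration of the intended reuse (not part of this patch: the FIQ root handler and vector entry only arrive in subsequent patches), a later FIQ vector entry could invoke the same macro with its own C handler, roughly along these lines:

	SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
		kernel_entry 1
		el1_interrupt_handler handle_arch_fiq	// same triage, different C handler
		kernel_exit 1
	SYM_CODE_END(el1_fiq)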
There should be no functional change as a result of this patch.
Signed-off-by: Marc Zyngier <maz@kernel.org>
[Mark: rework macros, commit message, rebase before DAIF rework]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Hector Martin <marcan@marcan.st>
Cc: James Morse <james.morse@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210315115629.57191-5-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/arm64/kernel/entry.S | 80 +++++++++++++++++++++------------------
 1 file changed, 43 insertions(+), 37 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2da82c139e1c..a9644da545c7 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -464,8 +464,8 @@ tsk	.req	x28		// current thread_info
 /*
  * Interrupt handling.
  */
-	.macro	irq_handler
-	ldr_l	x1, handle_arch_irq
+	.macro	irq_handler, handler:req
+	ldr_l	x1, \handler
 	mov	x0, sp
 	irq_stack_entry
 	blr	x1
@@ -504,6 +504,45 @@ alternative_endif
 #endif
 	.endm
 
+	.macro el1_interrupt_handler, handler:req
+	gic_prio_irq_setup pmr=x20, tmp=x1
+	enable_da_f
+
+	mov	x0, sp
+	bl	enter_el1_irq_or_nmi
+
+	irq_handler	\handler
+
+#ifdef CONFIG_PREEMPTION
+	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+	/*
+	 * DA_F were cleared at start of handling. If anything is set in DAIF,
+	 * we come back from an NMI, so skip preemption
+	 */
+	mrs	x0, daif
+	orr	x24, x24, x0
+alternative_else_nop_endif
+	cbnz	x24, 1f				// preempt count != 0 || NMI return path
+	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
+1:
+#endif
+
+	mov	x0, sp
+	bl	exit_el1_irq_or_nmi
+	.endm
+
+	.macro el0_interrupt_handler, handler:req
+	gic_prio_irq_setup pmr=x20, tmp=x0
+	user_exit_irqoff
+	enable_da_f
+
+	tbz	x22, #55, 1f
+	bl	do_el0_irq_bp_hardening
+1:
+	irq_handler	\handler
+	.endm
+
 	.text
 
 /*
@@ -633,32 +672,7 @@ SYM_CODE_END(el1_sync)
 	.align	6
 SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
 	kernel_entry 1
-	gic_prio_irq_setup pmr=x20, tmp=x1
-	enable_da_f
-
-	mov	x0, sp
-	bl	enter_el1_irq_or_nmi
-
-	irq_handler
-
-#ifdef CONFIG_PREEMPTION
-	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-	/*
-	 * DA_F were cleared at start of handling. If anything is set in DAIF,
-	 * we come back from an NMI, so skip preemption
-	 */
-	mrs	x0, daif
-	orr	x24, x24, x0
-alternative_else_nop_endif
-	cbnz	x24, 1f				// preempt count != 0 || NMI return path
-	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
-1:
-#endif
-
-	mov	x0, sp
-	bl	exit_el1_irq_or_nmi
-
+	el1_interrupt_handler handle_arch_irq
 	kernel_exit 1
 SYM_CODE_END(el1_irq)
 
@@ -698,15 +712,7 @@ SYM_CODE_END(el0_error_compat)
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
 	kernel_entry 0
 el0_irq_naked:
-	gic_prio_irq_setup pmr=x20, tmp=x0
-	user_exit_irqoff
-	enable_da_f
-
-	tbz	x22, #55, 1f
-	bl	do_el0_irq_bp_hardening
-1:
-	irq_handler
-
+	el0_interrupt_handler handle_arch_irq
 	b	ret_to_user
 SYM_CODE_END(el0_irq)