Implement and enable context tracking for arm64 (a prerequisite for NO_HZ_FULL support). This patch set builds on earlier work by Kevin Hilman and is based on 3.15-rc2.
Larry Bassel (2):
  arm64: adjust el0_sync so that a function can be called
  arm64: enable context tracking
 arch/arm64/Kconfig                   |  1 +
 arch/arm64/include/asm/thread_info.h |  1 +
 arch/arm64/kernel/entry.S            | 36 +++++++++++++++++++++++++++++++-----
 3 files changed, 33 insertions(+), 5 deletions(-)
To implement the context tracker properly on arm64, a function call needs to be made after debugging and interrupts are turned on, but before the lr is changed to point to ret_from_exception(). If the function call is made after the lr is changed, the function will not return to the correct place.
For similar reasons, defer the setting of x0 so that it doesn't need to be saved around the function call (save far_el1 in x26 temporarily instead).
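To illustrate the constraint, here is a sketch (not part of the patch): on AArch64, bl writes its own return address into the lr, so any bl issued after lr has been staged would silently redirect the handler's return. The call shown is the context-tracking call a later patch in this series adds.

	// Broken ordering (hypothetical): lr is staged, then clobbered.
	adr	lr, ret_from_exception		// stage the handler's return path
	bl	context_tracking_user_exit	// bl overwrites lr with its own return address
	b	do_mem_abort			// handler now "returns" into the tracker

	// Ordering this patch makes possible: any bl happens first.
	bl	context_tracking_user_exit	// lr is free to be clobbered here
	adr	lr, ret_from_exception		// stage the return path last
	b	do_mem_abort			// handler returns to ret_from_exception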
Signed-off-by: Larry Bassel <larry.bassel@linaro.org>
Reviewed-by: Kevin Hilman <khilman@linaro.org>
---
 arch/arm64/kernel/entry.S | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 39ac630..d920d7f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -349,11 +349,11 @@ el0_sync:
 	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
 	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
 	b.eq	el0_svc
-	adr	lr, ret_from_exception
 	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
 	b.eq	el0_da
 	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
 	b.eq	el0_ia
+	adr	lr, ret_from_exception
 	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
 	b.eq	el0_fpsimd_acc
 	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
@@ -378,11 +378,11 @@ el0_sync_compat:
 	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
 	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
 	b.eq	el0_svc_compat
-	adr	lr, ret_from_exception
 	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
 	b.eq	el0_da
 	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
 	b.eq	el0_ia
+	adr	lr, ret_from_exception
 	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
 	b.eq	el0_fpsimd_acc
 	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
@@ -421,28 +421,32 @@ el0_da:
 	/*
 	 * Data abort handling
 	 */
-	mrs	x0, far_el1
-	bic	x0, x0, #(0xff << 56)
+	mrs	x26, far_el1
 	disable_step x1
 	isb
 	enable_dbg
 	// enable interrupts before calling the main handler
 	enable_irq
+	mov	x0, x26
+	bic	x0, x0, #(0xff << 56)
 	mov	x1, x25
 	mov	x2, sp
+	adr	lr, ret_from_exception
 	b	do_mem_abort
 el0_ia:
 	/*
 	 * Instruction abort handling
 	 */
-	mrs	x0, far_el1
+	mrs	x26, far_el1
 	disable_step x1
 	isb
 	enable_dbg
 	// enable interrupts before calling the main handler
 	enable_irq
+	mov	x0, x26
 	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
 	mov	x2, sp
+	adr	lr, ret_from_exception
 	b	do_mem_abort
 el0_fpsimd_acc:
 	/*
Make calls to ct_user_enter when the kernel is exited and ct_user_exit when the kernel is entered (in el0_da, el0_ia, el0_svc, el0_irq).
These macros expand to function calls, which will only work properly if el0_sync and related code have been rearranged (in a previous patch of this series).
In order to avoid saving registers, the slow syscall path is forced (as x86 does).
The calls to ct_user_exit are made after hw debugging has been enabled (enable_dbg).
The call to ct_user_enter is made at the beginning of the kernel_exit macro.
This patch is based on earlier work by Kevin Hilman.
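For reference, a condensed (not literal) sketch of the resulting round trip on an EL0 data abort, combining the effects of both patches in this series:

	el0_da:				// EL0 data abort entry
		// kernel_entry; stash far_el1 in x26; enable_dbg
		enable_irq		// IRQs on before the main handler
		ct_user_exit		// mark the user -> kernel transition
		// set up x0-x2 from x25/x26
		adr	lr, ret_from_exception
		b	do_mem_abort	// C handler; returns to ret_from_exception,
					// which ends in kernel_exit, whose first
					// EL0 action is ct_user_enter (kernel -> user)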
Signed-off-by: Kevin Hilman <khilman@linaro.org>
Signed-off-by: Larry Bassel <larry.bassel@linaro.org>
---
 arch/arm64/Kconfig                   |  1 +
 arch/arm64/include/asm/thread_info.h |  1 +
 arch/arm64/kernel/entry.S            | 22 ++++++++++++++++++++++
 3 files changed, 24 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e6e4d37..152d92b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -55,6 +55,7 @@ config ARM64
 	select RTC_LIB
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
+	select HAVE_CONTEXT_TRACKING
 	help
 	  ARM 64-bit (AArch64) Linux support.
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 720e70b..301ea6a 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -108,6 +108,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SINGLESTEP		21
 #define TIF_32BIT		22	/* 32bit process */
 #define TIF_SWITCH_MM		23	/* deferred switch_mm */
+#define TIF_NOHZ		24
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index d920d7f..5fe447c 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -30,6 +30,22 @@
 #include <asm/unistd32.h>

 /*
+ * Context tracking subsystem. Used to instrument transitions
+ * between user and kernel mode.
+ */
+	.macro ct_user_exit
+#ifdef CONFIG_CONTEXT_TRACKING
+	bl	context_tracking_user_exit
+#endif
+	.endm
+
+	.macro ct_user_enter
+#ifdef CONFIG_CONTEXT_TRACKING
+	bl	context_tracking_user_enter
+#endif
+	.endm
+
+/*
  * Bad Abort numbers
  *-----------------
  */
@@ -88,6 +104,7 @@
 	.macro	kernel_exit, el, ret = 0
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
+	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	.endif
 	.if	\ret
@@ -427,6 +444,7 @@ el0_da:
 	enable_dbg
 	// enable interrupts before calling the main handler
 	enable_irq
+	ct_user_exit
 	mov	x0, x26
 	bic	x0, x0, #(0xff << 56)
 	mov	x1, x25
@@ -443,6 +461,7 @@ el0_ia:
 	enable_dbg
 	// enable interrupts before calling the main handler
 	enable_irq
+	ct_user_exit
 	mov	x0, x26
 	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
 	mov	x2, sp
@@ -511,6 +530,7 @@ el0_irq_naked:
 	bl	trace_hardirqs_off
 #endif

+	ct_user_exit
 	irq_handler
 	get_thread_info tsk

@@ -633,10 +653,12 @@ el0_svc_naked:	// compat entry point
 	isb
 	enable_dbg
 	enable_irq
+	ct_user_exit

 	get_thread_info tsk
 	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
 	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
+	tbnz	x16, #TIF_NOHZ, __sys_trace
 	adr	lr, ret_fast_syscall		// return address
 	cmp	scno, sc_nr			// check upper syscall limit
 	b.hs	ni_sys
On Wed, May 07, 2014 at 12:32:29AM +0100, Larry Bassel wrote:
> Make calls to ct_user_enter when the kernel is exited and ct_user_exit
> when the kernel is entered (in el0_da, el0_ia, el0_svc, el0_irq).

Why only these entry points? I can reschedule after any exception from EL0,
so I'd expect all exceptions from userspace to need annotating, no?

> These macros expand to function calls, which will only work properly if
> el0_sync and related code have been rearranged (in a previous patch of
> this series).
>
> In order to avoid saving registers, the slow syscall path is forced
> (as x86 does).

... and if you decide to handle undef exceptions, I think you'll need the
register saving too, in case the kernel needs to perform emulation.

Will
On 07 May 14 11:17, Will Deacon wrote:
> On Wed, May 07, 2014 at 12:32:29AM +0100, Larry Bassel wrote:
> > Make calls to ct_user_enter when the kernel is exited and ct_user_exit
> > when the kernel is entered (in el0_da, el0_ia, el0_svc, el0_irq).
>
> Why only these entry points? I can reschedule after any exception from EL0,
> so I'd expect all exceptions from userspace to need annotating, no?
>
> > These macros expand to function calls, which will only work properly if
> > el0_sync and related code have been rearranged (in a previous patch of
> > this series).
> >
> > In order to avoid saving registers, the slow syscall path is forced
> > (as x86 does).
>
> ... and if you decide to handle undef exceptions, I think you'll need the
> register saving too, in case the kernel needs to perform emulation.

These are excellent points, I will rework the patch and submit v3.

Thanks for the feedback.

> Will

Larry
Hi Will,

Will Deacon <will.deacon@arm.com> writes:

> On Wed, May 07, 2014 at 12:32:29AM +0100, Larry Bassel wrote:
> > Make calls to ct_user_enter when the kernel is exited and ct_user_exit
> > when the kernel is entered (in el0_da, el0_ia, el0_svc, el0_irq).
>
> Why only these entry points? I can reschedule after any exception from EL0,
> so I'd expect all exceptions from userspace to need annotating, no?

My initial approach to this, you might recall (though it was over a year
ago now), was to just instrument kernel_entry rather than sprinkle the
instrumentation in el0_*. However, your concern at the time was that since
it was before debugging was enabled, it would complicate debugging these
paths.

Any chance you have another suggestion for how we might do this in
kernel_entry rather than sprinkling it all over el0_*? Or is sprinkling
the only good way to handle this?

Kevin
On Thu, May 08, 2014 at 12:49:04AM +0100, Kevin Hilman wrote:
> Hi Will,

Hello Kevin,

> Will Deacon <will.deacon@arm.com> writes:
> > On Wed, May 07, 2014 at 12:32:29AM +0100, Larry Bassel wrote:
> > > Make calls to ct_user_enter when the kernel is exited and ct_user_exit
> > > when the kernel is entered (in el0_da, el0_ia, el0_svc, el0_irq).
> >
> > Why only these entry points? I can reschedule after any exception from EL0,
> > so I'd expect all exceptions from userspace to need annotating, no?
>
> My initial approach to this, you might recall (though it was over a year
> ago now), was to just instrument kernel_entry rather than sprinkle the
> instrumentation in el0_*. However, your concern at the time was that since
> it was before debugging was enabled, it would complicate debugging these
> paths.
>
> Any chance you have another suggestion for how we might do this in
> kernel_entry rather than sprinkling it all over el0_*? Or is sprinkling
> the only good way to handle this?
Unfortunately, different exceptions do subtly different things before invoking the main handler. For example:
  - Stashing the far
  - Enabling IRQs
  - Enabling debug
  - All the stuff on the syscall path
so putting the logic in kernel_entry isn't really do-able, unfortunately.
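(A condensed, not literal, sketch of that asymmetry, drawn from the diffs
earlier in the thread:)

	el0_da:				// data abort: must stash the fault address
		mrs	x26, far_el1	// before anything can clobber far_el1
		...
		enable_irq
		ct_user_exit		// hook placed after per-path setup

	el0_svc_naked:			// syscall: no fault address, but TIF
		enable_irq		// flag checks on the slow path instead
		ct_user_exit
		get_thread_info tsk
		ldr	x16, [tsk, #TI_FLAGS]
		tbnz	x16, #TIF_NOHZ, __sys_trace	// force the slow path
		...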
Will