On Apr 12, 2022, at 10:49 PM, Xu Kuohai xukuohai@huawei.com wrote:
Add ftrace direct support for arm64.
- When there is only a custom trampoline, replace the fentry nop with a
  jump instruction that jumps directly to the custom trampoline.

- When an ftrace trampoline and a custom trampoline coexist, jump from
  fentry to the ftrace trampoline first, then jump to the custom
  trampoline when the ftrace trampoline exits. The currently unused
  register pt_regs->orig_x0 is used as an intermediary for jumping from
  the ftrace trampoline to the custom trampoline.
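As an illustration only (not taken from this patch), attaching a custom
trampoline through the generic direct-call API that this series enables
might look like the following minimal sketch; my_tramp, my_attach and
target_ip are placeholder names:

#include <linux/ftrace.h>

/* Hypothetical custom trampoline, e.g. a BPF trampoline emitted by the
 * JIT; here just an extern symbol for the sake of the example.
 */
extern void my_tramp(void);

static int my_attach(unsigned long target_ip)
{
	/* If this is the only user of the patch site, ftrace rewrites
	 * the fentry nop at target_ip into a branch straight to
	 * my_tramp. If an ftrace trampoline is also attached, it runs
	 * first and then branches to my_tramp via the address stashed
	 * in pt_regs->orig_x0 (see arch_ftrace_set_direct_caller in
	 * the diff below).
	 */
	return register_ftrace_direct(target_ip, (unsigned long)my_tramp);
}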
Signed-off-by: Xu Kuohai xukuohai@huawei.com
For the series:
Acked-by: Song Liu songliubraving@fb.com
(Pardon my laziness, I somehow only got 1/5 and 3/5 in one of my inboxes, and nothing in my other inbox. :( )
Just one nitpick for 2/5: as we move is_valid_bpf_tramp_flags to trampoline.c, we should change the multi-line comment into net style:
	/* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
	 * and it must be used alone.
	 */
Thanks,
Song
 arch/arm64/Kconfig               |  2 ++
 arch/arm64/include/asm/ftrace.h  | 10 ++++++++++
 arch/arm64/kernel/asm-offsets.c  |  1 +
 arch/arm64/kernel/entry-ftrace.S | 18 +++++++++++++++---
 4 files changed, 28 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 57c4c995965f..81cc330daafc 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -177,6 +177,8 @@ config ARM64
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS \
 		if $(cc-option,-fpatchable-function-entry=2)
+	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS \
+		if DYNAMIC_FTRACE_WITH_REGS
 	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
 		if DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 1494cfa8639b..3a363d6a3bd0 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -78,6 +78,16 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 	return addr;
 }
 
+static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+						 unsigned long addr)
+{
+	/*
+	 * Place custom trampoline address in regs->orig_x0 to let ftrace
+	 * trampoline jump to it.
+	 */
+	regs->orig_x0 = addr;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 struct dyn_ftrace;
 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 1197e7679882..b1ed0bf01c59 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -80,6 +80,7 @@ int main(void)
   DEFINE(S_SDEI_TTBR1,	offsetof(struct pt_regs, sdei_ttbr1));
   DEFINE(S_PMR_SAVE,	offsetof(struct pt_regs, pmr_save));
   DEFINE(S_STACKFRAME,	offsetof(struct pt_regs, stackframe));
+  DEFINE(S_ORIG_X0,	offsetof(struct pt_regs, orig_x0));
   DEFINE(PT_REGS_SIZE,	sizeof(struct pt_regs));
   BLANK();
 #ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index e535480a4069..b1bd6576f205 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -60,6 +60,9 @@
 	str	x29, [sp, #S_FP]
 	.endif
 
+	/* Set orig_x0 to zero */
+	str	xzr, [sp, #S_ORIG_X0]
+
 	/* Save the callsite's SP and LR */
 	add	x10, sp, #(PT_REGS_SIZE + 16)
 	stp	x9, x10, [sp, #S_LR]
@@ -119,12 +122,21 @@ ftrace_common_return:
 	/* Restore the callsite's FP, LR, PC */
 	ldr	x29, [sp, #S_FP]
 	ldr	x30, [sp, #S_LR]
-	ldr	x9, [sp, #S_PC]
+	ldr	x10, [sp, #S_PC]
+	ldr	x11, [sp, #S_ORIG_X0]
+	cbz	x11, 1f
+	/* Set x9 to parent ip before jump to bpf trampoline */
+	mov	x9, x30
+	/* Set lr to self ip */
+	ldr	x30, [sp, #S_PC]
+	/* Set x10 (used for return address) to bpf trampoline */
+	mov	x10, x11
+1:
 
 	/* Restore the callsite's SP */
 	add	sp, sp, #PT_REGS_SIZE + 16
 
-	ret	x9
+	ret	x10
 SYM_CODE_END(ftrace_common)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2.30.2
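For reference, the generic ftrace side that feeds the new arch hook is
roughly the following (a simplified sketch of call_direct_funcs() from
kernel/trace/ftrace.c in kernels of this era; details and error handling
trimmed):

static void call_direct_funcs(unsigned long ip, unsigned long pip,
			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);
	unsigned long addr;

	/* Look up the direct (custom) trampoline registered for this
	 * call site, if any.
	 */
	addr = ftrace_find_rec_direct(ip);
	if (!addr)
		return;

	/* On arm64 this stores addr in regs->orig_x0, so the exit path
	 * of ftrace_common (patched above) returns to the custom
	 * trampoline instead of straight back to the traced function.
	 */
	arch_ftrace_set_direct_caller(regs, addr);
}

This is the path that makes the ftrace trampoline hand control to the
custom trampoline on exit, as described in the commit message above.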