On 15/8/23 08:52, Eduard Zingerman wrote:
On Mon, 2023-08-14 at 21:41 +0800, Leon Hwang wrote:
Since commit ebf7d1f508a73871 ("bpf, x64: rework pro/epilogue and tailcall handling in JIT"), tail calls on x64 work better than before.
Since commit e411901c0b775a3a ("bpf: allow for tailcalls in BPF subprograms for x64 JIT"), tail calls are able to run in BPF subprograms on x64.
Since commit 5b92a28aae4dd0f8 ("bpf: Support attaching tracing BPF program to other BPF programs"), a BPF program is able to trace other BPF programs.
How about combining them all together?
- FENTRY/FEXIT on a BPF subprogram.
- A tailcall runs in the BPF subprogram.
- The tailcall calls itself.
As a result, a tail call infinite loop comes up, and the loop halts the machine.
As we know, in a tail call context, tail_call_cnt is propagated between BPF subprograms via the stack and the RAX register. So do the same in FENTRY/FEXIT trampolines.
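For example, a minimal reproducer sketch looks roughly like this (the map, section and function names here are illustrative, not taken from the actual selftests):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
		__uint(max_entries, 1);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(__u32));
	} jmp_table SEC(".maps");

	static __noinline int subprog_tail(struct __sk_buff *skb)
	{
		/* Userspace stores the fd of entry() in slot 0, so this
		 * tail call re-enters the same program.
		 */
		bpf_tail_call_static(skb, &jmp_table, 0);
		return 0;
	}

	SEC("tc")
	int entry(struct __sk_buff *skb)
	{
		/* The tail call runs in a BPF subprogram. */
		return subprog_tail(skb);
	}

	char _license[] SEC("license") = "GPL";

Attaching FENTRY/FEXIT to subprog_tail() then inserts a trampoline that clobbers RAX, i.e. the propagated tail_call_cnt, so the tail call limit never triggers and the loop runs forever.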
Hi Leon,
I'm not familiar with this part of the JIT compiler, so I decided that taking a look at your series might be a good learning point. I think I got the gist of it, but I don't understand where the initial value of RAX (== 0) comes from in arch_prepare_bpf_trampoline(). Could you please help me out?
Also a couple of nitpicks:
- in arch_prepare_bpf_trampoline() there is a comment detailing the stack layout, it probably should be updated to say that tail call count is stored as well;
- before arch_prepare_bpf_trampoline() there is a comment with an example of generated assembly, should it be updated?
Thanks,
Eduard
a) The initial value of RAX is set in emit_prologue():

	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			/* When it's the entry of the whole
			 * tailcall context, zeroing the RAX
			 * means init tail_call_cnt.
			 */
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			/* Keep the same asm layout. */
			EMIT2(0x66, 0x90); /* nop2 */
	}

I'd like to add this comment to emit_prologue().
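For completeness, later in emit_prologue() that value is spilled to the stack, which is the slot RESTORE_TAIL_CALL_CNT() reads back (roughly, from my reading of the current code):

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */

So tail_call_cnt ends up at [rbp - round_up(stack_depth, 8) - 8], exactly the address RESTORE_TAIL_CALL_CNT(stack) computes.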
b) Good point to update the stack layout comment. I'll do it; see the sketch below.
c) The comment with the example of generated assembly will be updated as well.
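For b), I have in mind something like this at the bottom of the stack layout comment (a rough sketch, final wording may differ; the pushed rax sits at the lowest slot of the trampoline stack):

	 *                     [ ...             ]
	 * RBP - arg_stack_off [ stack_arg1      ]
	 * RSP                 [ tail_call_cnt   ] BPF_TRAMP_F_TAIL_CALL_CTX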
Thanks,
Leon
Fixes: ebf7d1f508a7 ("bpf, x64: rework pro/epilogue and tailcall handling in JIT")
Fixes: e411901c0b77 ("bpf: allow for tailcalls in BPF subprograms for x64 JIT")
Signed-off-by: Leon Hwang <hffilwlqm@gmail.com>
---
 arch/x86/net/bpf_jit_comp.c | 23 +++++++++++++++++++----
 include/linux/bpf.h         |  6 ++++++
 kernel/bpf/trampoline.c     |  5 +++--
 kernel/bpf/verifier.c       |  9 +++++++--
 4 files changed, 35 insertions(+), 8 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a5930042139d3..ca5366d97ad04 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1018,6 +1018,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
 
 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
 
+/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+#define RESTORE_TAIL_CALL_CNT(stack)				\
+	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
 {
@@ -1623,9 +1627,7 @@ st:			if (is_imm8(insn->off))
 
 			func = (u8 *) __bpf_call_base + imm32;
 			if (tail_call_reachable) {
-				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
-				EMIT3_off32(0x48, 0x8B, 0x85,
-					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
+				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
 				if (!imm32)
 					return -EINVAL;
 				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
@@ -2464,6 +2466,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	else
 		/* sub rsp, stack_size */
 		EMIT4(0x48, 0x83, 0xEC, stack_size);
+	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+		EMIT1(0x50);		/* push rax */
 	/* mov QWORD PTR [rbp - rbx_off], rbx */
 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
@@ -2516,6 +2520,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 		restore_regs(m, &prog, regs_off);
 		save_args(m, &prog, arg_stack_off, true);
 
+		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+			/* Before calling the original function, restore the
+			 * tail_call_cnt from stack.
+			 */
+			RESTORE_TAIL_CALL_CNT(stack_size);
+
 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
 			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
 			EMIT2(0xff, 0xd0); /* call *rax */
@@ -2569,7 +2579,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 			ret = -EINVAL;
 			goto cleanup;
 		}
-	}
+	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+		/* Before running the original function, restore the
+		 * tail_call_cnt from stack.
+		 */
+		RESTORE_TAIL_CALL_CNT(stack_size);
+
 	/* restore return value of orig_call or fentry prog back into RAX */
 	if (save_ret)
 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index cfabbcf47bdb8..55c72086034ef 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1028,6 +1028,11 @@ struct btf_func_model {
  */
 #define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)
 
+/* Indicate that current trampoline is in a tail call context. Then, it has to
+ * cache and restore tail_call_cnt to avoid infinite tail call loop.
+ */
+#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)
+
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86.
  */
@@ -1147,6 +1152,7 @@ struct bpf_attach_target_info {
 	struct module *tgt_mod;
 	const char *tgt_name;
 	const struct btf_type *tgt_type;
+	bool tail_call_ctx;
 };
 
 #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 78acf28d48732..0fae334e3f7b8 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
 		goto out;
 	}
 
-	/* clear all bits except SHARE_IPMODIFY */
-	tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
+	/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
+	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
 
 	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
 	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
@@ -783,6 +783,7 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
 
 	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
 	tr->func.addr = (void *)tgt_info->tgt_addr;
+	tr->flags = (tgt_info->tail_call_ctx ? BPF_TRAMP_F_TAIL_CALL_CTX : 0);
 out:
 	mutex_unlock(&tr->mutex);
 	return tr;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4ccca1f6c9981..a78e5a2ae5c72 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -19400,10 +19400,15 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
 			return -EINVAL;
 		fallthrough;
 	case BPF_MODIFY_RETURN:
-	case BPF_LSM_MAC:
-	case BPF_LSM_CGROUP:
 	case BPF_TRACE_FENTRY:
 	case BPF_TRACE_FEXIT:
+		if (tgt_prog && subprog > 0 &&
+		    tgt_prog->aux->func[subprog]->is_func &&
+		    tgt_prog->aux->tail_call_reachable)
+			tgt_info->tail_call_ctx = true;
+		fallthrough;
+	case BPF_LSM_MAC:
+	case BPF_LSM_CGROUP:
 		if (!btf_type_is_func(t)) {
 			bpf_log(log, "attach_btf_id %u is not a function\n",
 				btf_id);