From: Abhishek Dubey adubey@linux.ibm.com
This patch series enables two BPF JIT features on powerpc64. The first three patches add support for tail calls with subprogram combinations; the third patch also includes an optimization in which NVRs are accommodated in the stack save area of the trampoline frame. Implementation details are provided in the commit messages.
The last three patches add support for BPF exceptions. An architecture-specific stack walker is implemented to assist with the stack walk during exceptions. BPF selftest results and implementation details are presented in the corresponding commits.
Abhishek Dubey (6):
  powerpc64/bpf: Support tailcalls with subprogs
  powerpc64/bpf: Tailcall handling with trampolines
  powerpc/bpf: use BPF_PPC_STACK_SAVE to spill trampoline NVRs
  powerpc64/bpf: Add arch_bpf_stack_walk() for BPF JIT
  powerpc64/bpf: Support exceptions
  powerpc64/bpf: Additional NVR handling for bpf_throw
 arch/powerpc/net/bpf_jit.h        |  16 ++-
 arch/powerpc/net/bpf_jit_comp.c   |  90 +++++++++----
 arch/powerpc/net/bpf_jit_comp64.c | 214 ++++++++++++++++++++++++------
 3 files changed, 256 insertions(+), 64 deletions(-)
From: Abhishek Dubey adubey@linux.ibm.com
Enable tailcalls with subprog combinations using a referencing scheme. The actual tailcall count is always maintained in the tail_call_info variable in the frame of the main function (also called the entry function). The tail_call_info variable in the frame of every other subprog contains a reference to the tail_call_info in the main function's frame.
Dynamic resolution interprets tail_call_info either as a value or as a reference, depending on the context of the active frame when a tailcall is invoked.
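To make the resolution rule concrete, here is a minimal C sketch of the check that the JITed sequences below implement (the helper name resolve_tcc is purely illustrative and not part of this series). It relies on kernel stack addresses always being numerically larger than MAX_TAIL_CALL_CNT:

/* Sketch only: mirrors the JITed compare-and-dereference sequence. */
static inline u64 *resolve_tcc(u64 *tail_call_info)
{
	/* values <= MAX_TAIL_CALL_CNT are the count itself (main frame) */
	if (*tail_call_info <= MAX_TAIL_CALL_CNT)
		return tail_call_info;		/* value: use in place */
	/* anything larger is a pointer to the main frame's slot */
	return (u64 *)*tail_call_info;		/* reference: chase it */
}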
Following is the selftest run:
# ./test_progs -t tailcalls
#425/1  tailcalls/tailcall_1:OK
#425/2  tailcalls/tailcall_2:OK
#425/3  tailcalls/tailcall_3:OK
#425/4  tailcalls/tailcall_4:OK
#425/5  tailcalls/tailcall_5:OK
#425/6  tailcalls/tailcall_6:OK
#425/7  tailcalls/tailcall_bpf2bpf_1:OK
#425/8  tailcalls/tailcall_bpf2bpf_2:OK
#425/9  tailcalls/tailcall_bpf2bpf_3:OK
#425/10 tailcalls/tailcall_bpf2bpf_4:OK
#425/11 tailcalls/tailcall_bpf2bpf_5:OK
#425/12 tailcalls/tailcall_bpf2bpf_6:OK
#425/13 tailcalls/tailcall_bpf2bpf_fentry:OK
#425/14 tailcalls/tailcall_bpf2bpf_fexit:OK
#425/15 tailcalls/tailcall_bpf2bpf_fentry_fexit:OK
#425/16 tailcalls/tailcall_bpf2bpf_fentry_entry:OK
#425/17 tailcalls/tailcall_poke:OK
#425/18 tailcalls/tailcall_bpf2bpf_hierarchy_1:OK
#425/19 tailcalls/tailcall_bpf2bpf_hierarchy_fentry:OK
#425/20 tailcalls/tailcall_bpf2bpf_hierarchy_fexit:OK
#425/21 tailcalls/tailcall_bpf2bpf_hierarchy_fentry_fexit:OK
#425/22 tailcalls/tailcall_bpf2bpf_hierarchy_fentry_entry:OK
#425/23 tailcalls/tailcall_bpf2bpf_hierarchy_2:OK
#425/24 tailcalls/tailcall_bpf2bpf_hierarchy_3:OK
#425/25 tailcalls/tailcall_freplace:OK
#425/26 tailcalls/tailcall_bpf2bpf_freplace:OK
#425/27 tailcalls/tailcall_failure:OK
#425/28 tailcalls/reject_tail_call_spin_lock:OK
#425/29 tailcalls/reject_tail_call_rcu_lock:OK
#425/30 tailcalls/reject_tail_call_preempt_lock:OK
#425/31 tailcalls/reject_tail_call_ref:OK
#425    tailcalls:OK
Summary: 1/31 PASSED, 0 SKIPPED, 0 FAILED
Signed-off-by: Abhishek Dubey adubey@linux.ibm.com
---
 arch/powerpc/net/bpf_jit.h        | 14 ++++++-
 arch/powerpc/net/bpf_jit_comp.c   | 10 ++++-
 arch/powerpc/net/bpf_jit_comp64.c | 67 +++++++++++++++++++++++--------
 3 files changed, 71 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 8334cd667bba..98e8b1f9c2f9 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -51,6 +51,12 @@
 		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
 	} while (0)
+/* Same as PPC_BCC_SHORT, except valid dest is known prior to call. */
+#define PPC_COND_BRANCH(cond, dest)					\
+	do {								\
+		long offset = (long)(dest) - CTX_NIA(ctx);		\
+		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
+	} while (0)
 /*
  * Sign-extended 32-bit immediate load
  *
@@ -72,6 +78,10 @@
 	}
 } while (0)
 #ifdef CONFIG_PPC64
+
+/* for gpr non volatile registers BPF_REG_6 to 10 */
+#define BPF_PPC_STACK_SAVE	(6*8)
+
 /* If dummy pass (!image), account for maximum possible instructions */
 #define PPC_LI64(d, i)		do {					\
 		if (!image)						\
@@ -166,6 +176,7 @@ struct codegen_context {
 	unsigned int alt_exit_addr;
 	u64 arena_vm_start;
 	u64 user_vm_start;
+	bool is_subprog;
 };
 
 #define bpf_to_ppc(r)	(ctx->b2p[r])
@@ -200,11 +211,10 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
 void bpf_jit_build_fentry_stubs(u32 *image, struct codegen_context *ctx);
 void bpf_jit_realloc_regs(struct codegen_context *ctx);
 int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);
-
 int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
			  struct codegen_context *ctx, int insn_idx,
			  int jmp_off, int dst_reg, u32 code);
-
+int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx);
 #endif
 
 #endif
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 5e976730b2f5..069a8822c30d 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -206,6 +206,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
 	cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
 	cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
+	cgctx.is_subprog = bpf_is_subprog(fp);
 
 	/* Scouting faux-generate pass 0 */
 	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
@@ -435,6 +436,11 @@ void bpf_jit_free(struct bpf_prog *fp)
 	bpf_prog_unlock_free(fp);
 }
 
+bool bpf_jit_supports_subprog_tailcalls(void)
+{
+	return IS_ENABLED(CONFIG_PPC64);
+}
+
 bool bpf_jit_supports_kfunc_call(void)
 {
 	return true;
@@ -604,7 +610,7 @@ static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_contex
					       int func_frame_offset, int r4_off)
 {
 	if (IS_ENABLED(CONFIG_PPC64)) {
-		/* See bpf_jit_stack_tailcallcnt() */
+		/* See bpf_jit_stack_tailcallinfo_offset() */
 		int tailcallcnt_offset = 7 * 8;
 
 		EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
@@ -619,7 +625,7 @@ static void bpf_trampoline_restore_tail_call_cnt(u32 *image, struct codegen_cont
						 int func_frame_offset, int r4_off)
 {
 	if (IS_ENABLED(CONFIG_PPC64)) {
-		/* See bpf_jit_stack_tailcallcnt() */
+		/* See bpf_jit_stack_tailcallinfo_offset() */
 		int tailcallcnt_offset = 7 * 8;
 
 		EMIT(PPC_RAW_LL(_R3, _R1, -tailcallcnt_offset));
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 1fe37128c876..37c547b49da8 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -24,17 +24,19 @@
  * Ensure the top half (upto local_tmp_var) stays consistent
  * with our redzone usage.
  *
+ * tail_call_info - stores tailcall count value in main program's
+ *		    frame, stores reference to tail_call_info of
+ *		    main's frame in sub-prog's frame.
+ *
  *		[	prev sp		] <-------------
  *		[   nv gpr save area	] 6*8		|
- *		[    tail_call_cnt	] 8		|
+ *		[    tail_call_info	] 8		|
  *		[    local_tmp_var	] 24		|
  * fp (r31) -->	[   ebpf stack space	] upto 512	|
  *		[     frame header	] 32/112	|
  * sp (r1) --->	[    stack pointer	] --------------
  */
 
-/* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE	(6*8)
 /* for bpf JIT code internal usage */
 #define BPF_PPC_STACK_LOCALS	32
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
@@ -93,7 +95,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  *		[	...		]		|
  * sp (r1) --->	[    stack pointer	] --------------
  *		[   nv gpr save area	] 6*8
- *		[    tail_call_cnt	] 8
+ *		[    tail_call_info	] 8
  *		[    local_tmp_var	] 24
  *		[   unused red zone	] 224
  */
@@ -105,7 +107,7 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
 		return -(BPF_PPC_STACK_SAVE + 32);
 }
 
-static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
+int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx)
 {
 	return bpf_jit_stack_local(ctx) + 24;
 }
@@ -138,17 +140,31 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 #endif
 	/*
-	 * Initialize tail_call_cnt if we do tail calls.
-	 * Otherwise, put in NOPs so that it can be skipped when we are
-	 * invoked through a tail call.
+	 * Tail call count(tcc) is saved & updated only in main
+	 * program's frame and the address of tcc in main program's
+	 * frame (tcc_ptr) is saved in subprogs frame.
+	 *
+	 * Offset of tail_call_info on any frame will be interpreted
+	 * as either tcc_ptr or tcc value depending on whether it is
+	 * greater than MAX_TAIL_CALL_CNT or not.
 	 */
-	if (ctx->seen & SEEN_TAILCALL) {
+	if (!ctx->is_subprog) {
 		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
 		/* this goes in the redzone */
 		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
 	} else {
-		EMIT(PPC_RAW_NOP());
-		EMIT(PPC_RAW_NOP());
+		/*
+		 * if tail_call_info < MAX_TAIL_CALL_CNT
+		 *	main prog calling first subprog -> copy reference
+		 * else
+		 *	subsequent subprog calling another subprog -> directly copy content
+		 */
+		EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), _R1, 0));
+		EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), -(BPF_PPC_STACK_SAVE + 8)));
+		EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
+		PPC_COND_BRANCH(COND_GT, CTX_NIA(ctx) + 8);
+		EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), -(BPF_PPC_STACK_SAVE + 8)));
+		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
 	}
 
 	if (bpf_has_stack_frame(ctx)) {
@@ -343,19 +359,38 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
 	PPC_BCC_SHORT(COND_GE, out);
 
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
+	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
+	PPC_COND_BRANCH(COND_LE, CTX_NIA(ctx) + 8);
+
+	/* dereference TMP_REG_1 */
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 0));
+
 	/*
-	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
+	 * if (tail_call_info == MAX_TAIL_CALL_CNT)
 	 *	goto out;
 	 */
-	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
 	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
-	PPC_BCC_SHORT(COND_GE, out);
+	PPC_COND_BRANCH(COND_EQ, out);
 
 	/*
-	 * tail_call_cnt++;
+	 * tail_call_info++; <- Actual value of tcc here
 	 */
 	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
-	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+	/*
+	 * Before writing updated tail_call_info, distinguish if current frame
+	 * is storing a reference to tail_call_info or actual tcc value in
+	 * tail_call_info.
+	 */
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
+	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_2), MAX_TAIL_CALL_CNT));
+	PPC_COND_BRANCH(COND_GT, CTX_NIA(ctx) + 8);
+
+	/* First get address of tail_call_info */
+	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
+	/* Writeback updated value to tail_call_info */
+	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
 
 	/* prog = array->ptrs[index]; */
 	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
From: Abhishek Dubey adubey@linux.ibm.com
The trampoline mechanism sets up its own stack frame and an additional dummy frame. Additional JIT instructions are needed to handle tail_call_info dereferencing in the trampoline's context.
We don't add the two stack frames noted above; rather, we add space for the conventional 'non-volatile register save area' and tail_call_info in the trampoline's frame on ppc64. This makes the trampoline's frame consistent with the layout of all other frames.
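For reference, the offset of tail_call_info from the top of a frame then works out as follows (a worked restatement of the constants in this series; SZL is 8 on ppc64):

    tailcallinfo_offset = BPF_PPC_STACK_SAVE + SZL
                        = 6*8 + 8
                        = 56 bytes below the previous stack pointer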
Signed-off-by: Abhishek Dubey adubey@linux.ibm.com
---
 arch/powerpc/net/bpf_jit_comp.c | 48 ++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 069a8822c30d..4aaa0a287a45 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -606,15 +606,42 @@ static int invoke_bpf_mod_ret(u32 *image, u32 *ro_image, struct codegen_context
 	return 0;
 }
 
-static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_context *ctx,
-					       int func_frame_offset, int r4_off)
+/*
+ * Refer the label 'Generated stack layout' in this file for actual stack
+ * layout during trampoline invocation.
+ *
+ * Refer __arch_prepare_bpf_trampoline() for stack component details.
+ *
+ * The tailcall count/reference is present in caller's stack frame. It's
+ * required to copy the content of tail_call_info before calling the actual
+ * function to which the trampoline is attached.
+ */
+static void bpf_trampoline_setup_tail_call_info(u32 *image, struct codegen_context *ctx,
+						int func_frame_offset,
+						int bpf_dummy_frame_size, int r4_off)
 {
 	if (IS_ENABLED(CONFIG_PPC64)) {
 		/* See bpf_jit_stack_tailcallinfo_offset() */
-		int tailcallcnt_offset = 7 * 8;
+		int tailcallinfo_offset = BPF_PPC_STACK_SAVE + SZL;
+
+		/*
+		 * func_frame_offset =
+		 *	bpf_dummy_frame_size + trampoline_frame_size
+		 */
+		EMIT(PPC_RAW_LD(_R4, _R1, func_frame_offset));
+		EMIT(PPC_RAW_LD(_R3, _R4, -tailcallinfo_offset));
+
+		/*
+		 * Setting the tail_call_info in trampoline's frame
+		 * depending on if previous frame had value or reference.
+		 */
+		EMIT(PPC_RAW_CMPLWI(_R3, MAX_TAIL_CALL_CNT));
+		PPC_COND_BRANCH(COND_GT, CTX_NIA(ctx) + 8);
+		EMIT(PPC_RAW_ADDI(_R3, _R4, bpf_jit_stack_tailcallinfo_offset(ctx)));
+		EMIT(PPC_RAW_STL(_R3, _R1, func_frame_offset
+					   - bpf_dummy_frame_size - tailcallinfo_offset));
 
-		EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
-		EMIT(PPC_RAW_STL(_R3, _R1, -tailcallcnt_offset));
 	} else {
 		/* See bpf_jit_stack_offsetof() and BPF_PPC_TC */
 		EMIT(PPC_RAW_LL(_R4, _R1, r4_off));
@@ -721,6 +748,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	 *		[   r0 save (32-bit)	]	|
 	 * dummy frame for unwind [ back chain 1 ]	--
 	 *		[	padding		] align stack frame
+	 *		[      r26..r31		] nvr save : BPF_PPC_STACK_SAVE
+	 *		[   tail_call_info	] non optional - 64-bit powerpc
	 * r4_off	[   r4 (tailcallcnt)	] optional - 32-bit powerpc
	 * alt_lr_off	[  real lr (ool stub)	] optional - actual lr
	 *		[	r26		]
	 * nvr_off	[	r25		] nvr save area
	 * retval_off	[	return value	]
	 *		[	reg argN	]
	 *		[	...		]
@@ -801,6 +830,12 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 		}
 	}
 
+	/* Room for 64-bit tail_call_cnt */
+	bpf_frame_size += SZL;
+
+	/* Room for nvr save area */
+	bpf_frame_size += BPF_PPC_STACK_SAVE;
+
 	/* Padding to align stack frame, if any */
 	bpf_frame_size = round_up(bpf_frame_size, SZL * 2);
@@ -902,7 +937,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	/* Replicate tail_call_cnt before calling the original BPF prog */
 	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
-		bpf_trampoline_setup_tail_call_cnt(image, ctx, func_frame_offset, r4_off);
+		bpf_trampoline_setup_tail_call_info(image, ctx, func_frame_offset,
+						    bpf_dummy_frame_size, r4_off);
 
 	/* Restore args */
 	bpf_trampoline_restore_args_stack(image, ctx, func_frame_offset, nr_regs, regs_off);
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 069a8822c30d..4aaa0a287a45 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
[ ... ]
-static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_context *ctx,
-					       int func_frame_offset, int r4_off)
+static void bpf_trampoline_setup_tail_call_info(u32 *image, struct codegen_context *ctx,
+						int func_frame_offset,
+						int bpf_dummy_frame_size, int r4_off)
 {
 	if (IS_ENABLED(CONFIG_PPC64)) {
 		/* See bpf_jit_stack_tailcallinfo_offset() */
-		int tailcallcnt_offset = 7 * 8;
+		int tailcallinfo_offset = BPF_PPC_STACK_SAVE + SZL;
[ ... ]
+		EMIT(PPC_RAW_STL(_R3, _R1, func_frame_offset
+					   - bpf_dummy_frame_size - tailcallinfo_offset));
 					   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The new setup function stores tail_call_info at offset (func_frame_offset - bpf_dummy_frame_size - tailcallinfo_offset) from R1. Since func_frame_offset = bpf_dummy_frame_size + bpf_frame_size, this resolves to (bpf_frame_size - 56), which is a positive offset from R1.
However, the corresponding bpf_trampoline_restore_tail_call_cnt() function was not updated. It still reads from (R1 - 56):
static void bpf_trampoline_restore_tail_call_cnt(...)
{
	if (IS_ENABLED(CONFIG_PPC64)) {
		int tailcallcnt_offset = 7 * 8;

		EMIT(PPC_RAW_LL(_R3, _R1, -tailcallcnt_offset));
		                          ^^^^^^^^^^^^^^^^^^
		...
	}
}
Is there an offset mismatch here? The setup function stores at R1 + (bpf_frame_size - 56) while restore reads from R1 - 56. These point to different memory locations since bpf_frame_size is a positive value (at minimum ~200 bytes given the frame contents).
When BPF_TRAMP_F_TAIL_CALL_CTX is set, the restore function would read incorrect data after the traced function returns, potentially causing wrong tail call context to be propagated.
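Working through the two offsets, with tailcallinfo_offset = tailcallcnt_offset = 7 * 8 = 56 (a worked restatement of the numbers above, not new analysis):

    store: R1 + func_frame_offset - bpf_dummy_frame_size - 56
         = R1 + (bpf_dummy_frame_size + bpf_frame_size) - bpf_dummy_frame_size - 56
         = R1 + bpf_frame_size - 56     (inside the trampoline frame)

    load:  R1 - 56                      (redzone below the trampoline's SP)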
@@ -801,6 +830,12 @@ static int __arch_prepare_bpf_trampoline(...)
 		}
 	}
+	/* Room for 64-bit tail_call_cnt */
+	bpf_frame_size += SZL;
+
+	/* Room for nvr save area */
+	bpf_frame_size += BPF_PPC_STACK_SAVE;
+
 	/* Padding to align stack frame, if any */
 	bpf_frame_size = round_up(bpf_frame_size, SZL * 2);
The frame size is increased by 56 bytes (SZL + BPF_PPC_STACK_SAVE = 8 + 48) for the tail_call_info and nvr save area. This affects all calculations using bpf_frame_size, including the store offset in the setup function, but the restore function continues to use a hardcoded negative offset.
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/20713322833
From: Abhishek Dubey adubey@linux.ibm.com
In the previous patch, we introduced BPF_PPC_STACK_SAVE into the trampoline frame to make its layout consistent with a conventional stack frame.
The trampoline JIT currently uses NVRs R25 and R26 and allocates dedicated stack slots to save them. This dedicated space can be eliminated by reusing the BPF_PPC_STACK_SAVE area instead, reducing the overall stack footprint.
The BPF_PPC_STACK_SAVE area corresponds to the register range R26–R31. By remapping registers in the trampoline JIT code (R25 → R26 and R26 → R27), we can spill these registers into the existing save area and utilize the stack more efficiently.
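In brief, the remapping and the resulting save slots look like this (summarized from the hunks below; slot offsets per this patch's nvr_off):

    old reg   new reg   purpose                             save slot
    _R25  ->  _R26      bpf_prog pointer for enter/exit     nvr_off
    _R26  ->  _R27      start time from __bpf_prog_enter    nvr_off + SZL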
Signed-off-by: Abhishek Dubey adubey@linux.ibm.com
---
 arch/powerpc/net/bpf_jit_comp.c | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 4aaa0a287a45..b09d294084d4 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -512,7 +512,7 @@ static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ct
 	/* __bpf_prog_enter(p, &bpf_tramp_run_ctx) */
 	PPC_LI_ADDR(_R3, p);
-	EMIT(PPC_RAW_MR(_R25, _R3));
+	EMIT(PPC_RAW_MR(_R26, _R3));
 	EMIT(PPC_RAW_ADDI(_R4, _R1, run_ctx_off));
 	ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
					 (unsigned long)bpf_trampoline_enter(p));
@@ -520,7 +520,7 @@ static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ct
 		return ret;
 
 	/* Remember prog start time returned by __bpf_prog_enter */
-	EMIT(PPC_RAW_MR(_R26, _R3));
+	EMIT(PPC_RAW_MR(_R27, _R3));
 
 	/*
 	 * if (__bpf_prog_enter(p) == 0)
@@ -543,7 +543,7 @@ static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ct
 		image[ctx->idx] = ppc_inst_val(branch_insn);
 		ctx->idx++;
 	} else {
-		EMIT(PPC_RAW_LL(_R12, _R25, offsetof(struct bpf_prog, bpf_func)));
+		EMIT(PPC_RAW_LL(_R12, _R26, offsetof(struct bpf_prog, bpf_func)));
 		EMIT(PPC_RAW_MTCTR(_R12));
 		EMIT(PPC_RAW_BCTRL());
 	}
@@ -560,8 +560,8 @@ static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ct
 	}
 
 	/* __bpf_prog_exit(p, start_time, &bpf_tramp_run_ctx) */
-	EMIT(PPC_RAW_MR(_R3, _R25));
-	EMIT(PPC_RAW_MR(_R4, _R26));
+	EMIT(PPC_RAW_MR(_R3, _R26));
+	EMIT(PPC_RAW_MR(_R4, _R27));
 	EMIT(PPC_RAW_ADDI(_R5, _R1, run_ctx_off));
 	ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
					 (unsigned long)bpf_trampoline_exit(p));
@@ -748,12 +748,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
	 *		[   r0 save (32-bit)	]	|
	 * dummy frame for unwind [ back chain 1 ]	--
	 *		[	padding		] align stack frame
-	 *		[      r26..r31		] nvr save : BPF_PPC_STACK_SAVE
+	 * nvr_off	[      r26..r31		] nvr save : BPF_PPC_STACK_SAVE
	 *		[   tail_call_info	] non optional - 64-bit powerpc
	 * r4_off	[   r4 (tailcallcnt)	] optional - 32-bit powerpc
	 * alt_lr_off	[  real lr (ool stub)	] optional - actual lr
-	 *		[	r26		]
-	 * nvr_off	[	r25		] nvr save area
	 * retval_off	[	return value	]
	 *		[	reg argN	]
	 *		[	...		]
@@ -811,10 +809,6 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	if (save_ret)
 		bpf_frame_size += SZL;
 
-	/* Room for nvr save area */
-	nvr_off = bpf_frame_size;
-	bpf_frame_size += 2 * SZL;
-
 	/* Optional save area for actual LR in case of ool ftrace */
 	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
 		alt_lr_off = bpf_frame_size;
@@ -834,6 +828,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	bpf_frame_size += SZL;
 
 	/* Room for nvr save area */
+	nvr_off = bpf_frame_size;
 	bpf_frame_size += BPF_PPC_STACK_SAVE;
 
 	/* Padding to align stack frame, if any */
@@ -897,8 +892,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	EMIT(PPC_RAW_STL(_R3, _R1, nregs_off));
 
 	/* Save nv regs */
-	EMIT(PPC_RAW_STL(_R25, _R1, nvr_off));
-	EMIT(PPC_RAW_STL(_R26, _R1, nvr_off + SZL));
+	EMIT(PPC_RAW_STL(_R26, _R1, nvr_off));
+	EMIT(PPC_RAW_STL(_R27, _R1, nvr_off + SZL));
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		PPC_LI_ADDR(_R3, (unsigned long)im);
@@ -999,8 +994,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	EMIT(PPC_RAW_LL(_R3, _R1, retval_off));
 
 	/* Restore nv regs */
-	EMIT(PPC_RAW_LL(_R26, _R1, nvr_off + SZL));
-	EMIT(PPC_RAW_LL(_R25, _R1, nvr_off));
+	EMIT(PPC_RAW_LL(_R27, _R1, nvr_off + SZL));
+	EMIT(PPC_RAW_LL(_R26, _R1, nvr_off));
 
 	/* Epilogue */
 	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
From: Abhishek Dubey adubey@linux.ibm.com
arch_bpf_stack_walk() is used by bpf_throw() to unwind the stack up to the frame of the exception-boundary program during BPF exception handling.
This function is necessary to support BPF exceptions on PowerPC.
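For context, the generic bpf_throw() path drives this walker with a callback. The sketch below is a simplified paraphrase of the consumer in kernel/bpf/helpers.c (bpf_stack_walker()), not code from this series:

/*
 * Simplified sketch of the generic consumer; the walker calls this for
 * each frame, and a true return value means "keep walking".
 */
static bool stack_walker_sketch(void *cookie, u64 ip, u64 sp, u64 bp)
{
	struct bpf_throw_ctx *ctx = cookie;
	struct bpf_prog *prog;

	if (!is_bpf_text_address(ip))
		return !ctx->cnt;	/* keep going until a BPF frame is seen */
	prog = bpf_prog_ksym_find(ip);
	ctx->cnt++;
	if (bpf_is_subprog(prog))
		return true;		/* not the boundary yet */
	ctx->aux = prog->aux;		/* found the exception boundary */
	ctx->sp = sp;
	ctx->bp = bp;
	return false;			/* stop the walk */
}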
Signed-off-by: Abhishek Dubey adubey@linux.ibm.com
---
 arch/powerpc/net/bpf_jit_comp64.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 37c547b49da8..0f3af67914d6 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -237,6 +237,34 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	bpf_jit_build_fentry_stubs(image, ctx);
 }
+void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64), void *cookie)
+{
+	/* callback processing always in current context */
+	unsigned long fp = current_stack_frame();
+
+	for (;;) {
+		unsigned long *frame = (unsigned long *) fp;
+		unsigned long ip;
+
+		if (!validate_sp(fp, current))
+			return;
+
+		ip = frame[STACK_FRAME_LR_SAVE];
+		if (!ip)
+			break;
+
+		/*
+		 * consume_fn common code expects stack pointer(sp) in third
+		 * argument. There is no sp in ppc64, rather pass frame
+		 * pointer.
+		 */
+		if (!consume_fn(cookie, ip, fp, fp))
+			break;
+
+		fp = frame[0];
+	}
+}
+
 int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
 {
 	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
From: Abhishek Dubey adubey@linux.ibm.com
The modified prologue/epilogue generation code now enables the exception callback to use the stack frame of the program marked as the exception boundary, where the callee-saved registers are stored.
As per the ppc64 ABIv2 documentation[1], r14-r31 are callee-saved registers. BPF programs on ppc64 already save r26-r31. Saving the remaining set of callee-saved registers (r14-r25) is handled in the next patch.
[1] https://ftp.rtems.org/pub/rtems/people/sebh/ABI64BitOpenPOWERv1.1_16July2015...
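A short sketch of the intended control flow, drawn from the prologue/epilogue changes in this patch (not literal code):

/*
 * bpf_throw()
 *   -> arch_bpf_stack_walk() walks back to the exception-boundary frame
 *   -> the exception callback is entered with:
 *        R4 = LR saved in the boundary frame
 *        R5 = frame pointer of the boundary frame
 *      prologue:  EMIT(PPC_RAW_MR(_R1, _R5));   adopt boundary frame
 *      epilogue:  restore NVRs from that frame, then
 *                 EMIT(PPC_RAW_MTLR(_R4));      return to boundary's caller
 */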
Following is the exceptions selftest result on ppc64le:
# ./test_progs -t exceptions
#100/1   exceptions/exception_throw_always_1:OK
#100/2   exceptions/exception_throw_always_2:OK
#100/3   exceptions/exception_throw_unwind_1:OK
#100/4   exceptions/exception_throw_unwind_2:OK
#100/5   exceptions/exception_throw_default:OK
#100/6   exceptions/exception_throw_default_value:OK
#100/7   exceptions/exception_tail_call:OK
#100/8   exceptions/exception_ext:OK
#100/9   exceptions/exception_ext_mod_cb_runtime:OK
#100/10  exceptions/exception_throw_subprog:OK
#100/11  exceptions/exception_assert_nz_gfunc:OK
#100/12  exceptions/exception_assert_zero_gfunc:OK
#100/13  exceptions/exception_assert_neg_gfunc:OK
#100/14  exceptions/exception_assert_pos_gfunc:OK
#100/15  exceptions/exception_assert_negeq_gfunc:OK
#100/16  exceptions/exception_assert_poseq_gfunc:OK
#100/17  exceptions/exception_assert_nz_gfunc_with:OK
#100/18  exceptions/exception_assert_zero_gfunc_with:OK
#100/19  exceptions/exception_assert_neg_gfunc_with:OK
#100/20  exceptions/exception_assert_pos_gfunc_with:OK
#100/21  exceptions/exception_assert_negeq_gfunc_with:OK
#100/22  exceptions/exception_assert_poseq_gfunc_with:OK
#100/23  exceptions/exception_bad_assert_nz_gfunc:OK
#100/24  exceptions/exception_bad_assert_zero_gfunc:OK
#100/25  exceptions/exception_bad_assert_neg_gfunc:OK
#100/26  exceptions/exception_bad_assert_pos_gfunc:OK
#100/27  exceptions/exception_bad_assert_negeq_gfunc:OK
#100/28  exceptions/exception_bad_assert_poseq_gfunc:OK
#100/29  exceptions/exception_bad_assert_nz_gfunc_with:OK
#100/30  exceptions/exception_bad_assert_zero_gfunc_with:OK
#100/31  exceptions/exception_bad_assert_neg_gfunc_with:OK
#100/32  exceptions/exception_bad_assert_pos_gfunc_with:OK
#100/33  exceptions/exception_bad_assert_negeq_gfunc_with:OK
#100/34  exceptions/exception_bad_assert_poseq_gfunc_with:OK
#100/35  exceptions/exception_assert_range:OK
#100/36  exceptions/exception_assert_range_with:OK
#100/37  exceptions/exception_bad_assert_range:OK
#100/38  exceptions/exception_bad_assert_range_with:OK
#100/39  exceptions/non-throwing fentry -> exception_cb:OK
#100/40  exceptions/throwing fentry -> exception_cb:OK
#100/41  exceptions/non-throwing fexit -> exception_cb:OK
#100/42  exceptions/throwing fexit -> exception_cb:OK
#100/43  exceptions/throwing extension (with custom cb) -> exception_cb:OK
#100/44  exceptions/throwing extension -> global func in exception_cb:OK
#100/45  exceptions/exception_ext_mod_cb_runtime:OK
#100/46  exceptions/throwing extension (with custom cb) -> global func in exception_cb:OK
#100/47  exceptions/exception_ext:OK
#100/48  exceptions/non-throwing fentry -> non-throwing subprog:OK
#100/49  exceptions/throwing fentry -> non-throwing subprog:OK
#100/50  exceptions/non-throwing fentry -> throwing subprog:OK
#100/51  exceptions/throwing fentry -> throwing subprog:OK
#100/52  exceptions/non-throwing fexit -> non-throwing subprog:OK
#100/53  exceptions/throwing fexit -> non-throwing subprog:OK
#100/54  exceptions/non-throwing fexit -> throwing subprog:OK
#100/55  exceptions/throwing fexit -> throwing subprog:OK
#100/56  exceptions/non-throwing fmod_ret -> non-throwing subprog:OK
#100/57  exceptions/non-throwing fmod_ret -> non-throwing global subprog:OK
#100/58  exceptions/non-throwing extension -> non-throwing subprog:OK
#100/59  exceptions/non-throwing extension -> throwing subprog:OK
#100/60  exceptions/non-throwing extension -> non-throwing subprog:OK
#100/61  exceptions/non-throwing extension -> throwing global subprog:OK
#100/62  exceptions/throwing extension -> throwing global subprog:OK
#100/63  exceptions/throwing extension -> non-throwing global subprog:OK
#100/64  exceptions/non-throwing extension -> main subprog:OK
#100/65  exceptions/throwing extension -> main subprog:OK
#100/66  exceptions/reject_exception_cb_type_1:OK
#100/67  exceptions/reject_exception_cb_type_2:OK
#100/68  exceptions/reject_exception_cb_type_3:OK
#100/69  exceptions/reject_exception_cb_type_4:OK
#100/70  exceptions/reject_async_callback_throw:OK
#100/71  exceptions/reject_with_lock:OK
#100/72  exceptions/reject_subprog_with_lock:OK
#100/73  exceptions/reject_with_rcu_read_lock:OK
#100/74  exceptions/reject_subprog_with_rcu_read_lock:OK
#100/75  exceptions/reject_with_rbtree_add_throw:OK
#100/76  exceptions/reject_with_reference:OK
#100/77  exceptions/reject_with_cb_reference:OK
#100/78  exceptions/reject_with_cb:OK
#100/79  exceptions/reject_with_subprog_reference:OK
#100/80  exceptions/reject_throwing_exception_cb:OK
#100/81  exceptions/reject_exception_cb_call_global_func:OK
#100/82  exceptions/reject_exception_cb_call_static_func:OK
#100/83  exceptions/reject_multiple_exception_cb:OK
#100/84  exceptions/reject_exception_throw_cb:OK
#100/85  exceptions/reject_exception_throw_cb_diff:OK
#100/86  exceptions/reject_set_exception_cb_bad_ret1:OK
#100/87  exceptions/reject_set_exception_cb_bad_ret2:OK
#100/88  exceptions/check_assert_eq_int_min:OK
#100/89  exceptions/check_assert_eq_int_max:OK
#100/90  exceptions/check_assert_eq_zero:OK
#100/91  exceptions/check_assert_eq_llong_min:OK
#100/92  exceptions/check_assert_eq_llong_max:OK
#100/93  exceptions/check_assert_lt_pos:OK
#100/94  exceptions/check_assert_lt_zero:OK
#100/95  exceptions/check_assert_lt_neg:OK
#100/96  exceptions/check_assert_le_pos:OK
#100/97  exceptions/check_assert_le_zero:OK
#100/98  exceptions/check_assert_le_neg:OK
#100/99  exceptions/check_assert_gt_pos:OK
#100/100 exceptions/check_assert_gt_zero:OK
#100/101 exceptions/check_assert_gt_neg:OK
#100/102 exceptions/check_assert_ge_pos:OK
#100/103 exceptions/check_assert_ge_zero:OK
#100/104 exceptions/check_assert_ge_neg:OK
#100/105 exceptions/check_assert_range_s64:OK
#100/106 exceptions/check_assert_range_u64:OK
#100/107 exceptions/check_assert_single_range_s64:OK
#100/108 exceptions/check_assert_single_range_u64:OK
#100/109 exceptions/check_assert_generic:OK
#100/110 exceptions/check_assert_with_return:OK
#100     exceptions:OK
Summary: 1/110 PASSED, 0 SKIPPED, 0 FAILED
Signed-off-by: Abhishek Dubey adubey@linux.ibm.com
---
 arch/powerpc/net/bpf_jit.h        |  2 ++
 arch/powerpc/net/bpf_jit_comp.c   |  7 ++++
 arch/powerpc/net/bpf_jit_comp64.c | 53 +++++++++++++++++++++----------
 3 files changed, 45 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 98e8b1f9c2f9..b9316780a501 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -177,6 +177,8 @@ struct codegen_context {
 	u64 arena_vm_start;
 	u64 user_vm_start;
 	bool is_subprog;
+	bool exception_boundary;
+	bool exception_cb;
 };
 
 #define bpf_to_ppc(r)	(ctx->b2p[r])
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index b09d294084d4..3c030a7d8e73 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -207,6 +207,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
 	cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
 	cgctx.is_subprog = bpf_is_subprog(fp);
+	cgctx.exception_boundary = fp->aux->exception_boundary;
+	cgctx.exception_cb = fp->aux->exception_cb;
 
 	/* Scouting faux-generate pass 0 */
 	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
@@ -436,6 +438,11 @@ void bpf_jit_free(struct bpf_prog *fp)
 	bpf_prog_unlock_free(fp);
 }
 
+bool bpf_jit_supports_exceptions(void)
+{
+	return IS_ENABLED(CONFIG_PPC64);
+}
+
 bool bpf_jit_supports_subprog_tailcalls(void)
 {
 	return IS_ENABLED(CONFIG_PPC64);
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 0f3af67914d6..5ec8e3654098 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -85,7 +85,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 	 * - the bpf program uses its stack area
 	 * The latter condition is deduced from the usage of BPF_REG_FP
 	 */
-	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
+	return ctx->seen & SEEN_FUNC ||
+	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)) ||
+	       ctx->exception_cb;
 }
 
 /*
@@ -180,23 +182,32 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
 	}
 
-	/*
-	 * Back up non-volatile regs -- BPF registers 6-10
-	 * If we haven't created our own stack frame, we save these
-	 * in the protected zone below the previous stack frame
-	 */
-	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
-		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
-			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
+	if (!ctx->exception_cb) {
+		/*
+		 * Back up non-volatile regs -- BPF registers 6-10
+		 * If we haven't created our own stack frame, we save these
+		 * in the protected zone below the previous stack frame
+		 */
+		for (i = BPF_REG_6; i <= BPF_REG_10; i++)
+			if (ctx->exception_boundary || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+				EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1,
+						 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 
-	if (ctx->arena_vm_start)
-		EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
+		if (ctx->exception_boundary || ctx->arena_vm_start)
+			EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
				 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
 
-	/* Setup frame pointer to point to the bpf stack area */
-	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
-		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
+		/* Setup frame pointer to point to the bpf stack area */
+		if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
+			EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
+	} else {
+		/*
+		 * Exception callback receives Frame Pointer of main
+		 * program as third arg
+		 */
+		EMIT(PPC_RAW_MR(_R1, _R5));
+	}
 
 	if (ctx->arena_vm_start)
 		PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
@@ -208,17 +219,25 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
 
 	/* Restore NVRs */
 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
-		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+		if (ctx->exception_cb || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
 			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 
-	if (ctx->arena_vm_start)
+	if (ctx->exception_cb || ctx->arena_vm_start)
 		EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
				bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
 
+	if (ctx->exception_cb) {
+		/*
+		 * LR value from boundary-frame is received as second parameter
+		 * in exception callback.
+		 */
+		EMIT(PPC_RAW_MTLR(_R4));
+	}
+
 	/* Tear down our stack frame */
 	if (bpf_has_stack_frame(ctx)) {
 		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
-		if (ctx->seen & SEEN_FUNC) {
+		if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
 			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
 			EMIT(PPC_RAW_MTLR(_R0));
 		}
From: Abhishek Dubey adubey@linux.ibm.com
The bpf_throw() function never returns; if it has clobbered any callee-saved registers, they will remain clobbered. The prologue must therefore save all callee-saved registers in the frame of the exception-boundary program. These additional non-volatile registers, R14-R25, along with the other NVRs, are later restored in the epilogue of the exception callback.
To achieve this, the frame size is determined dynamically to accommodate the additional non-volatile registers in the exception boundary's frame. For a program that is not an exception boundary, the frame size remains optimal. The additional instructions to save and restore registers r14-r25 are emitted only for the exception boundary and the exception callback, respectively.
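In numbers (a worked restatement of the constants introduced below, not new code):

    BPF_PPC_EXC_STACK_SAVE = 12 * 8 = 96 bytes              (r14-r25)
    BPF_PPC_EXC_STACKFRAME = BPF_PPC_STACKFRAME + 96        (still quadword
                                                             aligned, since 96
                                                             is a multiple of 16)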
Signed-off-by: Abhishek Dubey adubey@linux.ibm.com
---
 arch/powerpc/net/bpf_jit_comp64.c | 70 +++++++++++++++++++++++++++----
 1 file changed, 62 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 5ec8e3654098..21f04eb57605 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -29,19 +29,33 @@
  * main's frame in sub-prog's frame.
  *
  *		[	prev sp		] <-------------
- *		[   nv gpr save area	] 6*8		|
+ *		[   nv gpr save area	] 6*8 + (12*8)	|
  *		[    tail_call_info	] 8		|
  *		[    local_tmp_var	] 24		|
  * fp (r31) -->	[   ebpf stack space	] upto 512	|
  *		[     frame header	] 32/112	|
  * sp (r1) --->	[    stack pointer	] --------------
+ *
+ * Additional (12*8) in 'nv gpr save area' only in case of
+ * exception boundary.
  */
 
 /* for bpf JIT code internal usage */
 #define BPF_PPC_STACK_LOCALS	32
+/*
+ * for additional non volatile registers(r14-r25) to be saved
+ * at exception boundary
+ */
+#define BPF_PPC_EXC_STACK_SAVE	(12*8)
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
+/*
+ * same as BPF_PPC_STACKFRAME with save area for additional
+ * non volatile registers saved at exception boundary.
+ * This is quad-word aligned.
+ */
+#define BPF_PPC_EXC_STACKFRAME	(BPF_PPC_STACKFRAME + BPF_PPC_EXC_STACK_SAVE)
 
 /* BPF register usage */
 #define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
@@ -96,17 +110,23 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  *		[	prev sp		] <-------------
  *		[	...		]		|
  * sp (r1) --->	[    stack pointer	] --------------
- *		[   nv gpr save area	] 6*8
+ *		[   nv gpr save area	] 6*8 + (12*8)
  *		[    tail_call_info	] 8
  *		[    local_tmp_var	] 24
  *		[   unused red zone	] 224
+ *
+ * Additional (12*8) in 'nv gpr save area' only in case of
+ * exception boundary.
  */
 static int bpf_jit_stack_local(struct codegen_context *ctx)
 {
 	if (bpf_has_stack_frame(ctx))
 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
 	else
-		return -(BPF_PPC_STACK_SAVE + 32);
+		return -(BPF_PPC_STACK_SAVE +
+			 (ctx->exception_boundary || ctx->exception_cb ?
+			  BPF_PPC_EXC_STACK_SAVE : 0) +
+			 32) /* tail_call_info + local_tmp_var */;
 }
 
 int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx)
@@ -116,10 +136,20 @@ int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx)
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 {
-	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
+	int min_valid_nvreg = BPF_PPC_NVR_MIN;
+	/* Default frame size for all cases except exception boundary */
+	int frame_nvr_size = BPF_PPC_STACKFRAME;
+
+	/* Consider all nv regs for handling exceptions */
+	if (ctx->exception_boundary || ctx->exception_cb) {
+		min_valid_nvreg = _R14;
+		frame_nvr_size = BPF_PPC_EXC_STACKFRAME;
+	}
+
+	if (reg >= min_valid_nvreg && reg < 32)
 		return (bpf_has_stack_frame(ctx) ?
-			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0) - (8 * (32 - reg));
+			frame_nvr_size + ctx->stack_size : 0)
+		       - (8 * (32 - reg));
 
 	pr_err("BPF JIT is asking about unknown registers");
 	BUG();
@@ -179,7 +209,20 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 		EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
 	}
 
-		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
+		int stack_expand = ctx->exception_boundary || ctx->exception_cb ?
+				   BPF_PPC_EXC_STACKFRAME : BPF_PPC_STACKFRAME;
+
+		EMIT(PPC_RAW_STDU(_R1, _R1, -(stack_expand + ctx->stack_size)));
+	}
+
+	/*
+	 * Program acting as exception boundary pushes R14..R25 in addition to
+	 * BPF callee-saved non volatile registers. Exception callback uses
+	 * the boundary program's stack frame, recover additionally saved
+	 * registers in epilogue of exception callback.
+	 */
+	if (ctx->exception_boundary) {
+		for (i = _R14; i <= _R25; i++)
+			EMIT(PPC_RAW_STD(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
 	}
 
 	if (!ctx->exception_cb) {
@@ -227,6 +270,14 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
				bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
 
 	if (ctx->exception_cb) {
+		/*
+		 * Recover additionally saved non volatile registers from stack
+		 * frame of exception boundary program.
+		 */
+		for (i = _R14; i <= _R25; i++) {
+			EMIT(PPC_RAW_LD(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
+		}
+
 		/*
 		 * LR value from boundary-frame is received as second parameter
 		 * in exception callback.
@@ -236,7 +287,10 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
 
 	/* Tear down our stack frame */
 	if (bpf_has_stack_frame(ctx)) {
-		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
+		int stack_shrink = ctx->exception_cb || ctx->exception_boundary ?
+				   BPF_PPC_EXC_STACKFRAME : BPF_PPC_STACKFRAME;
+
+		EMIT(PPC_RAW_ADDI(_R1, _R1, stack_shrink + ctx->stack_size));
+
 		if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
 			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
 			EMIT(PPC_RAW_MTLR(_R0));
linux-kselftest-mirror@lists.linaro.org