Hi,
Wei reported that when loading his bpf prog on a 5.10.200 kernel, the host would panic; this did not happen on a 5.10.135 kernel. Testing on the latest v5.10.238 still reproduces the panic:
[ 26.531718] BUG: kernel NULL pointer dereference, address: 0000000000000168 [ 26.538093] #PF: supervisor read access in kernel mode [ 26.542727] #PF: error_code(0x0000) - not-present page [ 26.548093] PGD 10f3e9067 P4D 10f332067 PUD 10f0c5067 PMD 0 [ 26.553211] Oops: 0000 [#1] SMP NOPTI [ 26.556531] CPU: 2 PID: 541 Comm: main Not tainted 5.10.238-00267-g01e7e36b8606 #63 [ 26.563816] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014 [ 26.572357] RIP: 0010:__mark_chain_precision+0x24b/0x4d0 [ 26.576572] Code: 51 01 be 20 00 00 00 4c 89 ef 48 63 d2 e8 bd df 31 00 89 c1 83 f8 1f 7f 29 48 63 d1 48 89 d0 48 c1 e0 04 48 29 d0 48 8d 04 c3 <83> 38 01 75 c3 0f b6 74 24 06 80 78 74 00 c6 40 74 01 44 0f 44 f6 [ 26.589100] RSP: 0018:ffa0000000ff7b60 EFLAGS: 00010216 [ 26.592612] RAX: 0000000000000168 RBX: 0000000000000000 RCX: 0000000000000003 [ 26.597416] RDX: 0000000000000003 RSI: 0000000000000020 RDI: ffa0000000ff7b78 [ 26.601362] RBP: 0000000000000003 R08: ffa0000000ff7b70 R09: 0000000000000004 [ 26.604261] R10: 0000000000000007 R11: ffa0000000425000 R12: ff11000102ee2000 [ 26.607202] R13: ffa0000000ff7b78 R14: 0000000000000000 R15: ff1100010ee37140 [ 26.610327] FS: 00000000007a0630(0000) GS:ff1100081c400000(0000) knlGS:0000000000000000 [ 26.613678] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 26.616105] CR2: 0000000000000168 CR3: 0000000115e72002 CR4: 0000000000371ee0 [ 26.619059] Call Trace: [ 26.620118] adjust_reg_min_max_vals+0x133/0x340 [ 26.622048] ? krealloc+0x63/0xe0 [ 26.623435] do_check+0x38c/0xa80 [ 26.624859] do_check_common+0x15b/0x280 [ 26.626496] bpf_check+0xbe1/0xd30 [ 26.627939] ? srso_alias_return_thunk+0x5/0x7f [ 26.629796] ? trace_hardirqs_on+0x1a/0xd0 [ 26.631503] ? srso_alias_return_thunk+0x5/0x7f [ 26.633402] bpf_prog_load+0x422/0x8a0 [ 26.634987] ? srso_alias_return_thunk+0x5/0x7f [ 26.636864] ? __handle_mm_fault+0x3cb/0x6d0 [ 26.638658] ? srso_alias_return_thunk+0x5/0x7f [ 26.640543] ? 
lock_release+0xe3/0x110 [ 26.642114] __do_sys_bpf+0x485/0xdf0 [ 26.643624] do_syscall_64+0x33/0x40 [ 26.645110] entry_SYSCALL_64_after_hwframe+0x67/0xd1 [ 26.647190] RIP: 0033:0x409a6e [ 26.648470] Code: 24 28 44 8b 44 24 2c e9 70 ff ff ff cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc 49 89 f2 48 89 fa 48 89 ce 48 89 df 0f 05 <48> 3d 01 f0 ff ff 76 15 48 f7 d8 48 89 c1 48 c7 c0 ff ff ff ff 48 [ 26.656154] RSP: 002b:000000c00199edc0 EFLAGS: 00000212 ORIG_RAX: 0000000000000141 [ 26.659451] RAX: ffffffffffffffda RBX: 0000000000000005 RCX: 0000000000409a6e [ 26.662375] RDX: 0000000000000098 RSI: 000000c00199f290 RDI: 0000000000000005 [ 26.665267] RBP: 000000c00199ee00 R08: 0000000000000000 R09: 0000000000000000 [ 26.668204] R10: 0000000000000000 R11: 0000000000000212 R12: 0000000000000000 [ 26.671125] R13: 0000000000000080 R14: 000000c000002380 R15: 8080808080808080 [ 26.674085] Modules linked in: [ 26.675363] CR2: 0000000000000168 [ 26.676772] ---[ end trace 3fc192ee4dabbf12 ]--- [ 26.678667] RIP: 0010:__mark_chain_precision+0x24b/0x4d0 [ 26.680926] Code: 51 01 be 20 00 00 00 4c 89 ef 48 63 d2 e8 bd df 31 00 89 c1 83 f8 1f 7f 29 48 63 d1 48 89 d0 48 c1 e0 04 48 29 d0 48 8d 04 c3 <83> 38 01 75 c3 0f b6 74 24 06 80 78 74 00 c6 40 74 01 44 0f 44 f6 [ 26.688665] RSP: 0018:ffa0000000ff7b60 EFLAGS: 00010216 [ 26.690828] RAX: 0000000000000168 RBX: 0000000000000000 RCX: 0000000000000003 [ 26.693777] RDX: 0000000000000003 RSI: 0000000000000020 RDI: ffa0000000ff7b78 [ 26.696680] RBP: 0000000000000003 R08: ffa0000000ff7b70 R09: 0000000000000004 [ 26.699651] R10: 0000000000000007 R11: ffa0000000425000 R12: ff11000102ee2000 [ 26.702561] R13: ffa0000000ff7b78 R14: 0000000000000000 R15: ff1100010ee37140 [ 26.705522] FS: 00000000007a0630(0000) GS:ff1100081c400000(0000) knlGS:0000000000000000 [ 26.708806] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 26.711179] CR2: 0000000000000168 CR3: 0000000115e72002 CR4: 0000000000371ee0 [ 26.714143] Kernel panic - not syncing: Fatal 
exception [ 26.716893] Kernel Offset: disabled [ 26.718911] Rebooting in 5 seconds..
I did a bisect in the linux-5.10.y branch and found the first bad commit is commit 2474ec58b96d ("bpf: allow precision tracking for programs with subprogs").
This series reverts the above commit and its related commits. After the revert, the kernel no longer panics.
For detailed log and a reproducer, please reference this link: https://lore.kernel.org/stable/20250605070921.GA3795@bytedance
Aaron Lu (4): Revert "selftests/bpf: make test_align selftest more robust" Revert "bpf: aggressively forget precise markings during state checkpointing" Revert "bpf: stop setting precise in current state" Revert "bpf: allow precision tracking for programs with subprogs"
kernel/bpf/verifier.c | 175 ++---------------- .../testing/selftests/bpf/prog_tests/align.c | 36 ++-- 2 files changed, 26 insertions(+), 185 deletions(-)
This reverts commit 4af2d9ddb7e78f97c23f709827e5075c6d866e34.
The backport of the bpf precision tracking related changes caused the bpf verifier to panic while loading certain bpf progs, so revert them.
Link: https://lkml.kernel.org/r/20250605070921.GA3795@bytedance/ Reported-by: Wei Wei weiwei.danny@bytedance.com Signed-off-by: Aaron Lu ziqianlu@bytedance.com --- .../testing/selftests/bpf/prog_tests/align.c | 36 +++++++------------ 1 file changed, 13 insertions(+), 23 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 7996ec07e0bdb..5861446d07770 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -2,7 +2,7 @@ #include <test_progs.h>
#define MAX_INSNS 512 -#define MAX_MATCHES 24 +#define MAX_MATCHES 16
struct bpf_reg_match { unsigned int line; @@ -267,7 +267,6 @@ static struct bpf_align_test tests[] = { */ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), @@ -281,7 +280,6 @@ static struct bpf_align_test tests[] = { BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), @@ -313,52 +311,44 @@ static struct bpf_align_test tests[] = { {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Variable offset is added to R5 packet pointer, - * resulting in auxiliary alignment of 4. To avoid BPF - * verifier's precision backtracking logging - * interfering we also have a no-op R4 = R5 - * instruction to validate R5 state. We also check - * that R4 is what it should be in such case. + * resulting in auxiliary alignment of 4. */ - {19, "R4_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {19, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5, resulting in * reg->off of 14. */ - {20, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off * (14) which is 16. Then the variable offset is 4-byte * aligned, so the total offset is 4-byte aligned and * meets the load's requirements. 
*/ - {24, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {24, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5 packet pointer, * resulting in reg->off value of 14. */ - {27, "R5_w=pkt(id=0,off=14,r=8"}, + {26, "R5_w=pkt(id=0,off=14,r=8"}, /* Variable offset is added to R5, resulting in a - * variable offset of (4n). See comment for insn #19 - * for R4 = R5 trick. + * variable offset of (4n). */ - {29, "R4_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {29, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant is added to R5 again, setting reg->off to 18. */ - {30, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* And once more we add a variable; resulting var_off * is still (4n), fixed offset is not changed. * Also, we create a new reg->id. */ - {32, "R4_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, - {32, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (18) * which is 20. Then the variable offset is (4n), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {35, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, - {35, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, }, }, {
This reverts commit 1952a4d5e4cf610336b9c9ab52b1fc4e42721cf3.
The backport of the bpf precision tracking related changes caused the bpf verifier to panic while loading certain bpf progs, so revert them.
Link: https://lkml.kernel.org/r/20250605070921.GA3795@bytedance/ Reported-by: Wei Wei weiwei.danny@bytedance.com Signed-off-by: Aaron Lu ziqianlu@bytedance.com --- kernel/bpf/verifier.c | 37 ------------------------------------- 1 file changed, 37 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e6d50e371a2b8..b3cbfda41d9cf 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2053,31 +2053,6 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, } }
-static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st) -{ - struct bpf_func_state *func; - struct bpf_reg_state *reg; - int i, j; - - for (i = 0; i <= st->curframe; i++) { - func = st->frame[i]; - for (j = 0; j < BPF_REG_FP; j++) { - reg = &func->regs[j]; - if (reg->type != SCALAR_VALUE) - continue; - reg->precise = false; - } - for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { - if (!is_spilled_reg(&func->stack[j])) - continue; - reg = &func->stack[j].spilled_ptr; - if (reg->type != SCALAR_VALUE) - continue; - reg->precise = false; - } - } -} - /* * __mark_chain_precision() backtracks BPF program instruction sequence and * chain of verifier states making sure that register *regno* (if regno >= 0) @@ -2156,14 +2131,6 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_ * be imprecise. If any child state does require this register to be precise, * we'll mark it precise later retroactively during precise markings * propagation from child state to parent states. - * - * Skipping precise marking setting in current state is a mild version of - * relying on the above observation. But we can utilize this property even - * more aggressively by proactively forgetting any precise marking in the - * current state (which we inherited from the parent state), right before we - * checkpoint it and branch off into new child state. This is done by - * mark_all_scalars_imprecise() to hopefully get more permissive and generic - * finalized states which help in short circuiting more future states. */ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno, int spi) @@ -9928,10 +9895,6 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) env->prev_jmps_processed = env->jmps_processed; env->prev_insn_processed = env->insn_processed;
- /* forget precise markings we inherited, see __mark_chain_precision */ - if (env->bpf_capable) - mark_all_scalars_imprecise(env, cur); - /* add new state to the head of linked list */ new = &new_sl->state; err = copy_verifier_state(new, cur);
This reverts commit 7ca3e7459f4a5795e78b14390635879f534d9741.
The backport of the bpf precision tracking related changes caused the bpf verifier to panic while loading certain bpf progs, so revert them.
Link: https://lkml.kernel.org/r/20250605070921.GA3795@bytedance/ Reported-by: Wei Wei weiwei.danny@bytedance.com Signed-off-by: Aaron Lu ziqianlu@bytedance.com --- kernel/bpf/verifier.c | 103 +++++------------------------------------- 1 file changed, 12 insertions(+), 91 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b3cbfda41d9cf..ecd0d04ff8e61 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2028,11 +2028,8 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
/* big hammer: mark all scalars precise in this path. * pop_stack may still get !precise scalars. - * We also skip current state and go straight to first parent state, - * because precision markings in current non-checkpointed state are - * not needed. See why in the comment in __mark_chain_precision below. */ - for (st = st->parent; st; st = st->parent) { + for (; st; st = st->parent) for (i = 0; i <= st->curframe; i++) { func = st->frame[i]; for (j = 0; j < BPF_REG_FP; j++) { @@ -2050,88 +2047,8 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, reg->precise = true; } } - } }
-/* - * __mark_chain_precision() backtracks BPF program instruction sequence and - * chain of verifier states making sure that register *regno* (if regno >= 0) - * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked - * SCALARS, as well as any other registers and slots that contribute to - * a tracked state of given registers/stack slots, depending on specific BPF - * assembly instructions (see backtrack_insns() for exact instruction handling - * logic). This backtracking relies on recorded jmp_history and is able to - * traverse entire chain of parent states. This process ends only when all the - * necessary registers/slots and their transitive dependencies are marked as - * precise. - * - * One important and subtle aspect is that precise marks *do not matter* in - * the currently verified state (current state). It is important to understand - * why this is the case. - * - * First, note that current state is the state that is not yet "checkpointed", - * i.e., it is not yet put into env->explored_states, and it has no children - * states as well. It's ephemeral, and can end up either a) being discarded if - * compatible explored state is found at some point or BPF_EXIT instruction is - * reached or b) checkpointed and put into env->explored_states, branching out - * into one or more children states. - * - * In the former case, precise markings in current state are completely - * ignored by state comparison code (see regsafe() for details). Only - * checkpointed ("old") state precise markings are important, and if old - * state's register/slot is precise, regsafe() assumes current state's - * register/slot as precise and checks value ranges exactly and precisely. If - * states turn out to be compatible, current state's necessary precise - * markings and any required parent states' precise markings are enforced - * after the fact with propagate_precision() logic, after the fact. 
But it's - * important to realize that in this case, even after marking current state - * registers/slots as precise, we immediately discard current state. So what - * actually matters is any of the precise markings propagated into current - * state's parent states, which are always checkpointed (due to b) case above). - * As such, for scenario a) it doesn't matter if current state has precise - * markings set or not. - * - * Now, for the scenario b), checkpointing and forking into child(ren) - * state(s). Note that before current state gets to checkpointing step, any - * processed instruction always assumes precise SCALAR register/slot - * knowledge: if precise value or range is useful to prune jump branch, BPF - * verifier takes this opportunity enthusiastically. Similarly, when - * register's value is used to calculate offset or memory address, exact - * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to - * what we mentioned above about state comparison ignoring precise markings - * during state comparison, BPF verifier ignores and also assumes precise - * markings *at will* during instruction verification process. But as verifier - * assumes precision, it also propagates any precision dependencies across - * parent states, which are not yet finalized, so can be further restricted - * based on new knowledge gained from restrictions enforced by their children - * states. This is so that once those parent states are finalized, i.e., when - * they have no more active children state, state comparison logic in - * is_state_visited() would enforce strict and precise SCALAR ranges, if - * required for correctness. - * - * To build a bit more intuition, note also that once a state is checkpointed, - * the path we took to get to that state is not important. This is crucial - * property for state pruning. 
When state is checkpointed and finalized at - * some instruction index, it can be correctly and safely used to "short - * circuit" any *compatible* state that reaches exactly the same instruction - * index. I.e., if we jumped to that instruction from a completely different - * code path than original finalized state was derived from, it doesn't - * matter, current state can be discarded because from that instruction - * forward having a compatible state will ensure we will safely reach the - * exit. States describe preconditions for further exploration, but completely - * forget the history of how we got here. - * - * This also means that even if we needed precise SCALAR range to get to - * finalized state, but from that point forward *that same* SCALAR register is - * never used in a precise context (i.e., it's precise value is not needed for - * correctness), it's correct and safe to mark such register as "imprecise" - * (i.e., precise marking set to false). This is what we rely on when we do - * not set precise marking in current state. If no child state requires - * precision for any given SCALAR register, it's safe to dictate that it can - * be imprecise. If any child state does require this register to be precise, - * we'll mark it precise later retroactively during precise markings - * propagation from child state to parent states. - */ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno, int spi) { @@ -2149,10 +2066,6 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r if (!env->bpf_capable) return 0;
- /* Do sanity checks against current state of register and/or stack - * slot, but don't set precise flag in current state, as precision - * tracking in the current state is unnecessary. - */ func = st->frame[frame]; if (regno >= 0) { reg = &func->regs[regno]; @@ -2160,7 +2073,11 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r WARN_ONCE(1, "backtracing misuse"); return -EFAULT; } - new_marks = true; + if (!reg->precise) + new_marks = true; + else + reg_mask = 0; + reg->precise = true; }
while (spi >= 0) { @@ -2173,7 +2090,11 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r stack_mask = 0; break; } - new_marks = true; + if (!reg->precise) + new_marks = true; + else + stack_mask = 0; + reg->precise = true; break; }
@@ -9358,7 +9279,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, if (env->explore_alu_limits) return false; if (rcur->type == SCALAR_VALUE) { - if (!rold->precise) + if (!rold->precise && !rcur->precise) return true; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) &&
This reverts commit 2474ec58b96d8a028b046beabdf49f5475eefcf8.
The backport of the bpf precision tracking related changes caused the bpf verifier to panic while loading certain bpf progs, so revert them.
Link: https://lkml.kernel.org/r/20250605070921.GA3795@bytedance/ Reported-by: Wei Wei weiwei.danny@bytedance.com Signed-off-by: Aaron Lu ziqianlu@bytedance.com --- kernel/bpf/verifier.c | 35 +---------------------------------- 1 file changed, 1 insertion(+), 34 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ecd0d04ff8e61..7a8599355c5ed 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1359,7 +1359,7 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env, reg->type = SCALAR_VALUE; reg->var_off = tnum_unknown; reg->frameno = 0; - reg->precise = !env->bpf_capable; + reg->precise = env->subprog_cnt > 1 || !env->bpf_capable; __mark_reg_unbounded(reg); }
@@ -2102,42 +2102,12 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r return 0; if (!reg_mask && !stack_mask) return 0; - for (;;) { DECLARE_BITMAP(mask, 64); u32 history = st->jmp_history_cnt;
if (env->log.level & BPF_LOG_LEVEL) verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); - - if (last_idx < 0) { - /* we are at the entry into subprog, which - * is expected for global funcs, but only if - * requested precise registers are R1-R5 - * (which are global func's input arguments) - */ - if (st->curframe == 0 && - st->frame[0]->subprogno > 0 && - st->frame[0]->callsite == BPF_MAIN_FUNC && - stack_mask == 0 && (reg_mask & ~0x3e) == 0) { - bitmap_from_u64(mask, reg_mask); - for_each_set_bit(i, mask, 32) { - reg = &st->frame[0]->regs[i]; - if (reg->type != SCALAR_VALUE) { - reg_mask &= ~(1u << i); - continue; - } - reg->precise = true; - } - return 0; - } - - verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n", - st->frame[0]->subprogno, reg_mask, stack_mask); - WARN_ONCE(1, "verifier backtracking bug"); - return -EFAULT; - } - for (i = last_idx;;) { if (skip_first) { err = 0; @@ -11896,9 +11866,6 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog) 0 /* frameno */, subprog);
- state->first_insn_idx = env->subprog_info[subprog].start; - state->last_insn_idx = -1; - regs = state->frame[state->curframe]->regs; if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { ret = btf_prepare_func_args(env, subprog, regs);
linux-stable-mirror@lists.linaro.org