This small patchset avoids a verifier bug warning triggered by conditional jumps that compare a register with itself while the register holds a scalar with a range.
v3:
- Enhance is_scalar_branch_taken() to handle scalar case. (Eduard)
- Update the selftest to cover all conditional jump opcodes. (Eduard)

v2: https://lore.kernel.org/bpf/20251025053017.2308823-1-kafai.wan@linux.dev/
- Enhance is_branch_taken() and is_scalar_branch_taken() to handle branch
  direction computation for same register. (Eduard and Alexei)
- Update the selftest.

v1: https://lore.kernel.org/bpf/20251022164457.1203756-1-kafai.wan@linux.dev/

---
KaFai Wan (2):
  bpf: Skip bounds adjustment for conditional jumps on same scalar register
  selftests/bpf: Add test for conditional jumps on same scalar register
 kernel/bpf/verifier.c                        |  33 ++++
 .../selftests/bpf/progs/verifier_bounds.c    | 154 ++++++++++++++++++
 2 files changed, 187 insertions(+)
When conditional jumps are performed on the same scalar register (e.g., r0 <= r0, r0 > r0, r0 < r0), the BPF verifier incorrectly attempts to adjust the register's min/max bounds. This leads to invalid range bounds and triggers a BUG warning.
The problematic BPF program:

  0: call bpf_get_prandom_u32
  1: w8 = 0x80000000
  2: r0 &= r8
  3: if r0 > r0 goto <exit>
Instruction 3 triggers a kernel warning:

  3: if r0 > r0 goto <exit>
  true_reg1: range bounds violation u64=[0x1, 0x0] s64=[0x1, 0x0]
    u32=[0x1, 0x0] s32=[0x1, 0x0] var_off=(0x0, 0x0)
  true_reg2: const tnum out of sync with range bounds
    u64=[0x0, 0xffffffffffffffff] s64=[0x8000000000000000, 0x7fffffffffffffff]
    var_off=(0x0, 0x0)
Comparing a register with itself should not change its bounds, and for most comparison operations the result is known in advance (e.g., r0 == r0 is always true, r0 < r0 is always false).
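For reference, the same-register outcome of every compare opcode can be enumerated as follows (a minimal userspace sketch of the intended logic, not the kernel code itself; the BPF_J* constants come from the UAPI header):

#include <linux/bpf.h>

/* Branch outcome when both operands are the same register:
 * 1 = always taken, 0 = never taken, -1 = depends on the value.
 */
static int same_reg_outcome(int opcode)
{
	switch (opcode) {
	case BPF_JEQ: case BPF_JGE: case BPF_JSGE:
	case BPF_JLE: case BPF_JSLE:
		return 1;	/* x == x, x >= x, x <= x always hold */
	case BPF_JNE: case BPF_JGT: case BPF_JSGT:
	case BPF_JLT: case BPF_JSLT:
		return 0;	/* x != x, x > x, x < x never hold */
	default:
		return -1;	/* e.g. BPF_JSET: depends on the value of x */
	}
}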
Fix this by:
1. Enhancing is_scalar_branch_taken() to properly handle branch direction
   computation for same-register comparisons across all BPF jump operations
2. Adding an early return in reg_set_min_max() to skip bounds adjustment
   when the branch direction is unknown (e.g., BPF_JSET) for the same
   register
The fix ensures that unnecessary bounds adjustments are skipped, preventing the verifier bug while maintaining correct branch direction analysis.
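The only case whose direction can remain unknown is BPF_JSET: since r & r == r, "if r & r" is taken iff r != 0, which is decidable only when r is a known constant or its signed range excludes zero. A standalone sketch of that rule, mirroring the patch's tnum/smin1/smax1 reasoning (names here are illustrative):

#include <stdbool.h>

/* JSET on the same register: "if r & r" is taken iff r != 0. */
static int jset_same_reg(bool is_const, unsigned long long value,
			 long long smin, long long smax)
{
	if (is_const)
		return value != 0;	/* constant: decided outright */
	if (smin <= 0 && smax >= 0)
		return -1;		/* zero is in range: unknown */
	return 1;			/* range excludes zero: taken */
}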
Reported-by: Kaiyan Mei <M202472210@hust.edu.cn>
Reported-by: Yinhao Hu <dddddd@hust.edu.cn>
Closes: https://lore.kernel.org/all/1881f0f5.300df.199f2576a01.Coremail.kaiyanm@hust...
Signed-off-by: KaFai Wan <kafai.wan@linux.dev>
---
 kernel/bpf/verifier.c | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 542e23fb19c7..a571263f4ebe 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -15995,6 +15995,8 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
 	switch (opcode) {
 	case BPF_JEQ:
+		if (reg1 == reg2)
+			return 1;
 		/* constants, umin/umax and smin/smax checks would be
 		 * redundant in this case because they all should match
 		 */
@@ -16021,6 +16023,8 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
 		}
 		break;
 	case BPF_JNE:
+		if (reg1 == reg2)
+			return 0;
 		/* constants, umin/umax and smin/smax checks would be
 		 * redundant in this case because they all should match
 		 */
@@ -16047,6 +16051,12 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
 		}
 		break;
 	case BPF_JSET:
+		if (reg1 == reg2) {
+			if (tnum_is_const(t1))
+				return t1.value != 0;
+			else
+				return (smin1 <= 0 && smax1 >= 0) ? -1 : 1;
+		}
 		if (!is_reg_const(reg2, is_jmp32)) {
 			swap(reg1, reg2);
 			swap(t1, t2);
@@ -16059,48 +16069,64 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
 			return 0;
 		break;
 	case BPF_JGT:
+		if (reg1 == reg2)
+			return 0;
 		if (umin1 > umax2)
 			return 1;
 		else if (umax1 <= umin2)
 			return 0;
 		break;
 	case BPF_JSGT:
+		if (reg1 == reg2)
+			return 0;
 		if (smin1 > smax2)
 			return 1;
 		else if (smax1 <= smin2)
 			return 0;
 		break;
 	case BPF_JLT:
+		if (reg1 == reg2)
+			return 0;
 		if (umax1 < umin2)
 			return 1;
 		else if (umin1 >= umax2)
 			return 0;
 		break;
 	case BPF_JSLT:
+		if (reg1 == reg2)
+			return 0;
 		if (smax1 < smin2)
 			return 1;
 		else if (smin1 >= smax2)
 			return 0;
 		break;
 	case BPF_JGE:
+		if (reg1 == reg2)
+			return 1;
 		if (umin1 >= umax2)
 			return 1;
 		else if (umax1 < umin2)
 			return 0;
 		break;
 	case BPF_JSGE:
+		if (reg1 == reg2)
+			return 1;
 		if (smin1 >= smax2)
 			return 1;
 		else if (smax1 < smin2)
 			return 0;
 		break;
 	case BPF_JLE:
+		if (reg1 == reg2)
+			return 1;
 		if (umax1 <= umin2)
 			return 1;
 		else if (umin1 > umax2)
 			return 0;
 		break;
 	case BPF_JSLE:
+		if (reg1 == reg2)
+			return 1;
 		if (smax1 <= smin2)
 			return 1;
 		else if (smin1 > smax2)
@@ -16439,6 +16465,13 @@ static int reg_set_min_max(struct bpf_verifier_env *env,
 	if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE)
 		return 0;
 
+	/* We compute branch direction for same SCALAR_VALUE registers in
+	 * is_scalar_branch_taken(). For unknown branch directions (e.g., BPF_JSET)
+	 * on the same registers, we don't need to adjust the min/max values.
+	 */
+	if (false_reg1 == false_reg2)
+		return 0;
+
 	/* fallthrough (FALSE) branch */
 	regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32);
 	reg_bounds_sync(false_reg1);
On Fri, Oct 31, 2025 at 8:44 AM KaFai Wan <kafai.wan@linux.dev> wrote:
> When conditional jumps are performed on the same scalar register (e.g.,
> r0 <= r0, r0 > r0, r0 < r0), the BPF verifier incorrectly attempts to
> adjust the register's min/max bounds. This leads to invalid range bounds
> and triggers a BUG warning.
>
> [...]
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 542e23fb19c7..a571263f4ebe 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -15995,6 +15995,8 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
>  	switch (opcode) {
>  	case BPF_JEQ:
> +		if (reg1 == reg2)
> +			return 1;
>  		/* constants, umin/umax and smin/smax checks would be
>  		 * redundant in this case because they all should match
>  		 */
> @@ -16021,6 +16023,8 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
>  		}
>  		break;
>  	case BPF_JNE:
> +		if (reg1 == reg2)
> +			return 0;
>  		/* constants, umin/umax and smin/smax checks would be
>  		 * redundant in this case because they all should match
>  		 */
> @@ -16047,6 +16051,12 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
>  		}
>  		break;
>  	case BPF_JSET:
> +		if (reg1 == reg2) {
> +			if (tnum_is_const(t1))
> +				return t1.value != 0;
> +			else
> +				return (smin1 <= 0 && smax1 >= 0) ? -1 : 1;
> +		}
>  		if (!is_reg_const(reg2, is_jmp32)) {
>  			swap(reg1, reg2);
>  			swap(t1, t2);
> @@ -16059,48 +16069,64 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
>  			return 0;
>  		break;
>  	case BPF_JGT:
> +		if (reg1 == reg2)
> +			return 0;
>  		if (umin1 > umax2)
>  			return 1;
>  		else if (umax1 <= umin2)
>  			return 0;
>  		break;
>  	case BPF_JSGT:
> +		if (reg1 == reg2)
> +			return 0;
This is uglier than the previous version. reg1 == reg2 is syzbot territory. We shouldn't uglify the code everywhere because of it.
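For illustration only (not a concrete counter-proposal made in this thread), the repetition could be confined to a single early block in is_scalar_branch_taken(), reusing its existing t1/smin1/smax1 locals:

	/* hypothetical hoisted form: decide same-register outcomes once,
	 * before the per-opcode switch, instead of touching every case
	 */
	if (reg1 == reg2) {
		switch (opcode) {
		case BPF_JEQ: case BPF_JGE: case BPF_JSGE:
		case BPF_JLE: case BPF_JSLE:
			return 1;	/* always taken */
		case BPF_JNE: case BPF_JGT: case BPF_JSGT:
		case BPF_JLT: case BPF_JSLT:
			return 0;	/* never taken */
		case BPF_JSET:
			if (tnum_is_const(t1))
				return t1.value != 0;
			return (smin1 <= 0 && smax1 >= 0) ? -1 : 1;
		}
	}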
pw-bot: cr
Add test cases to verify the correctness of the BPF verifier's branch analysis when conditional jumps are performed on the same scalar register. Also make sure that JGT does not trigger the verifier BUG.
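The new cases can be exercised with the usual BPF selftests flow (assuming a configured kernel tree; verifier_bounds is the test_progs name covering this file):

  $ cd tools/testing/selftests/bpf
  $ make
  $ ./test_progs -t verifier_bounds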
Signed-off-by: KaFai Wan <kafai.wan@linux.dev>
---
 .../selftests/bpf/progs/verifier_bounds.c | 154 ++++++++++++++++++
 1 file changed, 154 insertions(+)
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index 0a72e0228ea9..e975dc285db6 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -1709,4 +1709,158 @@ __naked void jeq_disagreeing_tnums(void *ctx)
 		      : __clobber_all);
 }
 
+SEC("socket") +__description("conditional jump on same register, branch taken") +__not_msg("20: (b7) r0 = 1 {{.*}} R0=1") +__success __log_level(2) +__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) +__naked void condition_jump_on_same_register(void *ctx) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + w8 = 0x80000000; \ + r0 &= r8; \ + if r0 == r0 goto +1; \ + goto l1_%=; \ + if r0 >= r0 goto +1; \ + goto l1_%=; \ + if r0 s>= r0 goto +1; \ + goto l1_%=; \ + if r0 <= r0 goto +1; \ + goto l1_%=; \ + if r0 s<= r0 goto +1; \ + goto l1_%=; \ + if r0 != r0 goto l1_%=; \ + if r0 > r0 goto l1_%=; \ + if r0 s> r0 goto l1_%=; \ + if r0 < r0 goto l1_%=; \ + if r0 s< r0 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +l1_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("jset on same register, constant value branch taken") +__not_msg("7: (b7) r0 = 1 {{.*}} R0=1") +__success __log_level(2) +__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) +__naked void jset_on_same_register_1(void *ctx) +{ + asm volatile(" \ + r0 = 0; \ + if r0 & r0 goto l1_%=; \ + r0 = 1; \ + if r0 & r0 goto +1; \ + goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +l1_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("jset on same register, scalar value branch taken") +__not_msg("12: (b7) r0 = 1 {{.*}} R0=1") +__success __log_level(2) +__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) +__naked void jset_on_same_register_2(void *ctx) +{ + asm volatile(" \ + /* range [1;2] */ \ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x1; \ + r0 += 1; \ + if r0 & r0 goto +1; \ + goto l1_%=; \ + /* range [-2;-1] */ \ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x1; \ + r0 -= 2; \ + if r0 & r0 goto +1; \ + goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +l1_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("jset on same register, scalar value unknown branch 1") +__msg("3: (b7) r0 = 0 {{.*}} R0=0") +__msg("5: (b7) r0 = 1 {{.*}} R0=1") +__success __log_level(2) +__flag(BPF_F_TEST_REG_INVARIANTS) +__naked void jset_on_same_register_3(void *ctx) +{ + asm volatile(" \ + /* range [0;1] */ \ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x1; \ + if r0 & r0 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +l1_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("jset on same register, scalar value unknown branch 2") +__msg("4: (b7) r0 = 0 {{.*}} R0=0") +__msg("6: (b7) r0 = 1 {{.*}} R0=1") +__success __log_level(2) +__flag(BPF_F_TEST_REG_INVARIANTS) +__naked void jset_on_same_register_4(void *ctx) +{ + asm volatile(" \ + /* range [-1;0] */ \ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x1; \ + r0 -= 1; \ + if r0 & r0 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +l1_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("jset on same register, scalar value unknown branch 3") +__msg("4: (b7) r0 = 0 {{.*}} R0=0") +__msg("6: (b7) r0 = 1 {{.*}} R0=1") +__success __log_level(2) +__flag(BPF_F_TEST_REG_INVARIANTS) +__naked void jset_on_same_register_5(void *ctx) +{ + asm volatile(" \ + /* range [-1;-1] */ \ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x2; \ + r0 -= 1; \ + if r0 & r0 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +l1_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + char _license[] SEC("license") = "GPL";