The previous commit improves the precision of scalar(32)_min_max_add and scalar(32)_min_max_sub. The improvement in precision occurs in cases where all outcomes overflow or underflow, respectively.
This commit adds selftests that exercise those cases.
This commit also adds selftests for cases where the output register state bounds for u(32)_min/u(32)_max are conservatively set to unbounded (when there is partial overflow or underflow).
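For reference, the rule these tests exercise can be sketched in plain C. The helper below is an illustrative reconstruction of the u64 bounds logic described above (the function name is ours, and __builtin_add_overflow stands in for the kernel's check_add_overflow), not the verifier's verbatim code:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of u64 bounds propagation for BPF_ADD: if the umin and umax
 * computations either both wrap or both stay in range, the interval
 * [umin, umax] remains exact modulo 2^64; if exactly one wraps, the
 * set of outcomes is no longer a contiguous interval, so the bounds
 * are conservatively reset to unbounded.
 */
static void sketch_umin_umax_add(uint64_t *dst_umin, uint64_t *dst_umax,
				 uint64_t src_umin, uint64_t src_umax)
{
	bool min_ovf = __builtin_add_overflow(*dst_umin, src_umin, dst_umin);
	bool max_ovf = __builtin_add_overflow(*dst_umax, src_umax, dst_umax);

	if (min_ovf != max_ovf) {	/* partial overflow */
		*dst_umin = 0;
		*dst_umax = UINT64_MAX;
	}
}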
Signed-off-by: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
Co-developed-by: Matan Shachnai <m.shachnai@rutgers.edu>
Signed-off-by: Matan Shachnai <m.shachnai@rutgers.edu>
Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
---
 .../selftests/bpf/progs/verifier_bounds.c | 161 ++++++++++++++++++
 1 file changed, 161 insertions(+)
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index 30e16153fdf1..31986f6c609e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -1371,4 +1371,165 @@ __naked void mult_sign_ovf(void)
 		  __imm(bpf_skb_store_bytes)
 		: __clobber_all);
 }
+
+SEC("socket")
+__description("64-bit addition, all outcomes overflow")
+__success __log_level(2)
+__msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)")
+__retval(0)
+__naked void add64_full_overflow(void)
+{
+	asm volatile (
+	"r4 = 0;"
+	"r4 = -r4;"
+	"r3 = 0xa000000000000000 ll;"
+	"r3 |= r4;"
+	"r3 += r3;"
+	"r0 = 0;"
+	"exit"
+	:
+	:
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit addition, partial overflow, result in unbounded reg")
+__success __log_level(2)
+__msg("4: (0f) r3 += r3 {{.*}} R3_w=scalar()")
+__retval(0)
+__naked void add64_partial_overflow(void)
+{
+	asm volatile (
+	"r4 = 0;"
+	"r4 = -r4;"
+	"r3 = 2;"
+	"r3 |= r4;"
+	"r3 += r3;"
+	"r0 = 0;"
+	"exit"
+	:
+	:
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit addition overflow, all outcomes overflow")
+__success __log_level(2)
+__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void add32_full_overflow(void)
+{
+	asm volatile (
+	"w4 = 0;"
+	"w4 = -w4;"
+	"w3 = 0xa0000000;"
+	"w3 |= w4;"
+	"w3 += w3;"
+	"r0 = 0;"
+	"exit"
+	:
+	:
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit addition, partial overflow, result in unbounded u32 bounds")
+__success __log_level(2)
+__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void add32_partial_overflow(void)
+{
+	asm volatile (
+	"w4 = 0;"
+	"w4 = -w4;"
+	"w3 = 2;"
+	"w3 |= w4;"
+	"w3 += w3;"
+	"r0 = 0;"
+	"exit"
+	:
+	:
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit subtraction, all outcomes underflow")
+__success __log_level(2)
+__msg("6: (1f) r3 -= r1 {{.*}} R3_w=scalar(umin=1,umax=0x8000000000000000)")
+__retval(0)
+__naked void sub64_full_overflow(void)
+{
+	asm volatile (
+	"r1 = 0;"
+	"r1 = -r1;"
+	"r2 = 0x8000000000000000 ll;"
+	"r1 |= r2;"
+	"r3 = 0;"
+	"r3 -= r1;"
+	"r0 = 0;"
+	"exit"
+	:
+	:
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit subtraction, partial overflow, result in unbounded reg")
+__success __log_level(2)
+__msg("3: (1f) r3 -= r2 {{.*}} R3_w=scalar()")
+__retval(0)
+__naked void sub64_partial_overflow(void)
+{
+	asm volatile (
+	"r3 = 0;"
+	"r3 = -r3;"
+	"r2 = 1;"
+	"r3 -= r2;"
+	"r0 = 0;"
+	"exit"
+	:
+	:
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit subtraction overflow, all outcomes underflow")
+__success __log_level(2)
+__msg("5: (1c) w3 -= w1 {{.*}} R3_w=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void sub32_full_overflow(void)
+{
+	asm volatile (
+	"w1 = 0;"
+	"w1 = -w1;"
+	"w2 = 0x80000000;"
+	"w1 |= w2;"
+	"w3 = 0;"
+	"w3 -= w1;"
+	"r0 = 0;"
+	"exit"
+	:
+	:
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit subtraction, partial overflow, result in unbounded u32 bounds")
+__success __log_level(2)
+__msg("3: (1c) w3 -= w2 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void sub32_partial_overflow(void)
+{
+	asm volatile (
+	"w3 = 0;"
+	"w3 = -w3;"
+	"w2 = 1;"
+	"w3 -= w2;"
+	"r0 = 0;"
+	"exit"
+	:
+	:
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
On Mon, 2025-06-23 at 00:03 -0400, Harishankar Vishwanathan wrote:
> The previous commit improves the precision of scalar(32)_min_max_add
> and scalar(32)_min_max_sub. The improvement in precision occurs in
> cases where all outcomes overflow or underflow, respectively.
>
> This commit adds selftests that exercise those cases.
>
> This commit also adds selftests for cases where the output register
> state bounds for u(32)_min/u(32)_max are conservatively set to
> unbounded (when there is partial overflow or underflow).
>
> Signed-off-by: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
> Co-developed-by: Matan Shachnai <m.shachnai@rutgers.edu>
> Signed-off-by: Matan Shachnai <m.shachnai@rutgers.edu>
> Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
Thank you for adding these tests. Even with "human readable" numbers, it took me 15-20 minutes to verify them :)
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
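The subtraction numbers can be spot-checked the same way; the following standalone sketch (not part of the patch) verifies the sub64_full_overflow expectation, where r1 ends up in [0x8000000000000000, 0xffffffffffffffff] before "r3 -= r1":

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t umin = 0x8000000000000000ull;
	uint64_t umax = 0xffffffffffffffffull;

	/* With r3 = 0, the new umin comes from subtracting the old umax
	 * and vice versa; both underflow, so [1, 0x8000000000000000]
	 * is exact. */
	assert((uint64_t)0 - umax == 1ull);
	assert((uint64_t)0 - umin == 0x8000000000000000ull);
	return 0;
}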
>  .../selftests/bpf/progs/verifier_bounds.c | 161 ++++++++++++++++++
>  1 file changed, 161 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
> index 30e16153fdf1..31986f6c609e 100644
> --- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
> +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
> @@ -1371,4 +1371,165 @@ __naked void mult_sign_ovf(void)
>  		  __imm(bpf_skb_store_bytes)
>  		: __clobber_all);
>  }
> +
> +SEC("socket")
> +__description("64-bit addition, all outcomes overflow")
> +__success __log_level(2)
> +__msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)")
> +__retval(0)
> +__naked void add64_full_overflow(void)
> +{
> +	asm volatile (
> +	"r4 = 0;"
> +	"r4 = -r4;"
Nit: there is a change in the works that would make the negation instruction propagate ranges, so "r4 = -r4" would no longer produce an unbound scalar; a better way to get an unbound scalar here is e.g. a call to bpf_get_prandom_u32() or a read from a constant global map. Depending on the order in which the patches are accepted, this rework will fall either on you or on the other patch-set author.
- "r3 = 0xa000000000000000 ll;"
- "r3 |= r4;"
- "r3 += r3;"
- "r0 = 0;"
- "exit"
- :
- :
- : __clobber_all);
+}
[...]
On Tue, Jun 24, 2025 at 3:45 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
> On Mon, 2025-06-23 at 00:03 -0400, Harishankar Vishwanathan wrote:
> > The previous commit improves the precision of scalar(32)_min_max_add
> > and scalar(32)_min_max_sub. The improvement in precision occurs in
> > cases where all outcomes overflow or underflow, respectively.
> >
> > This commit adds selftests that exercise those cases.
> >
> > This commit also adds selftests for cases where the output register
> > state bounds for u(32)_min/u(32)_max are conservatively set to
> > unbounded (when there is partial overflow or underflow).
> >
> > Signed-off-by: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
> > Co-developed-by: Matan Shachnai <m.shachnai@rutgers.edu>
> > Signed-off-by: Matan Shachnai <m.shachnai@rutgers.edu>
> > Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
> Thank you for adding these tests. Even with "human readable" numbers,
> it took me 15-20 minutes to verify them :)
>
> Acked-by: Eduard Zingerman <eddyz87@gmail.com>
> >  .../selftests/bpf/progs/verifier_bounds.c | 161 ++++++++++++++++++
> >  1 file changed, 161 insertions(+)
> >
> > diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
> > index 30e16153fdf1..31986f6c609e 100644
> > --- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
> > +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
> > @@ -1371,4 +1371,165 @@ __naked void mult_sign_ovf(void)
> >  		  __imm(bpf_skb_store_bytes)
> >  		: __clobber_all);
> >  }
> > +
> > +SEC("socket")
> > +__description("64-bit addition, all outcomes overflow")
> > +__success __log_level(2)
> > +__msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)")
> > +__retval(0)
> > +__naked void add64_full_overflow(void)
> > +{
> > +	asm volatile (
> > +	"r4 = 0;"
> > +	"r4 = -r4;"
> Nit: there is a change in the works that would make the negation
> instruction propagate ranges, so "r4 = -r4" would no longer produce
> an unbound scalar; a better way to get an unbound scalar here is
> e.g. a call to bpf_get_prandom_u32() or a read from a constant
> global map. Depending on the order in which the patches are
> accepted, this rework will fall either on you or on the other
> patch-set author.
Fixed them up like below while applying:
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index 31986f6c609e..e52a24e15b34 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -1380,15 +1380,15 @@ __retval(0)
 __naked void add64_full_overflow(void)
 {
 	asm volatile (
-	"r4 = 0;"
-	"r4 = -r4;"
+	"call %[bpf_get_prandom_u32];"
+	"r4 = r0;"
 	"r3 = 0xa000000000000000 ll;"
 	"r3 |= r4;"
 	"r3 += r3;"
 	"r0 = 0;"
 	"exit"
 	:
-	:
+	: __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
@@ -1400,15 +1400,15 @@ __retval(0)
 __naked void add64_partial_overflow(void)
 {
 	asm volatile (
-	"r4 = 0;"
-	"r4 = -r4;"
+	"call %[bpf_get_prandom_u32];"
+	"r4 = r0;"
 	"r3 = 2;"
 	"r3 |= r4;"
 	"r3 += r3;"
 	"r0 = 0;"
 	"exit"
 	:
-	:
+	: __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
etc.