Hi,
Well, given that the HID changes haven't moved much over the past revisions and that I am cc-ing a bunch of people, I have dropped them while we focus on the last 2 requirements in the bpf-core changes.
I'll submit a HID-targeted series once we get these in the tree, which should make things a lot more independent.
For reference, the whole reasoning behind these 2 main changes is at https://lore.kernel.org/bpf/20220902132938.2409206-1-benjamin.tissoires@redh...
Compared to v10 (in addition to dropping the HID changes), I have changed the selftests so we can test both light skeletons and libbpf calls.
Cheers,
Benjamin
Benjamin Tissoires (7):
  selftests/bpf: regroup and declare similar kfuncs selftests in an array
  bpf: split btf_check_subprog_arg_match in two
  bpf/verifier: allow all functions to read user provided context
  selftests/bpf: add test for accessing ctx from syscall program type
  bpf/btf: bump BTF_KFUNC_SET_MAX_CNT
  bpf/verifier: allow kfunc to return an allocated mem
  selftests/bpf: Add tests for kfunc returning a memory pointer
 include/linux/bpf.h                           |  11 +-
 include/linux/bpf_verifier.h                  |   2 +
 include/linux/btf.h                           |  10 +
 kernel/bpf/btf.c                              | 149 ++++++++++--
 kernel/bpf/verifier.c                         |  66 +++--
 net/bpf/test_run.c                            |  37 +++
 tools/testing/selftests/bpf/Makefile          |   5 +-
 .../selftests/bpf/prog_tests/kfunc_call.c     | 227 ++++++++++++++++--
 .../selftests/bpf/progs/kfunc_call_fail.c     | 160 ++++++++++++
 .../selftests/bpf/progs/kfunc_call_test.c     |  71 ++++++
 10 files changed, 678 insertions(+), 60 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/progs/kfunc_call_fail.c
Similar to tools/testing/selftests/bpf/prog_tests/dynptr.c: we declare an array of tests that we run one by one in a for loop.
Followup patches will add more similar tests, so avoid a lot of copy/paste by grouping the declarations in an array.
For light skeletons, we have to rely on the offsetof() macro so we can statically declare which program we are using. In the libbpf case, we can rely on bpf_object__find_program_by_name(). So also change the Makefile to generate both light skeletons and normal ones.
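For readers who want the shape of the mechanism up front, here is a rough sketch of the two lookup paths described above (illustration only: the helper names are made up, the skeleton types come from the generated kfunc_call_test.skel.h / kfunc_call_test.lskel.h headers, error handling trimmed):

```
#include <stddef.h>
#include <bpf/libbpf.h>
#include "kfunc_call_test.skel.h"	/* libbpf skeleton */
#include "kfunc_call_test.lskel.h"	/* light skeleton */

struct kfunc_test_params {
	const char *prog_name;
	unsigned long lskel_prog_desc_offset;	/* offsetof(..._lskel, progs.<name>) */
	int retval;
};

/* libbpf skeleton: resolve the program by name at runtime */
static int prog_fd_from_skel(struct kfunc_call_test *skel,
			     const struct kfunc_test_params *p)
{
	struct bpf_program *prog;

	prog = bpf_object__find_program_by_name(skel->obj, p->prog_name);
	return prog ? bpf_program__fd(prog) : -1;
}

/* light skeleton: there is no bpf_object to search, so use the statically
 * recorded offset of progs.<name> inside the lskel struct
 */
static int prog_fd_from_lskel(struct kfunc_call_test_lskel *lskel,
			      const struct kfunc_test_params *p)
{
	struct bpf_prog_desc *desc;

	desc = (struct bpf_prog_desc *)((char *)lskel + p->lskel_prog_desc_offset);
	return desc->prog_fd;
}
```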
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
---
changes in v11:
- use of both light skeletons and normal libbpf
new in v10
---
 tools/testing/selftests/bpf/Makefile          |  5 +-
 .../selftests/bpf/prog_tests/kfunc_call.c     | 81 +++++++++++++++----
 2 files changed, 68 insertions(+), 18 deletions(-)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index eecad99f1735..314216d77b8d 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -351,11 +351,12 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \ test_subskeleton.skel.h test_subskeleton_lib.skel.h \ test_usdt.skel.h
-LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \ +LSKELS := fentry_test.c fexit_test.c fexit_sleep.c \ test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \ map_ptr_kern.c core_kern.c core_kern_overflow.c # Generate both light skeleton and libbpf skeleton for these -LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test_subprog.c +LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test.c \ + kfunc_call_test_subprog.c SKEL_BLACKLIST += $$(LSKELS)
test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index eede7c304f86..9dfbe5355a2d 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -2,6 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <test_progs.h> #include <network_helpers.h> +#include "kfunc_call_test.skel.h" #include "kfunc_call_test.lskel.h" #include "kfunc_call_test_subprog.skel.h" #include "kfunc_call_test_subprog.lskel.h" @@ -9,9 +10,31 @@
#include "cap_helpers.h"
-static void test_main(void) +struct kfunc_test_params { + const char *prog_name; + unsigned long lskel_prog_desc_offset; + int retval; +}; + +#define TC_TEST(name, __retval) \ + { \ + .prog_name = #name, \ + .lskel_prog_desc_offset = offsetof(struct kfunc_call_test_lskel, progs.name), \ + .retval = __retval, \ + } + +static struct kfunc_test_params kfunc_tests[] = { + TC_TEST(kfunc_call_test1, 12), + TC_TEST(kfunc_call_test2, 3), + TC_TEST(kfunc_call_test_ref_btf_id, 0), +}; + +static void verify_success(struct kfunc_test_params *param) { - struct kfunc_call_test_lskel *skel; + struct kfunc_call_test_lskel *lskel = NULL; + struct bpf_prog_desc *lskel_prog; + struct kfunc_call_test *skel; + struct bpf_program *prog; int prog_fd, err; LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = &pkt_v4, @@ -19,26 +42,53 @@ static void test_main(void) .repeat = 1, );
- skel = kfunc_call_test_lskel__open_and_load(); + /* first test with normal libbpf */ + skel = kfunc_call_test__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel")) return;
- prog_fd = skel->progs.kfunc_call_test1.prog_fd; - err = bpf_prog_test_run_opts(prog_fd, &topts); - ASSERT_OK(err, "bpf_prog_test_run(test1)"); - ASSERT_EQ(topts.retval, 12, "test1-retval"); + prog = bpf_object__find_program_by_name(skel->obj, param->prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup;
- prog_fd = skel->progs.kfunc_call_test2.prog_fd; + prog_fd = bpf_program__fd(prog); err = bpf_prog_test_run_opts(prog_fd, &topts); - ASSERT_OK(err, "bpf_prog_test_run(test2)"); - ASSERT_EQ(topts.retval, 3, "test2-retval"); + if (!ASSERT_OK(err, param->prog_name)) + goto cleanup; + + if (!ASSERT_EQ(topts.retval, param->retval, "retval")) + goto cleanup; + + /* second test with light skeletons */ + lskel = kfunc_call_test_lskel__open_and_load(); + if (!ASSERT_OK_PTR(lskel, "lskel")) + goto cleanup; + + lskel_prog = (struct bpf_prog_desc *)((char *)lskel + param->lskel_prog_desc_offset);
- prog_fd = skel->progs.kfunc_call_test_ref_btf_id.prog_fd; + prog_fd = lskel_prog->prog_fd; err = bpf_prog_test_run_opts(prog_fd, &topts); - ASSERT_OK(err, "bpf_prog_test_run(test_ref_btf_id)"); - ASSERT_EQ(topts.retval, 0, "test_ref_btf_id-retval"); + if (!ASSERT_OK(err, param->prog_name)) + goto cleanup; + + ASSERT_EQ(topts.retval, param->retval, "retval"); + +cleanup: + kfunc_call_test__destroy(skel); + if (lskel) + kfunc_call_test_lskel__destroy(lskel); +} + +static void test_main(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kfunc_tests); i++) { + if (!test__start_subtest(kfunc_tests[i].prog_name)) + continue;
- kfunc_call_test_lskel__destroy(skel); + verify_success(&kfunc_tests[i]); + } }
static void test_subprog(void) @@ -121,8 +171,7 @@ static void test_destructive(void)
void test_kfunc_call(void) { - if (test__start_subtest("main")) - test_main(); + test_main();
if (test__start_subtest("subprog")) test_subprog();
On Tue, 6 Sept 2022 at 17:13, Benjamin Tissoires benjamin.tissoires@redhat.com wrote:
Similar to tools/testing/selftests/bpf/prog_tests/dynptr.c: we declare an array of tests that we run one by one in a for loop.
Followup patches will add more similar-ish tests, so avoid a lot of copy paste by grouping the declaration in an array.
For light skeletons, we have to rely on the offsetof() macro so we can statically declare which program we are using. In the libbpf case, we can rely on bpf_object__find_program_by_name(). So also change the Makefile to generate both light skeletons and normal ones.
Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com
Acked-by: Kumar Kartikeya Dwivedi memxor@gmail.com
[...]
btf_check_subprog_arg_match() was used twice in verifier.c:
- when checking for type mismatches between a (sub)prog declaration and BTF
- when checking the call of a subprog to see if the provided arguments are correct and valid
This is problematic when we check whether the first argument of a program (pointer to ctx) is correctly accessed: to ensure we access valid memory in the ctx, the verifier assumes the pointer to the context is not null. This has the side effect of marking the program as accessing the entire context, even if the context is never dereferenced.
For example, with the current context-access check, the following eBPF program would fail with -EINVAL if the ctx is set to null from userspace:
```
SEC("syscall")
int prog(struct my_ctx *args)
{
	return 0;
}
```
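As an illustration of "ctx set to null from userspace", a minimal hypothetical snippet (assuming a prog_fd obtained from a skeleton) that runs such a program through BPF_PROG_TEST_RUN without providing a context, mirroring what the selftests later in this series do:

```
#include <bpf/libbpf.h>

/* run the syscall program above without any context buffer */
static int run_with_null_ctx(int prog_fd)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);	/* ctx_in stays NULL, ctx_size_in stays 0 */

	/* before this series such a run is rejected with -EINVAL because the
	 * whole ctx is considered accessed; with it, the program above runs fine
	 */
	return bpf_prog_test_run_opts(prog_fd, &topts);
}
```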
In that particular case, we do not want to actually check that the memory is correct while checking for the BTF validity, but we just want to ensure that the (sub)prog definition matches the BTF we have.
So split btf_check_subprog_arg_match() in two so we can actually check for the memory used when in a call, and ignore that part when not.
Note that a further patch is in preparation to disentangle btf_check_func_arg_match() from these two purposes, so for now we just hack around it by adding a boolean to this function.
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
---
no changes in v11
new in v10
---
 include/linux/bpf.h   |  2 ++
 kernel/bpf/btf.c      | 54 +++++++++++++++++++++++++++++++++++++++----
 kernel/bpf/verifier.c |  2 +-
 3 files changed, 52 insertions(+), 6 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 9c1674973e03..c9c72a089579 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1943,6 +1943,8 @@ int btf_distill_func_proto(struct bpf_verifier_log *log, struct bpf_reg_state; int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, struct bpf_reg_state *regs); +int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, + struct bpf_reg_state *regs); int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, const struct btf *btf, u32 func_id, struct bpf_reg_state *regs, diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 903719b89238..eca9ea78ee5f 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6170,7 +6170,8 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, const struct btf *btf, u32 func_id, struct bpf_reg_state *regs, bool ptr_to_mem_ok, - u32 kfunc_flags) + u32 kfunc_flags, + bool processing_call) { enum bpf_prog_type prog_type = resolve_prog_type(env->prog); bool rel = false, kptr_get = false, trusted_arg = false; @@ -6356,7 +6357,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, reg_ref_tname); return -EINVAL; } - } else if (ptr_to_mem_ok) { + } else if (ptr_to_mem_ok && processing_call) { const struct btf_type *resolve_ret; u32 type_size;
@@ -6431,7 +6432,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, return rel ? ref_regno : 0; }
-/* Compare BTF of a function with given bpf_reg_state. +/* Compare BTF of a function declaration with given bpf_reg_state. * Returns: * EFAULT - there is a verifier bug. Abort verification. * EINVAL - there is a type mismatch or BTF is not available. @@ -6458,7 +6459,50 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, return -EINVAL;
is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; - err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, 0); + err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, 0, false); + + /* Compiler optimizations can remove arguments from static functions + * or mismatched type can be passed into a global function. + * In such cases mark the function as unreliable from BTF point of view. + */ + if (err) + prog->aux->func_info_aux[subprog].unreliable = true; + return err; +} + +/* Compare BTF of a function call with given bpf_reg_state. + * Returns: + * EFAULT - there is a verifier bug. Abort verification. + * EINVAL - there is a type mismatch or BTF is not available. + * 0 - BTF matches with what bpf_reg_state expects. + * Only PTR_TO_CTX and SCALAR_VALUE states are recognized. + * + * NOTE: the code is duplicated from btf_check_subprog_arg_match() + * because btf_check_func_arg_match() is still doing both. Once that + * function is split in 2, we can call from here btf_check_subprog_arg_match() + * first, and then treat the calling part in a new code path. + */ +int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, + struct bpf_reg_state *regs) +{ + struct bpf_prog *prog = env->prog; + struct btf *btf = prog->aux->btf; + bool is_global; + u32 btf_id; + int err; + + if (!prog->aux->func_info) + return -EINVAL; + + btf_id = prog->aux->func_info[subprog].type_id; + if (!btf_id) + return -EFAULT; + + if (prog->aux->func_info_aux[subprog].unreliable) + return -EINVAL; + + is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; + err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, 0, true);
/* Compiler optimizations can remove arguments from static functions * or mismatched type can be passed into a global function. @@ -6474,7 +6518,7 @@ int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 kfunc_flags) { - return btf_check_func_arg_match(env, btf, func_id, regs, true, kfunc_flags); + return btf_check_func_arg_match(env, btf, func_id, regs, true, kfunc_flags, true); }
/* Convert BTF of a function into bpf_reg_state if possible diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 0194a36d0b36..d27fae3ce949 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6626,7 +6626,7 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn func_info_aux = env->prog->aux->func_info_aux; if (func_info_aux) is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; - err = btf_check_subprog_arg_match(env, subprog, caller->regs); + err = btf_check_subprog_call(env, subprog, caller->regs); if (err == -EFAULT) return err; if (is_global) {
On Tue, 6 Sept 2022 at 17:13, Benjamin Tissoires benjamin.tissoires@redhat.com wrote:
btf_check_subprog_arg_match() was used twice in verifier.c:
- when checking for the type mismatches between a (sub)prog declaration and BTF
- when checking the call of a subprog to see if the provided arguments are correct and valid
This is problematic when we check if the first argument of a program (pointer to ctx) is correctly accessed: To be able to ensure we access a valid memory in the ctx, the verifier assumes the pointer to context is not null. This has the side effect of marking the program accessing the entire context, even if the context is never dereferenced.
For example, by checking the context access with the current code, the following eBPF program would fail with -EINVAL if the ctx is set to null from the userspace:
SEC("syscall") int prog(struct my_ctx *args) { return 0; }
In that particular case, we do not want to actually check that the memory is correct while checking for the BTF validity, but we just want to ensure that the (sub)prog definition matches the BTF we have.
So split btf_check_subprog_arg_match() in two so we can actually check for the memory used when in a call, and ignore that part when not.
Note that a further patch is in preparation to disentangled btf_check_func_arg_match() from these two purposes, and so right now we just add a new hack around that by adding a boolean to this function.
Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com
Given I'll fix it properly in my kfunc rework, LGTM otherwise: Acked-by: Kumar Kartikeya Dwivedi memxor@gmail.com
[...]
When a function was trying to access data from context in a syscall eBPF program, the verifier was rejecting the call unless it was accessing the first element. This is because the syscall context is not known at compile time, and so we need to check this when actually accessing it.
Check for a valid memory access if there is no convert_ctx callback, and allow such a situation to happen.
There is a slight hiccup with subprogs: btf_check_subprog_arg_match() will check that the types match, which is a good thing, but to get an accurate result it hides the fact that the context register may be null. This causes env->prog->aux->max_ctx_offset to be set to the size of the context, which is incompatible with a NULL context.
Solve that last problem by storing max_ctx_offset before the type check and restoring it after.
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
---
no changes in v11
changes in v10:
- dropped the hunk in btf.c saving/restoring max_ctx_offset

changes in v9:
- rewrote the commit title and description
- made it so all functions can make use of context even if there is no convert_ctx
- remove the is_kfunc field in bpf_call_arg_meta

changes in v8:
- fixup comment
- return -EACCES instead of -EINVAL for consistency

changes in v7:
- renamed access_t into atype
- allow zero-byte read
- check_mem_access() to the correct offset/size

new in v6
---
 kernel/bpf/verifier.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d27fae3ce949..3f9e6fa92cde 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5233,6 +5233,25 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 				env, regno, reg->off, access_size,
 				zero_size_allowed, ACCESS_HELPER, meta);
+	case PTR_TO_CTX:
+		/* in case the function doesn't know how to access the context,
+		 * (because we are in a program of type SYSCALL for example), we
+		 * can not statically check its size.
+		 * Dynamically check it now.
+		 */
+		if (!env->ops->convert_ctx_access) {
+			enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
+			int offset = access_size - 1;
+
+			/* Allow zero-byte read from PTR_TO_CTX */
+			if (access_size == 0)
+				return zero_size_allowed ? 0 : -EACCES;
+
+			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
+						atype, -1, false);
+		}
+
+		fallthrough;
 	default: /* scalar_value or invalid ptr */
 		/* Allow zero-byte read from NULL, regardless of pointer type */
 		if (zero_size_allowed && access_size == 0 &&
We also need to export the kfunc set to the syscall program type, and then add a couple of eBPF programs that test those calls.
The first one checks for valid access, and the second one is OK from a static analysis point of view but fails at run time because we are trying to access outside of the allocated memory.
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
---
changes in v11:
- use new way of declaring tests

changes in v10:
- use new definitions for tests in an array
- add a new kfunc syscall_test_null_fail test

no changes in v9

no changes in v8

changes in v7:
- add 1 more case to ensure we can read the entire sizeof(ctx)
- add a test case for when the context is NULL

new in v6
---
 net/bpf/test_run.c                            |   1 +
 .../selftests/bpf/prog_tests/kfunc_call.c     | 143 +++++++++++++++++-
 .../selftests/bpf/progs/kfunc_call_fail.c     |  39 +++++
 .../selftests/bpf/progs/kfunc_call_test.c     |  38 +++++
 4 files changed, 214 insertions(+), 7 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/progs/kfunc_call_fail.c
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 25d8ecf105aa..f16baf977a21 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -1634,6 +1634,7 @@ static int __init bpf_prog_test_run_init(void)
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set); return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc, ARRAY_SIZE(bpf_prog_test_dtor_kfunc), THIS_MODULE); diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 9dfbe5355a2d..d5881c3331a8 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -2,6 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <test_progs.h> #include <network_helpers.h> +#include "kfunc_call_fail.skel.h" #include "kfunc_call_test.skel.h" #include "kfunc_call_test.lskel.h" #include "kfunc_call_test_subprog.skel.h" @@ -10,37 +11,96 @@
#include "cap_helpers.h"
+static size_t log_buf_sz = 1048576; /* 1 MB */ +static char obj_log_buf[1048576]; + +enum kfunc_test_type { + tc_test = 0, + syscall_test, + syscall_null_ctx_test, +}; + struct kfunc_test_params { const char *prog_name; unsigned long lskel_prog_desc_offset; int retval; + enum kfunc_test_type test_type; + const char *expected_err_msg; };
-#define TC_TEST(name, __retval) \ +#define __BPF_TEST_SUCCESS(name, __retval, type) \ { \ .prog_name = #name, \ .lskel_prog_desc_offset = offsetof(struct kfunc_call_test_lskel, progs.name), \ .retval = __retval, \ + .test_type = type, \ + .expected_err_msg = NULL, \ + } + +#define __BPF_TEST_FAIL(name, __retval, type, error_msg) \ + { \ + .prog_name = #name, \ + .lskel_prog_desc_offset = 0 /* unused when test is failing */, \ + .retval = __retval, \ + .test_type = type, \ + .expected_err_msg = error_msg, \ }
+#define TC_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, tc_test) +#define SYSCALL_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_test) +#define SYSCALL_NULL_CTX_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_null_ctx_test) + +#define SYSCALL_NULL_CTX_FAIL(name, retval, error_msg) \ + __BPF_TEST_FAIL(name, retval, syscall_null_ctx_test, error_msg) + static struct kfunc_test_params kfunc_tests[] = { + /* failure cases: + * if retval is 0 -> the program will fail to load and the error message is an error + * if retval is not 0 -> the program can be loaded but running it will gives the + * provided return value. The error message is thus the one + * from a successful load + */ + SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_fail, -EINVAL, "processed 4 insns"), + SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_null_fail, -EINVAL, "processed 4 insns"), + + /* success cases */ TC_TEST(kfunc_call_test1, 12), TC_TEST(kfunc_call_test2, 3), TC_TEST(kfunc_call_test_ref_btf_id, 0), + SYSCALL_TEST(kfunc_syscall_test, 0), + SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0), +}; + +struct syscall_test_args { + __u8 data[16]; + size_t size; };
static void verify_success(struct kfunc_test_params *param) { struct kfunc_call_test_lskel *lskel = NULL; + LIBBPF_OPTS(bpf_test_run_opts, topts); struct bpf_prog_desc *lskel_prog; struct kfunc_call_test *skel; struct bpf_program *prog; int prog_fd, err; - LIBBPF_OPTS(bpf_test_run_opts, topts, - .data_in = &pkt_v4, - .data_size_in = sizeof(pkt_v4), - .repeat = 1, - ); + struct syscall_test_args args = { + .size = 10, + }; + + switch (param->test_type) { + case syscall_test: + topts.ctx_in = &args; + topts.ctx_size_in = sizeof(args); + /* fallthrough */ + case syscall_null_ctx_test: + break; + case tc_test: + topts.data_in = &pkt_v4; + topts.data_size_in = sizeof(pkt_v4); + topts.repeat = 1; + break; + }
/* first test with normal libbpf */ skel = kfunc_call_test__open_and_load(); @@ -79,6 +139,72 @@ static void verify_success(struct kfunc_test_params *param) kfunc_call_test_lskel__destroy(lskel); }
+static void verify_fail(struct kfunc_test_params *param) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + LIBBPF_OPTS(bpf_test_run_opts, topts); + struct bpf_program *prog; + struct kfunc_call_fail *skel; + int prog_fd, err; + struct syscall_test_args args = { + .size = 10, + }; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + switch (param->test_type) { + case syscall_test: + topts.ctx_in = &args; + topts.ctx_size_in = sizeof(args); + /* fallthrough */ + case syscall_null_ctx_test: + break; + case tc_test: + topts.data_in = &pkt_v4; + topts.data_size_in = sizeof(pkt_v4); + break; + topts.repeat = 1; + } + + skel = kfunc_call_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "kfunc_call_fail__open_opts")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, param->prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, true); + + err = kfunc_call_fail__load(skel); + if (!param->retval) { + /* the verifier is supposed to complain and refuses to load */ + if (!ASSERT_ERR(err, "unexpected load success")) + goto out_err; + + } else { + /* the program is loaded but must dynamically fail */ + if (!ASSERT_OK(err, "unexpected load error")) + goto out_err; + + prog_fd = bpf_program__fd(prog); + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_EQ(err, param->retval, param->prog_name)) + goto out_err; + } + +out_err: + if (!ASSERT_OK_PTR(strstr(obj_log_buf, param->expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", param->expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + kfunc_call_fail__destroy(skel); +} + static void test_main(void) { int i; @@ -87,7 +213,10 @@ static void test_main(void) if (!test__start_subtest(kfunc_tests[i].prog_name)) continue;
- verify_success(&kfunc_tests[i]); + if (!kfunc_tests[i].expected_err_msg) + verify_success(&kfunc_tests[i]); + else + verify_fail(&kfunc_tests[i]); } }
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c new file mode 100644 index 000000000000..4168027f2ab1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> + +extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; + +struct syscall_test_args { + __u8 data[16]; + size_t size; +}; + +SEC("?syscall") +int kfunc_syscall_test_fail(struct syscall_test_args *args) +{ + bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args) + 1); + + return 0; +} + +SEC("?syscall") +int kfunc_syscall_test_null_fail(struct syscall_test_args *args) +{ + /* Must be called with args as a NULL pointer + * we do not check for it to have the verifier consider that + * the pointer might not be null, and so we can load it. + * + * So the following can not be added: + * + * if (args) + * return -22; + */ + + bpf_kfunc_call_test_mem_len_pass1(args, sizeof(*args)); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index 5aecbb9fdc68..94c05267e5e7 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -92,4 +92,42 @@ int kfunc_call_test_pass(struct __sk_buff *skb) return 0; }
+struct syscall_test_args { + __u8 data[16]; + size_t size; +}; + +SEC("syscall") +int kfunc_syscall_test(struct syscall_test_args *args) +{ + const int size = args->size; + + if (size > sizeof(args->data)) + return -7; /* -E2BIG */ + + bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data)); + bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args)); + bpf_kfunc_call_test_mem_len_pass1(&args->data, size); + + return 0; +} + +SEC("syscall") +int kfunc_syscall_test_null(struct syscall_test_args *args) +{ + /* Must be called with args as a NULL pointer + * we do not check for it to have the verifier consider that + * the pointer might not be null, and so we can load it. + * + * So the following can not be added: + * + * if (args) + * return -22; + */ + + bpf_kfunc_call_test_mem_len_pass1(args, 0); + + return 0; +} + char _license[] SEC("license") = "GPL";
On Tue, 6 Sept 2022 at 17:13, Benjamin Tissoires benjamin.tissoires@redhat.com wrote:
We need to also export the kfunc set to the syscall program type, and then add a couple of eBPF programs that are testing those calls.
The first one checks for valid access, and the second one is OK from a static analysis point of view but fails at run time because we are trying to access outside of the allocated memory.
Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com
CI is failing for test_progs-no_alu32: https://github.com/kernel-patches/bpf/runs/8220916615?check_suite_focus=true
[...]
+struct syscall_test_args {
__u8 data[16];
size_t size;
+};
+SEC("syscall") +int kfunc_syscall_test(struct syscall_test_args *args) +{
const int size = args->size;
if (size > sizeof(args->data))
return -7; /* -E2BIG */
Looks like it is due to this. The verifier is confused because:

r7 = args->data;
r1 = r7;

then it does r1 <<= 32; r1 >>= 32; clearing the upper 32 bits, so both r1 and r7 lose the id association that propagates the bounds of r1 learnt from its comparison with sizeof(args->data).
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
Later llvm assigns r7 to r2 for this call's 2nd arg. At this point the verifier still thinks r7 is unbounded, while to make a call with a mem, len pair you need a non-negative min value.
Easiest way might be to just do args->size & sizeof(args->data), as the verifier log says. You might still keep the error above. Others may have better ideas/insights.
bpf_kfunc_call_test_mem_len_pass1(&args->data, size);
return 0;
+}
+SEC("syscall") +int kfunc_syscall_test_null(struct syscall_test_args *args) +{
/* Must be called with args as a NULL pointer
* we do not check for it to have the verifier consider that
* the pointer might not be null, and so we can load it.
*
* So the following can not be added:
*
* if (args)
* return -22;
*/
bpf_kfunc_call_test_mem_len_pass1(args, 0);
return 0;
+}
char _license[] SEC("license") = "GPL";
2.36.1
On Wed, Sep 7, 2022 at 10:46 AM Kumar Kartikeya Dwivedi memxor@gmail.com wrote:
On Tue, 6 Sept 2022 at 17:13, Benjamin Tissoires benjamin.tissoires@redhat.com wrote:
We need to also export the kfunc set to the syscall program type, and then add a couple of eBPF programs that are testing those calls.
The first one checks for valid access, and the second one is OK from a static analysis point of view but fails at run time because we are trying to access outside of the allocated memory.
Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com
CI is failing for test_progs-no_alu32: https://github.com/kernel-patches/bpf/runs/8220916615?check_suite_focus=true
[...]
+struct syscall_test_args {
__u8 data[16];
size_t size;
+};
+SEC("syscall") +int kfunc_syscall_test(struct syscall_test_args *args) +{
const int size = args->size;
if (size > sizeof(args->data))
return -7; /* -E2BIG */
Looks like it is due to this. Verifier is confused because: r7 = args->data; r1 = r7;
then it does r1 <<= 32; r1 >>=32; clearing upper 32 bits, so both r1 and r7 lose the id association which propagates the bounds of r1 learnt from comparison of it with sizeof(args->data);
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
Later llvm assigns r7 to r2 for this call's 2nd arg. At this point the verifier still thinks r7 is unbounded, while to make a call with mem, len pair you need non-negative min value.
Easiest way might be to just do args->size & sizeof(args->data), as the verifier log says. You might still keep the error above. Others may have better ideas/insights.
I just did s/const int size/const long size/ to fix the issues.
Also fixed the commit log in patch 3 that talks about max_ctx_offset and did:

-	BTF_KFUNC_SET_MAX_CNT = 64,
+	BTF_KFUNC_SET_MAX_CNT = 256,
and applied.
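For reference, a sketch of what kfunc_syscall_test() looks like with that change applied (illustration only; everything except the int-to-long change is taken from the patch above, and the comment about why it helps is an assumption based on the discussion):

```
/* Sketch of kfunc_syscall_test() with s/const int size/const long size/
 * applied. With a 64-bit 'size' the compiler presumably no longer emits the
 * 32-bit shift/truncate pattern, so the bounds learnt from the comparison
 * below stay associated with the value passed as the len argument.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym;

struct syscall_test_args {
	__u8 data[16];
	size_t size;
};

SEC("syscall")
int kfunc_syscall_test(struct syscall_test_args *args)
{
	const long size = args->size;	/* was: const int size */

	if (size > sizeof(args->data))
		return -7; /* -E2BIG */

	bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
	bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
	bpf_kfunc_call_test_mem_len_pass1(&args->data, size);

	return 0;
}

char _license[] SEC("license") = "GPL";
```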
On Wed, Sep 7, 2022 at 8:09 PM Alexei Starovoitov alexei.starovoitov@gmail.com wrote:
On Wed, Sep 7, 2022 at 10:46 AM Kumar Kartikeya Dwivedi memxor@gmail.com wrote:
On Tue, 6 Sept 2022 at 17:13, Benjamin Tissoires benjamin.tissoires@redhat.com wrote:
We also need to export the kfunc set to the syscall program type, and then add a couple of eBPF programs that test those calls.
The first one checks for valid access, and the second one is OK from a static analysis point of view but fails at run time because we are trying to access outside of the allocated memory.
Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com
CI is failing for test_progs-no_alu32: https://github.com/kernel-patches/bpf/runs/8220916615?check_suite_focus=true
changes in v11:
- use new way of declaring tests
changes in v10:
- use new definitions for tests in an array
- add a new kfunc syscall_test_null_fail test
no changes in v9
no changes in v8
changes in v7:
- add 1 more case to ensure we can read the entire sizeof(ctx)
- add a test case for when the context is NULL
new in v6
 net/bpf/test_run.c                            |   1 +
 .../selftests/bpf/prog_tests/kfunc_call.c     | 143 +++++++++++++++++-
 .../selftests/bpf/progs/kfunc_call_fail.c     |  39 +++++
 .../selftests/bpf/progs/kfunc_call_test.c     |  38 +++++
 4 files changed, 214 insertions(+), 7 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/progs/kfunc_call_fail.c
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 25d8ecf105aa..f16baf977a21 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -1634,6 +1634,7 @@ static int __init bpf_prog_test_run_init(void)
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set); return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc, ARRAY_SIZE(bpf_prog_test_dtor_kfunc), THIS_MODULE);
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 9dfbe5355a2d..d5881c3331a8 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -2,6 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <test_progs.h> #include <network_helpers.h> +#include "kfunc_call_fail.skel.h" #include "kfunc_call_test.skel.h" #include "kfunc_call_test.lskel.h" #include "kfunc_call_test_subprog.skel.h" @@ -10,37 +11,96 @@
#include "cap_helpers.h"
+static size_t log_buf_sz = 1048576; /* 1 MB */
+static char obj_log_buf[1048576];
+enum kfunc_test_type {
tc_test = 0,
syscall_test,
syscall_null_ctx_test,
+};
struct kfunc_test_params {
	const char *prog_name;
	unsigned long lskel_prog_desc_offset;
	int retval;
enum kfunc_test_type test_type;
const char *expected_err_msg;
};
-#define TC_TEST(name, __retval) \
+#define __BPF_TEST_SUCCESS(name, __retval, type) \
	{ \
	.prog_name = #name, \
	.lskel_prog_desc_offset = offsetof(struct kfunc_call_test_lskel, progs.name), \
	.retval = __retval, \
.test_type = type, \
.expected_err_msg = NULL, \
}
+#define __BPF_TEST_FAIL(name, __retval, type, error_msg) \
{ \
.prog_name = #name, \
.lskel_prog_desc_offset = 0 /* unused when test is failing */, \
.retval = __retval, \
.test_type = type, \
	.expected_err_msg = error_msg, \
	}
+#define TC_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, tc_test)
+#define SYSCALL_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_test)
+#define SYSCALL_NULL_CTX_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_null_ctx_test)
+#define SYSCALL_NULL_CTX_FAIL(name, retval, error_msg) \
__BPF_TEST_FAIL(name, retval, syscall_null_ctx_test, error_msg)
static struct kfunc_test_params kfunc_tests[] = {
/* failure cases:
* if retval is 0 -> the program will fail to load and the error message is a verifier error
* if retval is not 0 -> the program can be loaded but running it will give the
* provided return value. The error message is thus the one
* from a successful load
*/
SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_fail, -EINVAL, "processed 4 insns"),
SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_null_fail, -EINVAL, "processed 4 insns"),
	/* success cases */
	TC_TEST(kfunc_call_test1, 12),
	TC_TEST(kfunc_call_test2, 3),
	TC_TEST(kfunc_call_test_ref_btf_id, 0),
SYSCALL_TEST(kfunc_syscall_test, 0),
SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
+};
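As an aside on the lskel_prog_desc_offset field recorded by __BPF_TEST_SUCCESS() above: the offsetof() value only becomes useful once it is added back to the light skeleton's base pointer, and the part of verify_success() that consumes it is not quoted in this hunk. The following is therefore only a hedged sketch; the lskel_prog_from_offset() helper name is made up for illustration, while struct bpf_prog_desc comes from the generated light skeleton.

/* Illustrative sketch only: turn the offset stored in kfunc_tests[] back into
 * the light skeleton's per-program descriptor. The generated lskel embeds one
 * struct bpf_prog_desc per program inside its "progs" member, so adding the
 * recorded offsetof() value to the skeleton base yields that descriptor.
 */
static struct bpf_prog_desc *lskel_prog_from_offset(struct kfunc_call_test_lskel *lskel,
						    unsigned long offset)
{
	return (struct bpf_prog_desc *)((char *)lskel + offset);
}

/* usage sketch:
 *	lskel_prog = lskel_prog_from_offset(lskel, param->lskel_prog_desc_offset);
 *	prog_fd = lskel_prog->prog_fd;
 */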
+struct syscall_test_args {
__u8 data[16];
size_t size;
};
static void verify_success(struct kfunc_test_params *param)
{
	struct kfunc_call_test_lskel *lskel = NULL;
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct bpf_prog_desc *lskel_prog;
	struct kfunc_call_test *skel;
	struct bpf_program *prog;
	int prog_fd, err;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct syscall_test_args args = {
.size = 10,
};
switch (param->test_type) {
case syscall_test:
topts.ctx_in = &args;
topts.ctx_size_in = sizeof(args);
/* fallthrough */
case syscall_null_ctx_test:
break;
case tc_test:
topts.data_in = &pkt_v4;
topts.data_size_in = sizeof(pkt_v4);
topts.repeat = 1;
break;
	}

	/* first test with normal libbpf */
	skel = kfunc_call_test__open_and_load();
@@ -79,6 +139,72 @@ static void verify_success(struct kfunc_test_params *param)
	kfunc_call_test_lskel__destroy(lskel);
}
+static void verify_fail(struct kfunc_test_params *param)
+{
LIBBPF_OPTS(bpf_object_open_opts, opts);
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct bpf_program *prog;
struct kfunc_call_fail *skel;
int prog_fd, err;
struct syscall_test_args args = {
.size = 10,
};
opts.kernel_log_buf = obj_log_buf;
opts.kernel_log_size = log_buf_sz;
opts.kernel_log_level = 1;
switch (param->test_type) {
case syscall_test:
topts.ctx_in = &args;
topts.ctx_size_in = sizeof(args);
/* fallthrough */
case syscall_null_ctx_test:
break;
case tc_test:
topts.data_in = &pkt_v4;
topts.data_size_in = sizeof(pkt_v4);
		topts.repeat = 1;
		break;
}
skel = kfunc_call_fail__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "kfunc_call_fail__open_opts"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, param->prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
bpf_program__set_autoload(prog, true);
err = kfunc_call_fail__load(skel);
if (!param->retval) {
/* the verifier is supposed to complain and refuses to load */
if (!ASSERT_ERR(err, "unexpected load success"))
goto out_err;
} else {
/* the program is loaded but must dynamically fail */
if (!ASSERT_OK(err, "unexpected load error"))
goto out_err;
prog_fd = bpf_program__fd(prog);
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_EQ(err, param->retval, param->prog_name))
goto out_err;
}
+out_err:
if (!ASSERT_OK_PTR(strstr(obj_log_buf, param->expected_err_msg), "expected_err_msg")) {
fprintf(stderr, "Expected err_msg: %s\n", param->expected_err_msg);
fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
}
+cleanup:
kfunc_call_fail__destroy(skel);
+}
static void test_main(void)
{
	int i;

@@ -87,7 +213,10 @@ static void test_main(void)
		if (!test__start_subtest(kfunc_tests[i].prog_name))
			continue;

-		verify_success(&kfunc_tests[i]);
+		if (!kfunc_tests[i].expected_err_msg)
+			verify_success(&kfunc_tests[i]);
+		else
+			verify_fail(&kfunc_tests[i]);
	}
}
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c new file mode 100644 index 000000000000..4168027f2ab1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h>
+extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym;
+struct syscall_test_args {
__u8 data[16];
size_t size;
+};
+SEC("?syscall") +int kfunc_syscall_test_fail(struct syscall_test_args *args) +{
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args) + 1);
return 0;
+}
+SEC("?syscall") +int kfunc_syscall_test_null_fail(struct syscall_test_args *args) +{
/* Must be called with args as a NULL pointer
 * we do not check for it, so that the verifier considers that
 * the pointer might not be NULL, and so the program can still be loaded.
*
* So the following can not be added:
*
* if (args)
* return -22;
*/
bpf_kfunc_call_test_mem_len_pass1(args, sizeof(*args));
return 0;
+}
+char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index 5aecbb9fdc68..94c05267e5e7 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -92,4 +92,42 @@ int kfunc_call_test_pass(struct __sk_buff *skb) return 0; }
+struct syscall_test_args {
__u8 data[16];
size_t size;
+};
+SEC("syscall") +int kfunc_syscall_test(struct syscall_test_args *args) +{
const int size = args->size;
if (size > sizeof(args->data))
return -7; /* -E2BIG */
Looks like it is due to this. Verifier is confused because:
  r7 = args->data;
  r1 = r7;
then it does r1 <<= 32; r1 >>= 32; clearing upper 32 bits, so both r1 and r7 lose the id association which propagates the bounds of r1 learnt from comparison of it with sizeof(args->data);
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
Later llvm assigns r7 to r2 for this call's 2nd arg. At this point the verifier still thinks r7 is unbounded, while to make a call with a mem, len pair you need a non-negative min value.
Easiest way might be to just do args->size & sizeof(args->data), as the verifier log says. You might still keep the error above. Others may have better ideas/insights.
I just did s/const int size/const long size/ to fix the issues.
Also fixed commit in patch 3 that talks about max_ctx_offset and did:
-	BTF_KFUNC_SET_MAX_CNT = 64,
+	BTF_KFUNC_SET_MAX_CNT = 256,
and applied.
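For reference, a minimal sketch of kfunc_syscall_test() with the s/const int size/const long size/ fix applied. The struct and extern declarations come from the quoted kfunc_call_test.c; the trailing call that passes size as the length argument is an assumption drawn from the discussion above, since that part of the hunk is not quoted here.

SEC("syscall")
int kfunc_syscall_test(struct syscall_test_args *args)
{
	/* was "const int size": a 64-bit type keeps the bounds learnt below
	 * across the 32-bit zero-extension described above
	 */
	const long size = args->size;

	if (size > sizeof(args->data))
		return -7; /* -E2BIG */

	bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
	bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
	/* assumed from the discussion: the (mem, len) call that needs a
	 * non-negative minimum value for size
	 */
	bpf_kfunc_call_test_mem_len_pass1(&args->data, size);

	return 0;
}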
Great!
Many thanks to both of you for your time and getting me there :)
Cheers, Benjamin
net/bpf/test_run.c already presents 20 kfuncs, and net/netfilter/nf_conntrack_bpf.c presents an extra 10 kfuncs.
Given that all the kfuncs are regrouped into one unique set, having only 2 slots left prevents us from adding more selftests.
Bump it to 64 for now.
Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com
---
no changes in v11
new in v10 --- kernel/bpf/btf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index eca9ea78ee5f..8280c1a8dbce 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -208,7 +208,7 @@ enum btf_kfunc_hook { };
enum {
-	BTF_KFUNC_SET_MAX_CNT = 32,
+	BTF_KFUNC_SET_MAX_CNT = 64,
	BTF_DTOR_KFUNC_MAX_CNT = 256,
};
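For readers less familiar with the limit being bumped here: BTF_KFUNC_SET_MAX_CNT caps how many kfunc IDs can accumulate per program-type hook once all registered id sets are merged. A rough sketch of the registration pattern that counts against it follows; the example_* names are hypothetical, while the macros and register_btf_kfunc_id_set() are the same ones used by net/bpf/test_run.c in this series.

/* Hypothetical module-side sketch: every BTF_ID_FLAGS() entry below counts
 * against BTF_KFUNC_SET_MAX_CNT for the hook it is registered under.
 */
BTF_SET8_START(example_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_SET8_END(example_kfunc_ids)

static const struct btf_kfunc_id_set example_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &example_kfunc_ids,
};

static int __init example_kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &example_kfunc_set);
}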
On Tue, 6 Sept 2022 at 17:13, Benjamin Tissoires benjamin.tissoires@redhat.com wrote:
net/bpf/test_run.c already presents 20 kfuncs, and net/netfilter/nf_conntrack_bpf.c presents an extra 10 kfuncs.
Given that all the kfuncs are regrouped into one unique set, having only 2 slots left prevents us from adding more selftests.
Bump it to 64 for now.
Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com
I can imagine that pinning this down as the reason the program was failing to load must have been fun, since I ended up requiring this too in the linked list series...
Acked-by: Kumar Kartikeya Dwivedi memxor@gmail.com
no changes in v11
new in v10
kernel/bpf/btf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index eca9ea78ee5f..8280c1a8dbce 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -208,7 +208,7 @@ enum btf_kfunc_hook { };
enum {
-	BTF_KFUNC_SET_MAX_CNT = 32,
+	BTF_KFUNC_SET_MAX_CNT = 64,
	BTF_DTOR_KFUNC_MAX_CNT = 256,
};
-- 2.36.1
For drivers (outside of network), the incoming data is not statically defined in a struct. Most of the time the data buffer is kzalloc-ed and thus we cannot rely on eBPF and BTF to explore the data.
This commit allows a kfunc to return arbitrary memory previously allocated by the driver. An interesting extra point is that the kfunc can mark the exported memory region as read-only or read/write.
So, when a kfunc returns a pointer to a plain type rather than to a struct, we can consider it valid allocated memory assuming that:
- one of the arguments is called either rdonly_buf_size or rdwr_buf_size
- and this argument is a const from the caller's point of view
We can then use this parameter as the size of the allocated memory.
The memory is either read-only or read-write based on the name of the size parameter.
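To illustrate the convention, here is a hypothetical driver-side kfunc, not code from this series (the real test kfuncs added later in the series follow the same shape): because the return type is a plain scalar pointer and one argument is named rdonly_buf_size and must be a verifier-known constant, the returned pointer becomes a PTR_TO_MEM of that size, marked read-only because of the rdonly prefix.

/* Hypothetical example of the naming convention; example_hid_device,
 * report_buf and report_len are made-up names. Such a kfunc would typically
 * also be flagged KF_RET_NULL when it is registered.
 */
noinline u8 *example_hid_get_report(struct example_hid_device *hdev,
				    const int rdonly_buf_size)
{
	if (rdonly_buf_size > hdev->report_len)
		return NULL;

	/* driver-allocated (e.g. kzalloc-ed) buffer, exported read-only */
	return hdev->report_buf;
}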
Acked-by: Kumar Kartikeya Dwivedi memxor@gmail.com Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com
---
no changes in v11
changes in v10: - remove extra check to PTR_TO_BTF_ID - mark_chain_precision on const register (https://lore.kernel.org/bpf/20220823185300.406-2-memxor@gmail.com)
changes in v9: - updated to match upstream (replaced kfunc_flag by a field in kfunc_meta)
no changes in v8
changes in v7: - ensures btf_type_is_struct_ptr() checks for a ptr first (squashed from next commit) - remove multiple_ref_obj_id need - use btf_type_skip_modifiers instead of manually doing it in btf_type_is_struct_ptr() - s/strncmp/strcmp/ in btf_is_kfunc_arg_mem_size() - check for tnum_is_const when retrieving the size value - have only one check for "Ensure only one argument is referenced PTR_TO_BTF_ID" - add some more context to the commit message
changes in v6: - code review from Kartikeya: - remove comment change that had no reasons to be - remove handling of PTR_TO_MEM with kfunc releases - introduce struct bpf_kfunc_arg_meta - do rdonly/rdwr_buf_size check in btf_check_kfunc_arg_match - reverted most of the changes in verifier.c - make sure kfunc acquire is using a struct pointer, not just a plain pointer - also forward ref_obj_id to PTR_TO_MEM in kfunc to not use after free the allocated memory
changes in v5: - updated PTR_TO_MEM comment in btf.c to match upstream - make it read-only or read-write based on the name of size
new in v4
change btf.h
fix allow kfunc to return an allocated mem
---
 include/linux/bpf.h          |   9 +++-
 include/linux/bpf_verifier.h |   2 +
 include/linux/btf.h          |  10 ++++
 kernel/bpf/btf.c             | 101 ++++++++++++++++++++++++++++-------
 kernel/bpf/verifier.c        |  45 +++++++++++-----
 5 files changed, 133 insertions(+), 34 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c9c72a089579..b879465306fb 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1940,6 +1940,13 @@ int btf_distill_func_proto(struct bpf_verifier_log *log, const char *func_name, struct btf_func_model *m);
+struct bpf_kfunc_arg_meta { + u64 r0_size; + bool r0_rdonly; + int ref_obj_id; + u32 flags; +}; + struct bpf_reg_state; int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, struct bpf_reg_state *regs); @@ -1948,7 +1955,7 @@ int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, const struct btf *btf, u32 func_id, struct bpf_reg_state *regs, - u32 kfunc_flags); + struct bpf_kfunc_arg_meta *meta); int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, struct bpf_reg_state *reg); int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 1fdddbf3546b..8fbc1d05281e 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -598,6 +598,8 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, struct bpf_attach_target_info *tgt_info); void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
+int mark_chain_precision(struct bpf_verifier_env *env, int regno); + #define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
/* extract base type from bpf_{arg, return, reg}_type. */ diff --git a/include/linux/btf.h b/include/linux/btf.h index ad93c2d9cc1c..1fcc833a8690 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -441,4 +441,14 @@ static inline int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dt } #endif
+static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t) +{ + if (!btf_type_is_ptr(t)) + return false; + + t = btf_type_skip_modifiers(btf, t->type, NULL); + + return btf_type_is_struct(t); +} + #endif diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 8280c1a8dbce..13203d7cb1c6 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6166,11 +6166,36 @@ static bool is_kfunc_arg_mem_size(const struct btf *btf, return true; }
+static bool btf_is_kfunc_arg_mem_size(const struct btf *btf, + const struct btf_param *arg, + const struct bpf_reg_state *reg, + const char *name) +{ + int len, target_len = strlen(name); + const struct btf_type *t; + const char *param_name; + + t = btf_type_skip_modifiers(btf, arg->type, NULL); + if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) + return false; + + param_name = btf_name_by_offset(btf, arg->name_off); + if (str_is_empty(param_name)) + return false; + len = strlen(param_name); + if (len != target_len) + return false; + if (strcmp(param_name, name)) + return false; + + return true; +} + static int btf_check_func_arg_match(struct bpf_verifier_env *env, const struct btf *btf, u32 func_id, struct bpf_reg_state *regs, bool ptr_to_mem_ok, - u32 kfunc_flags, + struct bpf_kfunc_arg_meta *kfunc_meta, bool processing_call) { enum bpf_prog_type prog_type = resolve_prog_type(env->prog); @@ -6208,12 +6233,12 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, return -EINVAL; }
- if (is_kfunc) { + if (is_kfunc && kfunc_meta) { /* Only kfunc can be release func */ - rel = kfunc_flags & KF_RELEASE; - kptr_get = kfunc_flags & KF_KPTR_GET; - trusted_arg = kfunc_flags & KF_TRUSTED_ARGS; - sleepable = kfunc_flags & KF_SLEEPABLE; + rel = kfunc_meta->flags & KF_RELEASE; + kptr_get = kfunc_meta->flags & KF_KPTR_GET; + trusted_arg = kfunc_meta->flags & KF_TRUSTED_ARGS; + sleepable = kfunc_meta->flags & KF_SLEEPABLE; }
/* check that BTF function arguments match actual types that the @@ -6226,6 +6251,38 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
t = btf_type_skip_modifiers(btf, args[i].type, NULL); if (btf_type_is_scalar(t)) { + if (is_kfunc && kfunc_meta) { + bool is_buf_size = false; + + /* check for any const scalar parameter of name "rdonly_buf_size" + * or "rdwr_buf_size" + */ + if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg, + "rdonly_buf_size")) { + kfunc_meta->r0_rdonly = true; + is_buf_size = true; + } else if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg, + "rdwr_buf_size")) + is_buf_size = true; + + if (is_buf_size) { + if (kfunc_meta->r0_size) { + bpf_log(log, "2 or more rdonly/rdwr_buf_size parameters for kfunc"); + return -EINVAL; + } + + if (!tnum_is_const(reg->var_off)) { + bpf_log(log, "R%d is not a const\n", regno); + return -EINVAL; + } + + kfunc_meta->r0_size = reg->var_off.value; + ret = mark_chain_precision(env, regno); + if (ret) + return ret; + } + } + if (reg->type == SCALAR_VALUE) continue; bpf_log(log, "R%d is not a scalar\n", regno); @@ -6256,6 +6313,17 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, if (ret < 0) return ret;
+ if (is_kfunc && reg->ref_obj_id) { + /* Ensure only one argument is referenced PTR_TO_BTF_ID */ + if (ref_obj_id) { + bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", + regno, reg->ref_obj_id, ref_obj_id); + return -EFAULT; + } + ref_regno = regno; + ref_obj_id = reg->ref_obj_id; + } + /* kptr_get is only true for kfunc */ if (i == 0 && kptr_get) { struct bpf_map_value_off_desc *off_desc; @@ -6328,16 +6396,6 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, if (reg->type == PTR_TO_BTF_ID) { reg_btf = reg->btf; reg_ref_id = reg->btf_id; - /* Ensure only one argument is referenced PTR_TO_BTF_ID */ - if (reg->ref_obj_id) { - if (ref_obj_id) { - bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", - regno, reg->ref_obj_id, ref_obj_id); - return -EFAULT; - } - ref_regno = regno; - ref_obj_id = reg->ref_obj_id; - } } else { reg_btf = btf_vmlinux; reg_ref_id = *reg2btf_ids[base_type(reg->type)]; @@ -6428,6 +6486,9 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, return -EINVAL; }
+ if (kfunc_meta && ref_obj_id) + kfunc_meta->ref_obj_id = ref_obj_id; + /* returns argument register number > 0 in case of reference release kfunc */ return rel ? ref_regno : 0; } @@ -6459,7 +6520,7 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, return -EINVAL;
is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; - err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, 0, false); + err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, false);
/* Compiler optimizations can remove arguments from static functions * or mismatched type can be passed into a global function. @@ -6502,7 +6563,7 @@ int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, return -EINVAL;
is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; - err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, 0, true); + err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, true);
/* Compiler optimizations can remove arguments from static functions * or mismatched type can be passed into a global function. @@ -6516,9 +6577,9 @@ int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, const struct btf *btf, u32 func_id, struct bpf_reg_state *regs, - u32 kfunc_flags) + struct bpf_kfunc_arg_meta *meta) { - return btf_check_func_arg_match(env, btf, func_id, regs, true, kfunc_flags, true); + return btf_check_func_arg_match(env, btf, func_id, regs, true, meta, true); }
/* Convert BTF of a function into bpf_reg_state if possible diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3f9e6fa92cde..ca12ee8262bf 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2908,7 +2908,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, return 0; }
-static int mark_chain_precision(struct bpf_verifier_env *env, int regno) +int mark_chain_precision(struct bpf_verifier_env *env, int regno) { return __mark_chain_precision(env, regno, -1); } @@ -7594,6 +7594,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, { const struct btf_type *t, *func, *func_proto, *ptr_type; struct bpf_reg_state *regs = cur_regs(env); + struct bpf_kfunc_arg_meta meta = { 0 }; const char *func_name, *ptr_type_name; u32 i, nargs, func_id, ptr_type_id; int err, insn_idx = *insn_idx_p; @@ -7628,8 +7629,10 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
acq = *kfunc_flags & KF_ACQUIRE;
+ meta.flags = *kfunc_flags; + /* Check the arguments */ - err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, *kfunc_flags); + err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, &meta); if (err < 0) return err; /* In case of release function, we get register number of refcounted @@ -7650,7 +7653,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, /* Check return type */ t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
- if (acq && !btf_type_is_ptr(t)) { + if (acq && !btf_type_is_struct_ptr(desc_btf, t)) { verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n"); return -EINVAL; } @@ -7662,17 +7665,33 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); if (!btf_type_is_struct(ptr_type)) { - ptr_type_name = btf_name_by_offset(desc_btf, - ptr_type->name_off); - verbose(env, "kernel function %s returns pointer type %s %s is not supported\n", - func_name, btf_type_str(ptr_type), - ptr_type_name); - return -EINVAL; + if (!meta.r0_size) { + ptr_type_name = btf_name_by_offset(desc_btf, + ptr_type->name_off); + verbose(env, + "kernel function %s returns pointer type %s %s is not supported\n", + func_name, + btf_type_str(ptr_type), + ptr_type_name); + return -EINVAL; + } + + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_MEM; + regs[BPF_REG_0].mem_size = meta.r0_size; + + if (meta.r0_rdonly) + regs[BPF_REG_0].type |= MEM_RDONLY; + + /* Ensures we don't access the memory after a release_reference() */ + if (meta.ref_obj_id) + regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; + } else { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].btf = desc_btf; + regs[BPF_REG_0].type = PTR_TO_BTF_ID; + regs[BPF_REG_0].btf_id = ptr_type_id; } - mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].btf = desc_btf; - regs[BPF_REG_0].type = PTR_TO_BTF_ID; - regs[BPF_REG_0].btf_id = ptr_type_id; if (*kfunc_flags & KF_RET_NULL) { regs[BPF_REG_0].type |= PTR_MAYBE_NULL; /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
We add 2 new kfuncs that follow the RET_PTR_TO_MEM capability from the previous commit. Then we test them in selftests: the first tests exercise valid cases and do not fail, while the later ones are wrong and prevent the program from being loaded.
To work around that, we mark the failing ones as not autoloaded (with SEC("?tc")), and we manually enable them one by one, ensuring the verifier rejects them.
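For reference, the userspace side of that pattern is the usual libbpf autoload dance, mirroring the verify_fail() helper added earlier in the series. A minimal sketch, assuming the kfunc_call_fail skeleton and the test_progs ASSERT helpers:

/* Minimal sketch: open without loading, re-enable a single SEC("?tc")
 * program, then expect the load to fail because the verifier rejects it.
 */
static void check_one_failing_prog(const char *prog_name)
{
	struct kfunc_call_fail *skel;
	struct bpf_program *prog;

	skel = kfunc_call_fail__open();
	if (!ASSERT_OK_PTR(skel, "kfunc_call_fail__open"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (ASSERT_OK_PTR(prog, "find_program_by_name"))
		bpf_program__set_autoload(prog, true);

	/* expected to fail: the verifier must reject this program */
	ASSERT_ERR(kfunc_call_fail__load(skel), "load");

	kfunc_call_fail__destroy(skel);
}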
Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com
---
changes in v11: - use new TC_* declaration of tests
changes in v10: - use new definition for tests - remove the Makefile change, it was done before - renamed the failed tests to be more explicit - add 2 more test cases for return mem: oob access and non const access - add one more test case for an invalid acquire function returning an int pointer
changes in v9: - updated to match upstream (net/bpf/test_run.c id sets is now using flags)
no changes in v8
changes in v7: - removed stray include/linux/btf.h change
new in v6
---
 net/bpf/test_run.c                            |  36 ++++++
 .../selftests/bpf/prog_tests/kfunc_call.c     |   7 +
 .../selftests/bpf/progs/kfunc_call_fail.c     | 121 ++++++++++++++++++
 .../selftests/bpf/progs/kfunc_call_test.c     |  33 +++++
 4 files changed, 197 insertions(+)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index f16baf977a21..13d578ce2a09 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -606,6 +606,38 @@ noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p) WARN_ON_ONCE(1); }
+static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size) +{ + if (size > 2 * sizeof(int)) + return NULL; + + return (int *)p; +} + +noinline int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size); +} + +noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); +} + +/* the next 2 ones can't be really used for testing expect to ensure + * that the verifier rejects the call. + * Acquire functions must return struct pointers, so these ones are + * failing. + */ +noinline int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); +} + +noinline void bpf_kfunc_call_int_mem_release(int *p) +{ +} + noinline struct prog_test_ref_kfunc * bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b) { @@ -712,6 +744,10 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET) BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx) BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1) diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index d5881c3331a8..5af1ee8f0e6e 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -50,6 +50,7 @@ struct kfunc_test_params { #define SYSCALL_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_test) #define SYSCALL_NULL_CTX_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_null_ctx_test)
+#define TC_FAIL(name, retval, error_msg) __BPF_TEST_FAIL(name, retval, tc_test, error_msg) #define SYSCALL_NULL_CTX_FAIL(name, retval, error_msg) \ __BPF_TEST_FAIL(name, retval, syscall_null_ctx_test, error_msg)
@@ -62,11 +63,17 @@ static struct kfunc_test_params kfunc_tests[] = { */ SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_fail, -EINVAL, "processed 4 insns"), SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_null_fail, -EINVAL, "processed 4 insns"), + TC_FAIL(kfunc_call_test_get_mem_fail_rdonly, 0, "R0 cannot write into rdonly_mem"), + TC_FAIL(kfunc_call_test_get_mem_fail_use_after_free, 0, "invalid mem access 'scalar'"), + TC_FAIL(kfunc_call_test_get_mem_fail_oob, 0, "min value is outside of the allowed memory range"), + TC_FAIL(kfunc_call_test_get_mem_fail_not_const, 0, "is not a const"), + TC_FAIL(kfunc_call_test_mem_acquire_fail, 0, "acquire kernel function does not return PTR_TO_BTF_ID"),
/* success cases */ TC_TEST(kfunc_call_test1, 12), TC_TEST(kfunc_call_test2, 3), TC_TEST(kfunc_call_test_ref_btf_id, 0), + TC_TEST(kfunc_call_test_get_mem, 42), SYSCALL_TEST(kfunc_syscall_test, 0), SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0), }; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c index 4168027f2ab1..b98313d391c6 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -3,7 +3,13 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h>
+extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; +extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; +extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; +extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; +extern int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; +extern void bpf_kfunc_call_int_mem_release(int *p) __ksym;
struct syscall_test_args { __u8 data[16]; @@ -36,4 +42,119 @@ int kfunc_syscall_test_null_fail(struct syscall_test_args *args) return 0; }
+SEC("?tc") +int kfunc_call_test_get_mem_fail_rdonly(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int)); + if (p) + p[0] = 42; /* this is a read-only buffer, so -EACCES */ + else + ret = -1; + + bpf_kfunc_call_test_release(pt); + } + return ret; +} + +SEC("?tc") +int kfunc_call_test_get_mem_fail_use_after_free(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int)); + if (p) { + p[0] = 42; + ret = p[1]; /* 108 */ + } else { + ret = -1; + } + + bpf_kfunc_call_test_release(pt); + } + if (p) + ret = p[0]; /* p is not valid anymore */ + + return ret; +} + +SEC("?tc") +int kfunc_call_test_get_mem_fail_oob(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int)); + if (p) + ret = p[2 * sizeof(int)]; /* oob access, so -EACCES */ + else + ret = -1; + + bpf_kfunc_call_test_release(pt); + } + return ret; +} + +int not_const_size = 2 * sizeof(int); + +SEC("?tc") +int kfunc_call_test_get_mem_fail_not_const(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + p = bpf_kfunc_call_test_get_rdonly_mem(pt, not_const_size); /* non const size, -EINVAL */ + if (p) + ret = p[0]; + else + ret = -1; + + bpf_kfunc_call_test_release(pt); + } + return ret; +} + +SEC("?tc") +int kfunc_call_test_mem_acquire_fail(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + /* we are failing on this one, because we are not acquiring a PTR_TO_BTF_ID (a struct ptr) */ + p = bpf_kfunc_call_test_acq_rdonly_mem(pt, 2 * sizeof(int)); + if (p) + ret = p[0]; + else + ret = -1; + + bpf_kfunc_call_int_mem_release(p); + + bpf_kfunc_call_test_release(pt); + } + return ret; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index 94c05267e5e7..56c96f7969f0 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -14,6 +14,8 @@ extern void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym; extern void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym; extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym; +extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; +extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
SEC("tc") int kfunc_call_test2(struct __sk_buff *skb) @@ -130,4 +132,35 @@ int kfunc_syscall_test_null(struct syscall_test_args *args) return 0; }
+SEC("tc") +int kfunc_call_test_get_mem(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int)); + if (p) { + p[0] = 42; + ret = p[1]; /* 108 */ + } else { + ret = -1; + } + + if (ret >= 0) { + p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int)); + if (p) + ret = p[0]; /* 42 */ + else + ret = -1; + } + + bpf_kfunc_call_test_release(pt); + } + return ret; +} + char _license[] SEC("license") = "GPL";
On Tue, 6 Sept 2022 at 17:13, Benjamin Tissoires benjamin.tissoires@redhat.com wrote:
We add 2 new kfuncs that follow the RET_PTR_TO_MEM capability from the previous commit. Then we test them in selftests: the first tests exercise valid cases and do not fail, while the later ones are wrong and prevent the program from being loaded.
To work around that, we mark the failing ones as not autoloaded (with SEC("?tc")), and we manually enable them one by one, ensuring the verifier rejects them.
Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com
Thanks for adding these tests. Acked-by: Kumar Kartikeya Dwivedi memxor@gmail.com
changes in v11:
- use new TC_* declaration of tests
changes in v10:
- use new definition for tests
- remove the Makefile change, it was done before
- renamed the failed tests to be more explicit
- add 2 more test cases for return mem: oob access and non const access
- add one more test case for an invalid acquire function returning an int pointer
changes in v9:
- updated to match upstream (net/bpf/test_run.c id sets is now using flags)
no changes in v8
changes in v7:
- removed stray include/linux/btf.h change
new in v6
net/bpf/test_run.c | 36 ++++++ .../selftests/bpf/prog_tests/kfunc_call.c | 7 + .../selftests/bpf/progs/kfunc_call_fail.c | 121 ++++++++++++++++++ .../selftests/bpf/progs/kfunc_call_test.c | 33 +++++ 4 files changed, 197 insertions(+)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index f16baf977a21..13d578ce2a09 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -606,6 +606,38 @@ noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p) WARN_ON_ONCE(1); }
+static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
+{
if (size > 2 * sizeof(int))
return NULL;
return (int *)p;
+}
+noinline int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size)
+{
return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
+}
+noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
+{
return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
+}
+/* the next 2 ones can't really be used for testing except to ensure
+ * that the verifier rejects the call.
+ * Acquire functions must return struct pointers, so these ones are
+ * failing.
+ */
+noinline int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
+{
return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
+}
+noinline void bpf_kfunc_call_int_mem_release(int *p)
+{
+}
noinline struct prog_test_ref_kfunc * bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b) { @@ -712,6 +744,10 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET) BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx) BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1) diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index d5881c3331a8..5af1ee8f0e6e 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -50,6 +50,7 @@ struct kfunc_test_params { #define SYSCALL_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_test) #define SYSCALL_NULL_CTX_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_null_ctx_test)
+#define TC_FAIL(name, retval, error_msg) __BPF_TEST_FAIL(name, retval, tc_test, error_msg) #define SYSCALL_NULL_CTX_FAIL(name, retval, error_msg) \ __BPF_TEST_FAIL(name, retval, syscall_null_ctx_test, error_msg)
@@ -62,11 +63,17 @@ static struct kfunc_test_params kfunc_tests[] = { */ SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_fail, -EINVAL, "processed 4 insns"), SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_null_fail, -EINVAL, "processed 4 insns"),
TC_FAIL(kfunc_call_test_get_mem_fail_rdonly, 0, "R0 cannot write into rdonly_mem"),
TC_FAIL(kfunc_call_test_get_mem_fail_use_after_free, 0, "invalid mem access 'scalar'"),
TC_FAIL(kfunc_call_test_get_mem_fail_oob, 0, "min value is outside of the allowed memory range"),
TC_FAIL(kfunc_call_test_get_mem_fail_not_const, 0, "is not a const"),
TC_FAIL(kfunc_call_test_mem_acquire_fail, 0, "acquire kernel function does not return PTR_TO_BTF_ID"), /* success cases */ TC_TEST(kfunc_call_test1, 12), TC_TEST(kfunc_call_test2, 3), TC_TEST(kfunc_call_test_ref_btf_id, 0),
TC_TEST(kfunc_call_test_get_mem, 42), SYSCALL_TEST(kfunc_syscall_test, 0), SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
}; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c index 4168027f2ab1..b98313d391c6 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -3,7 +3,13 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h>
+extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; +extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; +extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; +extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; +extern int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; +extern void bpf_kfunc_call_int_mem_release(int *p) __ksym;
struct syscall_test_args { __u8 data[16]; @@ -36,4 +42,119 @@ int kfunc_syscall_test_null_fail(struct syscall_test_args *args) return 0; }
+SEC("?tc") +int kfunc_call_test_get_mem_fail_rdonly(struct __sk_buff *skb) +{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
if (p)
p[0] = 42; /* this is a read-only buffer, so -EACCES */
else
ret = -1;
bpf_kfunc_call_test_release(pt);
}
return ret;
+}
+SEC("?tc") +int kfunc_call_test_get_mem_fail_use_after_free(struct __sk_buff *skb) +{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
if (p) {
p[0] = 42;
ret = p[1]; /* 108 */
} else {
ret = -1;
}
bpf_kfunc_call_test_release(pt);
}
if (p)
ret = p[0]; /* p is not valid anymore */
return ret;
+}
+SEC("?tc") +int kfunc_call_test_get_mem_fail_oob(struct __sk_buff *skb) +{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
if (p)
ret = p[2 * sizeof(int)]; /* oob access, so -EACCES */
else
ret = -1;
bpf_kfunc_call_test_release(pt);
}
return ret;
+}
+int not_const_size = 2 * sizeof(int);
+SEC("?tc") +int kfunc_call_test_get_mem_fail_not_const(struct __sk_buff *skb) +{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdonly_mem(pt, not_const_size); /* non const size, -EINVAL */
if (p)
ret = p[0];
else
ret = -1;
bpf_kfunc_call_test_release(pt);
}
return ret;
+}
+SEC("?tc") +int kfunc_call_test_mem_acquire_fail(struct __sk_buff *skb) +{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
/* we are failing on this one, because we are not acquiring a PTR_TO_BTF_ID (a struct ptr) */
p = bpf_kfunc_call_test_acq_rdonly_mem(pt, 2 * sizeof(int));
if (p)
ret = p[0];
else
ret = -1;
bpf_kfunc_call_int_mem_release(p);
bpf_kfunc_call_test_release(pt);
}
return ret;
+}
char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index 94c05267e5e7..56c96f7969f0 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -14,6 +14,8 @@ extern void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym; extern void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym; extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym; +extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; +extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
SEC("tc") int kfunc_call_test2(struct __sk_buff *skb) @@ -130,4 +132,35 @@ int kfunc_syscall_test_null(struct syscall_test_args *args) return 0; }
+SEC("tc") +int kfunc_call_test_get_mem(struct __sk_buff *skb) +{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
if (p) {
p[0] = 42;
ret = p[1]; /* 108 */
} else {
ret = -1;
}
if (ret >= 0) {
p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
if (p)
ret = p[0]; /* 42 */
else
ret = -1;
}
bpf_kfunc_call_test_release(pt);
}
return ret;
+}
char _license[] SEC("license") = "GPL";
2.36.1
Hello:
This series was applied to bpf/bpf-next.git (master) by Alexei Starovoitov ast@kernel.org:
On Tue, 6 Sep 2022 17:12:56 +0200 you wrote:
Hi,
well, given that the HID changes haven't moved a lot in the past revisions and that I am cc-ing a bunch of people, I have dropped them while we focus on the last 2 requirements in bpf-core changes.
I'll submit a HID targeted series when we get these in tree, which would make things a lore more independent.
[...]
Here is the summary with links:
  - [bpf-next,v11,1/7] selftests/bpf: regroup and declare similar kfuncs selftests in an array
    https://git.kernel.org/bpf/bpf-next/c/012ba1156e4a
  - [bpf-next,v11,2/7] bpf: split btf_check_subprog_arg_match in two
    https://git.kernel.org/bpf/bpf-next/c/95f2f26f3cac
  - [bpf-next,v11,3/7] bpf/verifier: allow all functions to read user provided context
    https://git.kernel.org/bpf/bpf-next/c/15baa55ff5b0
  - [bpf-next,v11,4/7] selftests/bpf: add test for accessing ctx from syscall program type
    https://git.kernel.org/bpf/bpf-next/c/fb66223a244f
  - [bpf-next,v11,5/7] bpf/btf: bump BTF_KFUNC_SET_MAX_CNT
    https://git.kernel.org/bpf/bpf-next/c/f9b348185f4d
  - [bpf-next,v11,6/7] bpf/verifier: allow kfunc to return an allocated mem
    https://git.kernel.org/bpf/bpf-next/c/eb1f7f71c126
  - [bpf-next,v11,7/7] selftests/bpf: Add tests for kfunc returning a memory pointer
    https://git.kernel.org/bpf/bpf-next/c/22ed8d5a4652
You are awesome, thank you!