This series is a follow-up to the recent change [1] which added per-cpu insert/delete statistics for maps. The bpf_map_sum_elem_count kfunc introduced in that series was only available to tracing programs, so let's make it available to all program types.
The first patch makes the types listed in the reg2btf_ids[] array be considered trusted by kfuncs.
The second patch treats CONST_PTR_TO_MAP as a trusted pointer from a kfunc's point of view by adding it to the reg2btf_ids[] array.
The third patch adds a missing const qualifier to the map argument of the bpf_map_sum_elem_count kfunc.
The fourth patch registers the bpf_map_sum_elem_count kfunc for all program types and updates the selftests accordingly.
[1] https://lore.kernel.org/bpf/20230705160139.19967-1-aspsk@isovalent.com/
v1 -> v2:
  * treat the whole reg2btf_ids array as trusted (Alexei)
Anton Protopopov (4):
  bpf: consider types listed in reg2btf_ids as trusted
  bpf: consider CONST_PTR_TO_MAP as trusted pointer to struct bpf_map
  bpf: make an argument const in the bpf_map_sum_elem_count kfunc
  bpf: allow any program to use the bpf_map_sum_elem_count kfunc
 include/linux/btf_ids.h                    |  1 +
 kernel/bpf/map_iter.c                      |  7 +++---
 kernel/bpf/verifier.c                      | 22 +++++++++++--------
 .../selftests/bpf/progs/map_ptr_kern.c     |  5 +++++
 4 files changed, 22 insertions(+), 13 deletions(-)
The reg2btf_ids array contains a list of types for which we can (and need to) find a corresponding static BTF id. All the types in the list can be considered trusted for the purposes of kfuncs.
Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
---
 kernel/bpf/verifier.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0b9da95331d7..05123feab378 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5413,12 +5413,24 @@ static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
 	return reg->type == PTR_TO_FLOW_KEYS;
 }
 
+static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
+#ifdef CONFIG_NET
+	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
+	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
+#endif
+};
+
 static bool is_trusted_reg(const struct bpf_reg_state *reg)
 {
 	/* A referenced register is always trusted. */
 	if (reg->ref_obj_id)
 		return true;
+	/* Types listed in the reg2btf_ids are always trusted */
+	if (reg2btf_ids[base_type(reg->type)])
+		return true;
+
 	/* If a register is not referenced, it is trusted if it has the
 	 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the
 	 * other type modifiers may be safe, but we elect to take an opt-in
@@ -10052,15 +10064,6 @@ static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
 	return true;
 }
-
-static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
-#ifdef CONFIG_NET
-	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
-	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
-	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
-#endif
-};
-
 enum kfunc_ptr_arg_type {
 	KF_ARG_PTR_TO_CTX,
 	KF_ARG_PTR_TO_ALLOC_BTF_ID, /* Allocated object */
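For kfunc authors, the practical effect is that a kfunc flagged with KF_TRUSTED_ARGS should now be able to take one of these register types directly, instead of requiring a referenced pointer. Below is a minimal sketch of what such a registration could look like; the kfunc name, the field it reads and the program type in the closing comment are purely illustrative and are not part of this series:

    #include <linux/btf.h>
    #include <linux/btf_ids.h>
    #include <linux/module.h>
    #include <linux/tcp.h>

    /* In-tree kfunc definitions are normally wrapped in __diag_push() /
     * __diag_ignore_all() to silence -Wmissing-prototypes, as
     * kernel/bpf/map_iter.c does; omitted here for brevity.
     */
    __bpf_kfunc int bpf_example_tcp_probe(struct tcp_sock *tp)
    {
            /* Read-only peek; the verifier guarantees tp is trusted. */
            return tp->srtt_us != 0;
    }

    BTF_SET8_START(example_kfunc_ids)
    BTF_ID_FLAGS(func, bpf_example_tcp_probe, KF_TRUSTED_ARGS)
    BTF_SET8_END(example_kfunc_ids)

    static const struct btf_kfunc_id_set example_kfunc_set = {
            .owner = THIS_MODULE,
            .set   = &example_kfunc_ids,
    };

    /* e.g. register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &example_kfunc_set); */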
Add the BTF id of struct bpf_map to the reg2btf_ids array. This makes values of the CONST_PTR_TO_MAP type be considered trusted by kfuncs. This, in turn, allows users to call kfuncs which accept `struct bpf_map *` arguments from non-tracing programs.
While exporting the btf_bpf_map_id variable, save some bytes by defining it as BTF_ID_LIST_GLOBAL_SINGLE (which is u32[1]) and not as BTF_ID_LIST (which is u32[64]).
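For reference, the size difference comes from the fallback definitions in include/linux/btf_ids.h. A rough sketch of their shape (paraphrased from the CONFIG_DEBUG_INFO_BTF=n variants; treat the exact spelling as approximate and check the header for the authoritative definitions):

    /* A generic BTF id list reserves a fixed 64-slot scratch array,
     * while the *_GLOBAL_SINGLE variant reserves exactly one u32 slot.
     */
    #define BTF_ID_LIST(name)  static u32 __maybe_unused name[64];
    #define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
            u32 __maybe_unused name[1];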
Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
---
 include/linux/btf_ids.h | 1 +
 kernel/bpf/map_iter.c   | 3 +--
 kernel/bpf/verifier.c   | 1 +
 3 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index 00950cc03bff..a3462a9b8e18 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -267,5 +267,6 @@ MAX_BTF_TRACING_TYPE,
 extern u32 btf_tracing_ids[];
 extern u32 bpf_cgroup_btf_id[];
 extern u32 bpf_local_storage_map_btf_id[];
+extern u32 btf_bpf_map_id[];
 
 #endif
diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
index d06d3b7150e5..b67996147895 100644
--- a/kernel/bpf/map_iter.c
+++ b/kernel/bpf/map_iter.c
@@ -78,8 +78,7 @@ static const struct seq_operations bpf_map_seq_ops = {
 	.show  = bpf_map_seq_show,
 };
 
-BTF_ID_LIST(btf_bpf_map_id)
-BTF_ID(struct, bpf_map)
+BTF_ID_LIST_GLOBAL_SINGLE(btf_bpf_map_id, struct, bpf_map)
 
 static const struct bpf_iter_seq_info bpf_map_seq_info = {
 	.seq_ops		= &bpf_map_seq_ops,
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 05123feab378..803b91135ca0 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5419,6 +5419,7 @@ static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
 	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
 	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
 #endif
+	[CONST_PTR_TO_MAP] = btf_bpf_map_id,
 };
 
 static bool is_trusted_reg(const struct bpf_reg_state *reg)
We use the map pointer only to read the counter values, with no locking involved, so mark the argument as const.
Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
---
 kernel/bpf/map_iter.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
index b67996147895..011adb41858e 100644
--- a/kernel/bpf/map_iter.c
+++ b/kernel/bpf/map_iter.c
@@ -197,7 +197,7 @@ __diag_push();
 __diag_ignore_all("-Wmissing-prototypes",
 		  "Global functions as their definitions will be in vmlinux BTF");
 
-__bpf_kfunc s64 bpf_map_sum_elem_count(struct bpf_map *map)
+__bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map)
 {
 	s64 *pcount;
 	s64 ret = 0;
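For context, the kfunc body (added by the series referenced in [1]) only sums the per-CPU element counters. A rough sketch of its shape, paraphrased rather than quoted verbatim, with details possibly differing from the in-tree version:

    __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map)
    {
            s64 *pcount;
            s64 ret = 0;
            int cpu;

            if (!map || !map->elem_count)
                    return 0;

            for_each_possible_cpu(cpu) {
                    pcount = per_cpu_ptr(map->elem_count, cpu);
                    /* Reads only; concurrent updates race benignly, so no
                     * locking is needed and the argument can be const.
                     */
                    ret += READ_ONCE(*pcount);
            }

            return ret;
    }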
Register the bpf_map_sum_elem_count kfunc for all program types, and update the map_ptr subtest of the test_progs selftest to exercise the new functionality.
Usage is allowed as long as the pointer to the map is trusted (as in tracing programs) or is a const pointer to a map, as in the following example:
    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            ...
    } hash SEC(".maps");

    ...

    static inline int some_bpf_prog(void)
    {
            struct bpf_map *map = (struct bpf_map *)&hash;
            __s64 count;

            count = bpf_map_sum_elem_count(map);

            ...
    }
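For a fuller picture, here is a hedged sketch of how a non-tracing program could call the kfunc after this change. It uses an XDP program; the map name, sizes and section are made up for illustration and are not taken from the selftests:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 1024);
            __type(key, __u32);
            __type(value, __u64);
    } counters SEC(".maps");

    /* A forward declaration is enough here; the kfunc prototype is
     * resolved against vmlinux BTF at load time.
     */
    struct bpf_map;
    __s64 bpf_map_sum_elem_count(const struct bpf_map *map) __ksym;

    SEC("xdp")
    int count_elems(struct xdp_md *ctx)
    {
            /* Casting the map definition yields a CONST_PTR_TO_MAP register,
             * which the verifier now accepts as a trusted kfunc argument.
             */
            __s64 n = bpf_map_sum_elem_count((const struct bpf_map *)&counters);

            return n < 0 ? XDP_ABORTED : XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";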
Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
---
 kernel/bpf/map_iter.c                            | 2 +-
 tools/testing/selftests/bpf/progs/map_ptr_kern.c | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
index 011adb41858e..6fc9dae9edc8 100644
--- a/kernel/bpf/map_iter.c
+++ b/kernel/bpf/map_iter.c
@@ -226,6 +226,6 @@ static const struct btf_kfunc_id_set bpf_map_iter_kfunc_set = {
 
 static int init_subsystem(void)
 {
-	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_map_iter_kfunc_set);
+	return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_map_iter_kfunc_set);
 }
 late_initcall(init_subsystem);
diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
index db388f593d0a..3325da17ec81 100644
--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
@@ -103,6 +103,8 @@ struct {
 	__type(value, __u32);
 } m_hash SEC(".maps");
 
+__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
+
 static inline int check_hash(void)
 {
 	struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
@@ -115,6 +117,8 @@ static inline int check_hash(void)
 	VERIFY(hash->elem_size == 64);
 	VERIFY(hash->count.counter == 0);
+	VERIFY(bpf_map_sum_elem_count(map) == 0);
+
 	for (i = 0; i < HALF_ENTRIES; ++i) {
 		const __u32 key = i;
 		const __u32 val = 1;
@@ -123,6 +127,7 @@ static inline int check_hash(void)
 			return 0;
 	}
 	VERIFY(hash->count.counter == HALF_ENTRIES);
+	VERIFY(bpf_map_sum_elem_count(map) == HALF_ENTRIES);
 
 	return 1;
 }
Hello:
This series was applied to bpf/bpf-next.git (master) by Alexei Starovoitov <ast@kernel.org>:
On Wed, 19 Jul 2023 09:29:48 +0000 you wrote:
> This series is a follow-up to the recent change [1] which added per-cpu
> insert/delete statistics for maps. The bpf_map_sum_elem_count kfunc
> introduced in that series was only available to tracing programs, so
> let's make it available to all program types.
>
> The first patch makes the types listed in the reg2btf_ids[] array be
> considered trusted by kfuncs.
>
> [...]
Here is the summary with links:
  - [v2,bpf-next,1/4] bpf: consider types listed in reg2btf_ids as trusted
    https://git.kernel.org/bpf/bpf-next/c/831deb2976de
  - [v2,bpf-next,2/4] bpf: consider CONST_PTR_TO_MAP as trusted pointer to struct bpf_map
    https://git.kernel.org/bpf/bpf-next/c/5ba190c29cf9
  - [v2,bpf-next,3/4] bpf: make an argument const in the bpf_map_sum_elem_count kfunc
    https://git.kernel.org/bpf/bpf-next/c/9c29804961c1
  - [v2,bpf-next,4/4] bpf: allow any program to use the bpf_map_sum_elem_count kfunc
    https://git.kernel.org/bpf/bpf-next/c/72829b1c1f16
You are awesome, thank you!