On Sat, Apr 20, 2024 at 2:10 AM Benjamin Tissoires <bentiss@kernel.org> wrote:
@@ -11018,6 +11027,7 @@ enum special_kfunc_type {
 	KF_bpf_percpu_obj_drop_impl,
 	KF_bpf_throw,
 	KF_bpf_iter_css_task_new,
+	KF_bpf_wq_set_callback_impl,
 };

 BTF_SET_START(special_kfunc_set)
@@ -11044,6 +11054,7 @@ BTF_ID(func, bpf_throw)
 #ifdef CONFIG_CGROUPS
 BTF_ID(func, bpf_iter_css_task_new)
 #endif
+BTF_ID(func, bpf_wq_set_callback_impl)
 BTF_SET_END(special_kfunc_set)

 BTF_ID_LIST(special_kfunc_list)
@@ -11074,6 +11085,7 @@ BTF_ID(func, bpf_iter_css_task_new)
 #else
 BTF_ID_UNUSED
 #endif
+BTF_ID(func, bpf_wq_set_callback_impl)
This is broken on !CONFIG_CGROUPS: the position of
KF_bpf_wq_set_callback_impl in enum special_kfunc_type won't match the
corresponding entry in special_kfunc_list. I moved this line up while
applying.
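For anyone following along: special_kfunc_list is indexed by the values
of enum special_kfunc_type, so the two must stay in lockstep even when
entries are compiled out. Roughly, the applied ordering looks like this
(a sketch, with unrelated entries elided):

enum special_kfunc_type {
	/* ... */
	KF_bpf_throw,
	KF_bpf_wq_set_callback_impl,	/* moved above the cgroups entry */
	KF_bpf_iter_css_task_new,
};

BTF_ID_LIST(special_kfunc_list)
/* ... */
BTF_ID(func, bpf_throw)
BTF_ID(func, bpf_wq_set_callback_impl)
#ifdef CONFIG_CGROUPS
BTF_ID(func, bpf_iter_css_task_new)
#else
BTF_ID_UNUSED
#endif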
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
@@ -11402,12 +11414,28 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id)
 	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
 }

+static bool is_async_callback_calling_kfunc(u32 btf_id)
+{
+	return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
+}
+
 static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
 {
 	return bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
 	       insn->imm == special_kfunc_list[KF_bpf_throw];
 }

+static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id)
+{
+	return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
+}
+
+static bool is_callback_calling_kfunc(u32 btf_id)
+{
+	return is_sync_callback_calling_kfunc(btf_id) ||
+	       is_async_callback_calling_kfunc(btf_id);
+}
 static bool is_rbtree_lock_required_kfunc(u32 btf_id)
 {
 	return is_bpf_rbtree_api_kfunc(btf_id);
@@ -12219,6 +12247,16 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		}
 	}

+	if (is_bpf_wq_set_callback_impl_kfunc(meta.func_id)) {
+		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
+					 set_timer_callback_state);
+		if (err) {
+			verbose(env, "kfunc %s#%d failed callback verification\n",
+				func_name, meta.func_id);
+			return err;
+		}
+	}
+
 	rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
 	rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
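For context, this is the shape of program that check verifies. A sketch
of the BPF side, going from memory of this series' bpf_experimental.h
wrappers (map name, callback name, and prog section are mine; the
callback argument convention of map/key/value follows from reusing
set_timer_callback_state):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

char LICENSE[] SEC("license") = "GPL";

struct elem {
	struct bpf_wq work;	/* recognized via BTF, like bpf_timer */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} wq_map SEC(".maps");

/* args per the set_timer_callback_state convention: map, key, value */
static int wq_cb(void *map, int *key, void *value)
{
	/* runs later, from workqueue (sleepable) context */
	return 0;
}

SEC("tc")
int schedule_work(struct __sk_buff *skb)
{
	struct elem *val;
	int key = 0;

	val = bpf_map_lookup_elem(&wq_map, &key);
	if (!val)
		return 0;
	if (bpf_wq_init(&val->work, &wq_map, 0))
		return 0;
	/* this call is what push_callback_call() above verifies */
	if (bpf_wq_set_callback(&val->work, wq_cb, 0))
		return 0;
	bpf_wq_start(&val->work, 0);
	return 0;
}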
@@ -16982,6 +17020,9 @@ static bool states_equal(struct bpf_verifier_env *env,
 	if (old->active_rcu_lock != cur->active_rcu_lock)
 		return false;

+	if (old->in_sleepable != cur->in_sleepable)
+		return false;
+
 	/* for states to be equal callsites have to be the same
 	 * and all frame states need to be equivalent
 	 */
@@ -19653,6 +19694,28 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
 		*cnt = 1;
+	} else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) {
+		/* The verifier will process callback_fn as many times as necessary
+		 * with different maps and the register states prepared by
+		 * set_timer_callback_state will be accurate.
+		 *
+		 * The following use case is valid:
+		 *  map1 is shared by prog1, prog2, prog3.
+		 *  prog1 calls bpf_timer_init for some map1 elements
+		 *  prog2 calls bpf_timer_set_callback for some map1 elements.
+		 *    Those that were not bpf_timer_init-ed will return -EINVAL.
+		 *  prog3 calls bpf_timer_start for some map1 elements.
+		 *    Those that were not both bpf_timer_init-ed and
+		 *    bpf_timer_set_callback-ed will return -EINVAL.
+		 */
Also removed this comment while applying. It was copied from the
bpf_timer fixup and describes bpf_timer semantics, so it's not correct
here.
+		struct bpf_insn ld_addrs[2] = {
+			BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux),
+		};
+
+		insn_buf[0] = ld_addrs[0];
+		insn_buf[1] = ld_addrs[1];
+		insn_buf[2] = *insn;
+		*cnt = 3;
 	}

 	return 0;
 }
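For readers of the fixup: the BPF_LD_IMM64 fills in the hidden fourth
argument of the kfunc, which program authors never pass themselves.
From memory of this series' bpf_experimental.h (exact parameter
annotations may differ), the user-facing wrapper passes a NULL
placeholder that the rewrite above replaces with env->prog->aux:

/* aux__ign is hidden from the user; the wrapper passes NULL and the
 * verifier's fixup_kfunc_call() loads the real prog aux pointer into
 * R4 via BPF_LD_IMM64 before the call.
 */
int bpf_wq_set_callback_impl(struct bpf_wq *wq,
			     int (callback_fn)(void *map, int *key, void *value),
			     unsigned int flags, void *aux__ign) __ksym;

#define bpf_wq_set_callback(wq, cb, flags) \
	bpf_wq_set_callback_impl(wq, cb, flags, NULL)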
--
2.44.0