On 12/2/22 1:59 AM, Eyal Birger wrote:
+__used noinline
+int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx,
+			  const struct bpf_xfrm_info *from)
+{
+	struct sk_buff *skb = (struct sk_buff *)skb_ctx;
+	struct metadata_dst *md_dst;
+	struct xfrm_md_info *info;
+
+	if (unlikely(skb_metadata_dst(skb)))
+		return -EINVAL;
+
+	md_dst = this_cpu_ptr(xfrm_md_dst);
+
+	info = &md_dst->u.xfrm_info;
+
+	info->if_id = from->if_id;
+	info->link = from->link;
+	skb_dst_force(skb);
+	info->dst_orig = skb_dst(skb);
+
+	dst_hold((struct dst_entry *)md_dst);
+	skb_dst_set(skb, (struct dst_entry *)md_dst);
+
+	return 0;
+}
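For anyone following along, a TC-BPF caller of this kfunc would look
roughly like the sketch below. The struct layout mirrors the two fields
the kfunc reads above; the exact field types, the __ksym declaration,
and the if_id value are my own assumptions rather than something quoted
from the patch:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

/* Mirrors the two fields the kfunc reads above; types are my guess. */
struct bpf_xfrm_info {
	__u32 if_id;
	int link;
};

extern int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx,
				 const struct bpf_xfrm_info *from) __ksym;

SEC("tc")
int set_xfrm(struct __sk_buff *skb)
{
	/* if_id 1 is only a placeholder for an xfrm interface's if_id */
	struct bpf_xfrm_info info = { .if_id = 1, .link = 0 };

	if (bpf_skb_set_xfrm_info(skb, &info))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}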
I may have missed something obvious; this just came to mind:
what stops cleanup_xfrm_interface_bpf() from being run while an skb is
still holding the md_dst?
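To spell out the interleaving I have in mind (my own reading of the
code, so I could be wrong):

	CPU A (datapath)                CPU B (cleanup)
	bpf_skb_set_xfrm_info()
	  dst_hold(md_dst)
	  skb_dst_set(skb, md_dst)
	  /* skb still in flight */
	                                cleanup_xfrm_interface_bpf()
	                                  metadata_dst_free_percpu()
	skb_dst_drop(skb)
	  /* releases a percpu dst that was already freed */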
[ ... ]
+static const struct btf_kfunc_id_set xfrm_interface_kfunc_set = {
+	.owner = THIS_MODULE,
+	.set   = &xfrm_ifc_kfunc_set,
+};
+int __init register_xfrm_interface_bpf(void)
+{
+	int err;
+
+	xfrm_md_dst = metadata_dst_alloc_percpu(0, METADATA_XFRM,
+						GFP_KERNEL);
+	if (!xfrm_md_dst)
+		return -ENOMEM;
+	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
+					&xfrm_interface_kfunc_set);
+	if (err < 0) {
+		metadata_dst_free_percpu(xfrm_md_dst);
+		return err;
+	}
+	return 0;
+}
+void cleanup_xfrm_interface_bpf(void)
+{
+	metadata_dst_free_percpu(xfrm_md_dst);
+}
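One possible direction, sketched only to make the question concrete and
not a claim that this is the right fix: have the cleanup path wait until
every per-cpu entry has dropped back to its base refcount before freeing.
This assumes each percpu metadata_dst is allocated with an initial
refcount of 1, so anything above that means an skb still points at it:

static void xfrm_md_dst_wait(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct metadata_dst *md = per_cpu_ptr(xfrm_md_dst, cpu);

		/* crude busy-wait, purely illustrative; real code would
		 * need a proper synchronization scheme
		 */
		while (atomic_read(&md->dst.__refcnt) > 1)
			cond_resched();
	}
}

i.e. cleanup_xfrm_interface_bpf() would wait for in-flight skbs to
release their references before calling metadata_dst_free_percpu().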