For now, we have to call helpers such as bpf_l4_csum_replace() and bpf_l3_csum_replace() whenever we need to update a checksum. These helpers are not inlined, which hurts performance.
In fact, we can already define our own checksum update functions in a BPF program instead of calling bpf_l3_csum_replace(); such functions are fully inlined and efficient. However, we can't do the same for bpf_l4_csum_replace() yet, because we have no way to update skb->csum, and leaving it stale would make skb->csum invalid in the RX path when the packet was received with CHECKSUM_COMPLETE.
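As an illustration, the L3 case can be open-coded along these lines. This is only a sketch; the helper name below is made up for this example and assumes the usual <bpf/bpf_helpers.h> environment:

/* Incremental checksum update (RFC 1624): replace the 32-bit word
 * 'from' with 'to' in data covered by the IPv4 header checksum and
 * patch the checksum field accordingly. Values as loaded from the packet.
 */
static __always_inline void ip_csum_replace4(__u16 *csum, __u32 from,
					     __u32 to)
{
	__u32 sum = (__u16)~(*csum);			/* ~HC          */

	sum += (~from >> 16) + (~from & 0xffff);	/* + ~m         */
	sum += (to >> 16) + (to & 0xffff);		/* + m'         */
	sum = (sum & 0xffff) + (sum >> 16);		/* fold carries */
	sum = (sum & 0xffff) + (sum >> 16);
	*csum = (__u16)~sum;				/* HC' = ~sum   */
}

Because the IP header checksum change cancels the data change, skb->csum is not affected and no helper call is needed for the L3 case.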
What's more, in some cases we can't use direct packet access and have to fall back on bpf_skb_store_bytes() with the BPF_F_RECOMPUTE_CSUM flag, such as when modifying the VNI in the VXLAN header while the underlay UDP header carries no checksum.
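For reference, that workaround looks roughly like this; VXLAN_VNI_OFF is a placeholder offset, and the snippet assumes <bpf/bpf_helpers.h> and <bpf/bpf_endian.h>:

/* Sketch of the current approach: let the kernel adjust skb->csum
 * for us instead of writing the VNI via direct packet access.
 */
static __always_inline long set_vxlan_vni(struct __sk_buff *skb, __u32 vni)
{
	__be32 vni_field = bpf_htonl(vni << 8);	/* VNI is the top 24 bits */

	return bpf_skb_store_bytes(skb, VXLAN_VNI_OFF, &vni_field,
				   sizeof(vni_field), BPF_F_RECOMPUTE_CSUM);
}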
With read/write access to skb->csum and read access to skb->ip_summed, we can now define inlined checksum update functions in libbpf, which are much more efficient.
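For instance, with these fields an inlined fixup along the following lines becomes possible. This is a rough sketch, not the final libbpf helper: the function name is made up, and CHECKSUM_COMPLETE is assumed to keep its current value (2) from skbuff.h:

/* Adjust skb->csum after the program has rewritten the 32-bit word
 * 'from' to 'to' somewhere in the packet, so that CHECKSUM_COMPLETE
 * packets keep a valid packet-wide checksum.
 */
#define BPF_CHECKSUM_COMPLETE	2	/* assumed value, see skbuff.h */

static __always_inline void skb_csum_fixup4(struct __sk_buff *skb,
					    __u32 from, __u32 to)
{
	__u64 sum;

	if (skb->ip_summed != BPF_CHECKSUM_COMPLETE)
		return;

	sum = skb->csum;
	sum += (~from >> 16) + (~from & 0xffff);	/* subtract old word */
	sum += (to >> 16) + (to & 0xffff);		/* add new word      */
	sum = (sum & 0xffff) + (sum >> 16);		/* fold carries      */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	skb->csum = (__u32)sum;
}

The folded value is equivalent to the original 32-bit sum in ones' complement arithmetic, so the later csum_fold() in the stack still sees a consistent checksum.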
Signed-off-by: Menglong Dong <menglong8.dong@gmail.com>
---
 include/linux/skbuff.h         |  2 ++
 include/uapi/linux/bpf.h       |  2 ++
 net/core/filter.c              | 22 ++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h |  2 ++
 4 files changed, 28 insertions(+)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ea5c8ab3ed00..a0ec404c7009 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1077,8 +1077,10 @@ struct sk_buff {
 /* if you move pkt_type around you also must adapt those constants */
 #ifdef __BIG_ENDIAN_BITFIELD
 #define PKT_TYPE_MAX	(7 << 5)
+#define IP_SUMMED_RSH	1
 #else
 #define PKT_TYPE_MAX	7
+#define IP_SUMMED_RSH	5
 #endif
 #define PKT_TYPE_OFFSET	offsetof(struct sk_buff, __pkt_type_offset)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 754e68ca8744..b450e27f5a8d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -6148,6 +6148,8 @@ struct __sk_buff {
 	__u8  tstamp_type;
 	__u32 :24;		/* Padding, future use. */
 	__u64 hwtstamp;
+	__u32 csum;
+	__u32 ip_summed;
 };
 
 struct bpf_tunnel_key {
diff --git a/net/core/filter.c b/net/core/filter.c
index 24061f29c9dd..23c22d88da1b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8858,6 +8858,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
 		case bpf_ctx_range(struct __sk_buff, tstamp):
 		case bpf_ctx_range(struct __sk_buff, queue_mapping):
+		case bpf_ctx_range(struct __sk_buff, csum):
 			break;
 		default:
 			return false;
@@ -8885,6 +8886,8 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 		 */
 		((struct bpf_prog *)prog)->tstamp_type_access = 1;
 		return size == sizeof(__u8);
+	case bpf_ctx_range_till(struct __sk_buff, csum, ip_summed):
+		return size == sizeof(__u32);
 	}
 
 	return bpf_skb_is_valid_access(off, size, type, prog, info);
@@ -9513,6 +9516,25 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 #endif
 		break;
 
+	case offsetof(struct __sk_buff, ip_summed):
+		*target_size = 1;
+		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
+				      PKT_TYPE_OFFSET);
+		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, IP_SUMMED_RSH);
+		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 3);
+		break;
+
+	case offsetof(struct __sk_buff, csum):
+		if (type == BPF_WRITE)
+			*insn++ = BPF_EMIT_STORE(BPF_W, si,
+						 bpf_target_off(struct sk_buff, csum, 4,
+								target_size));
+		else
+			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+					      bpf_target_off(struct sk_buff, csum, 4,
+							     target_size));
+		break;
+
 	case offsetof(struct __sk_buff, queue_mapping):
 		if (type == BPF_WRITE) {
 			u32 off = bpf_target_off(struct sk_buff, queue_mapping, 2, target_size);
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 7f24d898efbb..31fd5ee40864 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -6148,6 +6148,8 @@ struct __sk_buff {
 	__u8  tstamp_type;
 	__u32 :24;		/* Padding, future use. */
 	__u64 hwtstamp;
+	__u32 csum;
+	__u32 ip_summed;
 };
 
 struct bpf_tunnel_key {