For now, we have to call helpers such as bpf_l4_csum_replace() and bpf_l3_csum_replace() when we need to update the csum. These helpers are not inlined, which hurts performance.
In fact, we can define our own csum update functions in the BPF program instead of calling bpf_l3_csum_replace(); such functions are fully inlined and efficient. However, we can't do the same for bpf_l4_csum_replace() for now, because we have no way to update skb->csum, which can leave skb->csum invalid in the rx path when the skb is in CHECKSUM_COMPLETE mode.
What's more, we can't use direct packet access and have to use skb_store_bytes() with the BPF_F_RECOMPUTE_CSUM flag in some cases, such as modifying the vni in the vxlan header when the underlay udp header has no checksum.
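For illustration, the kind of inlined update function we have in mind (in place of bpf_l3_csum_replace()) could look roughly like the sketch below. The name csum_replace4_inline is hypothetical and not part of this series; it just shows an RFC 1624-style incremental update that the compiler can fully inline:

#include <linux/types.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical inlined helper, illustration only: replace the 32-bit
 * word "from" with "to" in the 16-bit internet checksum "old_csum"
 * using an RFC 1624 incremental update.
 */
static __always_inline __u16 csum_replace4_inline(__u16 old_csum,
						   __u32 from, __u32 to)
{
	__u32 sum;

	sum  = (__u16)~old_csum;			/* back to the raw sum */
	sum += (__u16)~from + (__u16)~(from >> 16);	/* remove old word */
	sum += (__u16)to + (__u16)(to >> 16);		/* add new word */
	sum  = (sum & 0xffff) + (sum >> 16);		/* fold carries */
	sum  = (sum & 0xffff) + (sum >> 16);
	return (__u16)~sum;
}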
In the first patch, we make skb->csum readable and writable, and make skb->ip_summed readable, for tc only for now. With these two fields, we no longer need to call BPF helpers for csum updates.
In the second patch, we add testcases for read/write access to skb->csum and skb->ip_summed.
If this series is acceptable, we can define the inlined functions for csum update in libbpf in the next step.
Menglong Dong (2):
  bpf: add csum/ip_summed fields to __sk_buff
  testcases/bpf: add testcases for skb->csum to ctx_skb.c
 include/linux/skbuff.h                        |  2 +
 include/uapi/linux/bpf.h                      |  2 +
 net/core/filter.c                             | 22 ++++
 tools/include/uapi/linux/bpf.h                |  2 +
 .../testing/selftests/bpf/verifier/ctx_skb.c  | 43 +++++++++++++++++++
 5 files changed, 71 insertions(+)
For now, we have to call helpers such as bpf_l4_csum_replace() and bpf_l3_csum_replace() when we need to update the csum. These helpers are not inlined, which hurts performance.
In fact, we can define our own csum update functions in the BPF program instead of calling bpf_l3_csum_replace(); such functions are fully inlined and efficient. However, we can't do the same for bpf_l4_csum_replace() for now, because we have no way to update skb->csum, which can leave skb->csum invalid in the rx path when the skb is in CHECKSUM_COMPLETE mode.
What's more, we can't use direct packet access and have to use skb_store_bytes() with the BPF_F_RECOMPUTE_CSUM flag in some cases, such as modifying the vni in the vxlan header when the underlay udp header has no checksum.
With read/write access to skb->csum and read access to skb->ip_summed, we can now define inlined csum update functions in libbpf, which are much more efficient.
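For illustration only (the helper name and the open-coded CHECKSUM_COMPLETE value are assumptions, not part of this patch), such an inlined libbpf-side function could look roughly like:

#include <linux/types.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define CHECKSUM_COMPLETE 2	/* value from include/linux/skbuff.h */

/* Sketch, assuming this series is applied: fold a checksum delta "diff"
 * into skb->csum so the rx-path CHECKSUM_COMPLETE value stays valid
 * after the program rewrites packet bytes.
 */
static __always_inline void skb_csum_fixup(struct __sk_buff *skb, __u32 diff)
{
	__u64 csum;

	if (skb->ip_summed != CHECKSUM_COMPLETE)
		return;

	csum = (__u64)skb->csum + diff;
	csum = (csum & 0xffffffff) + (csum >> 32);	/* fold the carry */
	skb->csum = (__u32)csum;
}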
Signed-off-by: Menglong Dong <menglong8.dong@gmail.com>
---
 include/linux/skbuff.h         |  2 ++
 include/uapi/linux/bpf.h       |  2 ++
 net/core/filter.c              | 22 ++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h |  2 ++
 4 files changed, 28 insertions(+)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ea5c8ab3ed00..a0ec404c7009 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1077,8 +1077,10 @@ struct sk_buff {
 /* if you move pkt_type around you also must adapt those constants */
 #ifdef __BIG_ENDIAN_BITFIELD
 #define PKT_TYPE_MAX	(7 << 5)
+#define IP_SUMMED_RSH	1
 #else
 #define PKT_TYPE_MAX	7
+#define IP_SUMMED_RSH	5
 #endif
 #define PKT_TYPE_OFFSET	offsetof(struct sk_buff, __pkt_type_offset)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 754e68ca8744..b450e27f5a8d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -6148,6 +6148,8 @@ struct __sk_buff {
 	__u8 tstamp_type;
 	__u32 :24;		/* Padding, future use. */
 	__u64 hwtstamp;
+	__u32 csum;
+	__u32 ip_summed;
 };
 
 struct bpf_tunnel_key {

diff --git a/net/core/filter.c b/net/core/filter.c
index 24061f29c9dd..23c22d88da1b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8858,6 +8858,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
 	case bpf_ctx_range(struct __sk_buff, tstamp):
 	case bpf_ctx_range(struct __sk_buff, queue_mapping):
+	case bpf_ctx_range(struct __sk_buff, csum):
 		break;
 	default:
 		return false;
@@ -8885,6 +8886,8 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 		 */
 		((struct bpf_prog *)prog)->tstamp_type_access = 1;
 		return size == sizeof(__u8);
+	case bpf_ctx_range_till(struct __sk_buff, csum, ip_summed):
+		return size == sizeof(__u32);
 	}
 
 	return bpf_skb_is_valid_access(off, size, type, prog, info);
@@ -9513,6 +9516,25 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 #endif
 		break;
 
+	case offsetof(struct __sk_buff, ip_summed):
+		*target_size = 1;
+		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
+				      PKT_TYPE_OFFSET);
+		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, IP_SUMMED_RSH);
+		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 3);
+		break;
+
+	case offsetof(struct __sk_buff, csum):
+		if (type == BPF_WRITE)
+			*insn++ = BPF_EMIT_STORE(BPF_W, si,
+						 bpf_target_off(struct sk_buff, csum, 4,
+								target_size));
+		else
+			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+					      bpf_target_off(struct sk_buff, csum, 4,
+							     target_size));
+		break;
+
 	case offsetof(struct __sk_buff, queue_mapping):
 		if (type == BPF_WRITE) {
 			u32 off = bpf_target_off(struct sk_buff, queue_mapping, 2, target_size);

diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 7f24d898efbb..31fd5ee40864 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -6148,6 +6148,8 @@ struct __sk_buff {
 	__u8 tstamp_type;
 	__u32 :24;		/* Padding, future use. */
 	__u64 hwtstamp;
+	__u32 csum;
+	__u32 ip_summed;
 };
 
struct bpf_tunnel_key {
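For reference, the conversion emitted above for a skb->ip_summed read is equivalent to the following kernel-side C (illustration only, assuming the IP_SUMMED_RSH define from the skbuff.h hunk is applied): load the byte that holds the pkt_type/ip_summed bitfields and extract the two ip_summed bits.

#include <linux/skbuff.h>

/* Illustration of what the generated instructions do for a read of
 * __sk_buff::ip_summed: fetch the bitfield byte at PKT_TYPE_OFFSET and
 * shift/mask out the 2-bit ip_summed value.
 */
static inline __u32 read_ip_summed(const struct sk_buff *skb)
{
	__u8 flags = *(const __u8 *)((const void *)skb + PKT_TYPE_OFFSET);

	return (flags >> IP_SUMMED_RSH) & 3;
}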
Testcases for read/write access to skb->csum are added to ctx_skb.c, along with a testcase for read access to skb->ip_summed.
Signed-off-by: Menglong Dong <menglong8.dong@gmail.com>
---
 .../testing/selftests/bpf/verifier/ctx_skb.c | 43 +++++++++++++++++++
 1 file changed, 43 insertions(+)
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
index 0b394a7f7a2d..f15301686843 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -1193,3 +1193,46 @@
 	.prog_type = BPF_PROG_TYPE_SK_SKB,
 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+	"valid access __sk_buff csum",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, csum)),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+	"valid access __sk_buff ip_summed",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, ip_summed)),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+	"check skb->csum is writeable by CLS/ACT",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+		    offsetof(struct __sk_buff, csum)),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.errstr = "invalid bpf_context access",
+},
+{
+	"check skb->ip_summed is not writeable",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+		    offsetof(struct __sk_buff, ip_summed)),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "invalid bpf_context access",
+},
On 12/29, Menglong Dong wrote:
For now, we have to call helpers such as bpf_l4_csum_replace() and bpf_l3_csum_replace() when we need to update the csum. These helpers are not inlined, which hurts performance.
In fact, we can define our own csum update functions in the BPF program instead of calling bpf_l3_csum_replace(); such functions are fully inlined and efficient. However, we can't do the same for bpf_l4_csum_replace() for now, because we have no way to update skb->csum, which can leave skb->csum invalid in the rx path when the skb is in CHECKSUM_COMPLETE mode.
What's more, we can't use direct packet access and have to use skb_store_bytes() with the BPF_F_RECOMPUTE_CSUM flag in some cases, such as modifying the vni in the vxlan header when the underlay udp header has no checksum.
In the first patch, we make skb->csum readable and writable, and make skb->ip_summed readable, for tc only for now. With these two fields, we no longer need to call BPF helpers for csum updates.
In the second patch, we add testcases for read/write access to skb->csum and skb->ip_summed.
If this series is acceptable, we can define the inlined functions for csum update in libbpf in the next step.
One downside of exposing those as __sk_buff fields is that all this skb internal csum stuff now becomes a UAPI. And I'm not sure we want that :-) Should we add a lightweight kfunc to reset the fields instead? Or will it still have an unacceptable overhead?
On 1/2/24 10:11 AM, Stanislav Fomichev wrote:
On 12/29, Menglong Dong wrote:
For now, we have to call helpers such as bpf_l4_csum_replace() and bpf_l3_csum_replace() when we need to update the csum. These helpers are not inlined, which hurts performance.
In fact, we can define our own csum update functions in the BPF program instead of calling bpf_l3_csum_replace(); such functions are fully inlined and efficient. However, we can't do the same for bpf_l4_csum_replace() for now, because we have no way to update skb->csum, which can leave skb->csum invalid in the rx path when the skb is in CHECKSUM_COMPLETE mode.
What's more, we can't use direct packet access and have to use skb_store_bytes() with the BPF_F_RECOMPUTE_CSUM flag in some cases, such as modifying the vni in the vxlan header when the underlay udp header has no checksum.
There is bpf_csum_update(); does it work? A helper call should be acceptable compared with the csum calculation itself.
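For the vxlan case above, a rough usage sketch could look like the following (the offset, new value, and program are illustrative assumptions, not from this thread): rewrite the word with direct packet access, then repair skb->csum with bpf_csum_diff() plus bpf_csum_update().

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

/* Hypothetical offset of the vxlan VNI word for an untagged IPv4 packet:
 * eth(14) + ip(20) + udp(8) + vxlan flags/reserved(4) = 46. Adjust for
 * the real encapsulation.
 */
#define VNI_WORD_OFF	46
#define NEW_VNI		0x123456

SEC("tc")
int rewrite_vni(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	__be32 *word = data + VNI_WORD_OFF;
	__be32 old_val, new_val;
	__s64 diff;

	if ((void *)(word + 1) > data_end)
		return TC_ACT_OK;

	old_val = *word;
	new_val = bpf_htonl(NEW_VNI << 8);	/* VNI sits in the upper 24 bits */
	*word = new_val;

	/* Compute the csum delta and fold it into skb->csum; the helper
	 * only touches skb->csum when the skb is CHECKSUM_COMPLETE.
	 */
	diff = bpf_csum_diff(&old_val, sizeof(old_val), &new_val, sizeof(new_val), 0);
	if (diff >= 0)
		bpf_csum_update(skb, (__u32)diff);

	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";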
In the first patch, we make skb->csum readable and writable, and make skb->ip_summed readable, for tc only for now. With these two fields, we no longer need to call BPF helpers for csum updates.
In the second patch, we add testcases for read/write access to skb->csum and skb->ip_summed.
If this series is acceptable, we can define the inlined functions for csum update in libbpf in the next step.
One downside of exposing those as __sk_buff fields is that all this skb internal csum stuff now becomes a UAPI. And I'm not sure we want
+1. Please no new __sk_buff extension and no new conversion in bpf_convert_ctx_access().
that :-) Should we add a lightweight kfunc to reset the fields instead? Or will it still have an unacceptable overhead?
On Wed, Jan 3, 2024 at 8:52 AM Martin KaFai Lau <martin.lau@linux.dev> wrote:
On 1/2/24 10:11 AM, Stanislav Fomichev wrote:
On 12/29, Menglong Dong wrote:
For now, we have to call helpers such as bpf_l4_csum_replace() and bpf_l3_csum_replace() when we need to update the csum. These helpers are not inlined, which hurts performance.
In fact, we can define our own csum update functions in the BPF program instead of calling bpf_l3_csum_replace(); such functions are fully inlined and efficient. However, we can't do the same for bpf_l4_csum_replace() for now, because we have no way to update skb->csum, which can leave skb->csum invalid in the rx path when the skb is in CHECKSUM_COMPLETE mode.
What's more, we can't use direct packet access and have to use skb_store_bytes() with the BPF_F_RECOMPUTE_CSUM flag in some cases, such as modifying the vni in the vxlan header when the underlay udp header has no checksum.
There is bpf_csum_update(); does it work? A helper call should be acceptable compared with the csum calculation itself.
Yeah, this helper works in this case! Now we are missing the last piece for the tx path: ip_summed. We need to know whether it is CHECKSUM_PARTIAL to decide whether we should update the csum in the packet. In the tx path, the L4 csum holds only the pseudo-header csum when skb->ip_summed is CHECKSUM_PARTIAL.
Maybe we can introduce a lightweight kfunc to get its value, such as bpf_skb_csum_mode()? As we only need to call it once, there shouldn't be much overhead.
Thanks! Menglong Dong
In the first patch, we make skb->csum readable and writable, and make skb->ip_summed readable, for tc only for now. With these two fields, we no longer need to call BPF helpers for csum updates.
In the second patch, we add testcases for read/write access to skb->csum and skb->ip_summed.
If this series is acceptable, we can define the inlined functions for csum update in libbpf in the next step.
One downside of exposing those as __sk_buff fields is that all this skb internal csum stuff now becomes a UAPI. And I'm not sure we want
+1. Please no new __sk_buff extension and no new conversion in bpf_convert_ctx_access().
that :-) Should we add a lightweight kfunc to reset the fields instead? Or will it still have an unacceptable overhead?
On 1/2/24 6:54 PM, Menglong Dong wrote:
On Wed, Jan 3, 2024 at 8:52 AM Martin KaFai Lau <martin.lau@linux.dev> wrote:
On 1/2/24 10:11 AM, Stanislav Fomichev wrote:
On 12/29, Menglong Dong wrote:
For now, we have to call helpers such as bpf_l4_csum_replace() and bpf_l3_csum_replace() when we need to update the csum. These helpers are not inlined, which hurts performance.
In fact, we can define our own csum update functions in the BPF program instead of calling bpf_l3_csum_replace(); such functions are fully inlined and efficient. However, we can't do the same for bpf_l4_csum_replace() for now, because we have no way to update skb->csum, which can leave skb->csum invalid in the rx path when the skb is in CHECKSUM_COMPLETE mode.
What's more, we can't use direct packet access and have to use skb_store_bytes() with the BPF_F_RECOMPUTE_CSUM flag in some cases, such as modifying the vni in the vxlan header when the underlay udp header has no checksum.
There is bpf_csum_update(); does it work? A helper call should be acceptable compared with the csum calculation itself.
Yeah, this helper works in this case! Now we are missing the last piece for the tx path: ip_summed. We need to know whether it is CHECKSUM_PARTIAL to decide whether we should update the csum in the packet. In the tx path, the L4 csum holds only the pseudo-header csum when skb->ip_summed is CHECKSUM_PARTIAL.
Maybe we can introduce a lightweight kfunc to get its value, such as bpf_skb_csum_mode()? As we only need to call it once, there shouldn't be much overhead.
You don't need a kfunc; you can do the check like this:

    struct sk_buff *kskb = bpf_cast_to_kern_ctx(skb);

    if (kskb->ip_summed == CHECKSUM_PARTIAL)
        ... ...
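Spelled out as a minimal tc program, this could look roughly like the sketch below (illustration only: it assumes vmlinux.h, a kernel that exposes the bpf_cast_to_kern_ctx kfunc, and open-codes the CHECKSUM_PARTIAL/TC_ACT_OK values since macros are not carried into BTF):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Values from include/linux/skbuff.h and include/uapi/linux/pkt_cls.h;
 * open-coded here because macros are not available via vmlinux.h.
 */
#define CHECKSUM_PARTIAL	3
#define TC_ACT_OK		0

extern void *bpf_cast_to_kern_ctx(void *obj) __ksym;

SEC("tc")
int csum_mode_check(struct __sk_buff *skb)
{
	struct sk_buff *kskb = bpf_cast_to_kern_ctx(skb);

	if (kskb->ip_summed == CHECKSUM_PARTIAL) {
		/* tx path: the L4 checksum field holds only the
		 * pseudo-header csum and the device will finish it, so
		 * the program does not need to recompute the full csum.
		 */
	}

	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";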
Thanks! Menglong Dong
In the first patch, we make skb->csum readable and writable, and make skb->ip_summed readable, for tc only for now. With these two fields, we no longer need to call BPF helpers for csum updates.
In the second patch, we add testcases for read/write access to skb->csum and skb->ip_summed.
If this series is acceptable, we can define the inlined functions for csum update in libbpf in the next step.
One downside of exposing those as __sk_buff fields is that all this skb internal csum stuff now becomes a UAPI. And I'm not sure we want
+1. Please no new __sk_buff extension and no new conversion in bpf_convert_ctx_access().
that :-) Should we add a lightweight kfunc to reset the fields instead? Or will it still have an unacceptable overhead?
On Wed, Jan 3, 2024 at 11:55 AM Yonghong Song <yonghong.song@linux.dev> wrote:
On 1/2/24 6:54 PM, Menglong Dong wrote:
On Wed, Jan 3, 2024 at 8:52 AM Martin KaFai Lau <martin.lau@linux.dev> wrote:
On 1/2/24 10:11 AM, Stanislav Fomichev wrote:
On 12/29, Menglong Dong wrote:
For now, we have to call helpers such as bpf_l4_csum_replace() and bpf_l3_csum_replace() when we need to update the csum. These helpers are not inlined, which hurts performance.
In fact, we can define our own csum update functions in the BPF program instead of calling bpf_l3_csum_replace(); such functions are fully inlined and efficient. However, we can't do the same for bpf_l4_csum_replace() for now, because we have no way to update skb->csum, which can leave skb->csum invalid in the rx path when the skb is in CHECKSUM_COMPLETE mode.
What's more, we can't use direct packet access and have to use skb_store_bytes() with the BPF_F_RECOMPUTE_CSUM flag in some cases, such as modifying the vni in the vxlan header when the underlay udp header has no checksum.
There is bpf_csum_update(); does it work? A helper call should be acceptable compared with the csum calculation itself.
Yeah, this helper works in this case! Now we are missing the last piece for the tx path: ip_summed. We need to know whether it is CHECKSUM_PARTIAL to decide whether we should update the csum in the packet. In the tx path, the L4 csum holds only the pseudo-header csum when skb->ip_summed is CHECKSUM_PARTIAL.
Maybe we can introduce a lightweight kfunc to get its value, such as bpf_skb_csum_mode()? As we only need to call it once, there shouldn't be much overhead.
You don't need a kfunc; you can do the check like this:

    struct sk_buff *kskb = bpf_cast_to_kern_ctx(skb);

    if (kskb->ip_summed == CHECKSUM_PARTIAL)
        ... ...
Great, this is exactly what I need! Thanks~