From: Ilpo Järvinen <ij@kernel.org>
Include the echoed CE count in rate_sample and replace the local ecn_count variable in tcp_ack() with it.
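For illustration only (not part of this patch), a congestion control module that receives the rate_sample could consume the new field roughly as sketched below; ce_fraction_permille() is a hypothetical helper name, not an existing kernel function:

  /* Hypothetical sketch: estimate the per-ACK CE-marking ratio from the
   * new rate_sample::ece_delta. With AccECN, ece_delta is the ACE-based
   * CE delta computed by tcp_accecn_process(); with RFC 3168 ECE it is
   * set to all newly delivered packets in tcp_newly_delivered().
   */
  static u32 ce_fraction_permille(const struct rate_sample *rs)
  {
          u32 acked = rs->acked_sacked;

          if (!acked)
                  return 0;

          /* Clamp in case ece_delta exceeds the newly (S)ACKed count. */
          return min_t(u32, 1000U, rs->ece_delta * 1000U / acked);
  }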
Co-developed-by: Olivier Tilmans <olivier.tilmans@nokia.com>
Signed-off-by: Olivier Tilmans <olivier.tilmans@nokia.com>
Signed-off-by: Ilpo Järvinen <ij@kernel.org>
Signed-off-by: Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>
---
 include/net/tcp.h    |  1 +
 net/ipv4/tcp_input.c | 37 ++++++++++++++++---------------------
 2 files changed, 17 insertions(+), 21 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7c51a0a5ace8..23f0355d1991 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1220,6 +1220,7 @@ struct rate_sample {
         int  losses;             /* number of packets marked lost upon ACK */
         u32  acked_sacked;       /* number of packets newly (S)ACKed upon ACK */
         u32  prior_in_flight;    /* in flight before this ACK */
+        u32  ece_delta;          /* is this ACK echoing some received CE? */
         u32  last_end_seq;       /* end_seq of most recently ACKed packet */
         bool is_app_limited;     /* is sample from packet with bubble in pipe? */
         bool is_retrans;         /* is sample from retransmission? */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0b25bf03ae6a..6f7c2f0fc0f6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -578,9 +578,9 @@ static u32 __tcp_accecn_process(struct sock *sk, const struct sk_buff *skb,
         return safe_delta;
 }
 
-static u32 tcp_accecn_process(struct sock *sk, const struct sk_buff *skb,
-                              u32 delivered_pkts, u32 delivered_bytes,
-                              int *flag)
+static void tcp_accecn_process(struct sock *sk, struct rate_sample *rs,
+                               const struct sk_buff *skb, u32 delivered_pkts,
+                               u32 delivered_bytes, int *flag)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         u32 delta;
@@ -590,11 +590,11 @@ static u32 tcp_accecn_process(struct sock *sk, const struct sk_buff *skb,
         if (delta > 0) {
                 tcp_count_delivered_ce(tp, delta);
                 *flag |= FLAG_ECE;
+                rs->ece_delta = delta;
                 /* Recalculate header predictor */
                 if (tp->pred_flags)
                         tcp_fast_path_on(tp);
         }
-        return delta;
 }
 
 /* Buffer size and advertised window tuning.
@@ -3971,8 +3971,8 @@ static void tcp_xmit_recovery(struct sock *sk, int rexmit)
 }
 
 /* Returns the number of packets newly acked or sacked by the current ACK */
-static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered,
-                               u32 ecn_count, int flag)
+static u32 tcp_newly_delivered(struct sock *sk, struct rate_sample *rs,
+                               u32 prior_delivered, int flag)
 {
         const struct net *net = sock_net(sk);
         struct tcp_sock *tp = tcp_sk(sk);
@@ -3983,8 +3983,8 @@ static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered,
 
         if (flag & FLAG_ECE) {
                 if (tcp_ecn_mode_rfc3168(tp))
-                        ecn_count = delivered;
-                NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, ecn_count);
+                        rs->ece_delta = delivered;
+                NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, rs->ece_delta);
         }
 
         return delivered;
@@ -3996,7 +3996,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
         struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
         struct tcp_sacktag_state sack_state;
-        struct rate_sample rs = { .prior_delivered = 0 };
+        struct rate_sample rs = { .prior_delivered = 0, .ece_delta = 0 };
         u32 prior_snd_una = tp->snd_una;
         bool is_sack_reneg = tp->is_sack_reneg;
         u32 ack_seq = TCP_SKB_CB(skb)->seq;
@@ -4006,7 +4006,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
         u32 delivered = tp->delivered;
         u32 lost = tp->lost;
         int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
-        u32 ecn_count = 0; /* Did we receive ECE/an AccECN ACE update? */
         u32 prior_fack;
 
         sack_state.first_sackt = 0;
@@ -4116,10 +4115,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                 tcp_rack_update_reo_wnd(sk, &rs);
 
         if (tcp_ecn_mode_accecn(tp))
-                ecn_count = tcp_accecn_process(sk, skb,
-                                               tp->delivered - delivered,
-                                               sack_state.delivered_bytes,
-                                               &flag);
+                tcp_accecn_process(sk, &rs, skb, tp->delivered - delivered,
+                                   sack_state.delivered_bytes, &flag);
 
         tcp_in_ack_event(sk, flag);
 
@@ -4145,7 +4142,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
         if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
                 sk_dst_confirm(sk);
 
-        delivered = tcp_newly_delivered(sk, delivered, ecn_count, flag);
+        delivered = tcp_newly_delivered(sk, &rs, delivered, flag);
 
         lost = tp->lost - lost;          /* freshly marked lost */
         rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
@@ -4156,16 +4153,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 no_queue:
         if (tcp_ecn_mode_accecn(tp))
-                ecn_count = tcp_accecn_process(sk, skb,
-                                               tp->delivered - delivered,
-                                               sack_state.delivered_bytes,
-                                               &flag);
+                tcp_accecn_process(sk, &rs, skb, tp->delivered - delivered,
+                                   sack_state.delivered_bytes, &flag);
         tcp_in_ack_event(sk, flag);
         /* If data was DSACKed, see if we can undo a cwnd reduction. */
         if (flag & FLAG_DSACKING_ACK) {
                 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
                                       &rexmit);
-                tcp_newly_delivered(sk, delivered, ecn_count, flag);
+                tcp_newly_delivered(sk, &rs, delivered, flag);
         }
         /* If this ack opens up a zero window, clear backoff.  It was
          * being used to time the probes, and is probably far higher than
@@ -4186,7 +4181,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                                                  &sack_state);
                 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
                                       &rexmit);
-                tcp_newly_delivered(sk, delivered, ecn_count, flag);
+                tcp_newly_delivered(sk, &rs, delivered, flag);
                 tcp_xmit_recovery(sk, rexmit);
         }