// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}

static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is it applies to both original and retransmitted
 * packets and therefore is robust against tail losses. Another advantage
 * is being more resilient to reordering by simply allowing some
 * "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
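/* A brief worked example of the rule above (illustrative, not from the
 * draft): if P1 is sent at time t1 and P2 at t2 > t1, and only P2 gets
 * (s)acked, then P1 is declared lost once
 *   tcp_mstamp - t1 > rack.rtt_us + reo_wnd
 * i.e. once "remaining" below turns negative; until then the remaining
 * wait is fed into the reordering timer via *reo_timeout.
 */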
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 min_rtt = tcp_min_rtt(tp);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000 usec). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
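	/* Adaptively widen the window: each reo_wnd_steps step (grown when
	 * DSACKs indicate spurious retransmits) adds another min_rtt/4, and
	 * the result is clamped to the smoothed RTT (srtt_us is stored <<3,
	 * so >>3 yields SRTT in usec).
	 */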
	if ((tp->rack.reord || !tp->lost_out) && min_rtt != ~0U) {
		reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
		reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
	}

	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

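		/* tp->tsorted_sent_queue is ordered by (re)transmit time, so
		 * once we reach an skb sent no earlier than the most recently
		 * (s)acked one (RACK.xmit_time, ties broken by end_seq),
		 * neither it nor anything later in the list can be marked
		 * lost yet.
		 */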
		if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tp->rack.rtt_us + reo_wnd -
			    tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
		if (remaining < 0) {
			tcp_rack_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time (+1 to avoid 0) */
			*reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
		}
	}
}

void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
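		/* Convert the remaining wait to jiffies and pad it by
		 * TCP_TIMEOUT_MIN so the reordering timer is never armed
		 * below the minimum TCP timer granularity.
		 */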
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

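	/* Only advance the RACK state if this (s)acked skb was sent more
	 * recently than the current RACK.xmit_time (ties broken by end_seq).
	 */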
	if (tp->rack.mstamp &&
	    !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				 end_seq, tp->rack.end_seq))
		return;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (sacked & TCPCB_RETRANS) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., retransmission is at least
		 * an RTT later).
		 */
		if (rtt_us < tcp_min_rtt(tp))
			return;
	}
	tp->rack.rtt_us = rtt_us;
	tp->rack.mstamp = xmit_time;
	tp->rack.end_seq = end_seq;
	tp->rack.advanced = 1;
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
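	/* Detection may have marked new packets lost, shrinking the in-flight
	 * count; if so, enter recovery (unless already there), start cwnd
	 * reduction only when the CC module does not own cwnd via
	 * cong_control, and retransmit what was just marked lost.
	 */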
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Updates the RACK reo_wnd based on DSACKs and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is a possibility that a spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (this accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to is (approximately) due to a spurious retransmission sent
 * after reo_wnd was last updated.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than as an
 * absolute value, to account for changes in the RTT.
 */
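/* Worked example (illustrative numbers, not from the draft): with
 * min_rtt = 40 msec and reo_wnd_steps = 2, tcp_rack_detect_loss() waits
 * min(2 * 10 msec, srtt) on top of the RACK RTT before marking a packet
 * lost, and the extra step decays back to the default after 16 recoveries
 * with no further DSACKs.
 */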
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}