// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

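/* Mark @skb lost. If it was a retransmission that is now lost again, drop it
 * from the retransmitted-out accounting and bump the LostRetransmit counter.
 */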
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}

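/* Returns true if packet 1 (sent at @t1, ending at @seq1) was sent after
 * packet 2 (sent at @t2, ending at @seq2); ties in the send timestamps are
 * broken by the end sequence numbers.
 */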
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive during
		 * recovery or when starting recovery by the DUPACK threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more reordering resilient, allow min_rtt/4 settling delay.
	 * Use min_rtt instead of the smoothed RTT because reordering is
	 * often a path property and less related to queuing or delayed ACKs.
	 * Upon receiving DSACKs, linearly increase the window up to the
	 * smoothed RTT.
	 */
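	/* Illustrative example (assumed numbers): with min_rtt = 40 ms and
	 * reo_wnd_steps = 1 the window is 10 ms; each DSACK-driven step adds
	 * another 10 ms, capped at the smoothed RTT.
	 */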
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}

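/* Remaining wait time (in usec) before @skb can be marked lost: the RACK
 * RTT plus the reordering window, minus the time elapsed since @skb was
 * last (re)transmitted. A non-positive value means the deadline has passed.
 */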
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

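/* Run RACK loss detection once new (s)acked packets have advanced the RACK
 * state. Packets that have not yet reached their deadline arm the reordering
 * timer so they can be re-checked by tcp_rack_reo_timeout().
 */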
void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
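	/* Newly marked losses shrink the in-flight count: if anything was
	 * marked, enter recovery (unless already in it) and retransmit.
	 */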
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Updates RACK's reo_wnd based on DSACKs and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since the spurious retransmission may have been due to a
 * reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (this accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to is (approximately) due to a spurious retransmission sent
 * after reo_wnd was last updated.
 *
 * reo_wnd is tracked in steps of min_rtt/4, rather than as an absolute
 * value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}

/* RFC6582 NewReno recovery for non-SACK connections. It simply retransmits
 * the next unacked packet upon receiving
 * a) three or more DUPACKs to start the fast recovery
 * b) an ACK acknowledging new data during the fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

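		/* If the packet carries more than one MSS worth of data, split
		 * off the first MSS so only that much is marked lost and
		 * retransmitted.
		 */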
		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_skb_mark_lost_uncond_verify(tp, skb);
	}
}