#include <linux/tcp.h>
#include <net/tcp.h>

int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;

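/* Mark a packet lost for RACK. If it is a retransmission that is now
 * presumed lost again, also undo the retransmission accounting and
 * count it towards the TCPLostRetransmit MIB.
 */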
static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_skb_mark_lost_uncond_verify(tp, skb);
        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                /* Account for retransmits that are lost again */
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                tp->retrans_out -= tcp_skb_pcount(skb);
                NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
                              tcp_skb_pcount(skb));
        }
}

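/* Returns true if the packet stamped t1/seq1 was sent after the packet
 * stamped t2/seq2, breaking transmit timestamp ties by sequence number.
 */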
static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
                                const struct skb_mstamp *t2,
                                u32 seq1, u32 seq2)
{
        return skb_mstamp_after(t1, t2) ||
               (t1->v64 == t2->v64 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
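/* Example (illustrative numbers): P1 is sent at t = 0 ms, P2 at
 * t = 10 ms, and only P2 is SACKed at t = 50 ms, giving
 * rack.rtt_us = 40 ms. P1 was sent before the latest delivered packet,
 * so it is declared lost as soon as more than rack.rtt_us + reo_wnd
 * has elapsed since P1's transmit time.
 */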
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        u32 reo_wnd;

        *reo_timeout = 0;
        /* To be more reordering resilient, allow min_rtt/4 settling delay
         * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
         * RTT because reordering is often a path property and less related
         * to queuing or delayed ACKs.
         */
        reo_wnd = 1000;
        if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
                reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
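        /* e.g. a 40 ms min RTT yields reo_wnd = max(40000 >> 2, 1000)
         * = 10000 us, i.e. a 10 ms settling delay.
         */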

        tcp_for_write_queue(skb, sk) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

                if (skb == tcp_send_head(sk))
                        break;

                /* Skip ones already (s)acked */
                if (!after(scb->end_seq, tp->snd_una) ||
                    scb->sacked & TCPCB_SACKED_ACKED)
                        continue;

                if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
                                        tp->rack.end_seq, scb->end_seq)) {
                        /* Step 3 in draft-cheng-tcpm-rack-00.txt:
                         * A packet is lost if its elapsed time is beyond
                         * the recent RTT plus the reordering window.
                         */
                        u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp,
                                                          &skb->skb_mstamp);
                        s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;

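                        /* e.g. rtt_us = 40000 and reo_wnd = 1000: an
                         * elapsed time of 45000 us gives remaining = -4000
                         * (lost now), while 30000 us gives remaining =
                         * 11000, i.e. wait up to 11 ms more.
                         */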
                        if (remaining < 0) {
                                tcp_rack_mark_skb_lost(sk, skb);
                                continue;
                        }

                        /* Skip ones marked lost but not yet retransmitted */
                        if ((scb->sacked & TCPCB_LOST) &&
                            !(scb->sacked & TCPCB_SACKED_RETRANS))
                                continue;

                        /* Record maximum wait time (+1 to avoid 0) */
                        *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);

                } else if (!(scb->sacked & TCPCB_RETRANS)) {
                        /* Original data are sent sequentially so stop early
                         * b/c the rest are all sent after rack_sent
                         */
                        break;
                }
        }
}

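/* Re-run RACK loss detection if new (s)acked data advanced the RACK
 * state, and arm the reordering timer if some packets are still inside
 * the settling window.
 */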
void tcp_rack_mark_lost(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout;

        if (!tp->rack.advanced)
                return;

        /* Reset the advanced flag to avoid unnecessary queue scanning */
        tp->rack.advanced = 0;
        tcp_rack_detect_loss(sk, &timeout);
        if (timeout) {
                timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
                                          timeout, inet_csk(sk)->icsk_rto);
        }
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                      const struct skb_mstamp *xmit_time)
{
        u32 rtt_us;

        if (tp->rack.mstamp.v64 &&
            !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
                                 end_seq, tp->rack.end_seq))
                return;

        rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time);
        if (sacked & TCPCB_RETRANS) {
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or a prior
                 * retransmission) was sacked.
                 *
                 * If the original is lost, there is no ambiguity. Otherwise
                 * we assume the original can be delayed up to aRTT + min_rtt.
                 * The aRTT term is bounded by the fast recovery or timeout,
                 * so it's at least one RTT (i.e., the retransmission is at
                 * least an RTT later).
                 */
                if (rtt_us < tcp_min_rtt(tp))
                        return;
        }
        tp->rack.rtt_us = rtt_us;
        tp->rack.mstamp = *xmit_time;
        tp->rack.end_seq = end_seq;
        tp->rack.advanced = 1;
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout, prior_inflight;

        prior_inflight = tcp_packets_in_flight(tp);
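        /* Refresh the clock and re-run detection: newly marked losses
         * reduce packets_in_flight, which is how we tell that some
         * packets expired and need retransmitting.
         */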
        skb_mstamp_get(&tp->tcp_mstamp);
        tcp_rack_detect_loss(sk, &timeout);
        if (prior_inflight != tcp_packets_in_flight(tp)) {
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
                        tcp_enter_recovery(sk, false);
                        if (!inet_csk(sk)->icsk_ca_ops->cong_control)
                                tcp_cwnd_reduction(sk, 1, 0);
                }
                tcp_xmit_retransmit_queue(sk);
        }
        if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
                tcp_rearm_rto(sk);
}