#include <linux/tcp.h>
#include <net/tcp.h>

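/* RACK is enabled by default: TCP_RACK_LOST_RETRANS is the bit of the
 * net.ipv4.tcp_recovery sysctl that turns on RACK-based loss detection.
 */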
int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;

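/* Mark @skb lost and, if it is a retransmission, clear its
 * TCPCB_SACKED_RETRANS flag so that retrans_out stays consistent.
 */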
static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
	}
}

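/* Returns true if the packet (t1, seq1) was sent after (t2, seq2):
 * either its timestamp is later, or the timestamps tie (e.g. packets
 * sent in the same burst) and its end sequence is higher.
 */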
static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
				const struct skb_mstamp *t2,
				u32 seq1, u32 seq2)
{
	return skb_mstamp_after(t1, t2) ||
	       (t1->v64 == t2->v64 && after(seq1, seq2));
}

/* Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * The current version is only used after recovery starts but can be
 * easily extended to detect the first loss.
 */
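/* Example: with RACK.rtt_us = 40000 (40 ms) and reo_wnd = 10000 (10 ms),
 * a packet sent at time T is marked lost once some packet sent after T
 * has been (s)acked and more than 50 ms have elapsed since T.
 */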
static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now,
				 u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000 usec). We use min_rtt instead of the
	 * smoothed RTT because reordering is often a path property and less
	 * related to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);

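	/* Walk the write queue from snd_una. Original transmissions sit in
	 * both sequence and send-time order, so the scan can stop at the
	 * first never-retransmitted skb sent after the RACK timestamp.
	 */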
	tcp_for_write_queue(skb, sk) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

		if (skb == tcp_send_head(sk))
			break;

		/* Skip ones already (s)acked */
		if (!after(scb->end_seq, tp->snd_una) ||
		    scb->sacked & TCPCB_SACKED_ACKED)
			continue;

		if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
					tp->rack.end_seq, scb->end_seq)) {
			/* Step 3 in draft-cheng-tcpm-rack-00.txt:
			 * A packet is lost if its elapsed time is beyond
			 * the recent RTT plus the reordering window.
			 */
			u32 elapsed = skb_mstamp_us_delta(now,
							  &skb->skb_mstamp);
			s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;

			if (remaining < 0) {
				tcp_rack_mark_skb_lost(sk, skb);
				continue;
			}

			/* Skip ones marked lost but not yet retransmitted */
			if ((scb->sacked & TCPCB_LOST) &&
			    !(scb->sacked & TCPCB_SACKED_RETRANS))
				continue;

			/* Record maximum wait time (+1 to avoid 0) */
			*reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);

		} else if (!(scb->sacked & TCPCB_RETRANS)) {
			/* Original data are sent sequentially, so stop early
			 * because the rest were all sent after rack_sent.
			 */
			break;
		}
	}
}

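/* Run RACK loss detection on an incoming ACK. If some packets need more
 * time to pass the RTT + reo_wnd test, arm the reordering timer for the
 * longest remaining wait instead of marking them lost right away.
 */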
void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, now, &timeout);
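	/* Pad the remaining wait by TCP_REO_TIMEOUT_MIN usec before
	 * converting to jiffies, so a very short timeout does not fire
	 * before the packets have actually expired.
	 */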
	if (timeout) {
		timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt.
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      const struct skb_mstamp *xmit_time,
		      const struct skb_mstamp *ack_time)
{
	u32 rtt_us;

	if (tp->rack.mstamp.v64 &&
	    !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
				 end_seq, tp->rack.end_seq))
		return;

	rtt_us = skb_mstamp_us_delta(ack_time, xmit_time);
	if (sacked & TCPCB_RETRANS) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		if (rtt_us < tcp_min_rtt(tp))
			return;
	}
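	/* This (s)acked packet is the most recently sent one known to be
	 * delivered: advance the RACK state to it.
	 */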
	tp->rack.rtt_us = rtt_us;
	tp->rack.mstamp = *xmit_time;
	tp->rack.end_seq = end_seq;
	tp->rack.advanced = 1;
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct skb_mstamp now;
	u32 timeout, prior_inflight;

	skb_mstamp_get(&now);
	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &now, &timeout);
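	/* Newly marked losses raise lost_out and thus shrink
	 * tcp_packets_in_flight(): if that happened, enter recovery
	 * (unless already in it) and retransmit the lost packets.
	 */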
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}