#include <linux/tcp.h>
#include <net/tcp.h>

int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;

/* Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * The current version is only used after recovery starts but can be
 * easily extended to detect the first loss.
 */
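/* A worked example with hypothetical timings (not from the original
 * comments): P1 is sent at t = 0ms, P2 at t = 10ms, and only P2 is
 * SACKed. With reo_wnd = 2ms, RACK marks P1 lost as soon as the SACK
 * for P2 arrives, because P1's sent time trails the latest delivered
 * packet by 10ms > reo_wnd. A dupthresh of 3 would still be waiting
 * for two more duplicate ACKs.
 */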
int tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	u32 reo_wnd, prior_retrans = tp->retrans_out;

	if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
		return 0;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;

	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000 usec). We use min_rtt instead of the
	 * smoothed RTT because reordering is often a path property and is
	 * less related to queuing or delayed ACKs.
	 *
	 * TODO: measure and adapt to the observed reordering delay, and
	 * use a timer to retransmit like the delayed early retransmit.
	 */
	reo_wnd = 1000;
	if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
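	/* For instance (hypothetical numbers, not from the original
	 * comments): with reordering previously observed and
	 * min_rtt = 40ms, reo_wnd becomes 10000 usec, so a packet is
	 * only marked lost once its sent time trails the latest
	 * delivered packet's by more than 10ms.
	 */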

	tcp_for_write_queue(skb, sk) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

		if (skb == tcp_send_head(sk))
			break;

		/* Skip ones already (s)acked */
		if (!after(scb->end_seq, tp->snd_una) ||
		    scb->sacked & TCPCB_SACKED_ACKED)
			continue;

		if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) {
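			/* skb was sent before the most recently (s)acked
			 * packet; tolerate up to reo_wnd of reordering
			 * before declaring it lost.
			 */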
			if (skb_mstamp_us_delta(&tp->rack.mstamp,
						&skb->skb_mstamp) <= reo_wnd)
				continue;

			/* skb is lost if a packet sent later was sacked */
			tcp_skb_mark_lost_uncond_verify(tp, skb);
			if (scb->sacked & TCPCB_SACKED_RETRANS) {
				scb->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPLOSTRETRANSMIT);
			}
		} else if (!(scb->sacked & TCPCB_RETRANS)) {
			/* Original data are sent sequentially, so stop early
			 * because the rest were all sent after rack.mstamp
			 */
			break;
		}
	}
	return prior_retrans - tp->retrans_out;
}

/* Record the most recently (re)sent time among the (s)acked packets */
void tcp_rack_advance(struct tcp_sock *tp,
		      const struct skb_mstamp *xmit_time, u8 sacked)
{
	if (tp->rack.mstamp.v64 &&
	    !skb_mstamp_after(xmit_time, &tp->rack.mstamp))
		return;

	if (sacked & TCPCB_RETRANS) {
		struct skb_mstamp now;

		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is sent
		 * at least an RTT after the original).
		 */
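		/* Concretely (hypothetical numbers): with min_rtt = 20ms,
		 * a (s)ack arriving less than 20ms after the retransmission
		 * may well be a delayed (s)ack of the original, so we skip
		 * the update below rather than advance rack.mstamp on an
		 * ambiguous timestamp.
		 */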
		skb_mstamp_get(&now);
		if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp))
			return;
	}

	tp->rack.mstamp = *xmit_time;
	tp->rack.advanced = 1;
}
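
/* A minimal sketch of how these two hooks fit together on the ACK
 * path. The call sites below are a simplified assumption about the
 * callers in tcp_input.c, not part of this file:
 *
 *	tcp_sacktag_one() / tcp_clean_rtx_queue():
 *		tcp_rack_advance(tp, xmit_time, sacked);
 *
 *	tcp_fastretrans_alert(), per incoming ACK:
 *		if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS)
 *			if (tcp_rack_mark_lost(sk))
 *				flag |= FLAG_LOST_RETRANS;
 */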