/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

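/* Walk the chain of segments built for a software-timestamped GSO skb and
 * copy the timestamp request onto the one segment that contains the byte
 * identified by ts_seq, so only that segment generates a tx timestamp.
 */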
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

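/* IPv4 entry point for TCP GSO: make sure a TCP header is present and, if
 * the checksum was not already set up as CHECKSUM_PARTIAL, rebuild the
 * pseudo-header checksum before handing off to the generic tcp_gso_segment().
 */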
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo header; we usually expect the
		 * stack to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

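/* Split a TCP super-packet into mss-sized segments. If skb_gso_ok() says the
 * device can take the packet as-is, only gso_segs is refreshed and NULL is
 * returned; otherwise the skb is segmented and sequence numbers, flags and
 * checksums are fixed up on each resulting segment.
 */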
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

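	/* Remember the ones' complement of the original TCP header+payload
	 * length: th->check is later adjusted incrementally (RFC 1624 style,
	 * adding ~old_len + new_len) instead of being recomputed from scratch.
	 */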
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only the first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only require splitting
	 * the frame into an MSS multiple and possibly a remainder; both
	 * cases return a GSO skb, so update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

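	/* Every segment except the last carries exactly mss bytes of payload,
	 * so they can all share the pre-computed checksum adjustment; clear
	 * FIN/PSH and advance the sequence number as we walk the chain.
	 */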
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last frag
	 * is freed at TX completion, and not right now, when gso_skb
	 * is freed by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative;
		 * we need to use either refcount_add() or refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

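	/* The last segment may be shorter than mss, so recompute its checksum
	 * delta from the bytes actually left in the skb.
	 */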
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

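/* GRO receive path shared by TCPv4 and TCPv6: try to merge @skb into an
 * already-held packet of the same flow, or flag it for immediate delivery.
 */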
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

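	/* Look for a held packet belonging to the same flow; comparing
	 * th->source as a single 32-bit word checks both the source and
	 * destination ports at once.
	 */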
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on whether
	 * we continue this flow as an atomic flow with a fixed ID or we use
	 * an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

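/* Decide whether the held packet must be handed up to the stack now: a
 * segment shorter than mss or one carrying URG/PSH/RST/SYN/FIN ends the
 * aggregation.
 */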
out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

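/* Finalize a merged GRO packet: restore CHECKSUM_PARTIAL state pointing at
 * the TCP checksum field and propagate the segment count (plus ECN CWR, if
 * seen) into the shared gso info so the packet can be re-segmented later.
 */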
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

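/* IPv4 entry point for TCP GRO: validate the TCP checksum against the IPv4
 * pseudo header (unless the packet is already being flushed) before letting
 * the generic tcp_gro_receive() try to merge it.
 */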
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

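/* IPv4 completion hook: seed th->check with the pseudo-header checksum and
 * set the gso_type (TCPV4, plus FIXEDID for atomic flows) before the generic
 * tcp_gro_complete() finishes the skb.
 */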
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}