/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

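/* Move the software timestamp request carried by the original GSO skb to
 * the one segment that actually holds sequence number ts_seq: walk the
 * segment list and mark the first segment whose range ends beyond ts_seq.
 */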
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

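/* IPv4 GSO entry point: make sure the TCP header is in the linear area and
 * that the pseudo-header checksum has been seeded before handing the skb to
 * the generic TCP segmentation code.
 */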
struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
				 netdev_features_t features)
{
	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

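/* Protocol-independent TCP segmentation, shared by the IPv4 and IPv6
 * offload paths: split a large skb into mss-sized segments and fix up the
 * sequence number, flags and checksum of every segment.
 */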
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

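	/* Remember the ones' complement of the original TCP length (header
	 * plus payload) before the header is pulled; combined below with the
	 * per-segment length it gives the delta needed to update the
	 * pseudo-header checksum incrementally (RFC 1624 style).
	 */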
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_GRE_CSUM |
			       SKB_GSO_IPIP |
			       SKB_GSO_SIT |
			       SKB_GSO_MPLS |
			       SKB_GSO_UDP_TUNNEL |
			       SKB_GSO_UDP_TUNNEL_CSUM |
			       SKB_GSO_TUNNEL_REMCSUM |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

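	/* Walk every segment but the last: clear FIN/PSH, install the
	 * precomputed checksum and an advancing sequence number, and when the
	 * device cannot finish the checksum (not CHECKSUM_PARTIAL) compute it
	 * fully in software.
	 */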
	do {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}

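	/* The last segment usually carries less than mss bytes: recompute the
	 * length delta from its actual payload size and patch its checksum.
	 */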
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

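	/* Look for a held packet of the same flow; comparing th->source and
	 * th->dest as a single 32-bit word checks both ports at once.
	 */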
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr */
	flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = tcp_skb_mss(p);

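	/* Do not merge unless the new segment directly follows the held one;
	 * (len - 1) >= mss also rejects segments larger than the flow's MSS
	 * and, via unsigned wrap-around, zero-length pure ACKs.
	 */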
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

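/* Finish a merged GRO packet so it can be handled like a locally generated
 * GSO skb: point the checksum offload fields at the TCP checksum, record the
 * number of coalesced segments in gso_segs, and flag ECN (CWR) packets as
 * SKB_GSO_TCP_ECN.
 */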
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

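/* Callbacks registered for IPPROTO_TCP; the IPv4 GRO/GSO core dispatches to
 * them through the inet_offloads table.
 */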
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}