/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

struct udp_offload_priv {
	struct udp_offload	*offload;
	struct rcu_head		rcu;
	struct udp_offload_priv __rcu *next;
};

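/* GSO for UDP-encapsulated tunnels: strip the outer headers, segment the
 * inner packet against the device's hw_enc_features, then rebuild the outer
 * MAC/IP/UDP headers on every resulting segment, fixing up the outer UDP
 * length and, when SKB_GSO_UDP_TUNNEL_CSUM is set, the outer UDP checksum.
 */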
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	__be16 protocol = skb->protocol;
	netdev_features_t enc_features;
	int udp_offset, outer_hlen;
	unsigned int oldlen;
	bool need_csum;

	oldlen = (u16)~skb->len;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	skb->encapsulation = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = htons(ETH_P_TEB);

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	if (need_csum)
		skb->encap_hdr_csum = 1;

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct udphdr *uh;
		int len;

		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		skb->mac_len = mac_len;

		skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);
		uh->len = htons(len);

		if (need_csum) {
			__be32 delta = htonl(oldlen + len);

			uh->check = ~csum_fold((__force __wsum)
					       ((__force u32)uh->check +
						(__force u32)delta));
			uh->check = gso_make_checksum(skb, ~uh->check);

			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		}

		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}

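/* GSO handler for UDPv4.  Tunnel GSO types are handed off to
 * skb_udp_tunnel_segment(); plain UFO packets get a full software UDP
 * checksum here and are then split into IP fragments by skb_segment().
 */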
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_UDP_TUNNEL_CSUM |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
				      SKB_GSO_MPLS) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */

	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

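/* Register a per-port UDP offload (GRO receive/complete callbacks) used by
 * tunneling protocols.  The entry is allocated with GFP_ATOMIC and inserted
 * at the head of the list under udp_offload_lock; readers walk the list
 * under RCU in udp_gro_receive() and udp_gro_complete().
 *
 * A minimal sketch of a hypothetical caller (MY_TUNNEL_PORT, my_gro_receive
 * and my_gro_complete are illustrative names, not defined in this file):
 *
 *	static struct udp_offload my_offload;
 *
 *	my_offload.port			  = htons(MY_TUNNEL_PORT);
 *	my_offload.ipproto		  = IPPROTO_UDP;
 *	my_offload.callbacks.gro_receive  = my_gro_receive;
 *	my_offload.callbacks.gro_complete = my_gro_complete;
 *	err = udp_add_offload(&my_offload);
 */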
int udp_add_offload(struct udp_offload *uo)
{
	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

	if (!new_offload)
		return -ENOMEM;

	new_offload->offload = uo;

	spin_lock(&udp_offload_lock);
	new_offload->next = udp_offload_base;
	rcu_assign_pointer(udp_offload_base, new_offload);
	spin_unlock(&udp_offload_lock);

	return 0;
}
EXPORT_SYMBOL(udp_add_offload);

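/* RCU callback: free a udp_offload_priv entry once no readers can still
 * hold a reference to it.
 */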
static void udp_offload_free_routine(struct rcu_head *head)
{
	struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);
	kfree(ou_priv);
}

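/* Unlink a previously registered offload from the list under
 * udp_offload_lock and free it only after an RCU grace period, so that
 * concurrent GRO receivers still walking the list remain safe.
 */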
void udp_del_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *uo_priv;

	spin_lock(&udp_offload_lock);

	uo_priv = udp_deref_protected(*head);
	for (; uo_priv != NULL;
	     uo_priv = udp_deref_protected(*head)) {
		if (uo_priv->offload == uo) {
			rcu_assign_pointer(*head,
					   udp_deref_protected(uo_priv->next));
			goto unlock;
		}
		head = &uo_priv->next;
	}
	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
	spin_unlock(&udp_offload_lock);
	if (uo_priv != NULL)
		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);

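/* Protocol-independent part of UDP GRO receive (udp4_gro_receive() below
 * calls it after checksum validation).  A packet is only considered for
 * aggregation if its checksum is usable (CHECKSUM_PARTIAL, already counted
 * as unnecessary, or validated) and a registered offload matches its
 * destination port; otherwise it is flushed.  Candidate flows must match on
 * both ports and agree on whether a UDP checksum is present.  The actual
 * merging is delegated to the offload's gro_receive callback.
 */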
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh)
{
	struct udp_offload_priv *uo_priv;
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;

	if (NAPI_GRO_CB(skb)->udp_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* mark that this skb passed once through the udp gro layer */
	NAPI_GRO_CB(skb)->udp_mark = 1;

	rcu_read_lock();
	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_receive)
			goto unflush;
	}
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports, and require that the checksums are either
		 * both zero or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
	pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}

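/* IPv4-specific GRO receive: validate the UDP checksum (or accept a zero
 * checksum) against the IPv4 pseudo-header before handing the packet to the
 * common udp_gro_receive() path above.
 */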
static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);
skip:
	return udp_gro_receive(head, skb, uh);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

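/* Finish GRO for a merged UDP packet: rewrite the UDP length for the
 * aggregated skb, then invoke the gro_complete callback of the offload
 * registered for the destination port on the encapsulated payload.
 */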
int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct udp_offload_priv *uo_priv;
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	uh->len = newlen;

	rcu_read_lock();

	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_complete)
			break;
	}

	if (uo_priv != NULL) {
		NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
		err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));
	}

	rcu_read_unlock();
	return err;
}

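/* IPv4 gro_complete: if the packet carries a UDP checksum, rebuild it from
 * the IPv4 pseudo-header for the new aggregated length, then defer to the
 * generic udp_gro_complete().
 */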
static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	if (uh->check)
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);

	return udp_gro_complete(skb, nhoff);
}

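/* Offload callbacks registered with the inet layer for IPPROTO_UDP. */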
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment  = udp4_ufo_fragment,
		.gro_receive  = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

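/* Register the UDPv4 handlers above in the IPv4 protocol offload table. */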
int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}