/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

struct udp_offload_priv {
	struct udp_offload	*offload;
	struct rcu_head		rcu;
	struct udp_offload_priv __rcu *next;
};

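/* gso_send_check() callback for UDPv4: for plain (non-encapsulated)
 * UFO packets, seed uh->check with the pseudo-header checksum and mark
 * the skb CHECKSUM_PARTIAL so the checksum can be completed per segment.
 */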
static int udp4_ufo_send_check(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		return -EINVAL;

	if (likely(!skb->encapsulation)) {
		const struct iphdr *iph;
		struct udphdr *uh;

		iph = ip_hdr(skb);
		uh = udp_hdr(skb);

		uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
					       IPPROTO_UDP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		skb->ip_summed = CHECKSUM_PARTIAL;
	}

	return 0;
}

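/* Segment a UFO skb. UDP tunnel skbs are handed to
 * skb_udp_tunnel_segment(); everything else gets a software UDP
 * checksum (hardware cannot checksum a datagram sent as multiple IP
 * fragments) and is then split by skb_segment().
 */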
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	int offset;
	__wsum csum;

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
		segs = skb_udp_tunnel_segment(skb, features);
		goto out;
	}

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_MPLS) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	offset = skb_checksum_start_offset(skb);
	csum = skb_checksum(skb, offset, skb->len - offset, 0);
	offset += skb->csum_offset;
	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

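/* Register a UDP port offload. New entries are pushed at the head of
 * an RCU-protected singly linked list; writers serialize on
 * udp_offload_lock, readers walk the list under rcu_read_lock().
 */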
int udp_add_offload(struct udp_offload *uo)
{
	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

	if (!new_offload)
		return -ENOMEM;

	new_offload->offload = uo;

	spin_lock(&udp_offload_lock);
	new_offload->next = udp_offload_base;
	rcu_assign_pointer(udp_offload_base, new_offload);
	spin_unlock(&udp_offload_lock);

	return 0;
}
EXPORT_SYMBOL(udp_add_offload);

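/* Example (illustrative sketch, not part of this file): a UDP tunnel
 * driver such as vxlan would register its well-known port roughly as
 * follows; the callback names here are hypothetical.
 *
 *	static struct udp_offload my_tunnel_offload = {
 *		.port = htons(4789),	(the IANA-assigned VXLAN port)
 *		.callbacks = {
 *			.gro_receive  = my_tunnel_gro_receive,
 *			.gro_complete = my_tunnel_gro_complete,
 *		},
 *	};
 *
 *	if (udp_add_offload(&my_tunnel_offload))
 *		pr_err("could not register UDP offload\n");
 *
 * The matching udp_del_offload(&my_tunnel_offload) must be called
 * before the callbacks go away, e.g. on module unload.
 */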
static void udp_offload_free_routine(struct rcu_head *head)
{
	struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);
	kfree(ou_priv);
}

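/* Unregister a UDP port offload: unlink the entry under the spinlock,
 * then free it only after an RCU grace period so that concurrent GRO
 * readers can finish walking the list.
 */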
void udp_del_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *uo_priv;

	spin_lock(&udp_offload_lock);

	uo_priv = udp_deref_protected(*head);
	for (; uo_priv != NULL;
	     uo_priv = udp_deref_protected(*head)) {
		if (uo_priv->offload == uo) {
			rcu_assign_pointer(*head,
					   udp_deref_protected(uo_priv->next));
			goto unlock;
		}
		head = &uo_priv->next;
	}
	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
	spin_unlock(&udp_offload_lock);
	if (uo_priv != NULL)
		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);

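/* GRO receive callback for UDP: a packet is only a GRO candidate when
 * a registered offload has claimed its destination port; otherwise it
 * is flushed. Held packets whose UDP ports differ are marked as
 * separate flows, then the encapsulated payload is handed to the
 * matching tunnel's own gro_receive().
 */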
static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct udp_offload_priv *uo_priv;
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh, *uh2;
	unsigned int hlen, off;
	int flush = 1;

	if (NAPI_GRO_CB(skb)->udp_mark ||
	    (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
		goto out;

	/* mark that this skb passed once through the udp gro layer */
	NAPI_GRO_CB(skb)->udp_mark = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		uh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!uh))
			goto out;
	}

	rcu_read_lock();
	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_receive)
			goto unflush;
	}
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}

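/* GRO complete callback for UDP: fix up the UDP length field of the
 * merged super-packet, then delegate the remaining header rewrite to
 * the matching offload's gro_complete().
 */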
static int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct udp_offload_priv *uo_priv;
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	uh->len = newlen;

	rcu_read_lock();

	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_complete)
			break;
	}

	if (uo_priv != NULL)
		err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));

	rcu_read_unlock();
	return err;
}

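/* Offload hooks registered with the inet layer for IPPROTO_UDP. */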
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_send_check = udp4_ufo_send_check,
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp_gro_receive,
		.gro_complete = udp_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}