/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

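/* Registered UDP tunnel offloads live in a singly linked list headed by
 * udp_offload_base. Lookups on the receive path walk it under RCU;
 * udp_add_offload()/udp_del_offload() serialize updates with
 * udp_offload_lock.
 */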
static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

struct udp_offload_priv {
	struct udp_offload	*offload;
	struct rcu_head		rcu;
	struct udp_offload_priv __rcu *next;
};

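/* Seed uh->check with the pseudo-header checksum and flip the skb to
 * CHECKSUM_PARTIAL so the real checksum can be finished per segment.
 * Encapsulated packets are left for the tunnel segmentation path.
 */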
static int udp4_ufo_send_check(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		return -EINVAL;

	if (likely(!skb->encapsulation)) {
		const struct iphdr *iph;
		struct udphdr *uh;

		iph = ip_hdr(skb);
		uh = udp_hdr(skb);

		uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
					       IPPROTO_UDP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		skb->ip_summed = CHECKSUM_PARTIAL;
	}

	return 0;
}

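/* GSO entry point for UDPv4. UDP tunnel packets are handed off to
 * skb_udp_tunnel_segment(); plain UFO packets get their UDP checksum
 * completed in software and are then split by skb_segment().
 */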
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	int offset;
	__wsum csum;

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
		segs = skb_udp_tunnel_segment(skb, features);
		goto out;
	}

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_MPLS) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	offset = skb_checksum_start_offset(skb);
	csum = skb_checksum(skb, offset, skb->len - offset, 0);
	offset += skb->csum_offset;
	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

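/* Register a set of per-destination-port GRO callbacks. The new entry is
 * pushed at the head of the list under udp_offload_lock.
 */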
int udp_add_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_KERNEL);

	if (!new_offload)
		return -ENOMEM;

	new_offload->offload = uo;

	spin_lock(&udp_offload_lock);
	rcu_assign_pointer(new_offload->next, rcu_dereference(*head));
	rcu_assign_pointer(*head, new_offload);
	spin_unlock(&udp_offload_lock);

	return 0;
}
EXPORT_SYMBOL(udp_add_offload);

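/* Usage sketch (illustrative, not part of this file): a UDP tunnel
 * driver such as vxlan registers its listening port once, e.g.:
 *
 *	static struct udp_offload vxlan_offload = {
 *		.port = htons(4789),	// IANA VXLAN port; driver-specific
 *		.callbacks = {
 *			.gro_receive  = my_tunnel_gro_receive,
 *			.gro_complete = my_tunnel_gro_complete,
 *		},
 *	};
 *
 *	err = udp_add_offload(&vxlan_offload);
 *
 * my_tunnel_gro_receive/my_tunnel_gro_complete are placeholder names;
 * the entry is removed again with udp_del_offload(&vxlan_offload).
 */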
static void udp_offload_free_routine(struct rcu_head *head)
{
	struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);

	kfree(ou_priv);
}

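/* Unlink the entry matching @uo under the lock; the list node itself is
 * freed only after an RCU grace period so concurrent readers stay safe.
 */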
void udp_del_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *uo_priv;

	spin_lock(&udp_offload_lock);

	uo_priv = rcu_dereference(*head);
	for (; uo_priv != NULL;
	     uo_priv = rcu_dereference(*head)) {
		if (uo_priv->offload == uo) {
			rcu_assign_pointer(*head,
					   rcu_dereference(uo_priv->next));
			goto unlock;
		}
		head = &uo_priv->next;
	}
	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
	spin_unlock(&udp_offload_lock);
	if (uo_priv != NULL)
		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);

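/* GRO receive for UDP. Packets that already passed through this layer
 * (udp_mark), or that are neither encapsulated nor covered by a full
 * hardware checksum (CHECKSUM_COMPLETE), are flushed rather than merged.
 * Otherwise the destination port is matched against the registered
 * offloads; on a hit, flows are compared with a single u32 load covering
 * both port fields, the outer UDP header is pulled, and the tunnel's
 * gro_receive() callback takes over.
 */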
static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct udp_offload_priv *uo_priv;
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh, *uh2;
	unsigned int hlen, off;
	int flush = 1;

	if (NAPI_GRO_CB(skb)->udp_mark ||
	    (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
		goto out;

	/* mark that this skb passed once through the udp gro layer */
	NAPI_GRO_CB(skb)->udp_mark = 1;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		uh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!uh))
			goto out;
	}

	rcu_read_lock();
	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_receive)
			goto unflush;
	}
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);
		if (*(u32 *)&uh->source != *(u32 *)&uh2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}

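/* GRO complete for UDP: patch the aggregated datagram's length field and
 * let the matching port's gro_complete() callback finish the inner
 * headers. -ENOSYS is returned if no handler is registered.
 */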
static int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct udp_offload_priv *uo_priv;
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	uh->len = newlen;

	rcu_read_lock();

	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_complete)
			break;
	}

	if (uo_priv != NULL)
		err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));

	rcu_read_unlock();
	return err;
}

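/* GSO/GRO hooks registered for IPPROTO_UDP via inet_add_offload() below. */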
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_send_check = udp4_ufo_send_check,
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp_gro_receive,
		.gro_complete = udp_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}