#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

static DEFINE_SPINLOCK(fou_lock);
static LIST_HEAD(fou_list);

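/* Per-port state for a FOU/GUE receive port: the bound kernel UDP socket,
 * the encapsulated protocol (for direct FOU), flags, and the UDP GRO offload
 * hooks. Entries are kept on fou_list under fou_lock.
 */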
struct fou {
	struct socket *sock;
	u8 protocol;
	u8 flags;
	u16 port;
	struct udp_offload udp_offloads;
	struct list_head list;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
	u16 type;
	u8 protocol;
	u8 flags;
	struct udp_port_cfg udp_config;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

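/* Trim the outer UDP header (and any FOU header) from the packet: shrink the
 * IP total length, pull the bytes, and update the checksum state accordingly.
 */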
static void fou_recv_pull(struct sk_buff *skb, size_t len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
}

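/* encap_rcv handler for direct FOU. Strips the UDP header and returns the
 * negative inner protocol number so the IP layer resubmits the packet to
 * that protocol's handler.
 */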
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	fou_recv_pull(skb, sizeof(struct udphdr));

	return -fou->protocol;
}

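/* Handle the GUE remote checksum offload option on the non-GRO receive path:
 * make sure the option and the checksum region are in the linear area, then
 * fold the checksum via skb_remcsum_process(). Returns the (possibly
 * relocated) GUE header, or NULL on failure.
 */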
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto,
				  bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
			    start, offset, nopartial);

	return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}

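/* encap_rcv handler for GUE. Validates the GUE header and options, handles
 * the private remote checksum offload flag, strips the UDP and GUE headers,
 * and returns the negative proto_ctype so the inner protocol is redelivered.
 */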
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	hdrlen = sizeof(struct guehdr) + optlen;

	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
		goto drop;

	ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

	/* Pull csum through the guehdr now. This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype,
					     !!(fou->flags &
						FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	return -guehdr->proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}

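/* GRO receive for direct FOU: hand aggregation off to the offload handler of
 * the encapsulated protocol recorded in NAPI_GRO_CB(skb)->proto.
 */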
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	const struct net_offload **offloads;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}

static int fou_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload *ops;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	udp_tunnel_gro_complete(skb, nhoff);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

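/* GRO-path counterpart of gue_remcsum(): applies remote checksum offload
 * using the GRO checksum state and records it in the gro_remcsum context.
 */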
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, u8 ipproto,
				      struct gro_remcsum *grc, bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	if (skb->remcsum_offload)
		return NULL;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	/* Pull checksum that will be written */
	if (skb_gro_header_hard(skb, off + plen)) {
		guehdr = skb_gro_header_slow(skb, off + plen, off);
		if (!guehdr)
			return NULL;
	}

	skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
				start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return guehdr;
}

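/* GRO receive for GUE: parse and validate the GUE header, apply remote
 * checksum offload if requested, match flows on the base header and options,
 * then chain to the GRO handler of the encapsulated protocol.
 */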
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;
	struct fou *fou = container_of(uoff, struct fou, udp_offloads);
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr;
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen,
						 guehdr->proto_ctype, &grc,
						 !!(fou->flags &
						    FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	flush = 0;

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, &grc);

	return pp;
}

static int gue_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen;
	u8 proto;
	int err = -ENOENT;

	proto = guehdr->proto_ctype;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}

static int fou_add_to_port_list(struct fou *fou)
{
	struct fou *fout;

	spin_lock(&fou_lock);
	list_for_each_entry(fout, &fou_list, list) {
		if (fou->port == fout->port) {
			spin_unlock(&fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fou_list);
	spin_unlock(&fou_lock);

	return 0;
}

static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;
	struct sock *sk = sock->sk;

	udp_del_offload(&fou->udp_offloads);

	list_del(&fou->list);

	/* Remove hooks into tunnel socket */
	sk->sk_user_data = NULL;

	sock_release(sock);

	kfree(fou);
}

static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = fou_udp_recv;
	fou->protocol = cfg->protocol;
	fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;
	fou->udp_offloads.ipproto = cfg->protocol;

	return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = gue_udp_recv;
	fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;

	return 0;
}

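/* Create a FOU/GUE receive port: open the kernel UDP socket, set up the
 * encap_rcv hook and GRO offloads for the requested encapsulation type, and
 * add the port to the global list.
 */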
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct fou *fou = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->flags = cfg->flags;
	fou->port = cfg->udp_config.local_udp_port;

	/* Initialize according to the fou type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		err = fou_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	case FOU_ENCAP_GUE:
		err = gue_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	udp_sk(sk)->encap_type = 1;
	udp_encap_enable();

	sk->sk_user_data = fou;
	fou->sock = sock;

	inet_inc_convert_csum(sk);

	sk->sk_allocation = GFP_ATOMIC;

	if (cfg->udp_config.family == AF_INET) {
		err = udp_add_offload(&fou->udp_offloads);
		if (err)
			goto error;
	}

	err = fou_add_to_port_list(fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		sock_release(sock);

	return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou *fou;
	u16 port = cfg->udp_config.local_udp_port;
	int err = -EINVAL;

	spin_lock(&fou_lock);
	list_for_each_entry(fou, &fou_list, list) {
		if (fou->port == port) {
			udp_del_offload(&fou->udp_offloads);
			fou_release(fou);
			err = 0;
			break;
		}
	}
	spin_unlock(&fou_lock);

	return err;
}

static struct genl_family fou_nl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = FOU_GENL_NAME,
	.version = FOU_GENL_VERSION,
	.maxattr = FOU_ATTR_MAX,
	.netnsok = true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT] = { .type = NLA_U16, },
	[FOU_ATTR_AF] = { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
	[FOU_ATTR_TYPE] = { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
};

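/* Translate FOU netlink attributes (family, port, ipproto, encap type,
 * remcsum_nopartial flag) into a struct fou_cfg. Defaults to AF_INET.
 */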
static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		if (family != AF_INET && family != AF_INET6)
			return -EINVAL;

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
		cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(&init_net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct fou_cfg cfg;

	parse_nl_config(info, &cfg);

	return fou_destroy(&init_net, &cfg);
}

static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

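/* Encapsulation header length for GUE: UDP header plus the base GUE header,
 * plus the private flags word and remote checksum option when REMCSUM is set
 * in the tunnel encap flags.
 */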
size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);

static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	uh->check = 0;
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 sport;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);
	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(fou_build_header);

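/* Build the outer GUE and UDP headers on transmit. When remote checksum
 * offload is used, the private flags word carries GUE_PFLAG_REMCSUM and the
 * option holds the checksum start/offset relative to the end of the GUE
 * header (the start of the encapsulated packet).
 */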
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	__be16 sport;
	void *data;
	bool need_priv = false;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		csum = false;
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Get source port (based on flow hash) before skb_push */
	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(gue_build_header);

#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static int __init fou_init(void)
{
	int ret;

	ret = genl_register_family_with_ops(&fou_nl_family,
					    fou_nl_ops);
	if (ret < 0)
		goto exit;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret < 0)
		genl_unregister_family(&fou_nl_family);

exit:
	return ret;
}

static void __exit fou_fini(void)
{
	struct fou *fou, *next;

	ip_tunnel_encap_del_fou_ops();

	genl_unregister_family(&fou_nl_family);

	/* Close all the FOU sockets */
	spin_lock(&fou_lock);
	list_for_each_entry_safe(fou, next, &fou_list, list)
		fou_release(fou);
	spin_unlock(&fou_lock);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");