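/* Foo-over-UDP (FOU) and Generic UDP Encapsulation (GUE): UDP encapsulation
 * sockets with GRO support, netlink configuration of encapsulation ports,
 * and ip_tunnel encap ops for building the outer UDP/GUE headers.
 */
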
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

struct fou {
	struct socket *sock;
	u8 protocol;
	u8 flags;
	__be16 port;
	u16 type;
	struct list_head list;
	struct rcu_head rcu;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
	u16 type;
	u8 protocol;
	u8 flags;
	struct udp_port_cfg udp_config;
};

static unsigned int fou_net_id;

struct fou_net {
	struct list_head fou_list;
	struct mutex fou_lock;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

static int fou_recv_pull(struct sk_buff *skb, size_t len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
	return iptunnel_pull_offloads(skb);
}

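/* encap_rcv handler for direct FOU: strip the outer UDP header and have the
 * stack resubmit the packet as the configured inner protocol.
 */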
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	if (fou_recv_pull(skb, sizeof(struct udphdr)))
		goto drop;

	return -fou->protocol;

drop:
	kfree_skb(skb);
	return 0;
}

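/* Handle the GUE remote checksum offload option on the normal (non-GRO)
 * receive path.
 */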
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto,
				  bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = sizeof(struct udphdr) + hdrlen +
	    max_t(size_t, offset + sizeof(u16), start);

	if (skb->remcsum_offload)
		return guehdr;

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
			    start, offset, nopartial);

	return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}

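/* encap_rcv handler for GUE: validate the GUE header, process private
 * options (e.g. remote checksum offload), strip the outer headers, and
 * resubmit the packet as guehdr->proto_ctype.
 */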
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	hdrlen = sizeof(struct guehdr) + optlen;

	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
		goto drop;

	ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

	/* Pull csum through the guehdr now.  This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype,
					     !!(fou->flags &
						FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	if (iptunnel_pull_offloads(skb))
		goto drop;

	return -guehdr->proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}

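/* GRO receive for direct FOU: hand aggregation off to the offload handler
 * of the inner protocol configured for this encap socket.
 */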
static struct sk_buff **fou_gro_receive(struct sock *sk,
					struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = fou_from_sock(sk)->protocol;
	const struct net_offload **offloads;

	/* We can clear the encap_mark for FOU as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GRE tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}

static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
			    int nhoff)
{
	const struct net_offload *ops;
	u8 proto = fou_from_sock(sk)->protocol;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	udp_tunnel_gro_complete(skb, nhoff);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

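/* GRO counterpart of gue_remcsum(): process the remote checksum offload
 * option using the GRO checksum state.
 */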
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, struct gro_remcsum *grc,
				      bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);

	if (skb->remcsum_offload)
		return guehdr;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
					 start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return guehdr;
}

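/* GRO receive for GUE: parse and validate the GUE header, handle private
 * options, match flows on the GUE header, and chain to the inner
 * protocol's gro_receive callback.
 */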
static struct sk_buff **gue_gro_receive(struct sock *sk,
					struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;
	struct fou *fou = fou_from_sock(sk);
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr;
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen, &grc,
						 !!(fou->flags &
						    FOU_F_REMCSUM_NOPARTIAL));

			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	/* We can clear the encap_mark for GUE as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GRE tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, &grc);

	return pp;
}

static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen;
	u8 proto;
	int err = -ENOENT;

	proto = guehdr->proto_ctype;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}

static int fou_add_to_port_list(struct net *net, struct fou *fou)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (fou->port == fout->port) {
			mutex_unlock(&fn->fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fn->fou_list);
	mutex_unlock(&fn->fou_lock);

	return 0;
}

static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;

	list_del(&fou->list);
	udp_tunnel_sock_release(sock);

	kfree_rcu(fou, rcu);
}

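/* The *_encap_init() helpers install the receive and GRO callbacks on the
 * encapsulation socket for the direct FOU and GUE variants respectively.
 */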
static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = fou_udp_recv;
	udp_sk(sk)->gro_receive = fou_gro_receive;
	udp_sk(sk)->gro_complete = fou_gro_complete;
	fou_from_sock(sk)->protocol = cfg->protocol;

	return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = gue_udp_recv;
	udp_sk(sk)->gro_receive = gue_gro_receive;
	udp_sk(sk)->gro_complete = gue_gro_complete;

	return 0;
}

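/* Create a FOU encapsulation port: open the UDP socket, set up the
 * callbacks for the requested encapsulation type, and add the port to the
 * per-netns list.
 */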
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct socket *sock = NULL;
	struct fou *fou = NULL;
	struct sock *sk;
	int err;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->flags = cfg->flags;
	fou->port = cfg->udp_config.local_udp_port;

	/* Initialize the encapsulation callbacks for the fou type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		err = fou_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	case FOU_ENCAP_GUE:
		err = gue_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	fou->type = cfg->type;

	udp_sk(sk)->encap_type = 1;
	udp_encap_enable();

	sk->sk_user_data = fou;
	fou->sock = sock;

	inet_inc_convert_csum(sk);

	sk->sk_allocation = GFP_ATOMIC;

	err = fou_add_to_port_list(net, fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		udp_tunnel_sock_release(sock);

	return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	__be16 port = cfg->udp_config.local_udp_port;
	int err = -EINVAL;
	struct fou *fou;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fou, &fn->fou_list, list) {
		if (fou->port == port) {
			fou_release(fou);
			err = 0;
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);

	return err;
}

static struct genl_family fou_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= FOU_GENL_NAME,
	.version	= FOU_GENL_VERSION,
	.maxattr	= FOU_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT] = { .type = NLA_U16, },
	[FOU_ATTR_AF] = { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
	[FOU_ATTR_TYPE] = { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
};

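/* Translate the netlink attributes of a FOU command into a fou_cfg. */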
static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		if (family != AF_INET)
			return -EINVAL;

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		__be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
		cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(net, &cfg);
}

static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
	if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
	    nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
	    nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
	    nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
		return -1;

	if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
		if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
			return -1;
	return 0;
}

static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
			 u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (fou_fill_info(fou, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct sk_buff *msg;
	struct fou_cfg cfg;
	struct fou *fout;
	__be16 port;
	int ret;

	ret = parse_nl_config(info, &cfg);
	if (ret)
		return ret;
	port = cfg.udp_config.local_udp_port;
	if (port == 0)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = -ESRCH;
	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (port == fout->port) {
			ret = fou_dump_info(fout, info->snd_portid,
					    info->snd_seq, 0, msg,
					    info->genlhdr->cmd);
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);
	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;
	int idx = 0, ret;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (idx++ < cb->args[0])
			continue;
		ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, NLM_F_MULTI,
				    skb, FOU_CMD_GET);
		if (ret)
			break;
	}
	mutex_unlock(&fn->fou_lock);

	cb->args[0] = idx;
	return skb->len;
}

static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_GET,
		.doit = fou_nl_cmd_get_port,
		.dumpit = fou_nl_dump,
		.policy = fou_nl_policy,
	},
};

size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);

static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}

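/* Transmit-side header construction for direct FOU: resolve GSO offloads
 * and prepend the outer UDP header.
 */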
int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;

	skb = iptunnel_handle_offloads(skb, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);
	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(fou_build_header);

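/* Transmit-side header construction for GUE: build the GUE header (with an
 * optional remote checksum offload private option) and prepend the outer
 * UDP header.
 */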
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	__be16 sport;
	void *data;
	bool need_priv = false;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	skb = iptunnel_handle_offloads(skb, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Get source port (based on flow hash) before skb_push */
	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(gue_build_header);

#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static __net_init int fou_init_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);

	INIT_LIST_HEAD(&fn->fou_list);
	mutex_init(&fn->fou_lock);
	return 0;
}

static __net_exit void fou_exit_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fou, *next;

	/* Close all the FOU sockets */
	mutex_lock(&fn->fou_lock);
	list_for_each_entry_safe(fou, next, &fn->fou_list, list)
		fou_release(fou);
	mutex_unlock(&fn->fou_lock);
}

static struct pernet_operations fou_net_ops = {
	.init = fou_init_net,
	.exit = fou_exit_net,
	.id   = &fou_net_id,
	.size = sizeof(struct fou_net),
};

static int __init fou_init(void)
{
	int ret;

	ret = register_pernet_device(&fou_net_ops);
	if (ret)
		goto exit;

	ret = genl_register_family_with_ops(&fou_nl_family,
					    fou_nl_ops);
	if (ret < 0)
		goto unregister;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret == 0)
		return 0;

	genl_unregister_family(&fou_nl_family);
unregister:
	unregister_pernet_device(&fou_net_ops);
exit:
	return ret;
}

static void __exit fou_fini(void)
{
	ip_tunnel_encap_del_fou_ops();
	genl_unregister_family(&fou_nl_family);
	unregister_pernet_device(&fou_net_ops);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");