1/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
2 *
3 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
4 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
5 *
6 * Author: Harald Welte <hwelte@sysmocom.de>
7 * Pablo Neira Ayuso <pablo@netfilter.org>
8 * Andreas Schultz <aschultz@travelping.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/module.h>
19#include <linux/skbuff.h>
20#include <linux/udp.h>
21#include <linux/rculist.h>
22#include <linux/jhash.h>
23#include <linux/if_tunnel.h>
24#include <linux/net.h>
25#include <linux/file.h>
26#include <linux/gtp.h>
27
28#include <net/net_namespace.h>
29#include <net/protocol.h>
30#include <net/ip.h>
31#include <net/udp.h>
32#include <net/udp_tunnel.h>
33#include <net/icmp.h>
34#include <net/xfrm.h>
35#include <net/genetlink.h>
36#include <net/netns/generic.h>
37#include <net/gtp.h>
38
39/* An active session for the subscriber. */
40struct pdp_ctx {
41 struct hlist_node hlist_tid;
42 struct hlist_node hlist_addr;
43
44 union {
45 struct {
46 u64 tid;
47 u16 flow;
48 } v0;
49 struct {
50 u32 i_tei;
51 u32 o_tei;
52 } v1;
53 } u;
54 u8 gtp_version;
55 u16 af;
56
57 struct in_addr ms_addr_ip4;
58 struct in_addr sgsn_addr_ip4;
59
60 atomic_t tx_seq;
61 struct rcu_head rcu_head;
62};
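/* A pdp_ctx is linked into two hash tables: hlist_tid is used for lookup by
 * TID/TEI on decapsulation (gtp0_pdp_find/gtp1_pdp_find), hlist_addr for
 * lookup by the MS IPv4 address on transmission (ipv4_pdp_find).
 */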
63
64/* One instance of the GTP device. */
65struct gtp_dev {
66 struct list_head list;
67
68 struct socket *sock0;
69 struct socket *sock1u;
70
71 struct net_device *dev;
72
73 unsigned int hash_size;
74 struct hlist_head *tid_hash;
75 struct hlist_head *addr_hash;
76};
77
78static int gtp_net_id __read_mostly;
79
80struct gtp_net {
81 struct list_head gtp_dev_list;
82};
83
84static u32 gtp_h_initval;
85
86static inline u32 gtp0_hashfn(u64 tid)
87{
88 u32 *tid32 = (u32 *) &tid;
89 return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
90}
91
92static inline u32 gtp1u_hashfn(u32 tid)
93{
94 return jhash_1word(tid, gtp_h_initval);
95}
96
97static inline u32 ipv4_hashfn(__be32 ip)
98{
99 return jhash_1word((__force u32)ip, gtp_h_initval);
100}
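/* All lookups index a table of gtp->hash_size buckets. gtp_h_initval is
 * filled with random bytes at module load, so the jhash bucket distribution
 * is not predictable by peers. GTPv0 hashes the two 32-bit halves of its
 * 64-bit TID, GTPv1 hashes the 32-bit TEI, and the address table hashes the
 * MS IPv4 address.
 */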
101
102/* Resolve a PDP context structure based on the 64bit TID. */
103static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
104{
105 struct hlist_head *head;
106 struct pdp_ctx *pdp;
107
108 head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
109
110 hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
111 if (pdp->gtp_version == GTP_V0 &&
112 pdp->u.v0.tid == tid)
113 return pdp;
114 }
115 return NULL;
116}
117
118/* Resolve a PDP context structure based on the 32bit TEI. */
119static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
120{
121 struct hlist_head *head;
122 struct pdp_ctx *pdp;
123
124 head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
125
126 hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
127 if (pdp->gtp_version == GTP_V1 &&
128 pdp->u.v1.i_tei == tid)
129 return pdp;
130 }
131 return NULL;
132}
133
134/* Resolve a PDP context based on IPv4 address of MS. */
135static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
136{
137 struct hlist_head *head;
138 struct pdp_ctx *pdp;
139
140 head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];
141
142 hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
143 if (pdp->af == AF_INET &&
144 pdp->ms_addr_ip4.s_addr == ms_addr)
145 return pdp;
146 }
147
148 return NULL;
149}
150
151static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
152 unsigned int hdrlen)
153{
154 struct iphdr *iph;
155
156 if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
157 return false;
158
159 iph = (struct iphdr *)(skb->data + hdrlen);
160
161 return iph->saddr == pctx->ms_addr_ip4.s_addr;
162}
163
164/* Check if the inner IP source address in this packet is assigned to any
165 * existing mobile subscriber.
166 */
167static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
168 unsigned int hdrlen)
169{
170 switch (ntohs(skb->protocol)) {
171 case ETH_P_IP:
172 return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
173 }
174 return false;
175}
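/* Returns true when the inner IPv4 source address matches the address
 * assigned to the MS of this PDP context; the decapsulation paths drop the
 * packet otherwise. Only IPv4 payloads are handled.
 */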
176
177/* 1 means pass the packet up to the userspace UDP socket, -1 means drop it and 0 means it was decapsulated. */
178static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
179 bool xnet)
180{
181 unsigned int hdrlen = sizeof(struct udphdr) +
182 sizeof(struct gtp0_header);
183 struct gtp0_header *gtp0;
184 struct pdp_ctx *pctx;
185 int ret = 0;
186
187 if (!pskb_may_pull(skb, hdrlen))
188 return -1;
189
190 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
191
192 if ((gtp0->flags >> 5) != GTP_V0)
193 return 1;
194
195 if (gtp0->type != GTP_TPDU)
196 return 1;
197
198 rcu_read_lock();
199 pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
200 if (!pctx) {
201 netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
202 ret = -1;
203 goto out_rcu;
204 }
205
206 if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
207 netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
208 ret = -1;
209 goto out_rcu;
210 }
211 rcu_read_unlock();
212
213 /* Get rid of the GTP + UDP headers. */
214 return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
215out_rcu:
216 rcu_read_unlock();
217 return ret;
218}
219
220static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
221 bool xnet)
222{
223 unsigned int hdrlen = sizeof(struct udphdr) +
224 sizeof(struct gtp1_header);
225 struct gtp1_header *gtp1;
226 struct pdp_ctx *pctx;
227 int ret = 0;
228
229 if (!pskb_may_pull(skb, hdrlen))
230 return -1;
231
232 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
233
234 if ((gtp1->flags >> 5) != GTP_V1)
235 return 1;
236
237 if (gtp1->type != GTP_TPDU)
238 return 1;
239
240 /* From 29.060: "This field shall be present if and only if any one or
241 * more of the S, PN and E flags are set.".
242 *
243 * If any of these flags is set, the optional 4-byte field (sequence
244 * number, N-PDU number and next extension header type) is present.
245 */
246 if (gtp1->flags & GTP1_F_MASK)
247 hdrlen += 4;
248
249 /* Make sure the header is large enough, including extensions. */
250 if (!pskb_may_pull(skb, hdrlen))
251 return -1;
252
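	/* pskb_may_pull() may have reallocated the skb header, which
	 * invalidates the old gtp1 pointer, so re-read it from skb->data.
	 */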
253 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
254
255 rcu_read_lock();
256 pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
257 if (!pctx) {
258 netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
259 ret = -1;
260 goto out_rcu;
261 }
262
263 if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
264 netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
265 ret = -1;
266 goto out_rcu;
267 }
268 rcu_read_unlock();
269
270 /* Get rid of the GTP + UDP headers. */
271 return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
272out_rcu:
273 rcu_read_unlock();
274 return ret;
275}
276
277static void gtp_encap_disable(struct gtp_dev *gtp)
278{
279 if (gtp->sock0 && gtp->sock0->sk) {
280 udp_sk(gtp->sock0->sk)->encap_type = 0;
281 rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
282 }
283 if (gtp->sock1u && gtp->sock1u->sk) {
284 udp_sk(gtp->sock1u->sk)->encap_type = 0;
285 rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
286 }
287
288 gtp->sock0 = NULL;
289 gtp->sock1u = NULL;
290}
291
292static void gtp_encap_destroy(struct sock *sk)
293{
294 struct gtp_dev *gtp;
295
296 gtp = rcu_dereference_sk_user_data(sk);
297 if (gtp)
298 gtp_encap_disable(gtp);
299}
300
301/* UDP encapsulation receive handler. See net/ipv4/udp.c.
302 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
303 */
304static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
305{
306 struct pcpu_sw_netstats *stats;
307 struct gtp_dev *gtp;
308 bool xnet;
309 int ret;
310
311 gtp = rcu_dereference_sk_user_data(sk);
312 if (!gtp)
313 return 1;
314
315 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
316
317 xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
318
319 switch (udp_sk(sk)->encap_type) {
320 case UDP_ENCAP_GTP0:
321 netdev_dbg(gtp->dev, "received GTP0 packet\n");
322 ret = gtp0_udp_encap_recv(gtp, skb, xnet);
323 break;
324 case UDP_ENCAP_GTP1U:
325 netdev_dbg(gtp->dev, "received GTP1U packet\n");
326 ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
327 break;
328 default:
329 ret = -1; /* Shouldn't happen. */
330 }
331
332 switch (ret) {
333 case 1:
334 netdev_dbg(gtp->dev, "pass up to the process\n");
335 return 1;
336 case 0:
337 netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
338 break;
339 case -1:
340 netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
341 kfree_skb(skb);
342 return 0;
343 }
344
345 /* Now that the UDP and the GTP header have been removed, set up the
346 * new network header. This is required by the upper layer to
347 * calculate the transport header.
348 */
349 skb_reset_network_header(skb);
350
351 skb->dev = gtp->dev;
352
353 stats = this_cpu_ptr(gtp->dev->tstats);
354 u64_stats_update_begin(&stats->syncp);
355 stats->rx_packets++;
356 stats->rx_bytes += skb->len;
357 u64_stats_update_end(&stats->syncp);
358
359 netif_rx(skb);
360
361 return 0;
362}
363
364static int gtp_dev_init(struct net_device *dev)
365{
366 struct gtp_dev *gtp = netdev_priv(dev);
367
368 gtp->dev = dev;
369
370 dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
371 if (!dev->tstats)
372 return -ENOMEM;
373
374 return 0;
375}
376
377static void gtp_dev_uninit(struct net_device *dev)
378{
379 struct gtp_dev *gtp = netdev_priv(dev);
380
381 gtp_encap_disable(gtp);
382 free_percpu(dev->tstats);
383}
384
385static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
386 const struct sock *sk, __be32 daddr)
387{
388 memset(fl4, 0, sizeof(*fl4));
389 fl4->flowi4_oif = sk->sk_bound_dev_if;
390 fl4->daddr = daddr;
391 fl4->saddr = inet_sk(sk)->inet_saddr;
392 fl4->flowi4_tos = RT_CONN_FLAGS(sk);
393 fl4->flowi4_proto = sk->sk_protocol;
394
395 return ip_route_output_key(net, fl4);
396}
397
398static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
399{
400 int payload_len = skb->len;
401 struct gtp0_header *gtp0;
402
403 gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0));
404
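	/* Bits 8 7 6 5 4 3 2 1
	 * +--+--+--+--+--+--+--+--+
	 * |version |PT| 1| 1| 1|SN|
	 * +--+--+--+--+--+--+--+--+
	 * 0 0 0 1 1 1 1 0
	 * i.e. version 0, PT=1 (GTP), spare bits set, no SNDCP N-PDU number.
	 */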
405 gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
406 gtp0->type = GTP_TPDU;
407 gtp0->length = htons(payload_len);
408 gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
409 gtp0->flow = htons(pctx->u.v0.flow);
410 gtp0->number = 0xff;
411 gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff;
412 gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
413}
414
415static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
416{
417 int payload_len = skb->len;
418 struct gtp1_header *gtp1;
419
420 gtp1 = (struct gtp1_header *) skb_push(skb, sizeof(*gtp1));
421
422 /* Bits 8 7 6 5 4 3 2 1
423 * +--+--+--+--+--+--+--+--+
424 * |version |PT| 1| E| S|PN|
425 * +--+--+--+--+--+--+--+--+
426 * 0 0 1 1 1 0 0 0
427 */
428 gtp1->flags = 0x38; /* v1, GTP-non-prime. */
429 gtp1->type = GTP_TPDU;
430 gtp1->length = htons(payload_len);
431 gtp1->tid = htonl(pctx->u.v1.o_tei);
432
433 /* TODO: Support for extension header, sequence number and N-PDU number.
434 * Update the length field if any of them is available.
435 */
436}
437
438struct gtp_pktinfo {
439 struct sock *sk;
440 struct iphdr *iph;
441 struct flowi4 fl4;
442 struct rtable *rt;
443 struct pdp_ctx *pctx;
444 struct net_device *dev;
445 __be16 gtph_port;
446};
447
448static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
449{
450 switch (pktinfo->pctx->gtp_version) {
451 case GTP_V0:
452 pktinfo->gtph_port = htons(GTP0_PORT);
453 gtp0_push_header(skb, pktinfo->pctx);
454 break;
455 case GTP_V1:
456 pktinfo->gtph_port = htons(GTP1U_PORT);
457 gtp1_push_header(skb, pktinfo->pctx);
458 break;
459 }
460}
461
462static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
463 struct sock *sk, struct iphdr *iph,
464 struct pdp_ctx *pctx, struct rtable *rt,
465 struct flowi4 *fl4,
466 struct net_device *dev)
467{
468 pktinfo->sk = sk;
469 pktinfo->iph = iph;
470 pktinfo->pctx = pctx;
471 pktinfo->rt = rt;
472 pktinfo->fl4 = *fl4;
473 pktinfo->dev = dev;
474}
475
476static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
477 struct gtp_pktinfo *pktinfo)
478{
479 struct gtp_dev *gtp = netdev_priv(dev);
480 struct pdp_ctx *pctx;
481 struct rtable *rt;
482 struct flowi4 fl4;
483 struct iphdr *iph;
484 struct sock *sk;
485 __be16 df;
486 int mtu;
487
488 /* Read the IP destination address and resolve the PDP context.
489 * Prepend PDP header with TEI/TID from PDP ctx.
490 */
491 iph = ip_hdr(skb);
492 pctx = ipv4_pdp_find(gtp, iph->daddr);
493 if (!pctx) {
494 netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
495 &iph->daddr);
496 return -ENOENT;
497 }
498 netdev_dbg(dev, "found PDP context %p\n", pctx);
499
500 switch (pctx->gtp_version) {
501 case GTP_V0:
502 if (gtp->sock0)
503 sk = gtp->sock0->sk;
504 else
505 sk = NULL;
506 break;
507 case GTP_V1:
508 if (gtp->sock1u)
509 sk = gtp->sock1u->sk;
510 else
511 sk = NULL;
512 break;
513 default:
514 return -ENOENT;
515 }
516
517 if (!sk) {
518 netdev_dbg(dev, "no userspace socket is available, skip\n");
519 return -ENOENT;
520 }
521
522 rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk,
523 pctx->sgsn_addr_ip4.s_addr);
524 if (IS_ERR(rt)) {
525 netdev_dbg(dev, "no route to SSGN %pI4\n",
526 &pctx->sgsn_addr_ip4.s_addr);
527 dev->stats.tx_carrier_errors++;
528 goto err;
529 }
530
531 if (rt->dst.dev == dev) {
532 netdev_dbg(dev, "circular route to SSGN %pI4\n",
533 &pctx->sgsn_addr_ip4.s_addr);
534 dev->stats.collisions++;
535 goto err_rt;
536 }
537
538 skb_dst_drop(skb);
539
540 /* This is similar to tnl_update_pmtu(). */
541 df = iph->frag_off;
542 if (df) {
543 mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
544 sizeof(struct iphdr) - sizeof(struct udphdr);
545 switch (pctx->gtp_version) {
546 case GTP_V0:
547 mtu -= sizeof(struct gtp0_header);
548 break;
549 case GTP_V1:
550 mtu -= sizeof(struct gtp1_header);
551 break;
552 }
553 } else {
554 mtu = dst_mtu(&rt->dst);
555 }
556
557 rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
558
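	/* If the inner packet has DF set and exceeds the path MTU minus the
	 * tunnel overhead, behave like a router: send an ICMP
	 * fragmentation-needed error back and drop the packet.
	 */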
559 if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
560 mtu < ntohs(iph->tot_len)) {
561 netdev_dbg(dev, "packet too big, fragmentation needed\n");
562 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
563 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
564 htonl(mtu));
565 goto err_rt;
566 }
567
568 gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
569 gtp_push_header(skb, pktinfo);
570
571 return 0;
572err_rt:
573 ip_rt_put(rt);
574err:
575 return -EBADMSG;
576}
577
578static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
579{
580 unsigned int proto = ntohs(skb->protocol);
581 struct gtp_pktinfo pktinfo;
582 int err;
583
584 /* Ensure there is sufficient headroom. */
585 if (skb_cow_head(skb, dev->needed_headroom))
586 goto tx_err;
587
588 skb_reset_inner_headers(skb);
589
590 /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
591 rcu_read_lock();
592 switch (proto) {
593 case ETH_P_IP:
594 err = gtp_build_skb_ip4(skb, dev, &pktinfo);
595 break;
596 default:
597 err = -EOPNOTSUPP;
598 break;
599 }
600 rcu_read_unlock();
601
602 if (err < 0)
603 goto tx_err;
604
605 switch (proto) {
606 case ETH_P_IP:
607 netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
608 &pktinfo.iph->saddr, &pktinfo.iph->daddr);
609 udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
610 pktinfo.fl4.saddr, pktinfo.fl4.daddr,
611 pktinfo.iph->tos,
612 ip4_dst_hoplimit(&pktinfo.rt->dst),
613 0,
614 pktinfo.gtph_port, pktinfo.gtph_port,
615 true, false);
616 break;
617 }
618
619 return NETDEV_TX_OK;
620tx_err:
621 dev->stats.tx_errors++;
622 dev_kfree_skb(skb);
623 return NETDEV_TX_OK;
624}
625
626static const struct net_device_ops gtp_netdev_ops = {
627 .ndo_init = gtp_dev_init,
628 .ndo_uninit = gtp_dev_uninit,
629 .ndo_start_xmit = gtp_dev_xmit,
630 .ndo_get_stats64 = ip_tunnel_get_stats64,
631};
632
633static void gtp_link_setup(struct net_device *dev)
634{
635 dev->netdev_ops = &gtp_netdev_ops;
636 dev->destructor = free_netdev;
637
638 dev->hard_header_len = 0;
639 dev->addr_len = 0;
640
641 /* Zero header length. */
642 dev->type = ARPHRD_NONE;
643 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
644
645 dev->priv_flags |= IFF_NO_QUEUE;
646 dev->features |= NETIF_F_LLTX;
647 netif_keep_dst(dev);
648
649 /* Assume the largest header, i.e. GTPv0. */
650 dev->needed_headroom = LL_MAX_HEADER +
651 sizeof(struct iphdr) +
652 sizeof(struct udphdr) +
653 sizeof(struct gtp0_header);
654}
655
656static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
657static void gtp_hashtable_free(struct gtp_dev *gtp);
658static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
659 int fd_gtp0, int fd_gtp1);
660
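/* Creating a GTP link requires two UDP sockets that userspace (the GTP-C
 * daemon) has already opened and bound, typically to port 3386 for GTPv0 and
 * 2152 for GTPv1-U. Their file descriptors are passed in through the
 * IFLA_GTP_FD0/IFLA_GTP_FD1 attributes and turned into kernel encapsulation
 * sockets by gtp_encap_enable().
 */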
661static int gtp_newlink(struct net *src_net, struct net_device *dev,
662 struct nlattr *tb[], struct nlattr *data[])
663{
664 int hashsize, err, fd0, fd1;
665 struct gtp_dev *gtp;
666 struct gtp_net *gn;
667
668 if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
669 return -EINVAL;
670
671 gtp = netdev_priv(dev);
672
673 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
674 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
675
676 err = gtp_encap_enable(dev, gtp, fd0, fd1);
677 if (err < 0)
678 goto out_err;
679
680 if (!data[IFLA_GTP_PDP_HASHSIZE]) {
681 hashsize = 1024;
682 } else {
683 hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
684 if (!hashsize)
685 hashsize = 1024;
686 }
687
688 err = gtp_hashtable_new(gtp, hashsize);
689 if (err < 0)
690 goto out_encap;
691
692 err = register_netdevice(dev);
693 if (err < 0) {
694 netdev_dbg(dev, "failed to register new netdev %d\n", err);
695 goto out_hashtable;
696 }
697
698 gn = net_generic(dev_net(dev), gtp_net_id);
699 list_add_rcu(&gtp->list, &gn->gtp_dev_list);
700
701 netdev_dbg(dev, "registered new GTP interface\n");
702
703 return 0;
704
705out_hashtable:
706 gtp_hashtable_free(gtp);
707out_encap:
708 gtp_encap_disable(gtp);
709out_err:
710 return err;
711}
712
713static void gtp_dellink(struct net_device *dev, struct list_head *head)
714{
715 struct gtp_dev *gtp = netdev_priv(dev);
716
717 gtp_encap_disable(gtp);
718 gtp_hashtable_free(gtp);
719 list_del_rcu(&gtp->list);
720 unregister_netdevice_queue(dev, head);
721}
722
723static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
724 [IFLA_GTP_FD0] = { .type = NLA_U32 },
725 [IFLA_GTP_FD1] = { .type = NLA_U32 },
726 [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
727};
728
729static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
730{
731 if (!data)
732 return -EINVAL;
733
734 return 0;
735}
736
737static size_t gtp_get_size(const struct net_device *dev)
738{
739 return nla_total_size(sizeof(__u32)); /* IFLA_GTP_PDP_HASHSIZE */
740}
741
742static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
743{
744 struct gtp_dev *gtp = netdev_priv(dev);
745
746 if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
747 goto nla_put_failure;
748
749 return 0;
750
751nla_put_failure:
752 return -EMSGSIZE;
753}
754
755static struct rtnl_link_ops gtp_link_ops __read_mostly = {
756 .kind = "gtp",
757 .maxtype = IFLA_GTP_MAX,
758 .policy = gtp_policy,
759 .priv_size = sizeof(struct gtp_dev),
760 .setup = gtp_link_setup,
761 .validate = gtp_validate,
762 .newlink = gtp_newlink,
763 .dellink = gtp_dellink,
764 .get_size = gtp_get_size,
765 .fill_info = gtp_fill_info,
766};
767
768static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
769{
770 struct net *net;
771
772 /* Examine the link attributes and figure out which network namespace
773 * we are talking about.
774 */
775 if (tb[GTPA_NET_NS_FD])
776 net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
777 else
778 net = get_net(src_net);
779
780 return net;
781}
782
783static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
784{
785 int i;
786
787 gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize,
788 GFP_KERNEL | __GFP_NOWARN);
789 if (gtp->addr_hash == NULL)
790 return -ENOMEM;
791
792 gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize,
793 GFP_KERNEL | __GFP_NOWARN);
794 if (gtp->tid_hash == NULL)
795 goto err1;
796
797 gtp->hash_size = hsize;
798
799 for (i = 0; i < hsize; i++) {
800 INIT_HLIST_HEAD(&gtp->addr_hash[i]);
801 INIT_HLIST_HEAD(&gtp->tid_hash[i]);
802 }
803 return 0;
804err1:
805 kfree(gtp->addr_hash);
806 return -ENOMEM;
807}
808
809static void gtp_hashtable_free(struct gtp_dev *gtp)
810{
811 struct pdp_ctx *pctx;
812 int i;
813
814 for (i = 0; i < gtp->hash_size; i++) {
815 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
816 hlist_del_rcu(&pctx->hlist_tid);
817 hlist_del_rcu(&pctx->hlist_addr);
818 kfree_rcu(pctx, rcu_head);
819 }
820 }
821 synchronize_rcu();
822 kfree(gtp->addr_hash);
823 kfree(gtp->tid_hash);
824}
825
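/* Take over the two userspace UDP sockets: mark them as GTP0/GTP1U
 * encapsulation sockets so incoming datagrams are handed to gtp_encap_recv()
 * before the normal UDP receive path, and stash the gtp_dev pointer in
 * sk_user_data for use on reception.
 */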
826static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
827 int fd_gtp0, int fd_gtp1)
828{
829 struct udp_tunnel_sock_cfg tuncfg = {NULL};
830 struct socket *sock0, *sock1u;
831 int err;
832
833 netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);
834
835 sock0 = sockfd_lookup(fd_gtp0, &err);
836 if (sock0 == NULL) {
837 netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
838 return -ENOENT;
839 }
840
841 if (sock0->sk->sk_protocol != IPPROTO_UDP ||
842 sock0->sk->sk_type != SOCK_DGRAM ||
843 (sock0->sk->sk_family != AF_INET && sock0->sk->sk_family != AF_INET6)) {
844 netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
845 err = -EINVAL;
846 goto err1;
847 }
848
849 sock1u = sockfd_lookup(fd_gtp1, &err);
850 if (sock1u == NULL) {
851 netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
852 err = -ENOENT;
853 goto err1;
854 }
855
856 if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
857 netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
858 err = -EINVAL;
859 goto err2;
860 }
861
862 netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);
863
864 gtp->sock0 = sock0;
865 gtp->sock1u = sock1u;
866
867 tuncfg.sk_user_data = gtp;
868 tuncfg.encap_rcv = gtp_encap_recv;
869 tuncfg.encap_destroy = gtp_encap_destroy;
870
871 tuncfg.encap_type = UDP_ENCAP_GTP0;
872 setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);
873
874 tuncfg.encap_type = UDP_ENCAP_GTP1U;
875 setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);
876
877 err = 0;
878err2:
879 sockfd_put(sock1u);
880err1:
881 sockfd_put(sock0);
882 return err;
883}
884
885static struct net_device *gtp_find_dev(struct net *net, int ifindex)
886{
887 struct gtp_net *gn = net_generic(net, gtp_net_id);
888 struct gtp_dev *gtp;
889
890 list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
891 if (ifindex == gtp->dev->ifindex)
892 return gtp->dev;
893 }
894 return NULL;
895}
896
897static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
898{
899 pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
900 pctx->af = AF_INET;
901 pctx->sgsn_addr_ip4.s_addr =
902 nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]);
903 pctx->ms_addr_ip4.s_addr =
904 nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
905
906 switch (pctx->gtp_version) {
907 case GTP_V0:
908 /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
909 * label needs to be the same for uplink and downlink packets,
910 * so let's annotate this.
911 */
912 pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
913 pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
914 break;
915 case GTP_V1:
916 pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
917 pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
918 break;
919 default:
920 break;
921 }
922}
923
924static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
925{
926 struct gtp_dev *gtp = netdev_priv(dev);
927 u32 hash_ms, hash_tid = 0;
928 struct pdp_ctx *pctx;
929 bool found = false;
930 __be32 ms_addr;
931
932 ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
933 hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
934
935 hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
936 if (pctx->ms_addr_ip4.s_addr == ms_addr) {
937 found = true;
938 break;
939 }
940 }
941
942 if (found) {
943 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
944 return -EEXIST;
945 if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
946 return -EOPNOTSUPP;
947
948 ipv4_pdp_fill(pctx, info);
949
950 if (pctx->gtp_version == GTP_V0)
951 netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
952 pctx->u.v0.tid, pctx);
953 else if (pctx->gtp_version == GTP_V1)
954 netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
955 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
956
957 return 0;
958
959 }
960
961 pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
962 if (pctx == NULL)
963 return -ENOMEM;
964
965 ipv4_pdp_fill(pctx, info);
966 atomic_set(&pctx->tx_seq, 0);
967
968 switch (pctx->gtp_version) {
969 case GTP_V0:
970 /* TS 09.60: "The flow label identifies unambiguously a GTP
971 * flow.". We use the tid for this instead, I cannot find a
972 * situation in which this doesn't unambiguously identify the
973 * PDP context.
974 */
975 hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
976 break;
977 case GTP_V1:
978 hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
979 break;
980 }
981
982 hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
983 hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
984
985 switch (pctx->gtp_version) {
986 case GTP_V0:
987 netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
988 pctx->u.v0.tid, &pctx->sgsn_addr_ip4,
989 &pctx->ms_addr_ip4, pctx);
990 break;
991 case GTP_V1:
992 netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
993 pctx->u.v1.i_tei, pctx->u.v1.o_tei,
994 &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx);
995 break;
996 }
997
998 return 0;
999}
1000
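/* GTP_CMD_NEWPDP: a context always needs GTPA_VERSION, GTPA_LINK,
 * GTPA_SGSN_ADDRESS and GTPA_MS_ADDRESS; GTPv0 additionally requires
 * GTPA_TID and GTPA_FLOW, GTPv1 requires GTPA_I_TEI and GTPA_O_TEI.
 */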
1001static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
1002{
1003 struct net_device *dev;
1004 struct net *net;
1005
1006 if (!info->attrs[GTPA_VERSION] ||
1007 !info->attrs[GTPA_LINK] ||
1008 !info->attrs[GTPA_SGSN_ADDRESS] ||
1009 !info->attrs[GTPA_MS_ADDRESS])
1010 return -EINVAL;
1011
1012 switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
1013 case GTP_V0:
1014 if (!info->attrs[GTPA_TID] ||
1015 !info->attrs[GTPA_FLOW])
1016 return -EINVAL;
1017 break;
1018 case GTP_V1:
1019 if (!info->attrs[GTPA_I_TEI] ||
1020 !info->attrs[GTPA_O_TEI])
1021 return -EINVAL;
1022 break;
1023
1024 default:
1025 return -EINVAL;
1026 }
1027
1028 net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
1029 if (IS_ERR(net))
1030 return PTR_ERR(net);
1031
1032 /* Check if there's an existing gtpX device to configure */
1033 dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
1034 if (dev == NULL) {
1035 put_net(net);
1036 return -ENODEV;
1037 }
1038 put_net(net);
1039
1040 return ipv4_pdp_add(dev, info);
1041}
1042
1043static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
1044{
1045 struct net_device *dev;
1046 struct pdp_ctx *pctx;
1047 struct gtp_dev *gtp;
1048 struct net *net;
1049
1050 if (!info->attrs[GTPA_VERSION] ||
1051 !info->attrs[GTPA_LINK])
1052 return -EINVAL;
1053
1054 net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
1055 if (IS_ERR(net))
1056 return PTR_ERR(net);
1057
1058 /* Check if there's an existing gtpX device to configure */
1059 dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
1060 if (dev == NULL) {
1061 put_net(net);
1062 return -ENODEV;
1063 }
1064 put_net(net);
1065
1066 gtp = netdev_priv(dev);
1067
1068 switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
1069 case GTP_V0:
1070 if (!info->attrs[GTPA_TID])
1071 return -EINVAL;
1072 pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
1073 break;
1074 case GTP_V1:
1075 if (!info->attrs[GTPA_I_TEI])
1076 return -EINVAL;
1077 pctx = gtp1_pdp_find(gtp, nla_get_u32(info->attrs[GTPA_I_TEI]));
1078 break;
1079
1080 default:
1081 return -EINVAL;
1082 }
1083
1084 if (pctx == NULL)
1085 return -ENOENT;
1086
1087 if (pctx->gtp_version == GTP_V0)
1088 netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
1089 pctx->u.v0.tid, pctx);
1090 else if (pctx->gtp_version == GTP_V1)
1091 netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
1092 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
1093
1094 hlist_del_rcu(&pctx->hlist_tid);
1095 hlist_del_rcu(&pctx->hlist_addr);
1096 kfree_rcu(pctx, rcu_head);
1097
1098 return 0;
1099}
1100
1101static struct genl_family gtp_genl_family = {
1102 .id = GENL_ID_GENERATE,
1103 .name = "gtp",
1104 .version = 0,
1105 .hdrsize = 0,
1106 .maxattr = GTPA_MAX,
1107 .netnsok = true,
1108};
1109
1110static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
1111 u32 type, struct pdp_ctx *pctx)
1112{
1113 void *genlh;
1114
1115 genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
1116 type);
1117 if (genlh == NULL)
1118 goto nlmsg_failure;
1119
1120 if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
1121 nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) ||
1122 nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
1123 goto nla_put_failure;
1124
1125 switch (pctx->gtp_version) {
1126 case GTP_V0:
1127 if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
1128 nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
1129 goto nla_put_failure;
1130 break;
1131 case GTP_V1:
1132 if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
1133 nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
1134 goto nla_put_failure;
1135 break;
1136 }
1137 genlmsg_end(skb, genlh);
1138 return 0;
1139
1140nlmsg_failure:
1141nla_put_failure:
1142 genlmsg_cancel(skb, genlh);
1143 return -EMSGSIZE;
1144}
1145
1146static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
1147{
1148 struct pdp_ctx *pctx = NULL;
1149 struct net_device *dev;
1150 struct sk_buff *skb2;
1151 struct gtp_dev *gtp;
1152 u32 gtp_version;
1153 struct net *net;
1154 int err;
1155
1156 if (!info->attrs[GTPA_VERSION] ||
1157 !info->attrs[GTPA_LINK])
1158 return -EINVAL;
1159
1160 gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
1161 switch (gtp_version) {
1162 case GTP_V0:
1163 case GTP_V1:
1164 break;
1165 default:
1166 return -EINVAL;
1167 }
1168
1169 net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
1170 if (IS_ERR(net))
1171 return PTR_ERR(net);
1172
1173 /* Check if there's an existing gtpX device to configure */
1174 dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
1175 if (dev == NULL) {
1176 put_net(net);
1177 return -ENODEV;
1178 }
1179 put_net(net);
1180
1181 gtp = netdev_priv(dev);
1182
1183 rcu_read_lock();
1184 if (gtp_version == GTP_V0 &&
1185 info->attrs[GTPA_TID]) {
1186 u64 tid = nla_get_u64(info->attrs[GTPA_TID]);
1187
1188 pctx = gtp0_pdp_find(gtp, tid);
1189 } else if (gtp_version == GTP_V1 &&
1190 info->attrs[GTPA_I_TEI]) {
1191 u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);
1192
1193 pctx = gtp1_pdp_find(gtp, tid);
1194 } else if (info->attrs[GTPA_MS_ADDRESS]) {
1195 __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
1196
1197 pctx = ipv4_pdp_find(gtp, ip);
1198 }
1199
1200 if (pctx == NULL) {
1201 err = -ENOENT;
1202 goto err_unlock;
1203 }
1204
1205 skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
1206 if (skb2 == NULL) {
1207 err = -ENOMEM;
1208 goto err_unlock;
1209 }
1210
1211 err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
1212 info->snd_seq, info->nlhdr->nlmsg_type, pctx);
1213 if (err < 0)
1214 goto err_unlock_free;
1215
1216 rcu_read_unlock();
1217 return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);
1218
1219err_unlock_free:
1220 kfree_skb(skb2);
1221err_unlock:
1222 rcu_read_unlock();
1223 return err;
1224}
1225
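/* Dump state lives in the netlink callback: args[0] is the tid_hash bucket,
 * args[1] the entry index within that bucket, args[2] the gtp device being
 * walked and args[4] flags that the dump has completed.
 */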
1226static int gtp_genl_dump_pdp(struct sk_buff *skb,
1227 struct netlink_callback *cb)
1228{
1229 struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
1230 int i, j, bucket = cb->args[0], skip = cb->args[1];
1231 struct net *net = sock_net(skb->sk);
1232 struct pdp_ctx *pctx;
1233 struct gtp_net *gn;
1234
1235 gn = net_generic(net, gtp_net_id);
1236
1237 if (cb->args[4])
1238 return 0;
1239
1240 rcu_read_lock();
1241 list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
1242 if (last_gtp && last_gtp != gtp)
1243 continue;
1244 else
1245 last_gtp = NULL;
1246
1247 for (i = bucket; i < gtp->hash_size; i++) {
1248 j = 0;
1249 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
1250 hlist_tid) {
1251 if (j >= skip &&
1252 gtp_genl_fill_info(skb,
1253 NETLINK_CB(cb->skb).portid,
1254 cb->nlh->nlmsg_seq,
1255 cb->nlh->nlmsg_type, pctx)) {
1256 cb->args[0] = i;
1257 cb->args[1] = j;
1258 cb->args[2] = (unsigned long)gtp;
1259 goto out;
1260 }
1261 j++;
1262 }
1263 skip = 0;
1264 }
1265 bucket = 0;
1266 }
1267 cb->args[4] = 1;
1268out:
1269 rcu_read_unlock();
1270 return skb->len;
1271}
1272
1273static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
1274 [GTPA_LINK] = { .type = NLA_U32, },
1275 [GTPA_VERSION] = { .type = NLA_U32, },
1276 [GTPA_TID] = { .type = NLA_U64, },
1277 [GTPA_SGSN_ADDRESS] = { .type = NLA_U32, },
1278 [GTPA_MS_ADDRESS] = { .type = NLA_U32, },
1279 [GTPA_FLOW] = { .type = NLA_U16, },
1280 [GTPA_NET_NS_FD] = { .type = NLA_U32, },
1281 [GTPA_I_TEI] = { .type = NLA_U32, },
1282 [GTPA_O_TEI] = { .type = NLA_U32, },
1283};
1284
1285static const struct genl_ops gtp_genl_ops[] = {
1286 {
1287 .cmd = GTP_CMD_NEWPDP,
1288 .doit = gtp_genl_new_pdp,
1289 .policy = gtp_genl_policy,
1290 .flags = GENL_ADMIN_PERM,
1291 },
1292 {
1293 .cmd = GTP_CMD_DELPDP,
1294 .doit = gtp_genl_del_pdp,
1295 .policy = gtp_genl_policy,
1296 .flags = GENL_ADMIN_PERM,
1297 },
1298 {
1299 .cmd = GTP_CMD_GETPDP,
1300 .doit = gtp_genl_get_pdp,
1301 .dumpit = gtp_genl_dump_pdp,
1302 .policy = gtp_genl_policy,
1303 .flags = GENL_ADMIN_PERM,
1304 },
1305};
1306
1307static int __net_init gtp_net_init(struct net *net)
1308{
1309 struct gtp_net *gn = net_generic(net, gtp_net_id);
1310
1311 INIT_LIST_HEAD(&gn->gtp_dev_list);
1312 return 0;
1313}
1314
1315static void __net_exit gtp_net_exit(struct net *net)
1316{
1317 struct gtp_net *gn = net_generic(net, gtp_net_id);
1318 struct gtp_dev *gtp;
1319 LIST_HEAD(list);
1320
1321 rtnl_lock();
1322 list_for_each_entry(gtp, &gn->gtp_dev_list, list)
1323 gtp_dellink(gtp->dev, &list);
1324
1325 unregister_netdevice_many(&list);
1326 rtnl_unlock();
1327}
1328
1329static struct pernet_operations gtp_net_ops = {
1330 .init = gtp_net_init,
1331 .exit = gtp_net_exit,
1332 .id = &gtp_net_id,
1333 .size = sizeof(struct gtp_net),
1334};
1335
1336static int __init gtp_init(void)
1337{
1338 int err;
1339
1340 get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
1341
1342 err = rtnl_link_register(&gtp_link_ops);
1343 if (err < 0)
1344 goto error_out;
1345
1346 err = genl_register_family_with_ops(&gtp_genl_family, gtp_genl_ops);
1347 if (err < 0)
1348 goto unreg_rtnl_link;
1349
1350 err = register_pernet_subsys(&gtp_net_ops);
1351 if (err < 0)
1352 goto unreg_genl_family;
1353
1354 pr_info("GTP module loaded (pdp ctx size %Zd bytes)\n",
1355 sizeof(struct pdp_ctx));
1356 return 0;
1357
1358unreg_genl_family:
1359 genl_unregister_family(&gtp_genl_family);
1360unreg_rtnl_link:
1361 rtnl_link_unregister(&gtp_link_ops);
1362error_out:
1363 pr_err("error loading GTP module loaded\n");
1364 return err;
1365}
1366late_initcall(gtp_init);
1367
1368static void __exit gtp_fini(void)
1369{
1370 genl_unregister_family(&gtp_genl_family);
1371 rtnl_link_unregister(&gtp_link_ops);
1372 unregister_pernet_subsys(&gtp_net_ops);
1373
1374 pr_info("GTP module unloaded\n");
1375}
1376module_exit(gtp_fini);
1377
1378MODULE_LICENSE("GPL");
1379MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
1380MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
1381MODULE_ALIAS_RTNL_LINK("gtp");
1382MODULE_ALIAS_GENL_FAMILY("gtp");