/*
 * L2TPv3 IP encapsulation support
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include "l2tp_core.h"

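/* Per-socket state for an L2TP/IP socket: the local and peer tunnel
 * connection IDs plus simple traffic counters.
 */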
struct l2tp_ip_sock {
        /* inet_sock has to be the first member of l2tp_ip_sock */
        struct inet_sock inet;

        __u32            conn_id;
        __u32            peer_conn_id;

        __u64            tx_packets;
        __u64            tx_bytes;
        __u64            tx_errors;
        __u64            rx_packets;
        __u64            rx_bytes;
        __u64            rx_errors;
};

static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;
static struct hlist_head l2tp_ip_bind_table;

static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
        return (struct l2tp_ip_sock *)sk;
}

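/* Search the bind table for a socket bound to the given tunnel connection
 * ID, local address and device; a zero address or device acts as a
 * wildcard. The bind table is protected by l2tp_ip_lock.
 */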
static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
{
        struct hlist_node *node;
        struct sock *sk;

        sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
                struct inet_sock *inet = inet_sk(sk);
                struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);

                if (l2tp == NULL)
                        continue;

                if ((l2tp->conn_id == tunnel_id) &&
                    net_eq(sock_net(sk), net) &&
                    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
                    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
                        goto found;
        }

        sk = NULL;
found:
        return sk;
}

static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
{
        struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
        if (sk)
                sock_hold(sk);

        return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
static int l2tp_ip_recv(struct sk_buff *skb)
{
        struct sock *sk;
        u32 session_id;
        u32 tunnel_id;
        unsigned char *ptr, *optr;
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel = NULL;
        int length;
        int offset;

        /* Point to L2TP header */
        optr = ptr = skb->data;

        if (!pskb_may_pull(skb, 4))
                goto discard;

        session_id = ntohl(*((__be32 *) ptr));
        ptr += 4;

        /* RFC3931: L2TP/IP packets have the first 4 bytes containing
         * the session_id. If it is 0, the packet is a L2TP control
         * frame and the session_id value can be discarded.
         */
        if (session_id == 0) {
                __skb_pull(skb, 4);
                goto pass_up;
        }

        /* Ok, this is a data packet. Lookup the session. */
        session = l2tp_session_find(&init_net, NULL, session_id);
        if (session == NULL)
                goto discard;

        tunnel = session->tunnel;
        if (tunnel == NULL)
                goto discard;

        /* Trace packet contents, if enabled */
        if (tunnel->debug & L2TP_MSG_DATA) {
                length = min(32u, skb->len);
                if (!pskb_may_pull(skb, length))
                        goto discard;

                printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);

                offset = 0;
                do {
                        printk(" %02X", ptr[offset]);
                } while (++offset < length);

                printk("\n");
        }

        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);

        return 0;

pass_up:
        /* Get the tunnel_id from the L2TP header */
        if (!pskb_may_pull(skb, 12))
                goto discard;

        if ((skb->data[0] & 0xc0) != 0xc0)
                goto discard;

        tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
        tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
        if (tunnel != NULL)
                sk = tunnel->sock;
        else {
                struct iphdr *iph = (struct iphdr *) skb_network_header(skb);

                read_lock_bh(&l2tp_ip_lock);
                sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
                read_unlock_bh(&l2tp_ip_lock);
        }

        if (sk == NULL)
                goto discard;

        sock_hold(sk);

        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;

        nf_reset(skb);

        return sk_receive_skb(sk, skb, 1);

discard_put:
        sock_put(sk);

discard:
        kfree_skb(skb);
        return 0;
}

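/* Socket init: prevent port autobind and add the socket to the global
 * l2tp_ip_table.
 */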
static int l2tp_ip_open(struct sock *sk)
{
        /* Prevent autobind. We don't have ports. */
        inet_sk(sk)->inet_num = IPPROTO_L2TP;

        write_lock_bh(&l2tp_ip_lock);
        sk_add_node(sk, &l2tp_ip_table);
        write_unlock_bh(&l2tp_ip_lock);

        return 0;
}

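/* Remove the socket from the l2tp_ip tables and release it. */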
static void l2tp_ip_close(struct sock *sk, long timeout)
{
        write_lock_bh(&l2tp_ip_lock);
        hlist_del_init(&sk->sk_bind_node);
        hlist_del_init(&sk->sk_node);
        write_unlock_bh(&l2tp_ip_lock);
        sk_common_release(sk);
}

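/* Free any transmit buffers still queued when the socket is destroyed. */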
static void l2tp_ip_destroy_sock(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        sk_refcnt_debug_dec(sk);
}

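/* Bind the socket to a local address and tunnel connection ID. Fails with
 * -EADDRINUSE if another socket is already bound to the same tuple.
 */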
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
        int ret = -EINVAL;
        int chk_addr_ret;

        ret = -EADDRINUSE;
        read_lock_bh(&l2tp_ip_lock);
        if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
                goto out_in_use;

        read_unlock_bh(&l2tp_ip_lock);

        lock_sock(sk);
        if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
                goto out;

        chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
        ret = -EADDRNOTAVAIL;
        if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
            chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
                goto out;

        inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
        if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
                inet->inet_saddr = 0;  /* Use device */
        sk_dst_reset(sk);

        l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

        write_lock_bh(&l2tp_ip_lock);
        sk_add_bind_node(sk, &l2tp_ip_bind_table);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);
        ret = 0;
out:
        release_sock(sk);

        return ret;

out_in_use:
        read_unlock_bh(&l2tp_ip_lock);

        return ret;
}

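/* Connect the socket: resolve a route to the peer, record the peer tunnel
 * connection ID and mark the socket established.
 */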
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        int rc;
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
        struct rtable *rt;
        __be32 saddr;
        int oif;

        rc = -EINVAL;
        if (addr_len < sizeof(*lsa))
                goto out;

        rc = -EAFNOSUPPORT;
        if (lsa->l2tp_family != AF_INET)
                goto out;

        sk_dst_reset(sk);

        oif = sk->sk_bound_dev_if;
        saddr = inet->inet_saddr;

        rc = -EINVAL;
        if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
                goto out;

        rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr,
                              RT_CONN_FLAGS(sk), oif,
                              IPPROTO_L2TP,
                              0, 0, sk, 1);
        if (rc) {
                if (rc == -ENETUNREACH)
                        IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
                goto out;
        }

        rc = -ENETUNREACH;
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                goto out;
        }

        l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

        if (!inet->inet_saddr)
                inet->inet_saddr = rt->rt_src;
        if (!inet->inet_rcv_saddr)
                inet->inet_rcv_saddr = rt->rt_src;
        inet->inet_daddr = rt->rt_dst;
        sk->sk_state = TCP_ESTABLISHED;
        inet->inet_id = jiffies;

        sk_dst_set(sk, &rt->dst);

        write_lock_bh(&l2tp_ip_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_add_bind_node(sk, &l2tp_ip_bind_table);
        write_unlock_bh(&l2tp_ip_lock);

        rc = 0;
out:
        return rc;
}

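/* Report the local or peer address and tunnel connection ID of the socket. */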
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
                           int *uaddr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;

        memset(lsa, 0, sizeof(*lsa));
        lsa->l2tp_family = AF_INET;
        if (peer) {
                if (!inet->inet_dport)
                        return -ENOTCONN;
                lsa->l2tp_conn_id = lsk->peer_conn_id;
                lsa->l2tp_addr.s_addr = inet->inet_daddr;
        } else {
                __be32 addr = inet->inet_rcv_saddr;
                if (!addr)
                        addr = inet->inet_saddr;
                lsa->l2tp_conn_id = lsk->conn_id;
                lsa->l2tp_addr.s_addr = addr;
        }
        *uaddr_len = sizeof(*lsa);
        return 0;
}

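/* Queue a received skb on the socket after an IPsec policy check; dropped
 * packets are counted as IP input discards.
 */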
static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
        int rc;

        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto drop;

        nf_reset(skb);

        /* Charge it to the socket, dropping if the queue is full. */
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0)
                goto drop;

        return 0;

drop:
        IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
        return -1;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
{
        struct sk_buff *skb;
        int rc;
        struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options *opt = inet->opt;
        struct rtable *rt = NULL;
        int connected = 0;
        __be32 daddr;

        if (sock_flag(sk, SOCK_DEAD))
                return -ENOTCONN;

        /* Get and verify the address. */
        if (msg->msg_name) {
                struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
                if (msg->msg_namelen < sizeof(*lip))
                        return -EINVAL;

                if (lip->l2tp_family != AF_INET) {
                        if (lip->l2tp_family != AF_UNSPEC)
                                return -EAFNOSUPPORT;
                }

                daddr = lip->l2tp_addr.s_addr;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;

                daddr = inet->inet_daddr;
                connected = 1;
        }

        /* Allocate a socket buffer */
        rc = -ENOMEM;
        skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
                           4 + len, 0, GFP_KERNEL);
        if (!skb)
                goto error;

        /* Reserve space for headers, putting IP header on 4-byte boundary. */
        skb_reserve(skb, 2 + NET_SKB_PAD);
        skb_reset_network_header(skb);
        skb_reserve(skb, sizeof(struct iphdr));
        skb_reset_transport_header(skb);

        /* Insert 0 session_id */
        *((__be32 *) skb_put(skb, 4)) = 0;

        /* Copy user data into skb */
        rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (rc < 0) {
                kfree_skb(skb);
                goto error;
        }

        if (connected)
                rt = (struct rtable *) __sk_dst_check(sk, 0);

        if (rt == NULL) {
                /* Use correct destination address if we have options. */
                if (opt && opt->srr)
                        daddr = opt->faddr;

                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                            .nl_u = { .ip4_u = {
                                                        .daddr = daddr,
                                                        .saddr = inet->inet_saddr,
                                                        .tos = RT_CONN_FLAGS(sk) } },
                                            .proto = sk->sk_protocol,
                                            .flags = inet_sk_flowi_flags(sk),
                                            .uli_u = { .ports = {
                                                        .sport = inet->inet_sport,
                                                        .dport = inet->inet_dport } } };

                        /* If this fails, retransmit mechanism of transport layer will
                         * keep trying until route appears or the connection times
                         * itself out.
                         */
                        security_sk_classify_flow(sk, &fl);
                        if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
                                goto no_route;
                }
                sk_setup_caps(sk, &rt->dst);
        }
        skb_dst_set(skb, dst_clone(&rt->dst));

        /* Queue the packet to IP for output */
        rc = ip_queue_xmit(skb);

error:
        /* Update stats */
        if (rc >= 0) {
                lsa->tx_packets++;
                lsa->tx_bytes += len;
                rc = len;
        } else {
                lsa->tx_errors++;
        }

        return rc;

no_route:
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}

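/* Receive one queued frame and, if requested, report the sender's IP
 * address via msg_name (as a struct sockaddr_in).
 */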
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                           size_t len, int noblock, int flags, int *addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
        size_t copied = 0;
        int err = -EOPNOTSUPP;
        struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
        struct sk_buff *skb;

        if (flags & MSG_OOB)
                goto out;

        if (addr_len)
                *addr_len = sizeof(*sin);

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                goto out;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        if (err)
                goto done;

        sock_recv_timestamp(msg, sk, skb);

        /* Copy the address. */
        if (sin) {
                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                sin->sin_port = 0;
                memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
        }
        if (inet->cmsg_flags)
                ip_cmsg_recv(msg, skb);
        if (flags & MSG_TRUNC)
                copied = skb->len;
done:
        skb_free_datagram(sk, skb);
out:
        if (err) {
                lsk->rx_errors++;
                return err;
        }

        lsk->rx_packets++;
        lsk->rx_bytes += copied;

        return copied;
}

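/* Protocol and socket operations for L2TP/IP (SOCK_DGRAM, IPPROTO_L2TP)
 * sockets.
 */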
static struct proto l2tp_ip_prot = {
        .name              = "L2TP/IP",
        .owner             = THIS_MODULE,
        .init              = l2tp_ip_open,
        .close             = l2tp_ip_close,
        .bind              = l2tp_ip_bind,
        .connect           = l2tp_ip_connect,
        .disconnect        = udp_disconnect,
        .ioctl             = udp_ioctl,
        .destroy           = l2tp_ip_destroy_sock,
        .setsockopt        = ip_setsockopt,
        .getsockopt        = ip_getsockopt,
        .sendmsg           = l2tp_ip_sendmsg,
        .recvmsg           = l2tp_ip_recvmsg,
        .backlog_rcv       = l2tp_ip_backlog_recv,
        .hash              = inet_hash,
        .unhash            = inet_unhash,
        .obj_size          = sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ip_setsockopt,
        .compat_getsockopt = compat_ip_getsockopt,
#endif
};

static const struct proto_ops l2tp_ip_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
        .release           = inet_release,
        .bind              = inet_bind,
        .connect           = inet_dgram_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = l2tp_ip_getname,
        .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw l2tp_ip_protosw = {
        .type         = SOCK_DGRAM,
        .protocol     = IPPROTO_L2TP,
        .prot         = &l2tp_ip_prot,
        .ops          = &l2tp_ip_ops,
        .no_check     = 0,
};

static struct net_protocol l2tp_ip_protocol __read_mostly = {
        .handler      = l2tp_ip_recv,
};

static int __init l2tp_ip_init(void)
{
        int err;

        printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");

        err = proto_register(&l2tp_ip_prot, 1);
        if (err != 0)
                goto out;

        err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
        if (err)
                goto out1;

        inet_register_protosw(&l2tp_ip_protosw);
        return 0;

out1:
        proto_unregister(&l2tp_ip_prot);
out:
        return err;
}

static void __exit l2tp_ip_exit(void)
{
        inet_unregister_protosw(&l2tp_ip_protosw);
        inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
        proto_unregister(&l2tp_ip_prot);
}

module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");

/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
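
/* Usage note (illustrative only, not part of the kernel API definition):
 * a userspace L2TPv3 IP-encapsulation control connection would typically
 * be set up along the lines of the sketch below. The sockaddr_l2tpip
 * fields (l2tp_family, l2tp_addr, l2tp_conn_id) are those used above; the
 * addresses and connection IDs are arbitrary example values.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *
 *	struct sockaddr_l2tpip local = {
 *		.l2tp_family  = AF_INET,
 *		.l2tp_addr    = { .s_addr = htonl(INADDR_ANY) },
 *		.l2tp_conn_id = 42,		(local tunnel ID)
 *	};
 *	bind(fd, (struct sockaddr *)&local, sizeof(local));
 *
 *	struct sockaddr_l2tpip peer = {
 *		.l2tp_family  = AF_INET,
 *		.l2tp_addr    = { .s_addr = inet_addr("192.0.2.1") },
 *		.l2tp_conn_id = 43,		(peer tunnel ID)
 *	};
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *
 * Control frames written to the socket are transmitted with a zero
 * session ID prepended (see l2tp_ip_sendmsg()); received control frames
 * are queued to the socket and read back with recvmsg().
 */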