/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>

static int rose_ndevs = 10;

int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;

static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);

static struct proto_ops rose_proto_ops;

ax25_address rose_callsign;

/*
 * Convert a ROSE address into text.
 */
const char *rose2asc(const rose_address *addr)
{
	static char buffer[11];
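	/*
	 * Note: the text is built in a single static buffer, so the
	 * returned pointer is only valid until the next call and the
	 * function is not re-entrant.
	 */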

	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
	    addr->rose_addr[4] == 0x00) {
		strcpy(buffer, "*");
	} else {
		sprintf(buffer, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
			addr->rose_addr[1] & 0xFF,
			addr->rose_addr[2] & 0xFF,
			addr->rose_addr[3] & 0xFF,
			addr->rose_addr[4] & 0xFF);
	}

	return buffer;
}

/*
 * Compare two ROSE addresses, 0 == equal.
 */
int rosecmp(rose_address *addr1, rose_address *addr2)
{
	int i;

	for (i = 0; i < 5; i++)
		if (addr1->rose_addr[i] != addr2->rose_addr[i])
			return 1;

	return 0;
}

/*
 * Compare two ROSE addresses for only mask digits, 0 == equal.
 */
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
	int i, j;

	if (mask > 10)
		return 1;

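	/*
	 * A ROSE address is ten BCD digits packed two per byte, so digit i
	 * lives in byte i/2: even digits in the high nibble, odd digits in
	 * the low nibble.  Compare only the first 'mask' digits.
	 */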
	for (i = 0; i < mask; i++) {
		j = i / 2;

		if ((i % 2) != 0) {
			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
				return 1;
		} else {
			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
				return 1;
		}
	}

	return 0;
}

/*
 * Socket removal during an interrupt is now safe.
 */
static void rose_remove_socket(struct sock *sk)
{
	spin_lock_bh(&rose_list_lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a broken link layer connection to a
 * particular neighbour.
 */
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->neighbour == neigh) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->neighbour = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a dropped device.
 */
static void rose_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->device == dev) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			/*
			 * The neighbour may already have been cleared
			 * (e.g. by rose_kill_by_neigh()), so guard the
			 * dereference.
			 */
			if (rose->neighbour)
				rose->neighbour->use--;
			rose->device = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Handle device status changes.
 */
static int rose_device_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = (struct net_device *)ptr;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	switch (dev->type) {
	case ARPHRD_ROSE:
		rose_kill_by_device(dev);
		break;
	case ARPHRD_AX25:
		rose_link_device_down(dev);
		rose_rt_device_down(dev);
		break;
	}

	return NOTIFY_DONE;
}

/*
 * Add a socket to the bound sockets list.
 */
static void rose_insert_socket(struct sock *sk)
{

	spin_lock_bh(&rose_list_lock);
	sk_add_node(sk, &rose_list);
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Find a socket that wants to accept the Call Request we just
 * received.
 */
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
	struct sock *s;
	struct hlist_node *node;

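	/*
	 * Search in two passes: first look for a listener bound to this
	 * exact callsign (with no digipeaters), then fall back to a
	 * listener bound to the null callsign, which accepts calls for
	 * any callsign at this ROSE address.
	 */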
	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, call) &&
		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
			goto found;
	}

	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
		    s->sk_state == TCP_LISTEN)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a connected ROSE socket given my LCI and neighbour.
 */
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->lci == lci && rose->neighbour == neigh)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a unique LCI for a given neighbour.
 */
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
	int lci;

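	/*
	 * Pick LCIs from opposite ends of the range depending on whether we
	 * are the DCE or the DTE on this link, which mirrors the usual X.25
	 * convention and keeps the two ends of a link from racing to
	 * allocate the same LCI.
	 */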
	if (neigh->dce_mode) {
		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	} else {
		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	}

	return 0;
}

/*
 * Deferred destroy.
 */
void rose_destroy_socket(struct sock *);

/*
 * Handler for deferred kills.
 */
static void rose_destroy_timer(unsigned long data)
{
	rose_destroy_socket((struct sock *)data);
}

/*
 * This is called from user mode and the timers. Thus it protects itself
 * against interrupt users but doesn't worry about being called during
 * work. Once it is removed from the queue no interrupt or bottom half
 * will touch it and we are (fairly 8-) ) safe.
 */
void rose_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	rose_remove_socket(sk);
	rose_stop_heartbeat(sk);
	rose_stop_idletimer(sk);
	rose_stop_timer(sk);

	rose_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {	/* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			rose_start_heartbeat(skb->sk);
			rose_sk(skb->sk)->state = ROSE_STATE_0;
		}

		kfree_skb(skb);
	}

	if (atomic_read(&sk->sk_wmem_alloc) ||
	    atomic_read(&sk->sk_rmem_alloc)) {
		/* Defer: outstanding buffers */
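		/*
		 * Re-arm sk_timer so rose_destroy_timer() retries the
		 * destruction in ten seconds, once the outstanding buffers
		 * have hopefully drained.
		 */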
		init_timer(&sk->sk_timer);
		sk->sk_timer.expires = jiffies + 10 * HZ;
		sk->sk_timer.function = rose_destroy_timer;
		sk->sk_timer.data = (unsigned long)sk;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}

/*
 * Handling for system calls applied via the various interfaces to a
 * ROSE socket object.
 */

static int rose_setsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int opt;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(opt, (int __user *)optval))
		return -EFAULT;

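	/*
	 * The timer options below are supplied in seconds (ROSE_IDLE in
	 * minutes) and stored internally in jiffies.
	 */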
	switch (optname) {
	case ROSE_DEFER:
		rose->defer = opt ? 1 : 0;
		return 0;

	case ROSE_T1:
		if (opt < 1)
			return -EINVAL;
		rose->t1 = opt * HZ;
		return 0;

	case ROSE_T2:
		if (opt < 1)
			return -EINVAL;
		rose->t2 = opt * HZ;
		return 0;

	case ROSE_T3:
		if (opt < 1)
			return -EINVAL;
		rose->t3 = opt * HZ;
		return 0;

	case ROSE_HOLDBACK:
		if (opt < 1)
			return -EINVAL;
		rose->hb = opt * HZ;
		return 0;

	case ROSE_IDLE:
		if (opt < 0)
			return -EINVAL;
		rose->idle = opt * 60 * HZ;
		return 0;

	case ROSE_QBITINCL:
		rose->qbitincl = opt ? 1 : 0;
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}

static int rose_getsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int val = 0;
	int len;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case ROSE_DEFER:
		val = rose->defer;
		break;

	case ROSE_T1:
		val = rose->t1 / HZ;
		break;

	case ROSE_T2:
		val = rose->t2 / HZ;
		break;

	case ROSE_T3:
		val = rose->t3 / HZ;
		break;

	case ROSE_HOLDBACK:
		val = rose->hb / HZ;
		break;

	case ROSE_IDLE:
		val = rose->idle / (60 * HZ);
		break;

	case ROSE_QBITINCL:
		val = rose->qbitincl;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, len, sizeof(int));

	if (put_user(len, optlen))
		return -EFAULT;

	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}

static int rose_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_LISTEN) {
		struct rose_sock *rose = rose_sk(sk);

		rose->dest_ndigis = 0;
		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state = TCP_LISTEN;
		return 0;
	}

	return -EOPNOTSUPP;
}

static struct proto rose_proto = {
	.name = "ROSE",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct rose_sock),
};

static int rose_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct rose_sock *rose;

	if (sock->type != SOCK_SEQPACKET || protocol != 0)
		return -ESOCKTNOSUPPORT;

	if ((sk = sk_alloc(PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL)
		return -ENOMEM;

	rose = rose_sk(sk);

	sock_init_data(sock, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sock->ops = &rose_proto_ops;
	sk->sk_protocol = protocol;

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	rose->t1 = sysctl_rose_call_request_timeout;
	rose->t2 = sysctl_rose_reset_request_timeout;
	rose->t3 = sysctl_rose_clear_request_timeout;
	rose->hb = sysctl_rose_ack_hold_back_timeout;
	rose->idle = sysctl_rose_no_activity_timeout;

	rose->state = ROSE_STATE_0;

	return 0;
}

static struct sock *rose_make_new(struct sock *osk)
{
	struct sock *sk;
	struct rose_sock *rose, *orose;

	if (osk->sk_type != SOCK_SEQPACKET)
		return NULL;

	if ((sk = sk_alloc(PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL)
		return NULL;

	rose = rose_sk(sk);

	sock_init_data(NULL, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sk->sk_type = osk->sk_type;
	sk->sk_socket = osk->sk_socket;
	sk->sk_priority = osk->sk_priority;
	sk->sk_protocol = osk->sk_protocol;
	sk->sk_rcvbuf = osk->sk_rcvbuf;
	sk->sk_sndbuf = osk->sk_sndbuf;
	sk->sk_state = TCP_ESTABLISHED;
	sk->sk_sleep = osk->sk_sleep;
	sock_copy_flags(sk, osk);

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	orose = rose_sk(osk);
	rose->t1 = orose->t1;
	rose->t2 = orose->t2;
	rose->t3 = orose->t3;
	rose->hb = orose->hb;
	rose->idle = orose->idle;
	rose->defer = orose->defer;
	rose->device = orose->device;
	rose->qbitincl = orose->qbitincl;

	return sk;
}

static int rose_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose;

	if (sk == NULL) return 0;

	rose = rose_sk(sk);

	switch (rose->state) {
	case ROSE_STATE_0:
		rose_disconnect(sk, 0, -1, -1);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_2:
		rose->neighbour->use--;
		rose_disconnect(sk, 0, -1, -1);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_1:
	case ROSE_STATE_3:
	case ROSE_STATE_4:
	case ROSE_STATE_5:
		rose_clear_queues(sk);
		rose_stop_idletimer(sk);
		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
		rose_start_t3timer(sk);
		rose->state = ROSE_STATE_2;
		sk->sk_state = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;

	default:
		break;
	}

	sock->sk = NULL;

	return 0;
}

static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	struct net_device *dev;
	ax25_address *source;
	ax25_uid_assoc *user;
	int n;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return -EINVAL;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
		SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
		return -EADDRNOTAVAIL;
	}

	source = &addr->srose_call;

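	/*
	 * Prefer the callsign registered for this uid in the AX.25 UID
	 * table; fall back to the caller-supplied callsign only when no
	 * entry exists and the UID policy permits it.
	 */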
	user = ax25_findbyuid(current->euid);
	if (user) {
		rose->source_call = user->call;
		ax25_uid_put(user);
	} else {
		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
			return -EACCES;
		rose->source_call = *source;
	}

	rose->source_addr = addr->srose_addr;
	rose->device = dev;
	rose->source_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->source_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->source_ndigis == 1) {
			rose->source_digis[0] = addr->srose_digi;
		}
	}

	rose_insert_socket(sk);

	sock_reset_flag(sk, SOCK_ZAPPED);
	SOCK_DEBUG(sk, "ROSE: socket is bound\n");
	return 0;
}

static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	unsigned char cause, diagnostic;
	struct net_device *dev;
	ax25_uid_assoc *user;
	int n;

	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		sock->state = SS_CONNECTED;
		return 0;	/* Connect completed during an ERESTARTSYS event */
	}

	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		return -ECONNREFUSED;
	}

	if (sk->sk_state == TCP_ESTABLISHED)
		return -EISCONN;	/* No reconnect on a seqpacket socket */

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
		return -EINVAL;

	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
					 &diagnostic);
	if (!rose->neighbour)
		return -ENETUNREACH;

	rose->lci = rose_new_lci(rose->neighbour);
	if (!rose->lci)
		return -ENETUNREACH;

	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
		sock_reset_flag(sk, SOCK_ZAPPED);

		if ((dev = rose_dev_first()) == NULL)
			return -ENETUNREACH;

		user = ax25_findbyuid(current->euid);
		if (!user)
			return -EINVAL;

		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
		rose->source_call = user->call;
		rose->device = dev;
		ax25_uid_put(user);

		rose_insert_socket(sk);		/* Finish the bind */
	}

	rose->dest_addr = addr->srose_addr;
	rose->dest_call = addr->srose_call;
	rose->rand = ((long)rose & 0xFFFF) + rose->lci;
	rose->dest_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->dest_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->dest_ndigis == 1) {
			rose->dest_digis[0] = addr->srose_digi;
		}
	}

	/* Move to connecting socket, start sending Connect Requests */
	sock->state = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	rose->state = ROSE_STATE_1;

	rose->neighbour->use++;

	rose_write_internal(sk, ROSE_CALL_REQUEST);
	rose_start_heartbeat(sk);
	rose_start_t1timer(sk);

	/* Now the loop */
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
		return -EINPROGRESS;

	/*
	 * A Connect Ack with Choke or timeout or failed routing will go to
	 * closed.
	 */
	if (sk->sk_state == TCP_SYN_SENT) {
		struct task_struct *tsk = current;
		DECLARE_WAITQUEUE(wait, tsk);

		add_wait_queue(sk->sk_sleep, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;
			if (!signal_pending(tsk)) {
				schedule();
				continue;
			}
			current->state = TASK_RUNNING;
			remove_wait_queue(sk->sk_sleep, &wait);
			return -ERESTARTSYS;
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(sk->sk_sleep, &wait);
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		sock->state = SS_UNCONNECTED;
		return sock_error(sk);	/* Always set at this point */
	}

	sock->state = SS_CONNECTED;

	return 0;
}

static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	struct sk_buff *skb;
	struct sock *newsk;
	struct sock *sk;
	int err = 0;

	if ((sk = sock->sk) == NULL)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_type != SOCK_SEQPACKET) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * The write queue this time is holding sockets ready to use
	 * hooked into the SABM we saved
	 */
	add_wait_queue(sk->sk_sleep, &wait);
	for (;;) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		current->state = TASK_INTERRUPTIBLE;
		release_sock(sk);
		if (flags & O_NONBLOCK) {
			current->state = TASK_RUNNING;
			remove_wait_queue(sk->sk_sleep, &wait);
			return -EWOULDBLOCK;
		}
		if (!signal_pending(tsk)) {
			schedule();
			lock_sock(sk);
			continue;
		}
		/*
		 * A signal is pending: take ourselves back off the wait
		 * queue before bailing out, otherwise the on-stack wait
		 * queue entry would be left linked in.
		 */
		current->state = TASK_RUNNING;
		remove_wait_queue(sk->sk_sleep, &wait);
		return -ERESTARTSYS;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sk_sleep, &wait);

	newsk = skb->sk;
	newsk->sk_socket = newsock;
	newsk->sk_sleep = &newsock->wait;

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk->sk_ack_backlog--;
	newsock->sk = newsk;

out:
	release_sock(sk);

	return err;
}

static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
	int *uaddr_len, int peer)
{
	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int n;

	if (peer != 0) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->dest_addr;
		srose->srose_call = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		for (n = 0; n < rose->dest_ndigis; n++)
			srose->srose_digis[n] = rose->dest_digis[n];
	} else {
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->source_addr;
		srose->srose_call = rose->source_call;
		srose->srose_ndigis = rose->source_ndigis;
		for (n = 0; n < rose->source_ndigis; n++)
			srose->srose_digis[n] = rose->source_digis[n];
	}

	*uaddr_len = sizeof(struct full_sockaddr_rose);
	return 0;
}

int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct rose_sock *make_rose;
	struct rose_facilities_struct facilities;
	int n, len;

	skb->sk = NULL;		/* Initially we don't know who it's for */

	/*
	 * skb->data points to the rose frame start
	 */
	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));

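	/*
	 * skb->data[3] holds the address-length octet of the incoming Call
	 * Request: one nibble per address gives the number of BCD digits in
	 * the called and calling addresses.  Two digits pack into one byte,
	 * so (digits + 1) / 2 rounds up to whole bytes; 'len' then covers
	 * the complete address block and the facilities are parsed from
	 * skb->data + len + 4.
	 */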
	len = (((skb->data[3] >> 4) & 0x0F) + 1) / 2;
	len += (((skb->data[3] >> 0) & 0x0F) + 1) / 2;
	if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
		return 0;
	}

	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);

	/*
	 * We can't accept the Call Request.
	 */
	if (sk == NULL || sk_acceptq_is_full(sk) ||
	    (make = rose_make_new(sk)) == NULL) {
		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
		return 0;
	}

	skb->sk = make;
	make->sk_state = TCP_ESTABLISHED;
	make_rose = rose_sk(make);

	make_rose->lci = lci;
	make_rose->dest_addr = facilities.dest_addr;
	make_rose->dest_call = facilities.dest_call;
	make_rose->dest_ndigis = facilities.dest_ndigis;
	for (n = 0 ; n < facilities.dest_ndigis ; n++)
		make_rose->dest_digis[n] = facilities.dest_digis[n];
	make_rose->source_addr = facilities.source_addr;
	make_rose->source_call = facilities.source_call;
	make_rose->source_ndigis = facilities.source_ndigis;
	for (n = 0 ; n < facilities.source_ndigis ; n++)
		make_rose->source_digis[n] = facilities.source_digis[n];
	make_rose->neighbour = neigh;
	make_rose->device = dev;
	make_rose->facilities = facilities;

	make_rose->neighbour->use++;

	if (rose_sk(sk)->defer) {
		make_rose->state = ROSE_STATE_5;
	} else {
		rose_write_internal(make, ROSE_CALL_ACCEPTED);
		make_rose->state = ROSE_STATE_3;
		rose_start_idletimer(make);
	}

	make_rose->condition = 0x00;
	make_rose->vs = 0;
	make_rose->va = 0;
	make_rose->vr = 0;
	make_rose->vl = 0;
	sk->sk_ack_backlog++;

	rose_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	rose_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);

	return 1;
}

static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
	int err;
	struct full_sockaddr_rose srose;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int n, size, qbit = 0;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	if (sock_flag(sk, SOCK_ZAPPED))
		return -EADDRNOTAVAIL;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		return -EPIPE;
	}

	if (rose->neighbour == NULL || rose->device == NULL)
		return -ENETUNREACH;

	if (usrose != NULL) {
		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
			return -EINVAL;
		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
		memcpy(&srose, usrose, msg->msg_namelen);
		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
			return -EISCONN;
		if (srose.srose_ndigis != rose->dest_ndigis)
			return -EISCONN;
		if (srose.srose_ndigis == rose->dest_ndigis) {
			for (n = 0 ; n < srose.srose_ndigis ; n++)
				if (ax25cmp(&rose->dest_digis[n],
					    &srose.srose_digis[n]))
					return -EISCONN;
		}
		if (srose.srose_family != AF_ROSE)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;

		srose.srose_family = AF_ROSE;
		srose.srose_addr = rose->dest_addr;
		srose.srose_call = rose->dest_call;
		srose.srose_ndigis = rose->dest_ndigis;
		for (n = 0 ; n < rose->dest_ndigis ; n++)
			srose.srose_digis[n] = rose->dest_digis[n];
	}

	SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");

	/* Build a packet */
	SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;

	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		return err;

	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);

	/*
	 * Put the data on the end
	 */
	SOCK_DEBUG(sk, "ROSE: Appending user data\n");

	asmptr = skb->h.raw = skb_put(skb, len);

	err = memcpy_fromiovec(asmptr, msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/*
	 * If the Q BIT Include socket option is in force, the first
	 * byte of the user data is the logical value of the Q Bit.
	 */
	if (rose->qbitincl) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 * Push down the ROSE header
	 */
	asmptr = skb_push(skb, ROSE_MIN_LEN);

	SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");

	/* Build a ROSE Network header */
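	/*
	 * The ROSE_MIN_LEN header built here is three bytes: the high nibble
	 * of byte 0 carries the GFI, the low nibble of byte 0 plus byte 1
	 * carry the 12-bit LCI, and byte 2 is the packet type (ROSE_DATA).
	 * The Q bit, if requested, is also set in byte 0.
	 */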
	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
	asmptr[1] = (rose->lci >> 0) & 0xFF;
	asmptr[2] = ROSE_DATA;

	if (qbit)
		asmptr[0] |= ROSE_Q_BIT;

	SOCK_DEBUG(sk, "ROSE: Built header.\n");

	SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");

	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		return -ENOTCONN;
	}

#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
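	/*
	 * When M_BIT support is compiled in, payloads larger than one packet
	 * are split into ROSE_PACLEN-sized fragments.  Each fragment gets a
	 * copy of the three byte header, and the M (more data) bit is set on
	 * every fragment except the last.
	 */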
	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
		unsigned char header[ROSE_MIN_LEN];
		struct sk_buff *skbn;
		int frontlen;
		int lg;

		/* Save a copy of the Header */
		memcpy(header, skb->data, ROSE_MIN_LEN);
		skb_pull(skb, ROSE_MIN_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
				kfree_skb(skb);
				return err;
			}

			skbn->sk = sk;
			skbn->free = 1;
			skbn->arp = 1;

			skb_reserve(skbn, frontlen);

			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;

			/* Copy the user data */
			memcpy(skb_put(skbn, lg), skb->data, lg);
			skb_pull(skb, lg);

			/* Duplicate the Header */
			skb_push(skbn, ROSE_MIN_LEN);
			memcpy(skbn->data, header, ROSE_MIN_LEN);

			if (skb->len > 0)
				skbn->data[2] |= M_BIT;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		skb->free = 1;
		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
	}
#else
	skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
#endif

	rose_kick(sk);

	return len;
}


static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
	size_t copied;
	unsigned char *asmptr;
	struct sk_buff *skb;
	int n, er, qbit;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* Now we can treat all alike */
	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
		return er;

	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;

	skb_pull(skb, ROSE_MIN_LEN);

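	/*
	 * Mirror of the qbitincl handling in rose_sendmsg(): if the option
	 * is set, the received Q bit value is handed to the user as an
	 * extra leading byte of data.
	 */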
	if (rose->qbitincl) {
		asmptr = skb_push(skb, 1);
		*asmptr = qbit;
	}

	skb->h.raw = skb->data;
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (srose != NULL) {
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->dest_addr;
		srose->srose_call = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
			for (n = 0 ; n < rose->dest_ndigis ; n++)
				full_srose->srose_digis[n] = rose->dest_digis[n];
			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
		} else {
			if (rose->dest_ndigis >= 1) {
				srose->srose_ndigis = 1;
				srose->srose_digi = rose->dest_digis[0];
			}
			msg->msg_namelen = sizeof(struct sockaddr_rose);
		}
	}

	skb_free_datagram(sk, skb);

	return copied;
}


static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case TIOCOUTQ: {
		long amount;
		amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amount < 0)
			amount = 0;
		return put_user(amount, (unsigned int __user *)argp);
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		long amount = 0L;
		/* These two are safe on a single CPU system as only user tasks fiddle here */
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		return put_user(amount, (unsigned int __user *)argp);
	}

	case SIOCGSTAMP:
		if (sk != NULL)
			return sock_get_timestamp(sk, (struct timeval __user *)argp);
		return -EINVAL;

	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		return -EINVAL;

	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCRSCLRRT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return rose_rt_ioctl(cmd, argp);

	case SIOCRSGCAUSE: {
		struct rose_cause_struct rose_cause;
		rose_cause.cause = rose->cause;
		rose_cause.diagnostic = rose->diagnostic;
		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
	}

	case SIOCRSSCAUSE: {
		struct rose_cause_struct rose_cause;
		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
			return -EFAULT;
		rose->cause = rose_cause.cause;
		rose->diagnostic = rose_cause.diagnostic;
		return 0;
	}

	case SIOCRSSL2CALL:
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_release(&rose_callsign, NULL);
		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
			return -EFAULT;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_register(&rose_callsign, NULL);
		return 0;

	case SIOCRSGL2CALL:
		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;

	case SIOCRSACCEPT:
		if (rose->state == ROSE_STATE_5) {
			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
			rose_start_idletimer(sk);
			rose->condition = 0x00;
			rose->vs = 0;
			rose->va = 0;
			rose->vr = 0;
			rose->vl = 0;
			rose->state = ROSE_STATE_3;
		}
		return 0;

	default:
		return dev_ioctl(cmd, argp);
	}

	return 0;
}

#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
{
	int i;
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	i = 1;
	sk_for_each(s, node, &rose_list) {
		if (i == *pos)
			return s;
		++i;
	}
	return NULL;
}

static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
		: sk_next((struct sock *)v);
}

static void rose_info_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&rose_list_lock);
}

static int rose_info_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");

	else {
		struct sock *s = v;
		struct rose_sock *rose = rose_sk(s);
		const char *devname, *callsign;
		const struct net_device *dev = rose->device;

		if (!dev)
			devname = "???";
		else
			devname = dev->name;

		seq_printf(seq, "%-10s %-9s ",
			   rose2asc(&rose->dest_addr),
			   ax2asc(&rose->dest_call));

		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
			callsign = "??????-?";
		else
			callsign = ax2asc(&rose->source_call);

		seq_printf(seq,
			   "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
			   rose2asc(&rose->source_addr),
			   callsign,
			   devname,
			   rose->lci & 0x0FFF,
			   (rose->neighbour) ? rose->neighbour->number : 0,
			   rose->state,
			   rose->vs,
			   rose->vr,
			   rose->va,
			   ax25_display_timer(&rose->timer) / HZ,
			   rose->t1 / HZ,
			   rose->t2 / HZ,
			   rose->t3 / HZ,
			   rose->hb / HZ,
			   ax25_display_timer(&rose->idletimer) / (60 * HZ),
			   rose->idle / (60 * HZ),
			   atomic_read(&s->sk_wmem_alloc),
			   atomic_read(&s->sk_rmem_alloc),
			   s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
	}

	return 0;
}

static struct seq_operations rose_info_seqops = {
	.start = rose_info_start,
	.next = rose_info_next,
	.stop = rose_info_stop,
	.show = rose_info_show,
};

static int rose_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_info_seqops);
}

static struct file_operations rose_info_fops = {
	.owner = THIS_MODULE,
	.open = rose_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif	/* CONFIG_PROC_FS */

static struct net_proto_family rose_family_ops = {
	.family = PF_ROSE,
	.create = rose_create,
	.owner = THIS_MODULE,
};

static struct proto_ops rose_proto_ops = {
	.family = PF_ROSE,
	.owner = THIS_MODULE,
	.release = rose_release,
	.bind = rose_bind,
	.connect = rose_connect,
	.socketpair = sock_no_socketpair,
	.accept = rose_accept,
	.getname = rose_getname,
	.poll = datagram_poll,
	.ioctl = rose_ioctl,
	.listen = rose_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = rose_setsockopt,
	.getsockopt = rose_getsockopt,
	.sendmsg = rose_sendmsg,
	.recvmsg = rose_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct notifier_block rose_dev_notifier = {
	.notifier_call = rose_device_event,
};

static struct net_device **dev_rose;

static const char banner[] = KERN_INFO "F6FBB/G4KLX ROSE for Linux. Version 0.62 for AX25.037 Linux 2.4\n";

static int __init rose_proto_init(void)
{
	int i;
	int rc = proto_register(&rose_proto, 0);

	if (rc != 0)
		goto out;

	rose_callsign = null_ax25_address;

	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
		return -1;
	}

	dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
	if (dev_rose == NULL) {
		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
		return -1;
	}

	memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*));
	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev;
		char name[IFNAMSIZ];

		sprintf(name, "rose%d", i);
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   name, rose_setup);
		if (!dev) {
			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
			goto fail;
		}
		if (register_netdev(dev)) {
			printk(KERN_ERR "ROSE: netdevice registration failed\n");
			free_netdev(dev);
			goto fail;
		}
		dev_rose[i] = dev;
	}

	sock_register(&rose_family_ops);
	register_netdevice_notifier(&rose_dev_notifier);
	printk(banner);

	ax25_protocol_register(AX25_P_ROSE, rose_route_frame);
	ax25_linkfail_register(rose_link_failed);

#ifdef CONFIG_SYSCTL
	rose_register_sysctl();
#endif
	rose_loopback_init();

	rose_add_loopback_neigh();

	proc_net_fops_create("rose", S_IRUGO, &rose_info_fops);
	proc_net_fops_create("rose_neigh", S_IRUGO, &rose_neigh_fops);
	proc_net_fops_create("rose_nodes", S_IRUGO, &rose_nodes_fops);
	proc_net_fops_create("rose_routes", S_IRUGO, &rose_routes_fops);
out:
	return rc;
fail:
	while (--i >= 0) {
		unregister_netdev(dev_rose[i]);
		free_netdev(dev_rose[i]);
	}
	kfree(dev_rose);
	proto_unregister(&rose_proto);
	return -ENOMEM;
}
module_init(rose_proto_init);

module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");

MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ROSE);

static void __exit rose_exit(void)
{
	int i;

	proc_net_remove("rose");
	proc_net_remove("rose_neigh");
	proc_net_remove("rose_nodes");
	proc_net_remove("rose_routes");
	rose_loopback_clear();

	rose_rt_free();

	ax25_protocol_release(AX25_P_ROSE);
	ax25_linkfail_release(rose_link_failed);

	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
		ax25_listen_release(&rose_callsign, NULL);

#ifdef CONFIG_SYSCTL
	rose_unregister_sysctl();
#endif
	unregister_netdevice_notifier(&rose_dev_notifier);

	sock_unregister(PF_ROSE);

	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev = dev_rose[i];

		if (dev) {
			unregister_netdev(dev);
			free_netdev(dev);
		}
	}

	kfree(dev_rose);
	proto_unregister(&rose_proto);
}

module_exit(rose_exit);