/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  New timer architecture.
 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 *					  negotiation.
 *	2000-09-04	Henner Eisen	  dev_hold() / dev_put() for x25_neigh.
 */

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>

#ifdef KW_TAINT_ANALYSIS
extern void *get_tainted_stuff();
#endif
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);

static void x25_t20timer_expiry(unsigned long);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);

/*
 *	Linux set/reset timer routines
 */
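/*
 *	T20 is the Restart Request retransmission timer.  While a Restart
 *	Request of our own is outstanding it is (re)armed for nb->t20 jiffies;
 *	on expiry x25_t20timer_expiry() retransmits the Restart Request and
 *	rearms itself, until x25_stop_t20timer() is called when a Restart
 *	Confirmation (or a crossing Restart Request) arrives.
 */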
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}

static void x25_t20timer_expiry(unsigned long param)
{
	struct x25_neigh *nb = (struct x25_neigh *)param;

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}

static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}

static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
	return timer_pending(&nb->t20timer);
}

/*
 *	This handles all restart and diagnostic frames.
 */
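/*
 *	These frames arrive on logical channel 0.  A Restart Request or a
 *	Restart Confirmation moves the link to X25_LINK_STATE_3 and stops T20;
 *	an incoming Restart Request is only answered with a Restart
 *	Confirmation if no Restart Request of our own is still pending.  Once
 *	state 3 is reached, everything queued on nb->queue is transmitted.
 */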
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;
	int confirm;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		confirm = !x25_t20timer_pending(nb);
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		if (confirm)
			x25_transmit_restart_confirmation(nb);
		break;

	case X25_RESTART_CONFIRMATION:
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		break;

	case X25_DIAGNOSTIC:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
		       skb->data[3], skb->data[4],
		       skb->data[5], skb->data[6]);
		break;

	default:
		printk(KERN_WARNING "x25: received unknown %02X with LCI 000\n",
		       frametype);
		break;
	}

	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}

/*
 *	This routine is called when a Restart Request is needed
 */
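/*
 *	The packet built below is the three byte header (GFI, LCI 0, packet
 *	type) followed by a cause byte of 0x00 and a diagnostic byte of 0x00.
 */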
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_REQUEST;
	*dptr++ = 0x00;
	*dptr++ = 0;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

/*
 *	This routine is called when a Restart Confirmation is needed
 */
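/*
 *	Unlike the Restart Request, this is just the bare three byte header:
 *	GFI, LCI 0 and the Restart Confirmation packet type.
 */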
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_CONFIRMATION;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

/*
 *	This routine is called when a Clear Request is needed outside of the
 *	context of a connected socket.
 */
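/*
 *	The twelve bit LCI is split across the header: its top four bits share
 *	the first octet with the GFI and the remaining eight bits fill the
 *	second octet.  The caller-supplied cause byte is followed by a
 *	diagnostic byte of 0x00.
 */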
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
				unsigned char cause)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
					 X25_GFI_EXTSEQ :
					 X25_GFI_STDSEQ);
	*dptr++ = (lci >> 0) & 0xFF;
	*dptr++ = X25_CLEAR_REQUEST;
	*dptr++ = cause;
	*dptr++ = 0x00;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

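/*
 *	Queue a frame on a neighbour, or send it at once if the link is ready.
 *	In state 0 the frame is queued and link establishment is started; in
 *	states 1 and 2 it is queued until the restart handshake completes; in
 *	state 3 it is sent immediately.
 */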
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		skb_queue_tail(&nb->queue, skb);
		nb->state = X25_LINK_STATE_1;
		x25_establish_link(nb);
		break;
	case X25_LINK_STATE_1:
	case X25_LINK_STATE_2:
		skb_queue_tail(&nb->queue, skb);
		break;
	case X25_LINK_STATE_3:
		x25_send_frame(skb, nb);
		break;
	}
}

/*
 *	Called when the link layer has become established.
 */
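/*
 *	In state 0 the link simply moves to state 2 and waits for the remote
 *	end to send a Restart Request.  In state 1, where establishment was
 *	triggered by x25_transmit_link(), a Restart Request is sent and T20
 *	started as well.
 */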
void x25_link_established(struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		nb->state = X25_LINK_STATE_2;
		break;
	case X25_LINK_STATE_1:
		x25_transmit_restart_request(nb);
		nb->state = X25_LINK_STATE_2;
		x25_start_t20timer(nb);
		break;
	}
}

/*
 *	Called when the link layer has terminated, or an establishment
 *	request has failed.
 */

void x25_link_terminated(struct x25_neigh *nb)
{
	nb->state = X25_LINK_STATE_0;
	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}

/*
 *	Add a new device.
 */
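/*
 *	Allocate and initialise an x25_neigh for the device: an empty transmit
 *	queue, the T20 timer, link state 0, standard (modulo 8) sequencing and
 *	a facilities mask allowing negotiation of reverse charging, throughput,
 *	packet size and window size.  The neighbour starts with one reference
 *	and is added to x25_neigh_list.
 */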
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	setup_timer(&nb->t20timer, x25_t20timer_expiry, (unsigned long)nb);

	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enables negotiation
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				X25_MASK_THROUGHPUT |
				X25_MASK_PACKET_SIZE |
				X25_MASK_WINDOW_SIZE;
	nb->t20      = sysctl_x25_restart_request_timeout;
	atomic_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}

/**
 *	__x25_remove_neigh - remove neighbour from x25_neigh_list
 *	@nb: neigh to remove
 *
 *	Remove neighbour from x25_neigh_list, if it is present.
 *	Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}

/*
 *	A device has been removed, remove its links.
 */
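/*
 *	Walk x25_neigh_list under the writer lock and unlink every neighbour
 *	bound to the departing device, dropping the device reference taken in
 *	x25_link_device_up() for each one removed.
 */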
void x25_link_device_down(struct net_device *dev)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			__x25_remove_neigh(nb);
			dev_put(dev);
		}
	}

	write_unlock_bh(&x25_neigh_list_lock);
}

/*
 *	Given a device, return its neighbour with a reference held; the caller
 *	must release it with x25_neigh_put() when finished.
 */
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
	struct x25_neigh *nb, *use = NULL;
	struct list_head *entry;

	read_lock_bh(&x25_neigh_list_lock);
	list_for_each(entry, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			use = nb;
			break;
		}
	}

	if (use)
		x25_neigh_hold(use);
	read_unlock_bh(&x25_neigh_list_lock);
	return use;
}

/*
 *	Handle the ioctls that control the subscription functions.
 */
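/*
 *	SIOCX25GSUBSCRIP copies the neighbour's extended-sequencing flag and
 *	global facilities mask out to user space; SIOCX25SSUBSCRIP sets them,
 *	accepting only 0 or 1 for the extended flag.  The neighbour is looked
 *	up by the device name in x25_subscr.device.
 *
 *	A rough user-space sketch (the device name and socket fd below are
 *	hypothetical):
 *
 *		struct x25_subscrip_struct s = { .device = "hdlc0" };
 *		ioctl(sock, SIOCX25GSUBSCRIP, &s);
 *		s.extended = 1;
 *		ioctl(sock, SIOCX25SSUBSCRIP, &s);
 */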
int x25_subscr_ioctl(unsigned int cmd, void __user *arg_actual)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;
#ifdef KW_TAINT_ANALYSIS
	void __user *arg = (void __user *)get_tainted_stuff();
#else
	void __user *arg = arg_actual;
#endif

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}

/*
 *	Release all memory associated with X.25 neighbour structures.
 */
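/*
 *	Note that the device pointer is saved before __x25_remove_neigh()
 *	drops what may be the last reference to the neighbour, so dev_put()
 *	is not called through freed memory.
 */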
void __exit x25_link_free(void)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		struct net_device *dev;

		nb = list_entry(entry, struct x25_neigh, node);
		dev = nb->dev;
		__x25_remove_neigh(nb);
		dev_put(dev);
	}
	write_unlock_bh(&x25_neigh_list_lock);
}