/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(ax25_frag_lock);

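/*
 * Send a frame to the given destination, creating a new connection (and
 * starting its state machine) if no matching control block already
 * exists. Returns the control block used, or NULL on failure; the
 * caller receives a reference it must release with ax25_cb_put().
 */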
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
	ax25_dev *ax25_dev;
	ax25_cb *ax25;

	/*
	 * Take the default packet length for the device if zero is
	 * specified.
	 */
	if (paclen == 0) {
		if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
			return NULL;

		paclen = ax25_dev->values[AX25_VALUES_PACLEN];
	}

	/*
	 * Look for an existing connection.
	 */
	if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
		ax25_output(ax25, paclen, skb);
		return ax25;		/* It already existed */
	}

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		return NULL;

	if ((ax25 = ax25_create_cb()) == NULL)
		return NULL;

	ax25_fillin_cb(ax25, ax25_dev);

	ax25->source_addr = *src;
	ax25->dest_addr   = *dest;

	if (digi != NULL) {
		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
		if (ax25->digipeat == NULL) {
			ax25_cb_put(ax25);
			return NULL;
		}
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_establish_data_link(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25_dev->dama.slave)
			ax25_ds_establish_data_link(ax25);
		else
			ax25_std_establish_data_link(ax25);
		break;
#endif
	}

	/*
	 * There is one ref for the state machine; a caller needs
	 * one more to put it back, just like with the existing one.
	 */
	ax25_cb_hold(ax25);

	ax25_cb_add(ax25);

	ax25->state = AX25_STATE_1;

	ax25_start_heartbeat(ax25);

	ax25_output(ax25, paclen, skb);

	return ax25;			/* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);

/*
 * All outgoing AX.25 I frames pass via this routine. Therefore this is
 * where the fragmentation of frames takes place. Frames whose data does
 * not fit within the connection's paclen are split; all PIDs other than
 * AX25_P_TEXT use the KA9Q segmentation format.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, len, fragno, ka9qfrag, first = 1;

	if (paclen < 16) {
		WARN_ON_ONCE(1);
		kfree_skb(skb);
		return;
	}

	if ((skb->len - 1) > paclen) {
		if (*skb->data == AX25_P_TEXT) {
			skb_pull(skb, 1);	/* skip PID */
			ka9qfrag = 0;
		} else {
			paclen -= 2;	/* Allow for fragment control info */
			ka9qfrag = 1;
		}

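		/*
		 * fragno is the number of fragments still to follow the
		 * one being built; it is stamped into each KA9Q segment
		 * header and counts down to zero on the final fragment.
		 */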
		fragno = skb->len / paclen;
		if (skb->len % paclen == 0) fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			spin_lock_bh(&ax25_frag_lock);
			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				spin_unlock_bh(&ax25_frag_lock);
				printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
				return;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			spin_unlock_bh(&ax25_frag_lock);

			len = (paclen > skb->len) ? skb->len : paclen;

			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= AX25_SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);  /* Throw it on the queue */
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_kick(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	/*
	 * A DAMA slave is _required_ to work as normal AX.25L2V2
	 * if no DAMA master is available.
	 */
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
		break;
#endif
	}
}

/*
 * This procedure is passed a buffer descriptor for an I frame. It builds
 * the rest of the control part of the frame and then writes it out.
 */
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
	unsigned char *frame;

	if (skb == NULL)
		return;

	skb_reset_network_header(skb);

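	/*
	 * A standard (modulo 8) connection uses a one-byte control field;
	 * an extended (modulo 128) connection needs two bytes to carry
	 * the 7-bit send and receive sequence numbers.
	 */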
	if (ax25->modulus == AX25_MODULUS) {
		frame = skb_push(skb, 1);

		*frame = AX25_I;
		*frame |= (poll_bit) ? AX25_PF : 0;
		*frame |= (ax25->vr << 5);
		*frame |= (ax25->vs << 1);
	} else {
		frame = skb_push(skb, 2);

		frame[0] = AX25_I;
		frame[0] |= (ax25->vs << 1);
		frame[1] = (poll_bit) ? AX25_EPF : 0;
		frame[1] |= (ax25->vr << 1);
	}

	ax25_start_idletimer(ax25);

	ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

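/*
 * Send as many queued I frames as the flow-control window allows.
 * Each frame is cloned onto the ack queue so that it can be
 * retransmitted until the peer acknowledges it.
 */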
void ax25_kick(ax25_cb *ax25)
{
	struct sk_buff *skb, *skbn;
	int last = 1;
	unsigned short start, end, next;

	if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
		return;

	if (ax25->condition & AX25_COND_PEER_RX_BUSY)
		return;

	if (skb_peek(&ax25->write_queue) == NULL)
		return;

	start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
	end   = (ax25->va + ax25->window) % ax25->modulus;

	if (start == end)
		return;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full. Send a poll on the final I frame if
	 * the window is filled.
	 */

	/*
	 * Dequeue the frame and copy it.
	 * Check for race with ax25_clear_queues().
	 */
	skb = skb_dequeue(&ax25->write_queue);
	if (!skb)
		return;

	ax25->vs = start;

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&ax25->write_queue, skb);
			break;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		next = (ax25->vs + 1) % ax25->modulus;
		last = (next == end);

		/*
		 * Transmit the frame copy.
		 * bke 960114: do not set the Poll bit on the last frame
		 * in DAMA mode.
		 */
		switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
		case AX25_PROTO_STD_SIMPLEX:
		case AX25_PROTO_STD_DUPLEX:
			ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
			break;

#ifdef CONFIG_AX25_DAMA_SLAVE
		case AX25_PROTO_DAMA_SLAVE:
			ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
			break;
#endif
		}

		ax25->vs = next;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&ax25->ack_queue, skb);

	} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

	ax25->condition &= ~AX25_COND_ACK_PENDING;

	if (!ax25_t1timer_running(ax25)) {
		ax25_stop_t3timer(ax25);
		ax25_calculate_t1(ax25);
		ax25_start_t1timer(ax25);
	}
}

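/*
 * Prepend the full AX.25 address field (source, destination and any
 * digipeater path) to the frame and hand it to the device queue,
 * reallocating headroom first if the buffer does not have enough.
 */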
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	struct sk_buff *skbn;
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (skb_headroom(skb) < headroom) {
		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			kfree_skb(skb);
			return;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		consume_skb(skb);
		skb = skbn;
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 * A small shim around dev_queue_xmit that adds the KISS control byte
 * and applies any packet forwarding that is in operation.
 */
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *ptr;

	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));

	ptr  = skb_push(skb, 1);
	*ptr = 0x00;			/* KISS */

	dev_queue_xmit(skb);
}

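/*
 * Handle an incoming acknowledgement up to sequence number 'nr'.
 * Returns 1 if the acknowledgement covered new I frames (adjusting the
 * T1/T3 timers accordingly), 0 if nothing new was acknowledged.
 */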
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
	if (ax25->vs == nr) {
		ax25_frames_acked(ax25, nr);
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		return 1;
	} else {
		if (ax25->va != nr) {
			ax25_frames_acked(ax25, nr);
			ax25_calculate_t1(ax25);
			ax25_start_t1timer(ax25);
			return 1;
		}
	}
	return 0;
}