/*
 * WUSB Wire Adapter: WLP interface
 * Driver for the Linux Network stack.
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * i1480u's RX handling is simple. The i1480u will send the received
 * network packets broken up in fragments; 1 to N fragments make a
 * packet; we assemble them together and deliver the packet with netif_rx().
 *
 * Because each USB transfer is a *single* fragment (except when the
 * transfer contains a first fragment), each URB thus called back
 * contains one or two fragments. So we queue N URBs, each with its own
 * fragment buffer. When a URB is done, we process it (adding to the
 * current skb from the fragment buffer until complete). Once
 * processed, we requeue the URB. There is always a bunch of URBs
 * ready to take data, so the gap between transfers should be minimal.
 *
 * A URB's transfer buffer is the data field of a socket buffer. This
 * reduces copying, as data can be passed directly to the network layer.
 * If a complete packet or a 1st fragment is received, the URB's transfer
 * buffer is taken away from it and used to send data to the network
 * layer. In this case a new transfer buffer is allocated to the URB
 * before it is requeued. If a "NEXT" or "LAST" fragment is received,
 * the fragment contents are appended to the RX packet under construction
 * and the transfer buffer is reused. To be able to use this buffer to
 * assemble complete packets, we set each buffer's size to that of the
 * MAX Ethernet packet that can be received. There is thus room for
 * improvement in memory usage.
 *
 * When the max TX fragment size increases, we should be able to read
 * data into the skbs directly with very simple code.
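 *
 * As a sketch (inferred from the parsing code in i1480u_rx_buffer()
 * below, not from a hardware spec), the buffers handed back by the
 * device look like this:
 *
 *   1st fragment:       [untd_hdr_1st][wlp_rx_hdr][payload...]
 *   NEXT/LAST fragment: [untd_hdr_rst][payload...]
 *   complete packet:    [untd_hdr_cmp][wlp_rx_hdr][whole packet]
 *
 * which is why the 1st/CMP cases skb_pull() both the UNTD and the WLP
 * headers, while the NEXT/LAST cases copy only the payload past the
 * untd_hdr_rst header.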
 *
 * ROADMAP:
 *
 *   ENTRY POINTS:
 *
 *     i1480u_rx_setup(): setup RX context [from i1480u_open()]
 *
 *     i1480u_rx_release(): release RX context [from i1480u_stop()]
 *
 *     i1480u_rx_cb(): called when the RX USB URB receives a
 *                     packet. It removes the header and pushes it up
 *                     the Linux netdev stack with netif_rx().
 *
 *       i1480u_rx_buffer()
 *         i1480u_drop() and i1480u_fix()
 *         i1480u_skb_deliver()
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "i1480u-wlp.h"

#define D_LOCAL 0
#include <linux/uwb/debug.h>


/**
 * Setup the RX context
 *
 * Each URB is provided with a transfer_buffer that is the data field
 * of a new socket buffer.
 */
int i1480u_rx_setup(struct i1480u *i1480u)
{
	int result, cnt;
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct usb_endpoint_descriptor *epd;
	struct sk_buff *skb;

	/* Alloc RX stuff */
	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
	result = -ENOMEM;
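	/*
	 * Endpoint 1 of the current altsetting is the device's bulk-in
	 * data endpoint; it is fed to usb_rcvbulkpipe() below when
	 * filling each RX URB.
	 */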
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
		rx_buf->i1480u = i1480u;
		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!skb) {
			dev_err(dev,
				"RX: cannot allocate RX buffer %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		skb->dev = net_dev;
		skb->ip_summed = CHECKSUM_NONE;
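		/*
		 * Reserve 2 bytes of headroom so the 14-byte Ethernet
		 * header leaves the IP header 4-byte aligned; the URB
		 * transfer length below is i1480u_MAX_RX_PKT_SIZE - 2
		 * to match.
		 */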
		skb_reserve(skb, 2);
		rx_buf->data = skb;
		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (unlikely(rx_buf->urb == NULL)) {
			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
			usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
			rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
			i1480u_rx_cb, rx_buf);
		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
		if (unlikely(result < 0)) {
			dev_err(dev, "RX: cannot submit URB %d: %d\n",
				cnt, result);
			goto error;
		}
	}
	return 0;

error:
	i1480u_rx_release(i1480u);
	return result;
}


/**
 * Release the resources associated with the RX context
 *
 * Called both from i1480u_stop() and from the error path of
 * i1480u_rx_setup(), so each field is checked before being released.
 */
void i1480u_rx_release(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].data)
			dev_kfree_skb(i1480u->rx_buf[cnt].data);
		if (i1480u->rx_buf[cnt].urb) {
			usb_kill_urb(i1480u->rx_buf[cnt].urb);
			usb_free_urb(i1480u->rx_buf[cnt].urb);
		}
	}
	if (i1480u->rx_skb != NULL)
		dev_kfree_skb(i1480u->rx_skb);
}

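/*
 * Asynchronously cancel all RX URBs.
 *
 * usb_unlink_urb() is used instead of usb_kill_urb() because this is
 * called from the URB callback path (i1480u_rx_cb() on fatal errors),
 * where sleeping to wait for each URB to terminate is not allowed.
 */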
static
void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].urb)
			usb_unlink_urb(i1480u->rx_buf[cnt].urb);
	}
}

/** Fix an out-of-sequence packet */
#define i1480u_fix(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	dev_kfree_skb_irq(i1480u->rx_skb);		\
	i1480u->rx_skb = NULL;				\
	i1480u->rx_untd_pkt_size = 0;			\
} while (0)


/** Drop an out-of-sequence packet */
#define i1480u_drop(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	i1480u->stats.rx_dropped++;			\
} while (0)
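
/*
 * Note the asymmetry: i1480u_fix() throws away a partially assembled
 * packet (freeing i1480u->rx_skb and resetting the reassembly state)
 * so reception can restart cleanly, while i1480u_drop() only accounts
 * for a packet that will never reach the network stack.
 */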


/**
 * Finalize setting up the SKB and deliver it
 *
 * We first pass the incoming frame to the WLP substack for verification.
 * It may also be a WLP association frame, in which case WLP will take
 * over its processing. If WLP does not take it over, it will still
 * verify it; if the frame is invalid, the skb will be freed by WLP and
 * we will not continue parsing.
 */
static
void i1480u_skb_deliver(struct i1480u *i1480u)
{
	int should_parse;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;

	d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n",
		 i1480u->rx_skb, i1480u->rx_skb->len);
	d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len);
	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
					 &i1480u->rx_srcaddr);
	if (!should_parse)
		goto out;
	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
	d_printf(5, dev, "RX delivered skb(%p), %u bytes\n",
		 i1480u->rx_skb, i1480u->rx_skb->len);
	d_dump(7, dev, i1480u->rx_skb->data,
	       i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len);
	i1480u->stats.rx_packets++;
	i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size;
	net_dev->last_rx = jiffies;
	/* FIXME: flow control: check netif_rx() retval */

	netif_rx(i1480u->rx_skb);		/* deliver */
out:
	i1480u->rx_skb = NULL;
	i1480u->rx_untd_pkt_size = 0;
}


/**
 * Process a buffer of data received from the USB RX endpoint
 *
 * First fragment arrives with next or last fragment. All other fragments
 * arrive alone.
 *
 * /me hates long functions.
 */
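
/*
 * For reference, the four UNTD fragment types handled by the switch
 * below (as implemented here; the names come from i1480u-wlp.h):
 *
 *   FRAG_1ST: start a new rx_skb from this buffer; strip the headers.
 *   FRAG_NXT: append the payload to the rx_skb under construction.
 *   FRAG_LST: append the payload and mark the packet complete.
 *   FRAG_CMP: a whole packet in a single fragment; deliver directly.
 */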
static
void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
{
	unsigned pkt_completed = 0;	/* !0 when we got all pkt fragments */
	size_t untd_hdr_size, untd_frg_size;
	size_t i1480u_hdr_size;
	struct wlp_rx_hdr *i1480u_hdr = NULL;

	struct i1480u *i1480u = rx_buf->i1480u;
	struct sk_buff *skb = rx_buf->data;
	int size_left = rx_buf->urb->actual_length;
	void *ptr = rx_buf->urb->transfer_buffer;	/* also rx_buf->data->data */
	struct untd_hdr *untd_hdr;

	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	struct sk_buff *new_skb;

#if 0
	dev_fnstart(dev,
		    "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
	dev_err(dev, "RX packet, %zu bytes\n", size_left);
	dump_bytes(dev, ptr, size_left);
#endif
	i1480u_hdr_size = sizeof(struct wlp_rx_hdr);

	while (size_left > 0) {
		if (pkt_completed) {
			i1480u_drop(i1480u, "RX: fragment follows completed "
				    "packet in same buffer. Dropping\n");
			break;
		}
		untd_hdr = ptr;
		if (size_left < sizeof(*untd_hdr)) {	/* Check the UNTD header */
			i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
			goto out;
		}
		if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) {	/* Paranoia: TX set? */
			i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
			goto out;
		}
		switch (untd_hdr_type(untd_hdr)) {	/* Check the UNTD header type */
		case i1480u_PKT_FRAG_1ST: {
			struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
			dev_dbg(dev, "1st fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_1st);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: 1st fragment out of "
					   "sequence! Fixing\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short 1st fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
				- i1480u_hdr_size;
			untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			rx_buf->data = NULL;	/* need to create new buffer */
			break;
		}
		case i1480u_PKT_FRAG_NXT: {
			dev_dbg(dev, "nxt fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: next fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short NXT fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			break;
		}
		case i1480u_PKT_FRAG_LST: {
			dev_dbg(dev, "lst fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: last fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short LST fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_frg_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			pkt_completed = 1;
			break;
		}
		case i1480u_PKT_FRAG_CMP: {
			dev_dbg(dev, "cmp fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_cmp);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
					   " fragment!\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short CMP fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
			untd_frg_size = i1480u->rx_untd_pkt_size;
			if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			rx_buf->data = NULL;	/* for handing off skb to network stack */
			pkt_completed = 1;
			i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
			break;
		}
		default:
			i1480u_drop(i1480u, "RX: unknown packet type %u! "
				    "Dropping\n", untd_hdr_type(untd_hdr));
			goto out;
		}
		size_left -= untd_hdr_size + untd_frg_size;
		if (size_left > 0)
			ptr += untd_hdr_size + untd_frg_size;
	}
	if (pkt_completed)
		i1480u_skb_deliver(i1480u);
out:
	/* recreate needed RX buffers */
	if (rx_buf->data == NULL) {
		/* buffer is being used to receive packet, create new */
		new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!new_skb) {
			if (printk_ratelimit())
				dev_err(dev,
					"RX: cannot allocate RX buffer\n");
		} else {
			new_skb->dev = net_dev;
			new_skb->ip_summed = CHECKSUM_NONE;
			skb_reserve(new_skb, 2);
			rx_buf->data = new_skb;
		}
	}
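	/*
	 * Note that the URB is not resubmitted here; the caller,
	 * i1480u_rx_cb(), attaches the new buffer and resubmits, and
	 * skips resubmission if the allocation above failed (data is
	 * still NULL in that case).
	 */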
	return;
}


/**
 * Called when an RX URB has finished receiving or has found some kind
 * of error condition.
 *
 * LIMITATIONS:
 *
 *  - We read USB-transfers; each transfer contains a SINGLE fragment
 *    (it can contain a complete packet, or a 1st, next, or last fragment
 *    of a packet).
 *    Looks like a transfer can contain more than one fragment (07/18/06)
 *
 *  - Each transfer buffer is the size of the maximum packet size (minus
 *    headroom), i1480u_MAX_RX_PKT_SIZE - 2
 *
 *  - We always read the full USB-transfer, no partials.
 *
 *  - Each transfer is read directly into a skb. This skb will be used to
 *    send data to the upper layers if it is the first fragment or a
 *    complete packet. In the other cases the data will be copied from the
 *    skb to another skb that is being prepared for the upper layers from
 *    a previous first fragment.
 *
 * It is simply too much of a pain. Gosh, there should be a unified
 * SG infrastructure for *everything* [so that I could declare a SG
 * buffer, pass it to USB for receiving, append some space to it if
 * I wish, receive more until I have the whole chunk, adapt
 * pointers on each fragment to remove hardware headers and then
 * attach that to an skbuff and netif_rx()].
 */
void i1480u_rx_cb(struct urb *urb)
{
	int result;
	int do_parse_buffer = 1;
	struct i1480u_rx_buf *rx_buf = urb->context;
	struct i1480u *i1480u = rx_buf->i1480u;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;
	u8 rx_buf_idx = rx_buf - i1480u->rx_buf;

	switch (urb->status) {
	case 0:
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_err(dev, "RX URB[%u]: going down %d\n",
			rx_buf_idx, urb->status);
		goto error;
	default:
		dev_err(dev, "RX URB[%u]: unknown status %d\n",
			rx_buf_idx, urb->status);
		if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "RX: max acceptable errors exceeded,"
				" resetting device.\n");
			i1480u_rx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
			goto error;
		}
		do_parse_buffer = 0;
		break;
	}
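	/*
	 * With i1480u_RX_BUFS URBs in flight, completions can race;
	 * the lock makes sure only one callback at a time appends
	 * fragments to the shared i1480u->rx_skb and resubmits.
	 */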
	spin_lock_irqsave(&i1480u->lock, flags);
	/* chew the data fragments, extract network packets */
	if (do_parse_buffer) {
		i1480u_rx_buffer(rx_buf);
		if (rx_buf->data) {
			rx_buf->urb->transfer_buffer = rx_buf->data->data;
			result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
			if (result < 0) {
				dev_err(dev, "RX URB[%u]: cannot submit %d\n",
					rx_buf_idx, result);
			}
		}
	}
	spin_unlock_irqrestore(&i1480u->lock, flags);
error:
	return;
}