/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * How transfers work: get a buffer, break it up into segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple; it is a pain to implement.
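 *
 * A rough sketch of the submit-side call flow for one URB (an
 * illustrative overview only; the functions below are authoritative):
 *
 *   wa_urb_enqueue() -> wa_urb_enqueue_b() -> __wa_xfer_setup()
 *     -> __wa_xfer_submit() -> __wa_seg_submit()  (per segment)
 *
 * and, once the segment results have come back over the DTI,
 *
 *   __wa_xfer_is_done() -> wa_xfer_completion() -> wa_xfer_giveback()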
 *
 *
 * ENTRY POINTS
 *
 *   FIXME
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 *   FIXME
 *
 * THIS CODE IS DISGUSTING
 *
 *   You have been warned; this is my second try and I am still not
 *   happy with it.
 *
 * NOTES:
 *
 * - No iso
 *
 * - Supports DMA xfers, control, bulk and maybe interrupt
 *
 * - Does not recycle unused rpipes
 *
 *   An rpipe is assigned to an endpoint the first time it is used,
 *   and then it stays there, assigned, until the endpoint is disabled
 *   (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *   rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *   (should be a mutex).
 *
 *   Two ways it could be done:
 *
 *   (a) set up a timer every time an rpipe's use count drops to 1
 *       (which means unused) or when a transfer ends. Reset the
 *       timer when a xfer is queued. If the timer expires, release
 *       the rpipe [see rpipe_ep_disable()].
 *
 *   (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *       when none are found go over the list, check their endpoint
 *       and their activity record (if no last-xfer-done-ts in the
 *       last x seconds) and take it.
 *
 *   However, due to the fact that we have a set of limited
 *   resources (max-segments-at-the-same-time per xfer,
 *   xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
 *   we are going to have to rebuild all this based on a scheduler,
 *   where we have a list of transactions to do and, based on the
 *   availability of the different required components (blocks,
 *   rpipes, segment slots, etc), we schedule them. Painful.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
	WA_SEGS_MAX = 255,
};

enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};

static void wa_xfer_delayed_run(struct wa_rpipe *);

/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
	struct urb tr_urb;		/* transfer request urb. */
	struct urb *isoc_pack_desc_urb;	/* for isoc packet descriptor. */
	struct urb *dto_urb;		/* for data output. */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
};

static inline void wa_seg_init(struct wa_seg *seg)
{
	usb_init_urb(&seg->tr_urb);

	/* set the remaining memory to 0. */
	memset(((void *)seg) + sizeof(seg->tr_urb), 0,
		sizeof(*seg) - sizeof(seg->tr_urb));
}

/*
 * Protected by xfer->lock
 */
struct wa_xfer {
	struct kref refcnt;
	struct list_head list_node;
	spinlock_t lock;
	u32 id;

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};

static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}

/*
 * Destroy a transfer structure
 *
 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			struct wa_seg *seg = xfer->seg[cnt];
			if (seg) {
				usb_free_urb(seg->isoc_pack_desc_urb);
				if (seg->dto_urb) {
					kfree(seg->dto_urb->sg);
					usb_free_urb(seg->dto_urb);
				}
				usb_free_urb(&seg->tr_urb);
			}
		}
		kfree(xfer->seg);
	}
	kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	if (xfer->wusb_dev)
		wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}

/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA... wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/* Return the xfer's ID. */
static inline u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}

/* Return the xfer's ID in transport format (little endian). */
static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
{
	return cpu_to_le32(xfer->id);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
					xfer, wa_xfer_id(xfer), cnt,
					seg->result);
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (!(usb_pipeisoc(xfer->urb->pipe))
				&& seg->result < xfer->seg_size
				&& cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, wa_xfer_id(xfer), seg->index, found_short,
				seg->result, urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08zX)\n",
				xfer, wa_xfer_id(xfer), seg->index, seg->result,
				seg->result);
			goto out;
		case WA_SEG_ABORTED:
			dev_dbg(dev, "xfer %p ID %08X#%u ABORTED: result %d\n",
				xfer, wa_xfer_id(xfer), seg->index,
				urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
				xfer, wa_xfer_id(xfer), cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}

/*
 * Search for a transfer with the given ID on the wire adapter's
 * transfer list and take a reference on it if found.
 *
 * The ID is the sequential number assigned by wa_xfer_id_init(), not a
 * pointer or a hash of one.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			goto out;
		}
	}
	xfer_itr = NULL;
out:
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer_itr;
}

struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}

/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but free up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 */
static int __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result = -ENOMEM;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id_le32(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return result;				/* callback frees! */

error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return result;

}

/*
 *
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (usb_pipeout(urb->pipe)) {
			*pxfer_type = WA_XFER_TYPE_ISO;
			result = sizeof(struct wa_xfer_hwaiso);
		} else {
			dev_err(dev, "FIXME: ISOC IN not implemented\n");
			result = -ENOSYS;
			goto error;
		}
		break;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;

	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
		xfer->seg_size = maxpktsize;
		xfer->segs = urb->number_of_packets;
	} else {
		xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
			* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
		/* Compute the segment size and make sure it is a multiple of
		 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
		 * a check (FIXME) */
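		/*
		 * Worked example with made-up descriptor values (purely
		 * illustrative, not taken from any real device): with
		 * wBlocks = 4 and bRPipeBlockSize = 7, the raw seg_size is
		 * 4 * (1 << 6) = 256 bytes; with maxpktsize = 64 it stays
		 * 256 after rounding down to a multiple of maxpktsize, and
		 * a 1000 byte URB then needs DIV_ROUND_UP(1000, 256) = 4
		 * segments.
		 */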
		if (xfer->seg_size < maxpktsize) {
			dev_err(dev,
				"HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
				xfer->seg_size, maxpktsize);
			result = -EINVAL;
			goto error;
		}
		xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
		xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
					xfer->seg_size);
		if (xfer->segs >= WA_SEGS_MAX) {
			dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
				(urb->transfer_buffer_length/xfer->seg_size),
				WA_SEGS_MAX);
			result = -EINVAL;
			goto error;
		}
		if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
			xfer->segs = 1;
	}
error:
	return result;
}

/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
			sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO: {
		struct wa_xfer_hwaiso *xfer_iso =
			container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
		struct wa_xfer_packet_info_hwaiso *packet_desc =
			((void *)xfer_iso) + xfer_hdr_size;
		struct usb_iso_packet_descriptor *iso_frame_desc =
			&(xfer->urb->iso_frame_desc[0]);
		/* populate the isoc section of the transfer request. */
		xfer_iso->dwNumOfPackets = cpu_to_le32(1);
		/*
		 * populate isoc packet descriptor. This assumes 1
		 * packet per segment.
		 */
		packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
			sizeof(packet_desc->PacketLength[0]));
		packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
		packet_desc->PacketLength[0] =
			cpu_to_le16(iso_frame_desc->length);
		break;
	}
	default:
		BUG();
	};
}

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
			xfer, seg->index, urb->actual_length);
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
			xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Callback for the isoc packet descriptor phase of the segment request
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_iso_pack_desc_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "iso xfer %p#%u: packet descriptor done\n",
			xfer, seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		pr_err_ratelimited("iso xfer %p#%u: packet descriptor error %d\n",
			xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			usb_unlink_urb(seg->dto_urb);
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access the transfer until we are sure that it hasn't
 * been cancelled (ECONNRESET, ENOENT), because then seg->xfer could
 * already be gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is not inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_tr_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
			xfer, wa_xfer_id(xfer), seg->index);
		if (xfer->is_inbound &&
			seg->status < WA_SEG_PENDING &&
			!(usb_pipeisoc(xfer->urb->pipe)))
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
				xfer, wa_xfer_id(xfer), seg->index,
				urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->isoc_pack_desc_urb);
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Allocate an SG list to store bytes_to_transfer bytes and copy the
 * subset of the in_sg that matches the buffer subset
 * we are about to transfer.
 */
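/*
 * Illustrative example with hypothetical sizes (not from the spec or a
 * real device): if in_sg maps three full 4 KiB pages, and 6144 bytes
 * have already been transferred while 4096 are to be sent now, the
 * subset list starts 2048 bytes into the second page and continues
 * 2048 bytes into the third; nents is first over-estimated from
 * PAGE_SIZE and then corrected while the pages are copied over.
 */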
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
	const unsigned int bytes_transferred,
	const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
{
	struct scatterlist *out_sg;
	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
		nents;
	struct scatterlist *current_xfer_sg = in_sg;
	struct scatterlist *current_seg_sg, *last_seg_sg;

	/* skip previously transferred pages. */
	while ((current_xfer_sg) &&
			(bytes_processed < bytes_transferred)) {
		bytes_processed += current_xfer_sg->length;

		/* advance the sg if current segment starts on or past the
			next page. */
		if (bytes_processed <= bytes_transferred)
			current_xfer_sg = sg_next(current_xfer_sg);
	}

	/* the data for the current segment starts in current_xfer_sg.
		calculate the offset. */
	if (bytes_processed > bytes_transferred) {
		offset_into_current_page_data = current_xfer_sg->length -
			(bytes_processed - bytes_transferred);
	}

	/* calculate the number of pages needed by this segment. */
	nents = DIV_ROUND_UP((bytes_to_transfer +
		offset_into_current_page_data +
		current_xfer_sg->offset),
		PAGE_SIZE);

	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
	if (out_sg) {
		sg_init_table(out_sg, nents);

		/* copy the portion of the incoming SG that correlates to the
		 * data to be transferred by this segment to the segment SG. */
		last_seg_sg = current_seg_sg = out_sg;
		bytes_processed = 0;

		/* reset nents and calculate the actual number of sg entries
			needed. */
		nents = 0;
		while ((bytes_processed < bytes_to_transfer) &&
				current_seg_sg && current_xfer_sg) {
			unsigned int page_len = min((current_xfer_sg->length -
				offset_into_current_page_data),
				(bytes_to_transfer - bytes_processed));

			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
				page_len,
				current_xfer_sg->offset +
				offset_into_current_page_data);

			bytes_processed += page_len;

			last_seg_sg = current_seg_sg;
			current_seg_sg = sg_next(current_seg_sg);
			current_xfer_sg = sg_next(current_xfer_sg);

			/* only the first page may require additional offset. */
			offset_into_current_page_data = 0;
			nents++;
		}

		/* update num_sgs and terminate the list since we may have
		 * concatenated pages. */
		sg_mark_end(last_seg_sg);
		*out_num_sgs = nents;
	}

	return out_sg;
}

/*
 * Populate DMA buffer info for the isoc dto urb.
 */
static void __wa_populate_dto_urb_iso(struct wa_xfer *xfer,
	struct wa_seg *seg, int curr_iso_frame)
{
	/*
	 * dto urb buffer address and size pulled from
	 * iso_frame_desc.
	 */
	seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
		xfer->urb->iso_frame_desc[curr_iso_frame].offset;
	seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	seg->dto_urb->sg = NULL;
	seg->dto_urb->num_sgs = 0;
	seg->dto_urb->transfer_buffer_length =
		xfer->urb->iso_frame_desc[curr_iso_frame].length;
}

/*
 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
 */
static int __wa_populate_dto_urb(struct wa_xfer *xfer,
	struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
{
	int result = 0;

	if (xfer->is_dma) {
		seg->dto_urb->transfer_dma =
			xfer->urb->transfer_dma + buf_itr_offset;
		seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		seg->dto_urb->sg = NULL;
		seg->dto_urb->num_sgs = 0;
	} else {
		/* do buffer or SG processing. */
		seg->dto_urb->transfer_flags &=
			~URB_NO_TRANSFER_DMA_MAP;
		/* this should always be 0 before a resubmit. */
		seg->dto_urb->num_mapped_sgs = 0;

		if (xfer->urb->transfer_buffer) {
			seg->dto_urb->transfer_buffer =
				xfer->urb->transfer_buffer +
				buf_itr_offset;
			seg->dto_urb->sg = NULL;
			seg->dto_urb->num_sgs = 0;
		} else {
			seg->dto_urb->transfer_buffer = NULL;

			/*
			 * allocate an SG list to store seg_size bytes
			 * and copy the subset of the xfer->urb->sg that
			 * matches the buffer subset we are about to
			 * read.
			 */
			seg->dto_urb->sg = wa_xfer_create_subset_sg(
				xfer->urb->sg,
				buf_itr_offset, buf_itr_size,
				&(seg->dto_urb->num_sgs));
			if (!(seg->dto_urb->sg))
				result = -ENOMEM;
		}
	}
	seg->dto_urb->transfer_buffer_length = buf_itr_size;

	return result;
}

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size, iso_pkt_descr_size = 0;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;

	if (usb_pipeisoc(xfer->urb->pipe)) {
		/*
		 * This calculation assumes one isoc packet per xfer segment.
		 * It will need to be updated if this changes.
		 */
		iso_pkt_descr_size = sizeof(struct wa_xfer_packet_info_hwaiso) +
			sizeof(__le16);
		alloc_size += iso_pkt_descr_size;
	}

	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kmalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				&seg->xfer_hdr, xfer_hdr_size,
				wa_seg_tr_cb, seg);
		buf_itr_size = min(buf_size, xfer->seg_size);
		if (xfer->is_inbound == 0 && buf_size > 0) {
			/* outbound data. */
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);

			if (usb_pipeisoc(xfer->urb->pipe)) {
				/* iso packet descriptor. */
				seg->isoc_pack_desc_urb =
					usb_alloc_urb(0, GFP_ATOMIC);
				if (seg->isoc_pack_desc_urb == NULL)
					goto error_iso_pack_desc_alloc;
				/*
				 * The buffer for the isoc packet descriptor
				 * is placed after the transfer request header
				 * in the segment object memory buffer.
				 */
				usb_fill_bulk_urb(
					seg->isoc_pack_desc_urb, usb_dev,
					usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
					(void *)(&seg->xfer_hdr) +
						xfer_hdr_size,
					iso_pkt_descr_size,
					wa_seg_iso_pack_desc_cb, seg);

				/* fill in the xfer buffer information. */
				__wa_populate_dto_urb_iso(xfer, seg, cnt);
			} else {
				/* fill in the xfer buffer information. */
				result = __wa_populate_dto_urb(xfer, seg,
							buf_itr, buf_itr_size);
				if (result < 0)
					goto error_seg_outbound_populate;

				buf_itr += buf_itr_size;
				buf_size -= buf_itr_size;
			}
		}
		seg->status = WA_SEG_READY;
	}
	return 0;

	/*
	 * Free the memory for the current segment which failed to init.
	 * Use the fact that cnt is left at where it failed. The remaining
	 * segments will be cleaned up by wa_xfer_destroy.
	 */
error_iso_pack_desc_alloc:
error_seg_outbound_populate:
	usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
	kfree(xfer->seg[cnt]);
	xfer->seg[cnt] = NULL;
error_seg_kmalloc:
error_segs_kzalloc:
	return result;
}

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer into a list of segments; each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 * need to do two for loops when we could run everything in a
 * single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	if (xfer_type == WA_XFER_TYPE_ISO) {
		xfer_hdr0->dwTransferLength =
			cpu_to_le32(xfer->urb->iso_frame_desc[0].length);
		for (cnt = 1; cnt < xfer->segs; cnt++) {
			struct usb_iso_packet_descriptor *iso_frame_desc =
				&(xfer->urb->iso_frame_desc[cnt]);
			struct wa_xfer_packet_info_hwaiso *packet_desc;

			xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
			packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
			/*
			 * Copy values from the 0th header and isoc packet
			 * descriptor. Segment specific values are set below.
			 */
			memcpy(xfer_hdr, xfer_hdr0,
				xfer_hdr_size + sizeof(*packet_desc));
			xfer_hdr->bTransferSegment = cnt;
			xfer_hdr->dwTransferLength =
				cpu_to_le32(iso_frame_desc->length);
			/* populate isoc packet descriptor length. */
			packet_desc->PacketLength[0] =
				cpu_to_le16(iso_frame_desc->length);

			xfer->seg[cnt]->status = WA_SEG_READY;
		}
	} else {
		transfer_size = urb->transfer_buffer_length;
		xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size) :
			cpu_to_le32(transfer_size);
		transfer_size -= xfer->seg_size;
		for (cnt = 1; cnt < xfer->segs; cnt++) {
			xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
			xfer_hdr->bTransferSegment = cnt;
			xfer_hdr->dwTransferLength =
				transfer_size > xfer->seg_size ?
				cpu_to_le32(xfer->seg_size)
				: cpu_to_le32(transfer_size);
			xfer->seg[cnt]->status = WA_SEG_READY;
			transfer_size -= xfer->seg_size;
		}
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}

/*
 * Submit a segment's URBs to the wire adapter.
 *
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)
{
	int result;
	/* submit the transfer request. */
	result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
	if (result < 0) {
		pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
			__func__, xfer, seg->index, result);
		goto error_seg_submit;
	}
	/* submit the isoc packet descriptor if present. */
	if (seg->isoc_pack_desc_urb) {
		result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
		if (result < 0) {
			pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
				__func__, xfer, seg->index, result);
			goto error_iso_pack_desc_submit;
		}
	}
	/* submit the out data if this is an out request. */
	if (seg->dto_urb) {
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
				__func__, xfer, seg->index, result);
			goto error_dto_submit;
		}
	}
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(seg->isoc_pack_desc_urb);
error_iso_pack_desc_submit:
	usb_unlink_urb(&seg->tr_urb);
error_seg_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	return result;
}

/*
 * Execute more queued request segments until the maximum concurrent allowed.
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int result;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
	      && !list_empty(&rpipe->seg_list)) {
		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
				 list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg);
		dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
			xfer, wa_xfer_id(xfer), seg->index,
			atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}

/*
 *
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u (%s)\n",
			xfer, wa_xfer_id(xfer), cnt, available, empty,
			available == 0 || !empty ? "delayed" : "submitted");
		if (available == 0 || !empty) {
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		} else {
			result = __wa_seg_submit(rpipe, xfer, seg);
			if (result < 0) {
				__wa_xfer_abort(xfer);
				goto error_seg_submit;
			}
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	return result;
}

/*
 * Second part of a URB/transfer enqueueing
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted if
 *		delayed == 0
 * xfer->urb	filled and refcounted (this is the case when called
 *		from wa_urb_enqueue() as we come from usb_submit_urb()
 *		and when called by wa_urb_enqueue_run(), as we took an
 *		extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static int wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0) {
		pr_err("%s: error_rpipe_get\n", __func__);
		goto error_rpipe_get;
	}
	result = -ENODEV;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		pr_err("%s: error usb dev gone\n", __func__);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		pr_err("%s: error wusb dev gone\n", __func__);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS) {
		pr_err("%s: error_dequeued\n", __func__);
		goto error_dequeued;
	}

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0) {
		pr_err("%s: error_xfer_setup\n", __func__);
		goto error_xfer_setup;
	}
	result = __wa_xfer_submit(xfer);
	if (result < 0) {
		pr_err("%s: error_xfer_submit\n", __func__);
		goto error_xfer_submit;
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	return 0;

	/*
	 * This is basically wa_xfer_completion() broken up:
	 * wa_xfer_giveback() does a wa_xfer_put() that will call
	 * wa_xfer_destroy() and undo setup().
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	if (wusb_dev)
		wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	return result;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	/* return success since the completion routine will run. */
	return 0;
}

/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we move the delayed list to a separate list while locked and then
 * submit them without the list lock held.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;
	LIST_HEAD(tmp_list);

	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
			wa->xfer_delayed_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * enqueue from temp list without list lock held since wa_urb_enqueue_b
	 * can take xfer->lock as well as lock mutexes.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		list_del_init(&xfer->list_node);

		urb = xfer->urb;
		if (wa_urb_enqueue_b(xfer) < 0)
			wa_xfer_giveback(xfer);
		usb_put_urb(urb);	/* taken when queuing */
	}
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Process the errored transfers on the Wire Adapter outside of interrupt.
 */
void wa_process_errored_transfers_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
	struct wa_xfer *xfer, *next;
	LIST_HEAD(tmp_list);

	pr_info("%s: Run delayed STALL processing.\n", __func__);

	/* Create a copy of the wa->xfer_errored_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_errored_list,
			wa->xfer_errored_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * run rpipe_clear_feature_stalled from temp list without list lock
	 * held.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		struct usb_host_endpoint *ep;
		unsigned long flags;
		struct wa_rpipe *rpipe;

		spin_lock_irqsave(&xfer->lock, flags);
		ep = xfer->ep;
		rpipe = ep->hcpriv;
		spin_unlock_irqrestore(&xfer->lock, flags);

		/* clear RPIPE feature stalled without holding a lock. */
		rpipe_clear_feature_stalled(wa, ep);

		/* complete the xfer. This removes it from the tmp list. */
		wa_xfer_completion(xfer);

		/* check for work. */
		wa_xfer_delayed_run(rpipe);
	}
}
EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1414
1415/*
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001416 * Submit a transfer to the Wire Adapter in a delayed way
1417 *
 * The process of enqueuing can sleep [see wa_urb_enqueue_b(), which
 * calls rpipe_get() and mutex_lock()]. If we are in an atomic context,
 * we defer the wa_urb_enqueue_b() call to a workqueue; otherwise we
 * call it directly.
 *
 * @urb: We own a reference to it, taken by the Linux USB HCD stack,
 *       that will be given up by calling usb_hcd_giveback_urb() or by
 *       returning an error from this function, so we don't have to
 *       refcount it ourselves.
1426 */
1427int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1428 struct urb *urb, gfp_t gfp)
1429{
1430 int result;
1431 struct device *dev = &wa->usb_iface->dev;
1432 struct wa_xfer *xfer;
1433 unsigned long my_flags;
1434 unsigned cant_sleep = irqs_disabled() | in_atomic();
1435
Thomas Pugliese2b81c082013-06-11 10:39:31 -05001436 if ((urb->transfer_buffer == NULL)
1437 && (urb->sg == NULL)
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001438 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1439 && urb->transfer_buffer_length != 0) {
1440 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1441 dump_stack();
1442 }
1443
1444 result = -ENOMEM;
1445 xfer = kzalloc(sizeof(*xfer), gfp);
1446 if (xfer == NULL)
1447 goto error_kmalloc;
1448
1449 result = -ENOENT;
1450 if (urb->status != -EINPROGRESS) /* cancelled */
1451 goto error_dequeued; /* before starting? */
1452 wa_xfer_init(xfer);
1453 xfer->wa = wa_get(wa);
1454 xfer->urb = urb;
1455 xfer->gfp = gfp;
1456 xfer->ep = ep;
1457 urb->hcpriv = xfer;
David Vrabelbce83692008-12-22 18:22:50 +00001458
1459 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1460 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1461 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1462 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1463 cant_sleep ? "deferred" : "inline");
1464
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001465 if (cant_sleep) {
1466 usb_get_urb(urb);
1467 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1468 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1469 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
Thomas Pugliese6d33f7b2013-08-15 12:21:30 -05001470 queue_work(wusbd, &wa->xfer_enqueue_work);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001471 } else {
Thomas Pugliese33186c42013-10-01 10:14:56 -05001472 result = wa_urb_enqueue_b(xfer);
1473 if (result < 0) {
1474 /*
1475 * URB submit/enqueue failed. Clean up, return an
1476 * error and do not run the callback. This avoids
1477 * an infinite submit/complete loop.
1478 */
1479 dev_err(dev, "%s: URB enqueue failed: %d\n",
1480 __func__, result);
1481 wa_put(xfer->wa);
1482 wa_xfer_put(xfer);
1483 return result;
1484 }
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001485 }
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001486 return 0;
1487
1488error_dequeued:
1489 kfree(xfer);
1490error_kmalloc:
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001491 return result;
1492}
1493EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1494
1495/*
 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it needs
 * to be dequeued with a completion call; when it is stuck in the
 * delayed list or wa_xfer_setup() has not run yet, we have to do the
 * completion ourselves.
 *
 * not setup: if there is no hcpriv yet, the enqueue has not had time
 *            to set the xfer up. urb->status will be something other
 *            than -EINPROGRESS, so enqueue() will catch that and bail
 *            out.
 *
 * If the transfer has gone through setup, we just need to clean it up.
 * If it has gone through submit(), we have to abort it [with an asynch
 * request] and then make sure we cancel each segment.
1511 *
1512 */
1513int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1514{
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001515 unsigned long flags, flags2;
1516 struct wa_xfer *xfer;
1517 struct wa_seg *seg;
1518 struct wa_rpipe *rpipe;
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001519 unsigned cnt, done = 0, xfer_abort_pending;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001520 unsigned rpipe_ready = 0;
1521
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001522 xfer = urb->hcpriv;
1523 if (xfer == NULL) {
		/*
		 * Nothing has been set up yet; enqueue() will see
		 * urb->status != -EINPROGRESS (set by the HCD layer) and
		 * bail out with an error, so there is no need to do a
		 * completion here.
		 */
1529 BUG_ON(urb->status == -EINPROGRESS);
1530 goto out;
1531 }
1532 spin_lock_irqsave(&xfer->lock, flags);
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001533 pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001534 rpipe = xfer->ep->hcpriv;
Thomas Puglieseec58fad2013-08-09 09:52:13 -05001535 if (rpipe == NULL) {
		pr_debug("%s: xfer id 0x%08X has no RPIPE. Probably already aborted.\n",
			__func__, wa_xfer_id(xfer));
1539 goto out_unlock;
1540 }
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001541 /* Check the delayed list -> if there, release and complete */
1542 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1543 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1544 goto dequeue_delayed;
1545 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached setup() */
		goto out_unlock;	/* enqueue_b() will do the completion */
1548 /* Ok, the xfer is in flight already, it's been setup and submitted.*/
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001549 xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001550 for (cnt = 0; cnt < xfer->segs; cnt++) {
1551 seg = xfer->seg[cnt];
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001552 pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1553 __func__, wa_xfer_id(xfer), cnt, seg->status);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001554 switch (seg->status) {
1555 case WA_SEG_NOTREADY:
1556 case WA_SEG_READY:
1557 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1558 xfer, cnt, seg->status);
1559 WARN_ON(1);
1560 break;
1561 case WA_SEG_DELAYED:
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001562 /*
1563 * delete from rpipe delayed list. If no segments on
1564 * this xfer have been submitted, __wa_xfer_is_done will
1565 * trigger a giveback below. Otherwise, the submitted
1566 * segments will be completed in the DTI interrupt.
1567 */
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001568 seg->status = WA_SEG_ABORTED;
1569 spin_lock_irqsave(&rpipe->seg_lock, flags2);
1570 list_del(&seg->list_node);
1571 xfer->segs_done++;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001572 spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1573 break;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001574 case WA_SEG_DONE:
1575 case WA_SEG_ERROR:
1576 case WA_SEG_ABORTED:
1577 break;
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001578 /*
1579 * In the states below, the HWA device already knows
1580 * about the transfer. If an abort request was sent,
1581 * allow the HWA to process it and wait for the
1582 * results. Otherwise, the DTI state and seg completed
1583 * counts can get out of sync.
1584 */
1585 case WA_SEG_SUBMITTED:
1586 case WA_SEG_PENDING:
1587 case WA_SEG_DTI_PENDING:
1588 /*
1589 * Check if the abort was successfully sent. This could
1590 * be false if the HWA has been removed but we haven't
1591 * gotten the disconnect notification yet.
1592 */
1593 if (!xfer_abort_pending) {
1594 seg->status = WA_SEG_ABORTED;
1595 rpipe_ready = rpipe_avail_inc(rpipe);
1596 xfer->segs_done++;
1597 }
1598 break;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001599 }
1600 }
1601 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001602 done = __wa_xfer_is_done(xfer);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001603 spin_unlock_irqrestore(&xfer->lock, flags);
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001604 if (done)
1605 wa_xfer_completion(xfer);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001606 if (rpipe_ready)
1607 wa_xfer_delayed_run(rpipe);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001608 return 0;
1609
1610out_unlock:
1611 spin_unlock_irqrestore(&xfer->lock, flags);
1612out:
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001613 return 0;
1614
1615dequeue_delayed:
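	/*
	 * The xfer was still sitting on the delayed list: give it back
	 * now and drop the URB reference that was taken in enqueue().
	 */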
1616 list_del_init(&xfer->list_node);
1617 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1618 xfer->result = urb->status;
1619 spin_unlock_irqrestore(&xfer->lock, flags);
1620 wa_xfer_giveback(xfer);
1621 usb_put_urb(urb); /* we got a ref in enqueue() */
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001622 return 0;
1623}
1624EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1625
1626/*
1627 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1628 * codes
1629 *
1630 * Positive errno values are internal inconsistencies and should be
1631 * flagged louder. Negative are to be passed up to the user in the
1632 * normal way.
1633 *
1634 * @status: USB WA status code -- high two bits are stripped.
1635 */
1636static int wa_xfer_status_to_errno(u8 status)
1637{
1638 int errno;
1639 u8 real_status = status;
1640 static int xlat[] = {
1641 [WA_XFER_STATUS_SUCCESS] = 0,
1642 [WA_XFER_STATUS_HALTED] = -EPIPE,
1643 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
1644 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
1645 [WA_XFER_RESERVED] = EINVAL,
1646 [WA_XFER_STATUS_NOT_FOUND] = 0,
1647 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1648 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
1649 [WA_XFER_STATUS_ABORTED] = -EINTR,
1650 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
1651 [WA_XFER_INVALID_FORMAT] = EINVAL,
1652 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
1653 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
1654 };
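	/*
	 * e.g. WA_XFER_STATUS_HALTED maps to -EPIPE and is passed up
	 * as-is, while reserved/invalid codes map to +EINVAL so they are
	 * warned about below and then negated before being returned.
	 */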
1655 status &= 0x3f;
1656
1657 if (status == 0)
1658 return 0;
1659 if (status >= ARRAY_SIZE(xlat)) {
Manuel Zerpies9708cd22011-06-16 14:15:16 +02001660 printk_ratelimited(KERN_ERR "%s(): BUG? "
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001661 "Unknown WA transfer status 0x%02x\n",
1662 __func__, real_status);
1663 return -EINVAL;
1664 }
1665 errno = xlat[status];
1666 if (unlikely(errno > 0)) {
Manuel Zerpies9708cd22011-06-16 14:15:16 +02001667 printk_ratelimited(KERN_ERR "%s(): BUG? "
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001668 "Inconsistent WA status: 0x%02x\n",
1669 __func__, real_status);
1670 errno = -errno;
1671 }
1672 return errno;
1673}
1674
1675/*
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001676 * If a last segment flag and/or a transfer result error is encountered,
1677 * no other segment transfer results will be returned from the device.
 * Mark the remaining submitted or pending segments as completed so
 * that the xfer will complete cleanly.
1680 */
1681static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
1682 struct wa_seg *incoming_seg)
1683{
1684 int index;
1685 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1686
1687 for (index = incoming_seg->index + 1; index < xfer->segs_submitted;
1688 index++) {
1689 struct wa_seg *current_seg = xfer->seg[index];
1690
1691 BUG_ON(current_seg == NULL);
1692
1693 switch (current_seg->status) {
1694 case WA_SEG_SUBMITTED:
1695 case WA_SEG_PENDING:
1696 case WA_SEG_DTI_PENDING:
1697 rpipe_avail_inc(rpipe);
1698 /*
1699 * do not increment RPIPE avail for the WA_SEG_DELAYED case
1700 * since it has not been submitted to the RPIPE.
1701 */
1702 case WA_SEG_DELAYED:
1703 xfer->segs_done++;
1704 current_seg->status = incoming_seg->status;
1705 break;
1706 case WA_SEG_ABORTED:
1707 break;
1708 default:
1709 WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
1710 __func__, wa_xfer_id(xfer), index,
1711 current_seg->status);
1712 break;
1713 }
1714 }
1715}
1716
1717/*
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001718 * Process a xfer result completion message
1719 *
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001720 * inbound transfers: need to schedule a buf_in_urb read
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001721 *
 * FIXME: this function needs to be broken up into parts
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001723 */
Thomas Pugliese0367eef2013-09-26 10:49:41 -05001724static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
1725 struct wa_xfer_result *xfer_result)
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001726{
1727 int result;
1728 struct device *dev = &wa->usb_iface->dev;
1729 unsigned long flags;
1730 u8 seg_idx;
1731 struct wa_seg *seg;
1732 struct wa_rpipe *rpipe;
Thomas Pugliese0367eef2013-09-26 10:49:41 -05001733 unsigned done = 0;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001734 u8 usb_status;
1735 unsigned rpipe_ready = 0;
1736
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001737 spin_lock_irqsave(&xfer->lock, flags);
1738 seg_idx = xfer_result->bTransferSegment & 0x7f;
1739 if (unlikely(seg_idx >= xfer->segs))
1740 goto error_bad_seg;
1741 seg = xfer->seg[seg_idx];
1742 rpipe = xfer->ep->hcpriv;
1743 usb_status = xfer_result->bTransferStatus;
Thomas Puglieseb9c84be2013-09-27 15:33:36 -05001744 dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
1745 xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001746 if (seg->status == WA_SEG_ABORTED
1747 || seg->status == WA_SEG_ERROR) /* already handled */
1748 goto segment_aborted;
1749 if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */
1750 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
1751 if (seg->status != WA_SEG_PENDING) {
1752 if (printk_ratelimit())
1753 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1754 xfer, seg_idx, seg->status);
1755 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
1756 }
1757 if (usb_status & 0x80) {
1758 seg->result = wa_xfer_status_to_errno(usb_status);
Thomas Pugliese2b81c082013-06-11 10:39:31 -05001759 dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
1760 xfer, xfer->id, seg->index, usb_status);
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001761 seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
1762 WA_SEG_ABORTED : WA_SEG_ERROR;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001763 goto error_complete;
1764 }
1765 /* FIXME: we ignore warnings, tally them for stats */
1766 if (usb_status & 0x40) /* Warning?... */
1767 usb_status = 0; /* ... pass */
Thomas Pugliese7a32d9b2013-10-04 10:40:45 -05001768 if (usb_pipeisoc(xfer->urb->pipe)) {
1769 /* set up WA state to read the isoc packet status next. */
1770 wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
1771 wa->dti_isoc_xfer_seg = seg_idx;
1772 wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
1773 } else if (xfer->is_inbound) { /* IN data phase: read to buffer */
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001774 seg->status = WA_SEG_DTI_PENDING;
1775 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
Thomas Pugliese2b81c082013-06-11 10:39:31 -05001776 /* this should always be 0 before a resubmit. */
1777 wa->buf_in_urb->num_mapped_sgs = 0;
1778
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001779 if (xfer->is_dma) {
1780 wa->buf_in_urb->transfer_dma =
1781 xfer->urb->transfer_dma
Thomas Pugliese2b81c082013-06-11 10:39:31 -05001782 + (seg_idx * xfer->seg_size);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001783 wa->buf_in_urb->transfer_flags
1784 |= URB_NO_TRANSFER_DMA_MAP;
Thomas Pugliese2b81c082013-06-11 10:39:31 -05001785 wa->buf_in_urb->transfer_buffer = NULL;
1786 wa->buf_in_urb->sg = NULL;
1787 wa->buf_in_urb->num_sgs = 0;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001788 } else {
Thomas Pugliese2b81c082013-06-11 10:39:31 -05001789 /* do buffer or SG processing. */
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001790 wa->buf_in_urb->transfer_flags
1791 &= ~URB_NO_TRANSFER_DMA_MAP;
Thomas Pugliese2b81c082013-06-11 10:39:31 -05001792
1793 if (xfer->urb->transfer_buffer) {
1794 wa->buf_in_urb->transfer_buffer =
1795 xfer->urb->transfer_buffer
1796 + (seg_idx * xfer->seg_size);
1797 wa->buf_in_urb->sg = NULL;
1798 wa->buf_in_urb->num_sgs = 0;
1799 } else {
				/*
				 * Allocate an SG list to store seg_size
				 * bytes and copy the subset of the
				 * xfer->urb->sg that matches the buffer
				 * subset we are about to read.
				 */
1804 wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
1805 xfer->urb->sg,
1806 seg_idx * xfer->seg_size,
1807 le32_to_cpu(
1808 xfer_result->dwTransferLength),
1809 &(wa->buf_in_urb->num_sgs));
1810
1811 if (!(wa->buf_in_urb->sg)) {
1812 wa->buf_in_urb->num_sgs = 0;
1813 goto error_sg_alloc;
1814 }
1815 wa->buf_in_urb->transfer_buffer = NULL;
1816 }
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001817 }
1818 wa->buf_in_urb->transfer_buffer_length =
1819 le32_to_cpu(xfer_result->dwTransferLength);
1820 wa->buf_in_urb->context = seg;
1821 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1822 if (result < 0)
1823 goto error_submit_buf_in;
1824 } else {
1825 /* OUT data phase, complete it -- */
1826 seg->status = WA_SEG_DONE;
1827 seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1828 xfer->segs_done++;
1829 rpipe_ready = rpipe_avail_inc(rpipe);
1830 done = __wa_xfer_is_done(xfer);
1831 }
1832 spin_unlock_irqrestore(&xfer->lock, flags);
1833 if (done)
1834 wa_xfer_completion(xfer);
1835 if (rpipe_ready)
1836 wa_xfer_delayed_run(rpipe);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001837 return;
1838
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001839error_submit_buf_in:
1840 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1841 dev_err(dev, "DTI: URB max acceptable errors "
1842 "exceeded, resetting device\n");
1843 wa_reset_all(wa);
1844 }
1845 if (printk_ratelimit())
1846 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1847 xfer, seg_idx, result);
1848 seg->result = result;
Thomas Pugliese2b81c082013-06-11 10:39:31 -05001849 kfree(wa->buf_in_urb->sg);
Thomas Pugliese67414482013-09-26 14:08:16 -05001850 wa->buf_in_urb->sg = NULL;
Thomas Pugliese2b81c082013-06-11 10:39:31 -05001851error_sg_alloc:
Thomas Pugliese6d33f7b2013-08-15 12:21:30 -05001852 __wa_xfer_abort(xfer);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001853 seg->status = WA_SEG_ERROR;
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001854error_complete:
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001855 xfer->segs_done++;
1856 rpipe_ready = rpipe_avail_inc(rpipe);
Thomas Pugliese14e1d2d2013-09-30 15:58:24 -05001857 wa_complete_remaining_xfer_segs(xfer, seg);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001858 done = __wa_xfer_is_done(xfer);
Thomas Pugliese6d33f7b2013-08-15 12:21:30 -05001859 /*
1860 * queue work item to clear STALL for control endpoints.
1861 * Otherwise, let endpoint_reset take care of it.
1862 */
1863 if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
1864 usb_endpoint_xfer_control(&xfer->ep->desc) &&
1865 done) {
1866
1867 dev_info(dev, "Control EP stall. Queue delayed work.\n");
1868 spin_lock_irq(&wa->xfer_list_lock);
Wei Yongjun8eb41292013-09-23 14:16:22 +08001869 /* move xfer from xfer_list to xfer_errored_list. */
1870 list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
Thomas Pugliese6d33f7b2013-08-15 12:21:30 -05001871 spin_unlock_irq(&wa->xfer_list_lock);
1872 spin_unlock_irqrestore(&xfer->lock, flags);
1873 queue_work(wusbd, &wa->xfer_error_work);
1874 } else {
1875 spin_unlock_irqrestore(&xfer->lock, flags);
1876 if (done)
1877 wa_xfer_completion(xfer);
1878 if (rpipe_ready)
1879 wa_xfer_delayed_run(rpipe);
1880 }
1881
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001882 return;
1883
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001884error_bad_seg:
1885 spin_unlock_irqrestore(&xfer->lock, flags);
1886 wa_urb_dequeue(wa, xfer->urb);
1887 if (printk_ratelimit())
1888 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1889 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1890 dev_err(dev, "DTI: URB max acceptable errors "
1891 "exceeded, resetting device\n");
1892 wa_reset_all(wa);
1893 }
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001894 return;
1895
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001896segment_aborted:
1897 /* nothing to do, as the aborter did the completion */
1898 spin_unlock_irqrestore(&xfer->lock, flags);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001899}
1900
1901/*
 * Process an isochronous packet status message
1903 *
1904 * inbound transfers: need to schedule a buf_in_urb read
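 *
 * The message is validated, the xfer noted in wa->dti_isoc_xfer_in_progress
 * is looked up, the per-packet status and length are stored in the URB's
 * iso_frame_desc entry, and the DTI is switched back to transfer-result
 * mode.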
1905 */
1906static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
1907{
1908 struct device *dev = &wa->usb_iface->dev;
1909 struct wa_xfer_packet_status_hwaiso *packet_status;
1910 struct wa_xfer *xfer;
1911 unsigned long flags;
1912 struct wa_seg *seg;
1913 struct wa_rpipe *rpipe;
1914 unsigned done = 0;
1915 unsigned rpipe_ready = 0;
1916 const int expected_size = sizeof(*packet_status) +
1917 sizeof(packet_status->PacketStatus[0]);
1918
1919 /* We have a xfer result buffer; check it */
1920 dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
1921 urb->actual_length, urb->transfer_buffer);
1922 if (urb->actual_length != expected_size) {
		dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
			urb->actual_length, expected_size);
1925 goto error_parse_buffer;
1926 }
1927 packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
1928 if (le16_to_cpu(packet_status->wLength) != expected_size) {
1929 dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
1930 le16_to_cpu(packet_status->wLength));
1931 goto error_parse_buffer;
1932 }
1933 if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
1934 dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
1935 packet_status->bPacketType);
1936 goto error_parse_buffer;
1937 }
1938 xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
1939 if (xfer == NULL) {
1940 dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
1941 wa->dti_isoc_xfer_in_progress);
1942 goto error_parse_buffer;
1943 }
1944 spin_lock_irqsave(&xfer->lock, flags);
1945 if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
1946 goto error_bad_seg;
1947 seg = xfer->seg[wa->dti_isoc_xfer_seg];
1948 rpipe = xfer->ep->hcpriv;
1949
1950 /* set urb isoc packet status and length. */
1951 xfer->urb->iso_frame_desc[seg->index].status =
1952 wa_xfer_status_to_errno(
1953 le16_to_cpu(packet_status->PacketStatus[0].PacketStatus));
1954 xfer->urb->iso_frame_desc[seg->index].actual_length =
1955 le16_to_cpu(packet_status->PacketStatus[0].PacketLength);
1956
1957 if (!xfer->is_inbound) {
1958 /* OUT transfer, complete it -- */
1959 seg->status = WA_SEG_DONE;
1960 xfer->segs_done++;
1961 rpipe_ready = rpipe_avail_inc(rpipe);
1962 done = __wa_xfer_is_done(xfer);
1963 }
1964 spin_unlock_irqrestore(&xfer->lock, flags);
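	/* the packet status has been consumed; expect a transfer result next */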
1965 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
1966 if (done)
1967 wa_xfer_completion(xfer);
1968 if (rpipe_ready)
1969 wa_xfer_delayed_run(rpipe);
1970 wa_xfer_put(xfer);
1971 return;
1972
1973error_bad_seg:
1974 spin_unlock_irqrestore(&xfer->lock, flags);
1975 wa_xfer_put(xfer);
1976error_parse_buffer:
1977 return;
1978}
1979
1980/*
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01001981 * Callback for the IN data phase
1982 *
 * If successful, transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access the transfer until we are sure that it hasn't
 * been cancelled (ECONNRESET, ENOENT), in which case seg->xfer could
 * already be gone.
1989 */
1990static void wa_buf_in_cb(struct urb *urb)
1991{
1992 struct wa_seg *seg = urb->context;
1993 struct wa_xfer *xfer = seg->xfer;
1994 struct wahc *wa;
1995 struct device *dev;
1996 struct wa_rpipe *rpipe;
1997 unsigned rpipe_ready;
1998 unsigned long flags;
1999 u8 done = 0;
2000
Thomas Pugliese2b81c082013-06-11 10:39:31 -05002001 /* free the sg if it was used. */
2002 kfree(urb->sg);
2003 urb->sg = NULL;
2004
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002005 switch (urb->status) {
2006 case 0:
2007 spin_lock_irqsave(&xfer->lock, flags);
2008 wa = xfer->wa;
2009 dev = &wa->usb_iface->dev;
2010 rpipe = xfer->ep->hcpriv;
David Vrabelbce83692008-12-22 18:22:50 +00002011 dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
2012 xfer, seg->index, (size_t)urb->actual_length);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002013 seg->status = WA_SEG_DONE;
2014 seg->result = urb->actual_length;
2015 xfer->segs_done++;
2016 rpipe_ready = rpipe_avail_inc(rpipe);
2017 done = __wa_xfer_is_done(xfer);
2018 spin_unlock_irqrestore(&xfer->lock, flags);
2019 if (done)
2020 wa_xfer_completion(xfer);
2021 if (rpipe_ready)
2022 wa_xfer_delayed_run(rpipe);
2023 break;
2024 case -ECONNRESET: /* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
2026 break;
2027 default: /* Other errors ... */
2028 spin_lock_irqsave(&xfer->lock, flags);
2029 wa = xfer->wa;
2030 dev = &wa->usb_iface->dev;
2031 rpipe = xfer->ep->hcpriv;
2032 if (printk_ratelimit())
2033 dev_err(dev, "xfer %p#%u: data in error %d\n",
2034 xfer, seg->index, urb->status);
2035 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
2036 EDC_ERROR_TIMEFRAME)){
2037 dev_err(dev, "DTO: URB max acceptable errors "
2038 "exceeded, resetting device\n");
2039 wa_reset_all(wa);
2040 }
2041 seg->status = WA_SEG_ERROR;
2042 seg->result = urb->status;
2043 xfer->segs_done++;
2044 rpipe_ready = rpipe_avail_inc(rpipe);
2045 __wa_xfer_abort(xfer);
2046 done = __wa_xfer_is_done(xfer);
2047 spin_unlock_irqrestore(&xfer->lock, flags);
2048 if (done)
2049 wa_xfer_completion(xfer);
2050 if (rpipe_ready)
2051 wa_xfer_delayed_run(rpipe);
2052 }
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002053}
2054
2055/*
2056 * Handle an incoming transfer result buffer
2057 *
2058 * Given a transfer result buffer, it completes the transfer (possibly
2059 * scheduling and buffer in read) and then resubmits the DTI URB for a
2060 * new transfer result read.
2061 *
2062 *
2063 * The xfer_result DTI URB state machine
2064 *
2065 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
2066 *
2067 * We start in OFF mode, the first xfer_result notification [through
2068 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
2069 * read.
2070 *
2071 * We receive a buffer -- if it is not a xfer_result, we complain and
2072 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
2073 * request accounting. If it is an IN segment, we move to RBI and post
2074 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
2076 * segment, it will repost the DTI-URB.
2077 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
2079 * errors) in the URBs.
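 *
 * Illustrative summary of the transitions described above (OFF, RXR and
 * RBI are conceptual states, not enum values in this file):
 *
 *   OFF --- first transfer notification ----------------> RXR
 *   RXR --- xfer_result for an IN segment ---------------> RBI
 *   RBI --- buf-in URB completes -------------------------> RXR
 *   RXR --- xfer_result with no IN data (DTI reposted) ---> RXR
 *   any --- ENOENT/ESHUTDOWN or too many errors ----------> OFF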
2080 */
Thomas Pugliese0367eef2013-09-26 10:49:41 -05002081static void wa_dti_cb(struct urb *urb)
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002082{
2083 int result;
2084 struct wahc *wa = urb->context;
2085 struct device *dev = &wa->usb_iface->dev;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002086 u32 xfer_id;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002087 u8 usb_status;
2088
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002089 BUG_ON(wa->dti_urb != urb);
2090 switch (wa->dti_urb->status) {
2091 case 0:
Thomas Pugliese7a32d9b2013-10-04 10:40:45 -05002092 if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
2093 struct wa_xfer_result *xfer_result;
2094 struct wa_xfer *xfer;
2095
2096 /* We have a xfer result buffer; check it */
2097 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
2098 urb->actual_length, urb->transfer_buffer);
2099 if (urb->actual_length != sizeof(*xfer_result)) {
2100 dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
2101 urb->actual_length,
2102 sizeof(*xfer_result));
2103 break;
2104 }
2105 xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
2106 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
2107 dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
2108 xfer_result->hdr.bLength);
2109 break;
2110 }
2111 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
2112 dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
2113 xfer_result->hdr.bNotifyType);
2114 break;
2115 }
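			/* low 6 bits are the status code; bits 6-7 flag warnings/errors */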
2116 usb_status = xfer_result->bTransferStatus & 0x3f;
2117 if (usb_status == WA_XFER_STATUS_NOT_FOUND)
2118 /* taken care of already */
2119 break;
2120 xfer_id = le32_to_cpu(xfer_result->dwTransferID);
2121 xfer = wa_xfer_get_by_id(wa, xfer_id);
2122 if (xfer == NULL) {
2123 /* FIXME: transaction not found. */
2124 dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
2125 xfer_id, usb_status);
2126 break;
2127 }
2128 wa_xfer_result_chew(wa, xfer, xfer_result);
2129 wa_xfer_put(xfer);
2130 } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
2131 wa_process_iso_packet_status(wa, urb);
2132 } else {
2133 dev_err(dev, "DTI Error: unexpected EP state = %d\n",
2134 wa->dti_state);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002135 }
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002136 break;
2137 case -ENOENT: /* (we killed the URB)...so, no broadcast */
2138 case -ESHUTDOWN: /* going away! */
2139 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
2140 goto out;
2141 default:
2142 /* Unknown error */
2143 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
2144 EDC_ERROR_TIMEFRAME)) {
2145 dev_err(dev, "DTI: URB max acceptable errors "
2146 "exceeded, resetting device\n");
2147 wa_reset_all(wa);
2148 goto out;
2149 }
2150 if (printk_ratelimit())
2151 dev_err(dev, "DTI: URB error %d\n", urb->status);
2152 break;
2153 }
2154 /* Resubmit the DTI URB */
2155 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2156 if (result < 0) {
2157 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
2158 "resetting\n", result);
2159 wa_reset_all(wa);
2160 }
2161out:
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002162 return;
2163}
2164
2165/*
2166 * Transfer complete notification
2167 *
2168 * Called from the notif.c code. We get a notification on EP2 saying
2169 * that some endpoint has some transfer result data available. We are
2170 * about to read it.
2171 *
2172 * To speed up things, we always have a URB reading the DTI URB; we
2173 * don't really set it up and start it until the first xfer complete
2174 * notification arrives, which is what we do here.
2175 *
Thomas Pugliese0367eef2013-09-26 10:49:41 -05002176 * Follow up in wa_dti_cb(), as that's where the whole state
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002177 * machine starts.
2178 *
2179 * So here we just initialize the DTI URB for reading transfer result
2180 * notifications and also the buffer-in URB, for reading buffers. Then
2181 * we just submit the DTI URB.
2182 *
2183 * @wa shall be referenced
2184 */
2185void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
2186{
2187 int result;
2188 struct device *dev = &wa->usb_iface->dev;
2189 struct wa_notif_xfer *notif_xfer;
2190 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2191
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002192 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
2193 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
2194
2195 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
2196 /* FIXME: hardcoded limitation, adapt */
2197 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
2198 notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
2199 goto error;
2200 }
2201 if (wa->dti_urb != NULL) /* DTI URB already started */
2202 goto out;
2203
2204 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
2205 if (wa->dti_urb == NULL) {
2206 dev_err(dev, "Can't allocate DTI URB\n");
2207 goto error_dti_urb_alloc;
2208 }
2209 usb_fill_bulk_urb(
2210 wa->dti_urb, wa->usb_dev,
2211 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
Thomas Pugliese0367eef2013-09-26 10:49:41 -05002212 wa->dti_buf, wa->dti_buf_size,
2213 wa_dti_cb, wa);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002214
2215 wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
2216 if (wa->buf_in_urb == NULL) {
2217 dev_err(dev, "Can't allocate BUF-IN URB\n");
2218 goto error_buf_in_urb_alloc;
2219 }
2220 usb_fill_bulk_urb(
2221 wa->buf_in_urb, wa->usb_dev,
2222 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
2223 NULL, 0, wa_buf_in_cb, wa);
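	/*
	 * buf_in_urb's buffer and length are filled in per segment by
	 * wa_xfer_result_chew() before each submission.
	 */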
2224 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
2225 if (result < 0) {
2226 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
2227 "resetting\n", result);
2228 goto error_dti_urb_submit;
2229 }
2230out:
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002231 return;
2232
2233error_dti_urb_submit:
2234 usb_put_urb(wa->buf_in_urb);
Thomas Pugliese67414482013-09-26 14:08:16 -05002235 wa->buf_in_urb = NULL;
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002236error_buf_in_urb_alloc:
2237 usb_put_urb(wa->dti_urb);
2238 wa->dti_urb = NULL;
2239error_dti_urb_alloc:
2240error:
2241 wa_reset_all(wa);
Inaky Perez-Gonzalezdf365422008-09-17 16:34:29 +01002242}