/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <jtm@lopingdog.com>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


/*
 * The i.MX21 USB hardware contains
 *	* 32 transfer descriptors (called ETDs)
 *	* 4KB of data memory
 *
 * The data memory is shared between the host and function controllers
 * (but this driver only supports the host controller)
 *
 * So setting up a transfer involves:
 *	* Allocating an ETD
 *	* Filling in the ETD with appropriate information
 *	* Allocating data memory (and putting the offset in the ETD)
 *	* Activating the ETD
 *	* Getting an interrupt when it completes
 *
 * An ETD is assigned to each active endpoint.
 *
 * Low resource (ETD and data memory) situations are handled differently
 * for isochronous and non-isochronous transactions:
 *
 * Non-ISOC transfers are queued if either ETDs or data memory are unavailable
 *
 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
 * They allocate both ETDs and data memory during URB submission
 * (and fail if either is unavailable).
 */
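
/*
 * Roughly, in terms of the helpers below, the non-ISOC path looks like
 * this (illustrative sketch only; queueing and error handling omitted):
 *
 *	etd_num = alloc_etd(imx21);
 *	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
 *	etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, ep);
 *	activate_etd(imx21, etd_num, etd->dma_handle, dir);
 *	... ETD done interrupt ...
 *	nonisoc_etd_done(hcd, urb, etd_num);
 */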

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "imx21-hcd.h"

#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif

static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
	return (struct imx21 *)hcd->hcd_priv;
}


/* =========================================== */
/* Hardware access helpers */
/* =========================================== */

static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) | mask, reg);
}

static inline void clear_register_bits(struct imx21 *imx21,
	u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) & ~mask, reg);
}

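/*
 * Note (assumption from the helper semantics below): the "toggle" style
 * status registers flip a bit when 1 is written to it, rather than setting
 * or clearing it directly.  So these helpers read first and only write
 * when the bit actually needs to change state.
 */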
static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (readl(reg) & mask)
		writel(mask, reg);
}

static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (!(readl(reg) & mask))
		writel(mask, reg);
}

static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static inline int wrap_frame(int counter)
{
	return counter & 0xFFFF;
}

static inline int frame_after(int frame, int after)
{
	/* handle wrapping like jiffies time_after() */
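	/*
	 * Worked example: frame_after(1, 0xFFFE) is true because
	 * (s16)0xFFFE - (s16)1 = -2 - 1 = -3 < 0, i.e. frame 1 comes
	 * after frame 0xFFFE once the 16 bit frame counter has wrapped.
	 */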
	return (s16)((s16)after - (s16)frame) < 0;
}

static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);

	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}


#include "imx21-dbg.c"

/* =========================================== */
/* ETD management */
/* =========================================== */

static int alloc_etd(struct imx21 *imx21)
{
	int i;
	struct etd_priv *etd = imx21->etd;

	for (i = 0; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc == 0) {
			memset(etd, 0, sizeof(imx21->etd[0]));
			etd->alloc = 1;
			debug_etd_allocated(imx21);
			return i;
		}
	}
	return -1;
}

static void disactivate_etd(struct imx21 *imx21, int num)
{
	int etd_mask = (1 << num);
	struct etd_priv *etd = &imx21->etd[num];

	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

	etd->active_count = 0;

	DEBUG_LOG_FRAME(imx21, etd, disactivated);
}

static void reset_etd(struct imx21 *imx21, int num)
{
	struct etd_priv *etd = imx21->etd + num;
	int i;

	disactivate_etd(imx21, num);

	for (i = 0; i < 4; i++)
		etd_writel(imx21, num, i, 0);
	etd->urb = NULL;
	etd->ep = NULL;
	etd->td = NULL;
}

static void free_etd(struct imx21 *imx21, int num)
{
	if (num < 0)
		return;

	if (num >= USB_NUM_ETD) {
		dev_err(imx21->dev, "BAD etd=%d!\n", num);
		return;
	}
	if (imx21->etd[num].alloc == 0) {
		dev_err(imx21->dev, "ETD %d already free!\n", num);
		return;
	}

	debug_etd_freed(imx21);
	reset_etd(imx21, num);
	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}


static void setup_etd_dword0(struct imx21 *imx21,
	int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
	etd_writel(imx21, etd_num, 0,
		((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
		((u32) dir << DW0_DIRECT) |
		((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
			1 : 0) << DW0_SPEED) |
		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
		((u32) maxpacket << DW0_MAXPKTSIZ));
}

static void activate_etd(struct imx21 *imx21,
	int etd_num, dma_addr_t dma, u8 dir)
{
	u32 etd_mask = 1 << etd_num;
	struct etd_priv *etd = &imx21->etd[etd_num];

	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

	if (dma) {
		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
		writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
	} else {
		if (dir != TD_DIR_IN) {
			/* need to set for ZLP */
			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
		}
	}

	DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
	if (!etd->active_count) {
		int i;
		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
		etd->disactivated_frame = -1;
		etd->last_int_frame = -1;
		etd->last_req_frame = -1;

		for (i = 0; i < 4; i++)
			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
	}
#endif

	etd->active_count = 1;
	writel(etd_mask, imx21->regs + USBH_ETDENSET);
}

/* =========================================== */
/* Data memory management */
/* =========================================== */

static int alloc_dmem(struct imx21 *imx21, unsigned int size,
		      struct usb_host_endpoint *ep)
{
	unsigned int offset = 0;
	struct imx21_dmem_area *area;
	struct imx21_dmem_area *tmp;

	size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
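	/*
	 * (~size + 1) is -size in two's complement, so the line above adds
	 * (-size) & 3, i.e. rounds up: size 13 -> 13 + 3 = 16, 16 -> 16.
	 */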

	if (size > DMEM_SIZE) {
		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
			size, DMEM_SIZE);
		return -EINVAL;
	}

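	/*
	 * dmem_list is kept sorted by offset, so this is a first fit scan
	 * for a gap of at least "size" bytes.  If the loop runs to
	 * completion, &tmp->list is the list head itself, so the
	 * list_add_tail() below appends the new area at the end.
	 */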
	list_for_each_entry(tmp, &imx21->dmem_list, list) {
		if ((size + offset) < offset)
			goto fail;
		if ((size + offset) <= tmp->offset)
			break;
		offset = tmp->size + tmp->offset;
		if ((offset + size) > DMEM_SIZE)
			goto fail;
	}

	area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
	if (area == NULL)
		return -ENOMEM;

	area->ep = ep;
	area->offset = offset;
	area->size = size;
	list_add_tail(&area->list, &tmp->list);
	debug_dmem_allocated(imx21, size);
	return offset;

fail:
	return -ENOMEM;
}

/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
	struct etd_priv *etd, u32 dmem_offset)
{
	struct urb_priv *urb_priv = etd->urb->hcpriv;
	int etd_num = etd - &imx21->etd[0];
	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
		etd_num);
	etd_writel(imx21, etd_num, 1,
		((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

	etd->dmem_offset = dmem_offset;
	urb_priv->active = 1;
	activate_etd(imx21, etd_num, etd->dma_handle, dir);
}

static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
{
	struct imx21_dmem_area *area;
	struct etd_priv *tmp;
	int found = 0;
	int offset;

	if (!etd->dmem_size)
		return;
	etd->dmem_size = 0;

	offset = etd->dmem_offset;
	list_for_each_entry(area, &imx21->dmem_list, list) {
		if (area->offset == offset) {
			debug_dmem_freed(imx21, area->size);
			list_del(&area->list);
			kfree(area);
			found = 1;
			break;
		}
	}

	if (!found) {
		dev_err(imx21->dev,
			"Trying to free unallocated DMEM %d\n", offset);
		return;
	}

	/* Try again to allocate memory for anything we've queued */
	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
		if (offset >= 0) {
			list_del(&etd->queue);
			activate_queued_etd(imx21, etd, (u32)offset);
		}
	}
}

static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct imx21_dmem_area *area, *tmp;

	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
		if (area->ep == ep) {
			dev_err(imx21->dev,
				"Active DMEM %d for disabled ep=%p\n",
				area->offset, ep);
			list_del(&area->list);
			kfree(area);
		}
	}
}


/* =========================================== */
/* End handling */
/* =========================================== */
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);

/* Endpoint now idle - release its ETD(s) or assign them to a queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
		int etd_num = ep_priv->etd[i];
		struct etd_priv *etd;
		if (etd_num < 0)
			continue;

		etd = &imx21->etd[etd_num];
		ep_priv->etd[i] = -1;

		free_dmem(imx21, etd); /* for isoc */

		if (list_empty(&imx21->queue_for_etd)) {
			free_etd(imx21, etd_num);
			continue;
		}

		dev_dbg(imx21->dev,
			"assigning idle etd %d for queued request\n", etd_num);
		ep_priv = list_first_entry(&imx21->queue_for_etd,
			struct ep_priv, queue);
		list_del(&ep_priv->queue);
		reset_etd(imx21, etd_num);
		ep_priv->waiting_etd = 0;
		ep_priv->etd[i] = etd_num;

		if (list_empty(&ep_priv->ep->urb_list)) {
			dev_err(imx21->dev, "No urb for queued ep!\n");
			continue;
		}
		schedule_nonisoc_etd(imx21, list_first_entry(
			&ep_priv->ep->urb_list, struct urb, urb_list));
	}
}

static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = urb->ep->hcpriv;
	struct urb_priv *urb_priv = urb->hcpriv;

	debug_urb_completed(imx21, urb, status);
	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

	kfree(urb_priv->isoc_td);
	kfree(urb->hcpriv);
	urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&imx21->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&imx21->lock);
	if (list_empty(&ep_priv->ep->urb_list))
		ep_idle(imx21, ep_priv);
}

/* =========================================== */
/* ISOC Handling ... */
/* =========================================== */

static void schedule_isoc_etds(struct usb_hcd *hcd,
	struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = ep->hcpriv;
	struct etd_priv *etd;
	struct urb_priv *urb_priv;
	struct td *td;
	int etd_num;
	int i;
	int cur_frame;
	u8 dir;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
too_late:
		if (list_empty(&ep_priv->td_list))
			break;

		etd_num = ep_priv->etd[i];
		if (etd_num < 0)
			break;

		etd = &imx21->etd[etd_num];
		if (etd->urb)
			continue;

		td = list_entry(ep_priv->td_list.next, struct td, list);
		list_del(&td->list);
		urb_priv = td->urb->hcpriv;

		cur_frame = imx21_hc_get_frame(hcd);
		if (frame_after(cur_frame, td->frame)) {
			dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
				cur_frame, td->frame);
			urb_priv->isoc_status = -EXDEV;
			td->urb->iso_frame_desc[
				td->isoc_index].actual_length = 0;
			td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
			if (--urb_priv->isoc_remaining == 0)
				urb_done(hcd, td->urb, urb_priv->isoc_status);
			goto too_late;
		}

		urb_priv->active = 1;
		etd->td = td;
		etd->ep = td->ep;
		etd->urb = td->urb;
		etd->len = td->len;

		debug_isoc_submitted(imx21, cur_frame, td);

		dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
		setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
		etd_writel(imx21, etd_num, 1, etd->dmem_offset);
		etd_writel(imx21, etd_num, 2,
			(TD_NOTACCESSED << DW2_COMPCODE) |
			((td->frame & 0xFFFF) << DW2_STARTFRM));
		etd_writel(imx21, etd_num, 3,
			(TD_NOTACCESSED << DW3_COMPCODE0) |
			(td->len << DW3_PKTLEN0));

		activate_etd(imx21, etd_num, td->data, dir);
	}
}

static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int etd_mask = 1 << etd_num;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct etd_priv *etd = imx21->etd + etd_num;
	struct td *td = etd->td;
	struct usb_host_endpoint *ep = etd->ep;
	int isoc_index = td->isoc_index;
	unsigned int pipe = urb->pipe;
	int dir_in = usb_pipein(pipe);
	int cc;
	int bytes_xfrd;

	disactivate_etd(imx21, etd_num);

	cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
	bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

	/* Input doesn't always fill the buffer, don't generate an error
	 * when this happens.
	 */
	if (dir_in && (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc == TD_NOTACCESSED)
		bytes_xfrd = 0;

	debug_isoc_completed(imx21,
		imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
	if (cc) {
		urb_priv->isoc_status = -EXDEV;
		dev_dbg(imx21->dev,
			"bad iso cc=0x%X frame=%d sched frame=%d "
			"cnt=%d len=%d urb=%p etd=%d index=%d\n",
			cc, imx21_hc_get_frame(hcd), td->frame,
			bytes_xfrd, td->len, urb, etd_num, isoc_index);
	}

	if (dir_in)
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);

	urb->actual_length += bytes_xfrd;
	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
	urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];

	etd->td = NULL;
	etd->urb = NULL;
	etd->ep = NULL;

	if (--urb_priv->isoc_remaining == 0)
		urb_done(hcd, urb, urb_priv->isoc_status);

	schedule_isoc_etds(hcd, ep);
}

static struct ep_priv *alloc_isoc_ep(
	struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct ep_priv *ep_priv;
	int i;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; i++)
		ep_priv->etd[i] = -1;

	INIT_LIST_HEAD(&ep_priv->td_list);
	ep_priv->ep = ep;
	ep->hcpriv = ep_priv;
	return ep_priv;
}

static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i, j;
	int etd_num;

	/* Allocate the ETDs if required */
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		if (ep_priv->etd[i] < 0) {
			etd_num = alloc_etd(imx21);
			if (etd_num < 0)
				goto alloc_etd_failed;

			ep_priv->etd[i] = etd_num;
			imx21->etd[etd_num].ep = ep_priv->ep;
		}
	}
	return 0;

alloc_etd_failed:
	dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
	for (j = 0; j < i; j++) {
		free_etd(imx21, ep_priv->etd[j]);
		ep_priv->etd[j] = -1;
	}
	return -ENOMEM;
}

static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
				     struct usb_host_endpoint *ep,
				     struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct urb_priv *urb_priv;
	unsigned long flags;
	struct ep_priv *ep_priv;
	struct td *td = NULL;
	int i;
	int ret;
	int cur_frame;
	u16 maxpacket;

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (urb_priv == NULL)
		return -ENOMEM;

	urb_priv->isoc_td = kzalloc(
		sizeof(struct td) * urb->number_of_packets, mem_flags);
	if (urb_priv->isoc_td == NULL) {
		ret = -ENOMEM;
		goto alloc_td_failed;
	}

	spin_lock_irqsave(&imx21->lock, flags);

	if (ep->hcpriv == NULL) {
		ep_priv = alloc_isoc_ep(imx21, ep);
		if (ep_priv == NULL) {
			ret = -ENOMEM;
			goto alloc_ep_failed;
		}
	} else {
		ep_priv = ep->hcpriv;
	}

	ret = alloc_isoc_etds(imx21, ep_priv);
	if (ret)
		goto alloc_etd_failed;

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto link_failed;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	/* allocate data memory for largest packets if not already done */
	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

		if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
			/* not sure if this can really occur.... */
			dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
				etd->dmem_size, maxpacket);
			ret = -EMSGSIZE;
			goto alloc_dmem_failed;
		}

		if (etd->dmem_size == 0) {
			etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
			if (etd->dmem_offset < 0) {
				dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
				ret = -EAGAIN;
				goto alloc_dmem_failed;
			}
			etd->dmem_size = maxpacket;
		}
	}

	/* calculate frame */
	cur_frame = imx21_hc_get_frame(hcd);
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&ep_priv->td_list))
			urb->start_frame = cur_frame + 5;
		else
			urb->start_frame = list_entry(
				ep_priv->td_list.prev,
				struct td, list)->frame + urb->interval;
	}
	urb->start_frame = wrap_frame(urb->start_frame);
	if (frame_after(cur_frame, urb->start_frame)) {
		dev_dbg(imx21->dev,
			"enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
			urb->start_frame, cur_frame,
			(urb->transfer_flags & URB_ISO_ASAP) != 0);
		urb->start_frame = wrap_frame(cur_frame + 1);
	}

	/* set up transfers */
	td = urb_priv->isoc_td;
	for (i = 0; i < urb->number_of_packets; i++, td++) {
		td->ep = ep;
		td->urb = urb;
		td->len = urb->iso_frame_desc[i].length;
		td->isoc_index = i;
		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
		td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
		list_add_tail(&td->list, &ep_priv->td_list);
	}

	urb_priv->isoc_remaining = urb->number_of_packets;
	dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
		urb->number_of_packets, urb->start_frame, td->frame);

	debug_urb_submitted(imx21, urb);
	schedule_isoc_etds(hcd, ep);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

alloc_dmem_failed:
	usb_hcd_unlink_urb_from_ep(hcd, urb);

link_failed:
alloc_etd_failed:
alloc_ep_failed:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv->isoc_td);

alloc_td_failed:
	kfree(urb_priv);
	return ret;
}

static void dequeue_isoc_urb(struct imx21 *imx21,
	struct urb *urb, struct ep_priv *ep_priv)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td, *tmp;
	int i;

	if (urb_priv->active) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			int etd_num = ep_priv->etd[i];
			if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
				struct etd_priv *etd = imx21->etd + etd_num;

				reset_etd(imx21, etd_num);
				free_dmem(imx21, etd);
			}
		}
	}

	list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
		if (td->urb == urb) {
			dev_vdbg(imx21->dev, "removing td %p\n", td);
			list_del(&td->list);
		}
	}
}

/* =========================================== */
/* NON ISOC Handling ... */
/* =========================================== */

static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
	unsigned int pipe = urb->pipe;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
	int state = urb_priv->state;
	int etd_num = ep_priv->etd[0];
	struct etd_priv *etd;
	u32 count;
	u16 etd_buf_size;
	u16 maxpacket;
	u8 dir;
	u8 bufround;
	u8 datatoggle;
	u8 interval = 0;
	u8 relpolpos = 0;

	if (etd_num < 0) {
		dev_err(imx21->dev, "No valid ETD\n");
		return;
	}
	if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
		dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

	etd = &imx21->etd[etd_num];
	maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
	if (!maxpacket)
		maxpacket = 8;

	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
		if (state == US_CTRL_SETUP) {
			dir = TD_DIR_SETUP;
			etd->dma_handle = urb->setup_dma;
			bufround = 0;
			count = 8;
			datatoggle = TD_TOGGLE_DATA0;
		} else {	/* US_CTRL_ACK */
			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
			etd->dma_handle = urb->transfer_dma;
			bufround = 0;
			count = 0;
			datatoggle = TD_TOGGLE_DATA1;
		}
	} else {
		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
		bufround = (dir == TD_DIR_IN) ? 1 : 0;
		etd->dma_handle = urb->transfer_dma;
		if (usb_pipebulk(pipe) && (state == US_BULK0))
			count = 0;
		else
			count = urb->transfer_buffer_length;

		if (usb_pipecontrol(pipe)) {
			datatoggle = TD_TOGGLE_DATA1;
		} else {
			if (usb_gettoggle(
					urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe)))
				datatoggle = TD_TOGGLE_DATA1;
			else
				datatoggle = TD_TOGGLE_DATA0;
		}
	}

	etd->urb = urb;
	etd->ep = urb_priv->ep;
	etd->len = count;

	if (usb_pipeint(pipe)) {
		interval = urb->interval;
		relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
	}

	/* Write ETD to device memory */
	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

	etd_writel(imx21, etd_num, 2,
		(u32) interval << DW2_POLINTERV |
		((u32) relpolpos << DW2_RELPOLPOS) |
		((u32) dir << DW2_DIRPID) |
		((u32) bufround << DW2_BUFROUND) |
		((u32) datatoggle << DW2_DATATOG) |
		((u32) TD_NOTACCESSED << DW2_COMPCODE));

	/*
	 * DMA will always transfer buffer size even if TOBYCNT in DWORD3
	 * is smaller. Make sure we don't overrun the buffer!
	 */
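	/*
	 * For example a 5 byte interrupt IN transfer with maxpacket 64 must
	 * set a 5 byte buffer size, otherwise the DMA engine would transfer
	 * 64 bytes against the 5 byte buffer.
	 */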
	if (count && count < maxpacket)
		etd_buf_size = count;
	else
		etd_buf_size = maxpacket;

	etd_writel(imx21, etd_num, 3,
		((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);

	if (!count)
		etd->dma_handle = 0;

	/* allocate x and y buffer space at once */
	etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
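	/*
	 * e.g. a 192 byte bulk transfer with maxpacket 64 gets 128 bytes of
	 * data memory: two 64 byte buffers (X and Y) so the controller can
	 * double buffer; a transfer that fits in one packet gets just one.
	 */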
	etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
	if (etd->dmem_offset < 0) {
		/* Setup everything we can in HW and update when we get DMEM */
		etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);

		dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
		debug_urb_queued_for_dmem(imx21, urb);
		list_add_tail(&etd->queue, &imx21->queue_for_dmem);
		return;
	}

	etd_writel(imx21, etd_num, 1,
		(((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
		(u32) etd->dmem_offset);

	urb_priv->active = 1;

	/* enable the ETD to kick off transfer */
	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
	activate_etd(imx21, etd_num, etd->dma_handle, dir);

}

static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct etd_priv *etd = &imx21->etd[etd_num];
	u32 etd_mask = 1 << etd_num;
	struct urb_priv *urb_priv = urb->hcpriv;
	int dir;
	int cc;
	u32 bytes_xfrd;
	int etd_done;

	disactivate_etd(imx21, etd_num);

	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

	/* save toggle carry */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		      usb_pipeout(urb->pipe),
		      (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

	if (dir == TD_DIR_IN) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
	}
	free_dmem(imx21, etd);

	urb->error_count = 0;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc != 0)
		dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);

	etd_done = (cc_to_error[cc] != 0);	/* stop if error */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		switch (urb_priv->state) {
		case US_CTRL_SETUP:
			if (urb->transfer_buffer_length > 0)
				urb_priv->state = US_CTRL_DATA;
			else
				urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_DATA:
			urb->actual_length += bytes_xfrd;
			urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_ACK:
			etd_done = 1;
			break;
		default:
			dev_err(imx21->dev,
				"Invalid pipe state %d\n", urb_priv->state);
			etd_done = 1;
			break;
		}
		break;

	case PIPE_BULK:
		urb->actual_length += bytes_xfrd;
		if ((urb_priv->state == US_BULK)
		    && (urb->transfer_flags & URB_ZERO_PACKET)
		    && urb->transfer_buffer_length > 0
		    && ((urb->transfer_buffer_length %
			 usb_maxpacket(urb->dev, urb->pipe,
				       usb_pipeout(urb->pipe))) == 0)) {
			/* need a 0-packet */
			urb_priv->state = US_BULK0;
		} else {
			etd_done = 1;
		}
		break;

	case PIPE_INTERRUPT:
		urb->actual_length += bytes_xfrd;
		etd_done = 1;
		break;
	}

	if (!etd_done) {
		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
		schedule_nonisoc_etd(imx21, urb);
	} else {
		struct usb_host_endpoint *ep = urb->ep;

		urb_done(hcd, urb, cc_to_error[cc]);
		etd->urb = NULL;

		if (!list_empty(&ep->urb_list)) {
			urb = list_first_entry(&ep->urb_list,
				struct urb, urb_list);
			dev_vdbg(imx21->dev, "next URB %p\n", urb);
			schedule_nonisoc_etd(imx21, urb);
		}
	}
}

static struct ep_priv *alloc_ep(void)
{
	int i;
	struct ep_priv *ep_priv;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; ++i)
		ep_priv->etd[i] = -1;

	return ep_priv;
}

static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
				struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;
	struct ep_priv *ep_priv;
	struct etd_priv *etd;
	int ret;
	unsigned long flags;

	dev_vdbg(imx21->dev,
		"enqueue urb=%p ep=%p len=%d "
		"buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
		urb, ep,
		urb->transfer_buffer_length,
		urb->transfer_buffer, urb->transfer_dma,
		urb->setup_packet, urb->setup_dma);

	if (usb_pipeisoc(urb->pipe))
		return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	spin_lock_irqsave(&imx21->lock, flags);

	ep_priv = ep->hcpriv;
	if (ep_priv == NULL) {
		ep_priv = alloc_ep();
		if (!ep_priv) {
			ret = -ENOMEM;
			goto failed_alloc_ep;
		}
		ep->hcpriv = ep_priv;
		ep_priv->ep = ep;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto failed_link;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		urb_priv->state = US_CTRL_SETUP;
		break;
	case PIPE_BULK:
		urb_priv->state = US_BULK;
		break;
	}

	debug_urb_submitted(imx21, urb);
	if (ep_priv->etd[0] < 0) {
		if (ep_priv->waiting_etd) {
			dev_dbg(imx21->dev,
				"no ETD available already queued %p\n",
				ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			goto out;
		}
		ep_priv->etd[0] = alloc_etd(imx21);
		if (ep_priv->etd[0] < 0) {
			dev_dbg(imx21->dev,
				"no ETD available queueing %p\n", ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
			ep_priv->waiting_etd = 1;
			goto out;
		}
	}

	/* Schedule if no URB already active for this endpoint */
	etd = &imx21->etd[ep_priv->etd[0]];
	if (etd->urb == NULL) {
		DEBUG_LOG_FRAME(imx21, etd, last_req);
		schedule_nonisoc_etd(imx21, urb);
	}

out:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

failed_link:
failed_alloc_ep:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv);
	return ret;
}

static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
				int status)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct usb_host_endpoint *ep;
	struct ep_priv *ep_priv;
	struct urb_priv *urb_priv = urb->hcpriv;
	int ret = -EINVAL;

	dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
		urb, usb_pipeisoc(urb->pipe), status);

	spin_lock_irqsave(&imx21->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto fail;
	ep = urb_priv->ep;
	ep_priv = ep->hcpriv;

	debug_urb_unlinked(imx21, urb);

	if (usb_pipeisoc(urb->pipe)) {
		dequeue_isoc_urb(imx21, urb, ep_priv);
		schedule_isoc_etds(hcd, ep);
	} else if (urb_priv->active) {
		int etd_num = ep_priv->etd[0];
		if (etd_num != -1) {
			disactivate_etd(imx21, etd_num);
			free_dmem(imx21, &imx21->etd[etd_num]);
			imx21->etd[etd_num].urb = NULL;
		}
	}

	urb_done(hcd, urb, status);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

fail:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return ret;
}

/* =========================================== */
/* Interrupt dispatch */
/* =========================================== */

static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
	int etd_num;
	int enable_sof_int = 0;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
		u32 etd_mask = 1 << etd_num;
		u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
		u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
		struct etd_priv *etd = &imx21->etd[etd_num];


		if (done) {
			DEBUG_LOG_FRAME(imx21, etd, last_int);
		} else {
/*
 * Kludge warning!
 *
 * When multiple transfers are using the bus we sometimes get into a state
 * where the transfer has completed (the CC field of the ETD is != 0x0F),
 * the ETD has self disabled but the ETDDONESTAT flag is not set
 * (and hence no interrupt occurs).
 * This causes the transfer in question to hang.
 * The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait)
 *
 * With a single active transfer the usbtest test suite will run for days
 * without the kludge.
 * With other bus activity (eg mass storage) even just test1 will hang without
 * the kludge.
 */
			u32 dword0;
			int cc;

			if (etd->active_count && !enabled) /* suspicious... */
				enable_sof_int = 1;

			if (!sof || enabled || !etd->active_count)
				continue;

			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
			if (cc == TD_NOTACCESSED)
				continue;

			if (++etd->active_count < 10)
				continue;

			dword0 = etd_readl(imx21, etd_num, 0);
			dev_dbg(imx21->dev,
				"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
				etd_num, dword0 & 0x7F,
				(dword0 >> DW0_ENDPNT) & 0x0F,
				cc);

#ifdef DEBUG
			dev_dbg(imx21->dev,
				"frame: act=%d disact=%d"
				" int=%d req=%d cur=%d\n",
				etd->activated_frame,
				etd->disactivated_frame,
				etd->last_int_frame,
				etd->last_req_frame,
				readl(imx21->regs + USBH_FRMNUB));
			imx21->debug_unblocks++;
#endif
			etd->active_count = 0;
/* End of kludge */
		}

		if (etd->ep == NULL || etd->urb == NULL) {
			dev_dbg(imx21->dev,
				"Interrupt for unexpected etd %d"
				" ep=%p urb=%p\n",
				etd_num, etd->ep, etd->urb);
			disactivate_etd(imx21, etd_num);
			continue;
		}

		if (usb_pipeisoc(etd->urb->pipe))
			isoc_etd_done(hcd, etd->urb, etd_num);
		else
			nonisoc_etd_done(hcd, etd->urb, etd_num);
	}

	/* only enable SOF interrupt if it may be needed for the kludge */
	if (enable_sof_int)
		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
	else
		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);


	spin_unlock_irqrestore(&imx21->lock, flags);
}

static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	u32 ints = readl(imx21->regs + USBH_SYSISR);

	if (ints & USBH_SYSIEN_HERRINT)
		dev_dbg(imx21->dev, "Scheduling error\n");

	if (ints & USBH_SYSIEN_SORINT)
		dev_dbg(imx21->dev, "Scheduling overrun\n");

	if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

	writel(ints, imx21->regs + USBH_SYSISR);
	return IRQ_HANDLED;
}

static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
				      struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct ep_priv *ep_priv;
	int i;

	if (ep == NULL)
		return;

	spin_lock_irqsave(&imx21->lock, flags);
	ep_priv = ep->hcpriv;
	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

	if (!list_empty(&ep->urb_list))
		dev_dbg(imx21->dev, "ep's URB list is not empty\n");

	if (ep_priv != NULL) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			if (ep_priv->etd[i] > -1)
				dev_dbg(imx21->dev, "free etd %d for disable\n",
					ep_priv->etd[i]);

			free_etd(imx21, ep_priv->etd[i]);
		}
		kfree(ep_priv);
		ep->hcpriv = NULL;
	}

	for (i = 0; i < USB_NUM_ETD; i++) {
		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
			dev_err(imx21->dev,
				"Active etd %d for disabled ep=%p!\n", i, ep);
			free_etd(imx21, i);
		}
	}
	free_epdmem(imx21, ep);
	spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Hub handling */
/* =========================================== */

static int get_hub_descriptor(struct usb_hcd *hcd,
			      struct usb_hub_descriptor *desc)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	desc->bDescriptorType = 0x29;	/* HUB descriptor */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	desc->bDescLength = 9;
	desc->bPwrOn2PwrGood = 0;
	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
		0x0002 |	/* No power switching */
		0x0010 |	/* No over current protection */
		0);

	desc->bitmap[0] = 1 << 1;
	desc->bitmap[1] = ~0;
	return 0;
}

static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int ports;
	int changed = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);
	ports = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	if (ports > 7) {
		ports = 7;
		dev_err(imx21->dev, "ports %d > 7\n", ports);
	}
	for (i = 0; i < ports; i++) {
		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
			(USBH_PORTSTAT_CONNECTSC |
			USBH_PORTSTAT_PRTENBLSC |
			USBH_PORTSTAT_PRTSTATSC |
			USBH_PORTSTAT_OVRCURIC |
			USBH_PORTSTAT_PRTRSTSC)) {

			changed = 1;
			buf[0] |= 1 << (i + 1);
		}
	}
	spin_unlock_irqrestore(&imx21->lock, flags);

	if (changed)
		dev_info(imx21->dev, "Hub status changed\n");
	return changed;
}

static int imx21_hc_hub_control(struct usb_hcd *hcd,
				u16 typeReq,
				u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int rc = 0;
	u32 status_write = 0;

	switch (typeReq) {
	case ClearHubFeature:
		dev_dbg(imx21->dev, "ClearHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, " OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, " LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case ClearPortFeature:
		dev_dbg(imx21->dev, "ClearPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(imx21->dev, " ENABLE\n");
			status_write = USBH_PORTSTAT_CURCONST;
			break;
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, " SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTOVRCURI;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, " POWER\n");
			status_write = USBH_PORTSTAT_LSDEVCON;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			dev_dbg(imx21->dev, " C_ENABLE\n");
			status_write = USBH_PORTSTAT_PRTENBLSC;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			dev_dbg(imx21->dev, " C_SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSTATSC;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			dev_dbg(imx21->dev, " C_CONNECTION\n");
			status_write = USBH_PORTSTAT_CONNECTSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
			status_write = USBH_PORTSTAT_OVRCURIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			dev_dbg(imx21->dev, " C_RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTSC;
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}

		break;

	case GetHubDescriptor:
		dev_dbg(imx21->dev, "GetHubDescriptor\n");
		rc = get_hub_descriptor(hcd, (void *)buf);
		break;

	case GetHubStatus:
		dev_dbg(imx21->dev, " GetHubStatus\n");
		*(__le32 *) buf = 0;
		break;

	case GetPortStatus:
		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
		    wIndex, USBH_PORTSTAT(wIndex - 1));
		*(__le32 *) buf = readl(imx21->regs +
			USBH_PORTSTAT(wIndex - 1));
		break;

	case SetHubFeature:
		dev_dbg(imx21->dev, "SetHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, " OVER_CURRENT\n");
			break;

		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, " LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}

		break;

	case SetPortFeature:
		dev_dbg(imx21->dev, "SetPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, " SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSUSPST;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, " POWER\n");
			status_write = USBH_PORTSTAT_PRTPWRST;
			break;
		case USB_PORT_FEAT_RESET:
			dev_dbg(imx21->dev, " RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTST;
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	default:
		dev_dbg(imx21->dev, " unknown\n");
		rc = -EINVAL;
		break;
	}

	if (status_write)
		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
	return rc;
}

/* =========================================== */
/* Host controller management */
/* =========================================== */

static int imx21_hc_reset(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long timeout;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	/* Reset the Host controller modules */
	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
		imx21->regs + USBOTG_RST_CTRL);

	/* Wait for reset to finish */
	timeout = jiffies + HZ;
	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
		if (time_after(jiffies, timeout)) {
			spin_unlock_irqrestore(&imx21->lock, flags);
			dev_err(imx21->dev, "timeout waiting for reset\n");
			return -ETIMEDOUT;
		}
		spin_unlock_irq(&imx21->lock);
		schedule_timeout_uninterruptible(1);
		spin_lock_irq(&imx21->lock);
	}
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;
}

static int __devinit imx21_hc_start(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	int i, j;
	u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
	u32 usb_control = 0;

	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
			USBOTG_HWMODE_HOSTXCVR_MASK);
	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
			USBOTG_HWMODE_OTGXCVR_MASK);

	if (imx21->pdata->host1_txenoe)
		usb_control |= USBCTRL_HOST1_TXEN_OE;

	if (!imx21->pdata->host1_xcverless)
		usb_control |= USBCTRL_HOST1_BYP_TLL;

	if (imx21->pdata->otg_ext_xcvr)
		usb_control |= USBCTRL_OTC_RCV_RXDP;


	spin_lock_irqsave(&imx21->lock, flags);

	writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
		imx21->regs + USBOTG_CLK_CTRL);
	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
	writel(usb_control, imx21->regs + USBCTRL);
	writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
		imx21->regs + USB_MISCCONTROL);

	/* Clear the ETDs */
	for (i = 0; i < USB_NUM_ETD; i++)
		for (j = 0; j < 4; j++)
			etd_writel(imx21, i, j, 0);

	/* Take the HC out of reset */
	writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
		imx21->regs + USBH_HOST_CTRL);

	/* Enable ports */
	if (imx21->pdata->enable_otg_host)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(0));

	if (imx21->pdata->enable_host1)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(1));

	if (imx21->pdata->enable_host2)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(2));


	hcd->state = HC_STATE_RUNNING;

	/* Enable host controller interrupts */
	set_register_bits(imx21, USBH_SYSIEN,
		USBH_SYSIEN_HERRINT |
		USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
	set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}

static void imx21_hc_stop(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	writel(0, imx21->regs + USBH_SYSIEN);
	clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
	clear_register_bits(imx21, USBOTG_CLK_CTRL,
		USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
	spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Driver glue */
/* =========================================== */

static struct hc_driver imx21_hc_driver = {
	.description = hcd_name,
	.product_desc = "IMX21 USB Host Controller",
	.hcd_priv_size = sizeof(struct imx21),

	.flags = HCD_USB11,
	.irq = imx21_irq,

	.reset = imx21_hc_reset,
	.start = imx21_hc_start,
	.stop = imx21_hc_stop,

	/* I/O requests */
	.urb_enqueue = imx21_hc_urb_enqueue,
	.urb_dequeue = imx21_hc_urb_dequeue,
	.endpoint_disable = imx21_hc_endpoint_disable,

	/* scheduling support */
	.get_frame_number = imx21_hc_get_frame,

	/* Root hub support */
	.hub_status_data = imx21_hc_hub_status_data,
	.hub_control = imx21_hc_hub_control,

};

static struct mx21_usbh_platform_data default_pdata = {
	.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.enable_host1 = 1,
	.enable_host2 = 1,
	.enable_otg_host = 1,

};

static int imx21_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	remove_debug_files(imx21);
	usb_remove_hcd(hcd);

	if (res != NULL) {
		clk_disable(imx21->clk);
		clk_put(imx21->clk);
		iounmap(imx21->regs);
		release_mem_region(res->start, resource_size(res));
	}

	kfree(hcd);
	return 0;
}


static int imx21_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct imx21 *imx21;
	struct resource *res;
	int ret;
	int irq;

	printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	hcd = usb_create_hcd(&imx21_hc_driver,
		&pdev->dev, dev_name(&pdev->dev));
	if (hcd == NULL) {
		dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
			dev_name(&pdev->dev));
		return -ENOMEM;
	}

	imx21 = hcd_to_imx21(hcd);
	imx21->dev = &pdev->dev;
	imx21->pdata = pdev->dev.platform_data;
	if (!imx21->pdata)
		imx21->pdata = &default_pdata;

	spin_lock_init(&imx21->lock);
	INIT_LIST_HEAD(&imx21->dmem_list);
	INIT_LIST_HEAD(&imx21->queue_for_etd);
	INIT_LIST_HEAD(&imx21->queue_for_dmem);
	create_debug_files(imx21);

	res = request_mem_region(res->start, resource_size(res), hcd_name);
	if (!res) {
		ret = -EBUSY;
		goto failed_request_mem;
	}

	imx21->regs = ioremap(res->start, resource_size(res));
	if (imx21->regs == NULL) {
		dev_err(imx21->dev, "Cannot map registers\n");
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	/* Enable clocks source */
	imx21->clk = clk_get(imx21->dev, NULL);
	if (IS_ERR(imx21->clk)) {
		dev_err(imx21->dev, "no clock found\n");
		ret = PTR_ERR(imx21->clk);
		goto failed_clock_get;
	}

	ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
	if (ret)
		goto failed_clock_set;
	ret = clk_enable(imx21->clk);
	if (ret)
		goto failed_clock_enable;

	dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
		(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);

	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
	if (ret != 0) {
		dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
		goto failed_add_hcd;
	}

	return 0;

failed_add_hcd:
	clk_disable(imx21->clk);
failed_clock_enable:
failed_clock_set:
	clk_put(imx21->clk);
failed_clock_get:
	iounmap(imx21->regs);
failed_ioremap:
	release_mem_region(res->start, resource_size(res));
failed_request_mem:
	remove_debug_files(imx21);
	usb_put_hcd(hcd);
	return ret;
}

static struct platform_driver imx21_hcd_driver = {
	.driver = {
		.name = (char *)hcd_name,
	},
	.probe = imx21_probe,
	.remove = imx21_remove,
	.suspend = NULL,
	.resume = NULL,
};

static int __init imx21_hcd_init(void)
{
	return platform_driver_register(&imx21_hcd_driver);
}

static void __exit imx21_hcd_cleanup(void)
{
	platform_driver_unregister(&imx21_hcd_driver);
}

module_init(imx21_hcd_init);
module_exit(imx21_hcd_cleanup);

MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx21-hcd");