/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <jtm@lopingdog.com>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


/*
 * The i.MX21 USB hardware contains
 * * 32 transfer descriptors (called ETDs)
 * * 4Kb of Data memory
 *
 * The data memory is shared between the host and function controllers
 * (but this driver only supports the host controller)
 *
 * So setting up a transfer involves:
 * * Allocating an ETD
 * * Filling in the ETD with the appropriate information
 * * Allocating data memory (and putting the offset in the ETD)
 * * Activating the ETD
 * * Getting an interrupt when it completes
 *
 * An ETD is assigned to each active endpoint.
 *
 * Low resource (ETD and Data memory) situations are handled differently for
 * isochronous and non-isochronous transactions:
 *
 * Non-ISOC transfers are queued if either ETDs or Data memory are unavailable
 *
 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
 * They allocate both ETDs and Data memory during URB submission
 * (and fail if either is unavailable).
 */
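
/*
 * A minimal sketch of the non-ISOC flow described above, with the real
 * driver plumbing (URB queuing, DMEM fallback, locking, interrupts)
 * stripped away. This is illustrative only; schedule_nonisoc_etd()
 * below is the actual implementation and also handles the
 * resource-exhaustion paths:
 *
 *	etd_num = alloc_etd(imx21);             // grab a free ETD
 *	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
 *	offset = alloc_dmem(imx21, size, ep);   // carve out data memory
 *	etd_writel(imx21, etd_num, 1, ...);     // store X/Y buffer offsets
 *	activate_etd(imx21, etd_num, dma, dir); // hardware starts transfer
 *	// ... completion arrives via imx21_irq() -> nonisoc_etd_done()
 */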

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/usb.h>

#include "../core/hcd.h"
#include "imx21-hcd.h"

#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif

static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
	return (struct imx21 *)hcd->hcd_priv;
}


/* =========================================== */
/* Hardware access helpers */
/* =========================================== */

static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) | mask, reg);
}

static inline void clear_register_bits(struct imx21 *imx21,
	u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) & ~mask, reg);
}

static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (readl(reg) & mask)
		writel(mask, reg);
}

static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (!(readl(reg) & mask))
		writel(mask, reg);
}

static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static inline int wrap_frame(int counter)
{
	return counter & 0xFFFF;
}
static inline int frame_after(int frame, int after)
{
	/* handle wrapping like jiffies time_after */
	return (s16)((s16)after - (s16)frame) < 0;
}
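
/*
 * Worked example of the wrap handling (illustrative values only):
 * the comparison is done on the signed 16 bit difference, so it gives
 * the right answer as long as the two frame numbers are within 32767
 * of each other, exactly like the jiffies time_after() macros.
 *
 *	frame_after(10, 5);     // (s16)(5 - 10) = -5 -> true
 *	frame_after(5, 10);     // (s16)(10 - 5) = 5  -> false
 *	frame_after(2, 0xFFFE); // (s16)0xFFFC = -4   -> true:
 *	                        // frame 2 is "after" 0xFFFE across the
 *	                        // 16 bit frame counter wrap
 */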

static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);

	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}


#include "imx21-dbg.c"

/* =========================================== */
/* ETD management */
/* =========================================== */

static int alloc_etd(struct imx21 *imx21)
{
	int i;
	struct etd_priv *etd = imx21->etd;

	for (i = 0; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc == 0) {
			memset(etd, 0, sizeof(imx21->etd[0]));
			etd->alloc = 1;
			debug_etd_allocated(imx21);
			return i;
		}
	}
	return -1;
}

static void disactivate_etd(struct imx21 *imx21, int num)
{
	int etd_mask = (1 << num);
	struct etd_priv *etd = &imx21->etd[num];

	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

	etd->active_count = 0;

	DEBUG_LOG_FRAME(imx21, etd, disactivated);
}

static void reset_etd(struct imx21 *imx21, int num)
{
	struct etd_priv *etd = imx21->etd + num;
	int i;

	disactivate_etd(imx21, num);

	for (i = 0; i < 4; i++)
		etd_writel(imx21, num, i, 0);
	etd->urb = NULL;
	etd->ep = NULL;
	etd->td = NULL;
}

static void free_etd(struct imx21 *imx21, int num)
{
	if (num < 0)
		return;

	if (num >= USB_NUM_ETD) {
		dev_err(imx21->dev, "BAD etd=%d!\n", num);
		return;
	}
	if (imx21->etd[num].alloc == 0) {
		dev_err(imx21->dev, "ETD %d already free!\n", num);
		return;
	}

	debug_etd_freed(imx21);
	reset_etd(imx21, num);
	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}


static void setup_etd_dword0(struct imx21 *imx21,
	int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
	etd_writel(imx21, etd_num, 0,
		((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
		((u32) dir << DW0_DIRECT) |
		((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
			1 : 0) << DW0_SPEED) |
		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
		((u32) maxpacket << DW0_MAXPKTSIZ));
}
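
/*
 * Example of what ends up in DWORD0 (symbolic; the DW0_* shift values
 * live in imx21-hcd.h). For a full speed bulk OUT transfer to device 3,
 * endpoint 2, with a 64 byte maximum packet size, the call above packs:
 *
 *	(3  << DW0_ADDRESS)        |  // USB device address
 *	(2  << DW0_ENDPNT)         |  // endpoint number
 *	(TD_DIR_OUT << DW0_DIRECT) |  // transfer direction
 *	(0  << DW0_SPEED)          |  // 0 = full speed, 1 = low speed
 *	(fmt_urb_to_etd[PIPE_BULK] << DW0_FORMAT) |
 *	(64 << DW0_MAXPKTSIZ)
 */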

static void activate_etd(struct imx21 *imx21,
	int etd_num, dma_addr_t dma, u8 dir)
{
	u32 etd_mask = 1 << etd_num;
	struct etd_priv *etd = &imx21->etd[etd_num];

	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

	if (dma) {
		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
		writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
	} else {
		if (dir != TD_DIR_IN) {
			/* need to set for ZLP */
			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
		}
	}

	DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
	if (!etd->active_count) {
		int i;
		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
		etd->disactivated_frame = -1;
		etd->last_int_frame = -1;
		etd->last_req_frame = -1;

		for (i = 0; i < 4; i++)
			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
	}
#endif

	etd->active_count = 1;
	writel(etd_mask, imx21->regs + USBH_ETDENSET);
}

/* =========================================== */
/* Data memory management */
/* =========================================== */

static int alloc_dmem(struct imx21 *imx21, unsigned int size,
		      struct usb_host_endpoint *ep)
{
	unsigned int offset = 0;
	struct imx21_dmem_area *area;
	struct imx21_dmem_area *tmp;

	size += (~size + 1) & 0x3; /* Round to 4 byte multiple, e.g. 13 -> 16 */

	if (size > DMEM_SIZE) {
		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
			size, DMEM_SIZE);
		return -EINVAL;
	}

	list_for_each_entry(tmp, &imx21->dmem_list, list) {
		if ((size + offset) < offset)
			goto fail;
		if ((size + offset) <= tmp->offset)
			break;
		offset = tmp->size + tmp->offset;
		if ((offset + size) > DMEM_SIZE)
			goto fail;
	}

	area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
	if (area == NULL)
		return -ENOMEM;

	area->ep = ep;
	area->offset = offset;
	area->size = size;
	list_add_tail(&area->list, &tmp->list);
	debug_dmem_allocated(imx21, size);
	return offset;

fail:
	return -ENOMEM;
}
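
/*
 * alloc_dmem() is a first-fit allocator over the 4Kb of data memory:
 * dmem_list is kept sorted by offset, and the loop above slides
 * "offset" past each existing area until the requested size fits in a
 * gap. A worked example with hypothetical offsets (DMEM_SIZE = 4096):
 *
 *	existing areas:  [0..63]  [64..127]  [192..255]
 *	alloc_dmem(size=64) walks the list:
 *	  offset=0   -> collides with [0..63],   offset becomes 64
 *	  offset=64  -> collides with [64..127], offset becomes 128
 *	  offset=128 -> 128 + 64 <= 192, fits the gap -> returns 128
 */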

/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
	struct etd_priv *etd, u32 dmem_offset)
{
	struct urb_priv *urb_priv = etd->urb->hcpriv;
	int etd_num = etd - &imx21->etd[0];
	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
		etd_num);
	etd_writel(imx21, etd_num, 1,
		((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

	urb_priv->active = 1;
	activate_etd(imx21, etd_num, etd->dma_handle, dir);
}

static void free_dmem(struct imx21 *imx21, int offset)
{
	struct imx21_dmem_area *area;
	struct etd_priv *etd, *tmp;
	int found = 0;

	list_for_each_entry(area, &imx21->dmem_list, list) {
		if (area->offset == offset) {
			debug_dmem_freed(imx21, area->size);
			list_del(&area->list);
			kfree(area);
			found = 1;
			break;
		}
	}

	if (!found) {
		dev_err(imx21->dev,
			"Trying to free unallocated DMEM %d\n", offset);
		return;
	}

	/* Try again to allocate memory for anything we've queued */
	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
		if (offset >= 0) {
			list_del(&etd->queue);
			activate_queued_etd(imx21, etd, (u32)offset);
		}
	}
}

static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct imx21_dmem_area *area, *tmp;

	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
		if (area->ep == ep) {
			dev_err(imx21->dev,
				"Active DMEM %d for disabled ep=%p\n",
				area->offset, ep);
			list_del(&area->list);
			kfree(area);
		}
	}
}


/* =========================================== */
/* End handling */
/* =========================================== */
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);

/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int etd_num;
	int i;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
		etd_num = ep_priv->etd[i];
		if (etd_num < 0)
			continue;

		ep_priv->etd[i] = -1;
		if (list_empty(&imx21->queue_for_etd)) {
			free_etd(imx21, etd_num);
			continue;
		}

		dev_dbg(imx21->dev,
			"assigning idle etd %d for queued request\n", etd_num);
		ep_priv = list_first_entry(&imx21->queue_for_etd,
			struct ep_priv, queue);
		list_del(&ep_priv->queue);
		reset_etd(imx21, etd_num);
		ep_priv->waiting_etd = 0;
		ep_priv->etd[i] = etd_num;

		if (list_empty(&ep_priv->ep->urb_list)) {
			dev_err(imx21->dev, "No urb for queued ep!\n");
			continue;
		}
		schedule_nonisoc_etd(imx21, list_first_entry(
			&ep_priv->ep->urb_list, struct urb, urb_list));
	}
}

static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = urb->ep->hcpriv;
	struct urb_priv *urb_priv = urb->hcpriv;

	debug_urb_completed(imx21, urb, status);
	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

	kfree(urb_priv->isoc_td);
	kfree(urb->hcpriv);
	urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&imx21->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&imx21->lock);
	if (list_empty(&ep_priv->ep->urb_list))
		ep_idle(imx21, ep_priv);
}

/* =========================================== */
/* ISOC Handling ... */
/* =========================================== */

static void schedule_isoc_etds(struct usb_hcd *hcd,
	struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = ep->hcpriv;
	struct etd_priv *etd;
	struct urb_priv *urb_priv;
	struct td *td;
	int etd_num;
	int i;
	int cur_frame;
	u8 dir;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
too_late:
		if (list_empty(&ep_priv->td_list))
			break;

		etd_num = ep_priv->etd[i];
		if (etd_num < 0)
			break;

		etd = &imx21->etd[etd_num];
		if (etd->urb)
			continue;

		td = list_entry(ep_priv->td_list.next, struct td, list);
		list_del(&td->list);
		urb_priv = td->urb->hcpriv;

		cur_frame = imx21_hc_get_frame(hcd);
		if (frame_after(cur_frame, td->frame)) {
			dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
				cur_frame, td->frame);
			urb_priv->isoc_status = -EXDEV;
			td->urb->iso_frame_desc[
				td->isoc_index].actual_length = 0;
			td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
			if (--urb_priv->isoc_remaining == 0)
				urb_done(hcd, td->urb, urb_priv->isoc_status);
			goto too_late;
		}

		urb_priv->active = 1;
		etd->td = td;
		etd->ep = td->ep;
		etd->urb = td->urb;
		etd->len = td->len;

		debug_isoc_submitted(imx21, cur_frame, td);

		dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
		setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
		etd_writel(imx21, etd_num, 1, etd->dmem_offset);
		etd_writel(imx21, etd_num, 2,
			(TD_NOTACCESSED << DW2_COMPCODE) |
			((td->frame & 0xFFFF) << DW2_STARTFRM));
		etd_writel(imx21, etd_num, 3,
			(TD_NOTACCESSED << DW3_COMPCODE0) |
			(td->len << DW3_PKTLEN0));

		activate_etd(imx21, etd_num, td->data, dir);
	}
}

static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int etd_mask = 1 << etd_num;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct etd_priv *etd = imx21->etd + etd_num;
	struct td *td = etd->td;
	struct usb_host_endpoint *ep = etd->ep;
	int isoc_index = td->isoc_index;
	unsigned int pipe = urb->pipe;
	int dir_in = usb_pipein(pipe);
	int cc;
	int bytes_xfrd;

	disactivate_etd(imx21, etd_num);

	cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
	bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

	/* Input doesn't always fill the buffer, don't generate an error
	 * when this happens.
	 */
	if (dir_in && (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc == TD_NOTACCESSED)
		bytes_xfrd = 0;

	debug_isoc_completed(imx21,
		imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
	if (cc) {
		urb_priv->isoc_status = -EXDEV;
		dev_dbg(imx21->dev,
			"bad iso cc=0x%X frame=%d sched frame=%d "
			"cnt=%d len=%d urb=%p etd=%d index=%d\n",
			cc, imx21_hc_get_frame(hcd), td->frame,
			bytes_xfrd, td->len, urb, etd_num, isoc_index);
	}

	if (dir_in)
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);

	urb->actual_length += bytes_xfrd;
	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
	urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];

	etd->td = NULL;
	etd->urb = NULL;
	etd->ep = NULL;

	if (--urb_priv->isoc_remaining == 0)
		urb_done(hcd, urb, urb_priv->isoc_status);

	schedule_isoc_etds(hcd, ep);
}

static struct ep_priv *alloc_isoc_ep(
	struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct ep_priv *ep_priv;
	int i;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (ep_priv == NULL)
		return NULL;

	/* Allocate the ETDs */
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		ep_priv->etd[i] = alloc_etd(imx21);
		if (ep_priv->etd[i] < 0) {
			int j;
			dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
			for (j = 0; j < i; j++)
				free_etd(imx21, ep_priv->etd[j]);
			goto alloc_etd_failed;
		}
		imx21->etd[ep_priv->etd[i]].ep = ep;
	}

	INIT_LIST_HEAD(&ep_priv->td_list);
	ep_priv->ep = ep;
	ep->hcpriv = ep_priv;
	return ep_priv;

alloc_etd_failed:
	kfree(ep_priv);
	return NULL;
}

static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
	struct usb_host_endpoint *ep,
	struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct urb_priv *urb_priv;
	unsigned long flags;
	struct ep_priv *ep_priv;
	struct td *td = NULL;
	int i;
	int ret;
	int cur_frame;
	u16 maxpacket;

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (urb_priv == NULL)
		return -ENOMEM;

	urb_priv->isoc_td = kzalloc(
		sizeof(struct td) * urb->number_of_packets, mem_flags);
	if (urb_priv->isoc_td == NULL) {
		ret = -ENOMEM;
		goto alloc_td_failed;
	}

	spin_lock_irqsave(&imx21->lock, flags);

	if (ep->hcpriv == NULL) {
		ep_priv = alloc_isoc_ep(imx21, ep);
		if (ep_priv == NULL) {
			ret = -ENOMEM;
			goto alloc_ep_failed;
		}
	} else {
		ep_priv = ep->hcpriv;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto link_failed;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	/* allocate data memory for largest packets if not already done */
	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

		if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
			/* not sure if this can really occur.... */
			dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
				etd->dmem_size, maxpacket);
			ret = -EMSGSIZE;
			goto alloc_dmem_failed;
		}

		if (etd->dmem_size == 0) {
			etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
			if (etd->dmem_offset < 0) {
				dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
				ret = -EAGAIN;
				goto alloc_dmem_failed;
			}
			etd->dmem_size = maxpacket;
		}
	}

	/* calculate frame */
	cur_frame = imx21_hc_get_frame(hcd);
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&ep_priv->td_list))
			urb->start_frame = cur_frame + 5;
		else
			urb->start_frame = list_entry(
				ep_priv->td_list.prev,
				struct td, list)->frame + urb->interval;
	}
	urb->start_frame = wrap_frame(urb->start_frame);
	if (frame_after(cur_frame, urb->start_frame)) {
		dev_dbg(imx21->dev,
			"enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
			urb->start_frame, cur_frame,
			(urb->transfer_flags & URB_ISO_ASAP) != 0);
		urb->start_frame = wrap_frame(cur_frame + 1);
	}

	/* set up transfers */
	td = urb_priv->isoc_td;
	for (i = 0; i < urb->number_of_packets; i++, td++) {
		td->ep = ep;
		td->urb = urb;
		td->len = urb->iso_frame_desc[i].length;
		td->isoc_index = i;
		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
		td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
		list_add_tail(&td->list, &ep_priv->td_list);
	}

	urb_priv->isoc_remaining = urb->number_of_packets;
	dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
		urb->number_of_packets, urb->start_frame, (td - 1)->frame);

	debug_urb_submitted(imx21, urb);
	schedule_isoc_etds(hcd, ep);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

alloc_dmem_failed:
	usb_hcd_unlink_urb_from_ep(hcd, urb);

link_failed:
alloc_ep_failed:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv->isoc_td);

alloc_td_failed:
	kfree(urb_priv);
	return ret;
}

static void dequeue_isoc_urb(struct imx21 *imx21,
	struct urb *urb, struct ep_priv *ep_priv)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td, *tmp;
	int i;

	if (urb_priv->active) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			int etd_num = ep_priv->etd[i];
			if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
				struct etd_priv *etd = imx21->etd + etd_num;

				reset_etd(imx21, etd_num);
				if (etd->dmem_size)
					free_dmem(imx21, etd->dmem_offset);
				etd->dmem_size = 0;
			}
		}
	}

	list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
		if (td->urb == urb) {
			dev_vdbg(imx21->dev, "removing td %p\n", td);
			list_del(&td->list);
		}
	}
}

/* =========================================== */
/* NON ISOC Handling ... */
/* =========================================== */

static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
	unsigned int pipe = urb->pipe;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
	int state = urb_priv->state;
	int etd_num = ep_priv->etd[0];
	struct etd_priv *etd;
	int dmem_offset;
	u32 count;
	u16 etd_buf_size;
	u16 maxpacket;
	u8 dir;
	u8 bufround;
	u8 datatoggle;
	u8 interval = 0;
	u8 relpolpos = 0;

	if (etd_num < 0) {
		dev_err(imx21->dev, "No valid ETD\n");
		return;
	}
	if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
		dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

	etd = &imx21->etd[etd_num];
	maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
	if (!maxpacket)
		maxpacket = 8;

	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
		if (state == US_CTRL_SETUP) {
			dir = TD_DIR_SETUP;
			etd->dma_handle = urb->setup_dma;
			bufround = 0;
			count = 8;
			datatoggle = TD_TOGGLE_DATA0;
		} else {	/* US_CTRL_ACK */
			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
			etd->dma_handle = urb->transfer_dma;
			bufround = 0;
			count = 0;
			datatoggle = TD_TOGGLE_DATA1;
		}
	} else {
		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
		bufround = (dir == TD_DIR_IN) ? 1 : 0;
		etd->dma_handle = urb->transfer_dma;
		if (usb_pipebulk(pipe) && (state == US_BULK0))
			count = 0;
		else
			count = urb->transfer_buffer_length;

		if (usb_pipecontrol(pipe)) {
			datatoggle = TD_TOGGLE_DATA1;
		} else {
			if (usb_gettoggle(
					urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe)))
				datatoggle = TD_TOGGLE_DATA1;
			else
				datatoggle = TD_TOGGLE_DATA0;
		}
	}

	etd->urb = urb;
	etd->ep = urb_priv->ep;
	etd->len = count;

	if (usb_pipeint(pipe)) {
		interval = urb->interval;
		relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
	}

	/* Write ETD to device memory */
	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

	etd_writel(imx21, etd_num, 2,
		(u32) interval << DW2_POLINTERV |
		((u32) relpolpos << DW2_RELPOLPOS) |
		((u32) dir << DW2_DIRPID) |
		((u32) bufround << DW2_BUFROUND) |
		((u32) datatoggle << DW2_DATATOG) |
		((u32) TD_NOTACCESSED << DW2_COMPCODE));

	/* DMA will always transfer buffer size even if TOBYCNT in DWORD3
	   is smaller. Make sure we don't overrun the buffer!
	 */
	if (count && count < maxpacket)
		etd_buf_size = count;
	else
		etd_buf_size = maxpacket;

	etd_writel(imx21, etd_num, 3,
		((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);

	if (!count)
		etd->dma_handle = 0;

	/* allocate x and y buffer space at once */
	etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
	dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
	if (dmem_offset < 0) {
		/* Setup everything we can in HW and update when we get DMEM */
		etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);

		dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
		debug_urb_queued_for_dmem(imx21, urb);
		list_add_tail(&etd->queue, &imx21->queue_for_dmem);
		return;
	}

	etd_writel(imx21, etd_num, 1,
		(((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
		(u32) dmem_offset);

	urb_priv->active = 1;

	/* enable the ETD to kick off transfer */
	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
	activate_etd(imx21, etd_num, etd->dma_handle, dir);
}

static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct etd_priv *etd = &imx21->etd[etd_num];
	u32 etd_mask = 1 << etd_num;
	struct urb_priv *urb_priv = urb->hcpriv;
	int dir;
	u16 xbufaddr;
	int cc;
	u32 bytes_xfrd;
	int etd_done;

	disactivate_etd(imx21, etd_num);

	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
	xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

	/* save toggle carry */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		usb_pipeout(urb->pipe),
		(etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

	if (dir == TD_DIR_IN) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
	}
	free_dmem(imx21, xbufaddr);

	urb->error_count = 0;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc != 0)
		dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);

	etd_done = (cc_to_error[cc] != 0);	/* stop if error */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		switch (urb_priv->state) {
		case US_CTRL_SETUP:
			if (urb->transfer_buffer_length > 0)
				urb_priv->state = US_CTRL_DATA;
			else
				urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_DATA:
			urb->actual_length += bytes_xfrd;
			urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_ACK:
			etd_done = 1;
			break;
		default:
			dev_err(imx21->dev,
				"Invalid pipe state %d\n", urb_priv->state);
			etd_done = 1;
			break;
		}
		break;

	case PIPE_BULK:
		urb->actual_length += bytes_xfrd;
		if ((urb_priv->state == US_BULK)
		    && (urb->transfer_flags & URB_ZERO_PACKET)
		    && urb->transfer_buffer_length > 0
		    && ((urb->transfer_buffer_length %
			usb_maxpacket(urb->dev, urb->pipe,
				usb_pipeout(urb->pipe))) == 0)) {
			/* need a 0-packet */
			urb_priv->state = US_BULK0;
		} else {
			etd_done = 1;
		}
		break;

	case PIPE_INTERRUPT:
		urb->actual_length += bytes_xfrd;
		etd_done = 1;
		break;
	}

	if (!etd_done) {
		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
		schedule_nonisoc_etd(imx21, urb);
	} else {
		struct usb_host_endpoint *ep = urb->ep;

		urb_done(hcd, urb, cc_to_error[cc]);
		etd->urb = NULL;

		if (!list_empty(&ep->urb_list)) {
			urb = list_first_entry(&ep->urb_list,
					struct urb, urb_list);
			dev_vdbg(imx21->dev, "next URB %p\n", urb);
			schedule_nonisoc_etd(imx21, urb);
		}
	}
}
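
/*
 * Sketch of the urb_priv->state machine driven by
 * schedule_nonisoc_etd() and nonisoc_etd_done() above (illustrative,
 * derived from the two switch statements):
 *
 *	US_CTRL_SETUP --(8 byte SETUP done)--> US_CTRL_DATA (length > 0)
 *	                                  \--> US_CTRL_ACK  (length == 0)
 *	US_CTRL_DATA  --(data stage done)----> US_CTRL_ACK
 *	US_CTRL_ACK   --(status stage done)--> urb_done()
 *
 * Bulk URBs stay in US_BULK, with a final US_BULK0 pass when
 * URB_ZERO_PACKET requires a terminating zero length packet.
 */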

static struct ep_priv *alloc_ep(void)
{
	int i;
	struct ep_priv *ep_priv;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; ++i)
		ep_priv->etd[i] = -1;

	return ep_priv;
}

static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
				struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;
	struct ep_priv *ep_priv;
	struct etd_priv *etd;
	int ret;
	unsigned long flags;
	int new_ep = 0;

	dev_vdbg(imx21->dev,
		"enqueue urb=%p ep=%p len=%d "
		"buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
		urb, ep,
		urb->transfer_buffer_length,
		urb->transfer_buffer, urb->transfer_dma,
		urb->setup_packet, urb->setup_dma);

	if (usb_pipeisoc(urb->pipe))
		return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	spin_lock_irqsave(&imx21->lock, flags);

	ep_priv = ep->hcpriv;
	if (ep_priv == NULL) {
		ep_priv = alloc_ep();
		if (!ep_priv) {
			ret = -ENOMEM;
			goto failed_alloc_ep;
		}
		ep->hcpriv = ep_priv;
		ep_priv->ep = ep;
		new_ep = 1;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto failed_link;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		urb_priv->state = US_CTRL_SETUP;
		break;
	case PIPE_BULK:
		urb_priv->state = US_BULK;
		break;
	}

	debug_urb_submitted(imx21, urb);
	if (ep_priv->etd[0] < 0) {
		if (ep_priv->waiting_etd) {
			dev_dbg(imx21->dev,
				"no ETD available already queued %p\n",
				ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			goto out;
		}
		ep_priv->etd[0] = alloc_etd(imx21);
		if (ep_priv->etd[0] < 0) {
			dev_dbg(imx21->dev,
				"no ETD available queueing %p\n", ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
			ep_priv->waiting_etd = 1;
			goto out;
		}
	}

	/* Schedule if no URB already active for this endpoint */
	etd = &imx21->etd[ep_priv->etd[0]];
	if (etd->urb == NULL) {
		DEBUG_LOG_FRAME(imx21, etd, last_req);
		schedule_nonisoc_etd(imx21, urb);
	}

out:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

failed_link:
failed_alloc_ep:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv);
	return ret;
}

static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
				int status)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct usb_host_endpoint *ep;
	struct ep_priv *ep_priv;
	struct urb_priv *urb_priv = urb->hcpriv;
	int ret = -EINVAL;

	dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
		urb, usb_pipeisoc(urb->pipe), status);

	spin_lock_irqsave(&imx21->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto fail;
	ep = urb_priv->ep;
	ep_priv = ep->hcpriv;

	debug_urb_unlinked(imx21, urb);

	if (usb_pipeisoc(urb->pipe)) {
		dequeue_isoc_urb(imx21, urb, ep_priv);
		schedule_isoc_etds(hcd, ep);
	} else if (urb_priv->active) {
		int etd_num = ep_priv->etd[0];
		if (etd_num != -1) {
			disactivate_etd(imx21, etd_num);
			free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff);
			imx21->etd[etd_num].urb = NULL;
		}
	}

	urb_done(hcd, urb, status);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

fail:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return ret;
}

/* =========================================== */
/* Interrupt dispatch */
/* =========================================== */

static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
	int etd_num;
	int enable_sof_int = 0;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
		u32 etd_mask = 1 << etd_num;
		u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
		u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
		struct etd_priv *etd = &imx21->etd[etd_num];

		if (done) {
			DEBUG_LOG_FRAME(imx21, etd, last_int);
		} else {
/*
 * Kludge warning!
 *
 * When multiple transfers are using the bus we sometimes get into a state
 * where the transfer has completed (the CC field of the ETD is != 0x0F),
 * the ETD has self disabled but the ETDDONESTAT flag is not set
 * (and hence no interrupt occurs).
 * This causes the transfer in question to hang.
 * The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait)
 *
 * With a single active transfer the usbtest test suite will run for days
 * without the kludge.
 * With other bus activity (e.g. mass storage) even just test1 will hang
 * without the kludge.
 */
			u32 dword0;
			int cc;

			if (etd->active_count && !enabled) /* suspicious... */
				enable_sof_int = 1;

			if (!sof || enabled || !etd->active_count)
				continue;

			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
			if (cc == TD_NOTACCESSED)
				continue;

			if (++etd->active_count < 10)
				continue;

			dword0 = etd_readl(imx21, etd_num, 0);
			dev_dbg(imx21->dev,
				"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
				etd_num, dword0 & 0x7F,
				(dword0 >> DW0_ENDPNT) & 0x0F,
				cc);

#ifdef DEBUG
			dev_dbg(imx21->dev,
				"frame: act=%d disact=%d"
				" int=%d req=%d cur=%d\n",
				etd->activated_frame,
				etd->disactivated_frame,
				etd->last_int_frame,
				etd->last_req_frame,
				readl(imx21->regs + USBH_FRMNUB));
			imx21->debug_unblocks++;
#endif
			etd->active_count = 0;
/* End of kludge */
		}

		if (etd->ep == NULL || etd->urb == NULL) {
			dev_dbg(imx21->dev,
				"Interrupt for unexpected etd %d"
				" ep=%p urb=%p\n",
				etd_num, etd->ep, etd->urb);
			disactivate_etd(imx21, etd_num);
			continue;
		}

		if (usb_pipeisoc(etd->urb->pipe))
			isoc_etd_done(hcd, etd->urb, etd_num);
		else
			nonisoc_etd_done(hcd, etd->urb, etd_num);
	}

	/* only enable SOF interrupt if it may be needed for the kludge */
	if (enable_sof_int)
		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
	else
		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);

	spin_unlock_irqrestore(&imx21->lock, flags);
}

static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	u32 ints = readl(imx21->regs + USBH_SYSISR);

	if (ints & USBH_SYSIEN_HERRINT)
		dev_dbg(imx21->dev, "Scheduling error\n");

	if (ints & USBH_SYSIEN_SORINT)
		dev_dbg(imx21->dev, "Scheduling overrun\n");

	if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

	writel(ints, imx21->regs + USBH_SYSISR);
	return IRQ_HANDLED;
}

static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
				      struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct ep_priv *ep_priv;
	int i;

	if (ep == NULL)
		return;

	spin_lock_irqsave(&imx21->lock, flags);
	ep_priv = ep->hcpriv;
	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

	if (!list_empty(&ep->urb_list))
		dev_dbg(imx21->dev, "ep's URB list is not empty\n");

	if (ep_priv != NULL) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			if (ep_priv->etd[i] > -1)
				dev_dbg(imx21->dev, "free etd %d for disable\n",
					ep_priv->etd[i]);

			free_etd(imx21, ep_priv->etd[i]);
		}
		kfree(ep_priv);
		ep->hcpriv = NULL;
	}

	for (i = 0; i < USB_NUM_ETD; i++) {
		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
			dev_err(imx21->dev,
				"Active etd %d for disabled ep=%p!\n", i, ep);
			free_etd(imx21, i);
		}
	}
	free_epdmem(imx21, ep);
	spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Hub handling */
/* =========================================== */

static int get_hub_descriptor(struct usb_hcd *hcd,
			      struct usb_hub_descriptor *desc)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	desc->bDescriptorType = 0x29;	/* HUB descriptor */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	desc->bDescLength = 9;
	desc->bPwrOn2PwrGood = 0;
	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
		0x0002 |	/* No power switching */
		0x0010 |	/* No over current protection */
		0);

	desc->bitmap[0] = 1 << 1;
	desc->bitmap[1] = ~0;
	return 0;
}

static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int ports;
	int changed = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);
	ports = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	if (ports > 7) {
		dev_err(imx21->dev, "ports %d > 7\n", ports);
		ports = 7;
	}
	for (i = 0; i < ports; i++) {
		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
			(USBH_PORTSTAT_CONNECTSC |
			 USBH_PORTSTAT_PRTENBLSC |
			 USBH_PORTSTAT_PRTSTATSC |
			 USBH_PORTSTAT_OVRCURIC |
			 USBH_PORTSTAT_PRTRSTSC)) {

			changed = 1;
			buf[0] |= 1 << (i + 1);
		}
	}
	spin_unlock_irqrestore(&imx21->lock, flags);

	if (changed)
		dev_info(imx21->dev, "Hub status changed\n");
	return changed;
}

static int imx21_hc_hub_control(struct usb_hcd *hcd,
				u16 typeReq,
				u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int rc = 0;
	u32 status_write = 0;

	switch (typeReq) {
	case ClearHubFeature:
		dev_dbg(imx21->dev, "ClearHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, " OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, " LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case ClearPortFeature:
		dev_dbg(imx21->dev, "ClearPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(imx21->dev, " ENABLE\n");
			status_write = USBH_PORTSTAT_CURCONST;
			break;
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, " SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTOVRCURI;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, " POWER\n");
			status_write = USBH_PORTSTAT_LSDEVCON;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			dev_dbg(imx21->dev, " C_ENABLE\n");
			status_write = USBH_PORTSTAT_PRTENBLSC;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			dev_dbg(imx21->dev, " C_SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSTATSC;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			dev_dbg(imx21->dev, " C_CONNECTION\n");
			status_write = USBH_PORTSTAT_CONNECTSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
			status_write = USBH_PORTSTAT_OVRCURIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			dev_dbg(imx21->dev, " C_RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTSC;
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}

		break;

	case GetHubDescriptor:
		dev_dbg(imx21->dev, "GetHubDescriptor\n");
		rc = get_hub_descriptor(hcd, (void *)buf);
		break;

	case GetHubStatus:
		dev_dbg(imx21->dev, " GetHubStatus\n");
		*(__le32 *) buf = 0;
		break;

	case GetPortStatus:
		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
			wIndex, USBH_PORTSTAT(wIndex - 1));
		*(__le32 *) buf = readl(imx21->regs +
			USBH_PORTSTAT(wIndex - 1));
		break;

	case SetHubFeature:
		dev_dbg(imx21->dev, "SetHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, " OVER_CURRENT\n");
			break;

		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, " LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}

		break;

	case SetPortFeature:
		dev_dbg(imx21->dev, "SetPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, " SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSUSPST;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, " POWER\n");
			status_write = USBH_PORTSTAT_PRTPWRST;
			break;
		case USB_PORT_FEAT_RESET:
			dev_dbg(imx21->dev, " RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTST;
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	default:
		dev_dbg(imx21->dev, " unknown\n");
		rc = -EINVAL;
		break;
	}

	if (status_write)
		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
	return rc;
}

/* =========================================== */
/* Host controller management */
/* =========================================== */

static int imx21_hc_reset(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long timeout;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	/* Reset the Host controller modules */
	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
		imx21->regs + USBOTG_RST_CTRL);

	/* Wait for reset to finish */
	timeout = jiffies + HZ;
	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
		if (time_after(jiffies, timeout)) {
			spin_unlock_irqrestore(&imx21->lock, flags);
			dev_err(imx21->dev, "timeout waiting for reset\n");
			return -ETIMEDOUT;
		}
		spin_unlock_irq(&imx21->lock);
		schedule_timeout_uninterruptible(1);
		spin_lock_irq(&imx21->lock);
	}
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;
}

static int __devinit imx21_hc_start(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	int i, j;
	u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
	u32 usb_control = 0;

	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
			USBOTG_HWMODE_HOSTXCVR_MASK);
	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
			USBOTG_HWMODE_OTGXCVR_MASK);

	if (imx21->pdata->host1_txenoe)
		usb_control |= USBCTRL_HOST1_TXEN_OE;

	if (!imx21->pdata->host1_xcverless)
		usb_control |= USBCTRL_HOST1_BYP_TLL;

	if (imx21->pdata->otg_ext_xcvr)
		usb_control |= USBCTRL_OTC_RCV_RXDP;

	spin_lock_irqsave(&imx21->lock, flags);

	writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
		imx21->regs + USBOTG_CLK_CTRL);
	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
	writel(usb_control, imx21->regs + USBCTRL);
	writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
		imx21->regs + USB_MISCCONTROL);

	/* Clear the ETDs */
	for (i = 0; i < USB_NUM_ETD; i++)
		for (j = 0; j < 4; j++)
			etd_writel(imx21, i, j, 0);

	/* Take the HC out of reset */
	writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
		imx21->regs + USBH_HOST_CTRL);

	/* Enable ports */
	if (imx21->pdata->enable_otg_host)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(0));

	if (imx21->pdata->enable_host1)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(1));

	if (imx21->pdata->enable_host2)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(2));

	hcd->state = HC_STATE_RUNNING;

	/* Enable host controller interrupts */
	set_register_bits(imx21, USBH_SYSIEN,
		USBH_SYSIEN_HERRINT |
		USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
	set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}

static void imx21_hc_stop(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	writel(0, imx21->regs + USBH_SYSIEN);
	clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
	clear_register_bits(imx21, USBOTG_CLK_CTRL,
		USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
	spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Driver glue */
/* =========================================== */

static struct hc_driver imx21_hc_driver = {
	.description = hcd_name,
	.product_desc = "IMX21 USB Host Controller",
	.hcd_priv_size = sizeof(struct imx21),

	.flags = HCD_USB11,
	.irq = imx21_irq,

	.reset = imx21_hc_reset,
	.start = imx21_hc_start,
	.stop = imx21_hc_stop,

	/* I/O requests */
	.urb_enqueue = imx21_hc_urb_enqueue,
	.urb_dequeue = imx21_hc_urb_dequeue,
	.endpoint_disable = imx21_hc_endpoint_disable,

	/* scheduling support */
	.get_frame_number = imx21_hc_get_frame,

	/* Root hub support */
	.hub_status_data = imx21_hc_hub_status_data,
	.hub_control = imx21_hc_hub_control,

};

static struct mx21_usbh_platform_data default_pdata = {
	.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.enable_host1 = 1,
	.enable_host2 = 1,
	.enable_otg_host = 1,

};

static int imx21_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	remove_debug_files(imx21);
	usb_remove_hcd(hcd);

	if (res != NULL) {
		clk_disable(imx21->clk);
		clk_put(imx21->clk);
		iounmap(imx21->regs);
		release_mem_region(res->start, resource_size(res));
	}

	usb_put_hcd(hcd);
	return 0;
}


static int imx21_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct imx21 *imx21;
	struct resource *res;
	int ret;
	int irq;

	printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	hcd = usb_create_hcd(&imx21_hc_driver,
		&pdev->dev, dev_name(&pdev->dev));
	if (hcd == NULL) {
		dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
			dev_name(&pdev->dev));
		return -ENOMEM;
	}

	imx21 = hcd_to_imx21(hcd);
	imx21->dev = &pdev->dev;
	imx21->pdata = pdev->dev.platform_data;
	if (!imx21->pdata)
		imx21->pdata = &default_pdata;

	spin_lock_init(&imx21->lock);
	INIT_LIST_HEAD(&imx21->dmem_list);
	INIT_LIST_HEAD(&imx21->queue_for_etd);
	INIT_LIST_HEAD(&imx21->queue_for_dmem);
	create_debug_files(imx21);

	res = request_mem_region(res->start, resource_size(res), hcd_name);
	if (!res) {
		ret = -EBUSY;
		goto failed_request_mem;
	}

	imx21->regs = ioremap(res->start, resource_size(res));
	if (imx21->regs == NULL) {
		dev_err(imx21->dev, "Cannot map registers\n");
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	/* Enable clocks source */
	imx21->clk = clk_get(imx21->dev, NULL);
	if (IS_ERR(imx21->clk)) {
		dev_err(imx21->dev, "no clock found\n");
		ret = PTR_ERR(imx21->clk);
		goto failed_clock_get;
	}

	ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
	if (ret)
		goto failed_clock_set;
	ret = clk_enable(imx21->clk);
	if (ret)
		goto failed_clock_enable;

	dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
		(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);

	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
	if (ret != 0) {
		dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
		goto failed_add_hcd;
	}

	return 0;

failed_add_hcd:
	clk_disable(imx21->clk);
failed_clock_enable:
failed_clock_set:
	clk_put(imx21->clk);
failed_clock_get:
	iounmap(imx21->regs);
failed_ioremap:
	release_mem_region(res->start, resource_size(res));
failed_request_mem:
	remove_debug_files(imx21);
	usb_put_hcd(hcd);
	return ret;
}

static struct platform_driver imx21_hcd_driver = {
	.driver = {
		.name = (char *)hcd_name,
	},
	.probe = imx21_probe,
	.remove = imx21_remove,
	.suspend = NULL,
	.resume = NULL,
};

static int __init imx21_hcd_init(void)
{
	return platform_driver_register(&imx21_hcd_driver);
}

static void __exit imx21_hcd_cleanup(void)
{
	platform_driver_unregister(&imx21_hcd_driver);
}

module_init(imx21_hcd_init);
module_exit(imx21_hcd_cleanup);

MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx21-hcd");