/*
 * linux/drivers/usb/gadget/pxa2xx_udc.c
 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
 *
 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
 * Copyright (C) 2003 Robert Schwebel, Pengutronix
 * Copyright (C) 2003 Benedikt Spranger, Pengutronix
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003 Joshua Wise
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#undef	DEBUG
// #define	VERBOSE	DBG_VERBOSE

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

#include <asm/byteorder.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <asm/unaligned.h>
#include <asm/hardware.h>
#ifdef CONFIG_ARCH_PXA
#include <asm/arch/pxa-regs.h>
#endif

#include <linux/usb_ch9.h>
#include <linux/usb_gadget.h>

#include <asm/arch/udc.h>


/*
 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
 * series processors.  The UDC for the IXP 4xx series is very similar.
 * There are fifteen endpoints, in addition to ep0.
 *
 * Such controller drivers work with a gadget driver.  The gadget driver
 * returns descriptors, implements configuration and data protocols used
 * by the host to interact with this device, and allocates endpoints to
 * the different protocol interfaces.  The controller driver virtualizes
 * usb hardware so that the gadget drivers will be more portable.
 *
 * This UDC hardware wants to implement a bit too much USB protocol, so
 * it constrains the sorts of USB configuration change events that work.
 * The errata for these chips are misleading; some "fixed" bugs from
 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
 */

#define	DRIVER_VERSION	"4-May-2005"
#define	DRIVER_DESC	"PXA 25x USB Device Controller driver"


static const char driver_name [] = "pxa2xx_udc";

static const char ep0name [] = "ep0";


// #define	USE_DMA
// #define	USE_OUT_DMA
// #define	DISABLE_TEST_MODE

#ifdef CONFIG_ARCH_IXP4XX
#undef USE_DMA

/* cpu-specific register addresses are compiled in to this code */
#ifdef CONFIG_ARCH_PXA
#error "Can't configure both IXP and PXA"
#endif

#endif

#include "pxa2xx_udc.h"


#ifdef	USE_DMA
static int use_dma = 1;
module_param(use_dma, bool, 0);
MODULE_PARM_DESC (use_dma, "true to use dma");

static void dma_nodesc_handler (int dmach, void *_ep, struct pt_regs *r);
static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);

#ifdef USE_OUT_DMA
#define	DMASTR " (dma support)"
#else
#define	DMASTR " (dma in)"
#endif

#else	/* !USE_DMA */
#define	DMASTR " (pio only)"
#undef	USE_OUT_DMA
#endif

#ifdef	CONFIG_USB_PXA2XX_SMALL
#define SIZE_STR	" (small)"
#else
#define SIZE_STR	""
#endif

#ifdef DISABLE_TEST_MODE
/* (mode == 0) == no undocumented chip tweaks
 * (mode & 1)  == double buffer bulk IN
 * (mode & 2)  == double buffer bulk OUT
 * ... so mode = 3 (or 7, 15, etc) does it for both
 */
static ushort fifo_mode = 0;
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
#endif
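
/* A usage sketch only: both knobs above exist just in non-default builds.
 * "use_dma" is compiled in only when USE_DMA is defined, and "fifo_mode"
 * only when DISABLE_TEST_MODE is defined; loading would then look like
 *
 *	modprobe pxa2xx_udc use_dma=1 fifo_mode=3
 *
 * (hypothetical invocation; the parameter names match the module_param()
 * declarations above, and fifo_mode=3 requests double buffering for both
 * bulk IN and OUT per the bit definitions documented there).
 */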

/* ---------------------------------------------------------------------------
 * 	endpoint related parts of the api to the usb controller hardware,
 *	used by gadget driver; and the inner talker-to-hardware core.
 * ---------------------------------------------------------------------------
 */

static void pxa2xx_ep_fifo_flush (struct usb_ep *ep);
static void nuke (struct pxa2xx_ep *, int status);

static void pio_irq_enable(int bEndpointAddress)
{
	bEndpointAddress &= 0xf;
	if (bEndpointAddress < 8)
		UICR0 &= ~(1 << bEndpointAddress);
	else {
		bEndpointAddress -= 8;
		UICR1 &= ~(1 << bEndpointAddress);
	}
}

static void pio_irq_disable(int bEndpointAddress)
{
	bEndpointAddress &= 0xf;
	if (bEndpointAddress < 8)
		UICR0 |= 1 << bEndpointAddress;
	else {
		bEndpointAddress -= 8;
		UICR1 |= 1 << bEndpointAddress;
	}
}

/* The UDCCR reg contains mask and interrupt status bits,
 * so using '|=' isn't safe as it may ack an interrupt.
 */
#define UDCCR_MASK_BITS		(UDCCR_REM | UDCCR_SRM | UDCCR_UDE)

static inline void udc_set_mask_UDCCR(int mask)
{
	UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
}

static inline void udc_clear_mask_UDCCR(int mask)
{
	UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
}

static inline void udc_ack_int_UDCCR(int mask)
{
	/* udccr contains the bits we don't want to change */
	__u32 udccr = UDCCR & UDCCR_MASK_BITS;

	UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
}
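
/* Typical use of these helpers, as seen later in udc_enable()/udc_disable():
 * udc_set_mask_UDCCR(UDCCR_UDE) enables the controller without acking any
 * pending status bits, and udc_ack_int_UDCCR(UDCCR_RSTIR) acks a reset
 * interrupt without disturbing the REM/SRM/UDE mask bits.
 */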

/*
 * endpoint enable/disable
 *
 * we need to verify the descriptors used to enable endpoints.  since pxa2xx
 * endpoint configurations are fixed, and are pretty much always enabled,
 * there's not a lot to manage here.
 *
 * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints,
 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
 * for a single interface (with only the default altsetting) and for gadget
 * drivers that don't halt endpoints (not reset by set_interface).  that also
 * means that if you use ISO, you must violate the USB spec rule that all
 * iso endpoints must be in non-default altsettings.
 */
static int pxa2xx_ep_enable (struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct pxa2xx_ep	*ep;
	struct pxa2xx_udc	*dev;

	ep = container_of (_ep, struct pxa2xx_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| ep->bEndpointAddress != desc->bEndpointAddress
			|| ep->fifo_size < le16_to_cpu
						(desc->wMaxPacketSize)) {
		DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
		return -EINVAL;
	}

	/* xfer types must match, except that interrupt ~= bulk */
	if (ep->bmAttributes != desc->bmAttributes
			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
		DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
		return -EINVAL;
	}

	/* hardware _could_ do smaller, but driver doesn't */
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
				&& le16_to_cpu (desc->wMaxPacketSize)
						!= BULK_FIFO_SIZE)
			|| !desc->wMaxPacketSize) {
		DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
		return -ERANGE;
	}

	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		DMSG("%s, bogus device state\n", __FUNCTION__);
		return -ESHUTDOWN;
	}

	ep->desc = desc;
	ep->dma = -1;
	ep->stopped = 0;
	ep->pio_irqs = ep->dma_irqs = 0;
	ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);

	/* flush fifo (mostly for OUT buffers) */
	pxa2xx_ep_fifo_flush (_ep);

	/* ... reset halt state too, if we could ... */

#ifdef	USE_DMA
	/* for (some) bulk and ISO endpoints, try to get a DMA channel and
	 * bind it to the endpoint.  otherwise use PIO.
	 */
	switch (ep->bmAttributes) {
	case USB_ENDPOINT_XFER_ISOC:
		if (le16_to_cpu(desc->wMaxPacketSize) % 32)
			break;
		// fall through
	case USB_ENDPOINT_XFER_BULK:
		if (!use_dma || !ep->reg_drcmr)
			break;
		ep->dma = pxa_request_dma ((char *)_ep->name,
				(le16_to_cpu (desc->wMaxPacketSize) > 64)
					? DMA_PRIO_MEDIUM /* some iso */
					: DMA_PRIO_LOW,
				dma_nodesc_handler, ep);
		if (ep->dma >= 0) {
			*ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
			DMSG("%s using dma%d\n", _ep->name, ep->dma);
		}
	}
#endif

	DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
	return 0;
}

static int pxa2xx_ep_disable (struct usb_ep *_ep)
{
	struct pxa2xx_ep	*ep;
	unsigned long		flags;

	ep = container_of (_ep, struct pxa2xx_ep, ep);
	if (!_ep || !ep->desc) {
		DMSG("%s, %s not enabled\n", __FUNCTION__,
			_ep ? ep->ep.name : NULL);
		return -EINVAL;
	}
	local_irq_save(flags);

	nuke (ep, -ESHUTDOWN);

#ifdef	USE_DMA
	if (ep->dma >= 0) {
		*ep->reg_drcmr = 0;
		pxa_free_dma (ep->dma);
		ep->dma = -1;
	}
#endif

	/* flush fifo (mostly for IN buffers) */
	pxa2xx_ep_fifo_flush (_ep);

	ep->desc = NULL;
	ep->stopped = 1;

	local_irq_restore(flags);
	DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
	return 0;
}

/*-------------------------------------------------------------------------*/

/* for the pxa2xx, these can just wrap kmalloc/kfree.  gadget drivers
 * must still pass correctly initialized endpoints, since other controller
 * drivers may care about how it's currently set up (dma issues etc).
 */

/*
 *	pxa2xx_ep_alloc_request - allocate a request data structure
 */
static struct usb_request *
pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct pxa2xx_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD (&req->queue);
	return &req->req;
}


/*
 *	pxa2xx_ep_free_request - deallocate a request data structure
 */
static void
pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa2xx_request	*req;

	req = container_of (_req, struct pxa2xx_request, req);
	WARN_ON (!list_empty (&req->queue));
	kfree(req);
}


/* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
 * no device-affinity and the heap works perfectly well for i/o buffers.
 * It wastes much less memory than dma_alloc_coherent() would, and even
 * prevents cacheline (32 bytes wide) sharing problems.
 */
static void *
pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
	dma_addr_t *dma, gfp_t gfp_flags)
{
	char			*retval;

	retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
	if (retval)
#ifdef	USE_DMA
		*dma = virt_to_bus (retval);
#else
		*dma = (dma_addr_t)~0;
#endif
	return retval;
}

static void
pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
		unsigned bytes)
{
	kfree (buf);
}

/*-------------------------------------------------------------------------*/

/*
 *	done - retire a request; caller blocked irqs
 */
static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
{
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (likely (req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	if (status && status != -ESHUTDOWN)
		DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	req->req.complete(&ep->ep, &req->req);
	ep->stopped = stopped;
}


static inline void ep0_idle (struct pxa2xx_udc *dev)
{
	dev->ep0state = EP0_IDLE;
}

static int
write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
{
	u8		*buf;
	unsigned	length, count;

	buf = req->req.buf + req->req.actual;
	prefetch(buf);

	/* how big will this packet be? */
	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	count = length;
	while (likely(count--))
		*uddr = *buf++;

	return length;
}

/*
 * write to an IN endpoint fifo, as many packets as possible.
 * irqs will use this to write the rest later.
 * caller guarantees at least one packet buffer is ready (or a zlp).
 */
static int
write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	unsigned		max;

	max = le16_to_cpu(ep->desc->wMaxPacketSize);
	do {
		unsigned	count;
		int		is_last, is_short;

		count = write_packet(ep->reg_uddr, req, max);

		/* last packet is usually short (or a zlp) */
		if (unlikely (count != max))
			is_last = is_short = 1;
		else {
			if (likely(req->req.length != req->req.actual)
					|| req->req.zero)
				is_last = 0;
			else
				is_last = 1;
			/* interrupt/iso maxpacket may not fill the fifo */
			is_short = unlikely (max < ep->fifo_size);
		}

		DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
			ep->ep.name, count,
			is_last ? "/L" : "", is_short ? "/S" : "",
			req->req.length - req->req.actual, req);

		/* let loose that packet. maybe try writing another one,
		 * double buffering might work.  TSP, TPC, and TFS
		 * bit values are the same for all normal IN endpoints.
		 */
		*ep->reg_udccs = UDCCS_BI_TPC;
		if (is_short)
			*ep->reg_udccs = UDCCS_BI_TSP;

		/* requests complete when all IN data is in the FIFO */
		if (is_last) {
			done (ep, req, 0);
			if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
				pio_irq_disable (ep->bEndpointAddress);
#ifdef USE_DMA
				/* unaligned data and zlps couldn't use dma */
				if (unlikely(!list_empty(&ep->queue))) {
					req = list_entry(ep->queue.next,
						struct pxa2xx_request, queue);
					kick_dma(ep,req);
					return 0;
				}
#endif
			}
			return 1;
		}

		// TODO experiment: how robust can fifo mode tweaking be?
		// double buffering is off in the default fifo mode, which
		// prevents TFS from being set here.

	} while (*ep->reg_udccs & UDCCS_BI_TFS);
	return 0;
}

/* caller asserts req->pending (ep0 irq status nyet cleared); starts
 * ep0 data stage.  these chips want very simple state transitions.
 */
static inline
void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
{
	UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
	USIR0 = USIR0_IR0;
	dev->req_pending = 0;
	DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
		__FUNCTION__, tag, UDCCS0, flags);
}

static int
write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	unsigned	count;
	int		is_short;

	count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
	ep->dev->stats.write.bytes += count;

	/* last packet "must be" short (or a zlp) */
	is_short = (count != EP0_FIFO_SIZE);

	DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
		req->req.length - req->req.actual, req);

	if (unlikely (is_short)) {
		if (ep->dev->req_pending)
			ep0start(ep->dev, UDCCS0_IPR, "short IN");
		else
			UDCCS0 = UDCCS0_IPR;

		count = req->req.length;
		done (ep, req, 0);
		ep0_idle(ep->dev);
#ifndef CONFIG_ARCH_IXP4XX
#if 1
		/* This seems to get rid of lost status irqs in some cases:
		 * host responds quickly, or next request involves config
		 * change automagic, or should have been hidden, or ...
		 *
		 * FIXME get rid of all udelays possible...
		 */
		if (count >= EP0_FIFO_SIZE) {
			count = 100;
			do {
				if ((UDCCS0 & UDCCS0_OPR) != 0) {
					/* clear OPR, generate ack */
					UDCCS0 = UDCCS0_OPR;
					break;
				}
				count--;
				udelay(1);
			} while (count);
		}
#endif
#endif
	} else if (ep->dev->req_pending)
		ep0start(ep->dev, 0, "IN");
	return is_short;
}


/*
 * read_fifo -  unload packet(s) from the fifo we use for usb OUT
 * transfers and put them into the request.  caller should have made
 * sure there's at least one packet ready.
 *
 * returns true if the request completed because of short packet or the
 * request buffer having filled (and maybe overran till end-of-packet).
 */
static int
read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	for (;;) {
		u32		udccs;
		u8		*buf;
		unsigned	bufferspace, count, is_short;

		/* make sure there's a packet in the FIFO.
		 * UDCCS_{BO,IO}_RPC are all the same bit value.
		 * UDCCS_{BO,IO}_RNE are all the same bit value.
		 */
		udccs = *ep->reg_udccs;
		if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
			break;
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);
		bufferspace = req->req.length - req->req.actual;

		/* read all bytes from this packet */
		if (likely (udccs & UDCCS_BO_RNE)) {
			count = 1 + (0x0ff & *ep->reg_ubcr);
			req->req.actual += min (count, bufferspace);
		} else /* zlp */
			count = 0;
		is_short = (count < ep->ep.maxpacket);
		DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
			ep->ep.name, udccs, count,
			is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
		while (likely (count-- != 0)) {
			u8	byte = (u8) *ep->reg_uddr;

			if (unlikely (bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data.
				 */
				if (req->req.status != -EOVERFLOW)
					DMSG("%s overflow %d\n",
						ep->ep.name, count);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}
		*ep->reg_udccs =  UDCCS_BO_RPC;
		/* RPC/RSP/RNE could now reflect the other packet buffer */

		/* iso is one request per packet */
		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
			if (udccs & UDCCS_IO_ROF)
				req->req.status = -EHOSTUNREACH;
			/* more like "is_done" */
			is_short = 1;
		}

		/* completion */
		if (is_short || req->req.actual == req->req.length) {
			done (ep, req, 0);
			if (list_empty(&ep->queue))
				pio_irq_disable (ep->bEndpointAddress);
			return 1;
		}

		/* finished that packet.  the next one may be waiting... */
	}
	return 0;
}

/*
 * special ep0 version of the above.  no UBCR0 or double buffering; status
 * handshaking is magic.  most device protocols don't need control-OUT.
 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
 * protocols do use them.
 */
static int
read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	u8		*buf, byte;
	unsigned	bufferspace;

	buf = req->req.buf + req->req.actual;
	bufferspace = req->req.length - req->req.actual;

	while (UDCCS0 & UDCCS0_RNE) {
		byte = (u8) UDDR0;

		if (unlikely (bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				DMSG("%s overflow\n", ep->ep.name);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			req->req.actual++;
			bufferspace--;
		}
	}

	UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;

	/* completion */
	if (req->req.actual >= req->req.length)
		return 1;

	/* finished that packet.  the next one may be waiting... */
	return 0;
}

#ifdef	USE_DMA

#define	MAX_IN_DMA	((DCMD_LENGTH + 1) - BULK_FIFO_SIZE)

static void
start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
{
	u32	dcmd = req->req.length;
	u32	buf = req->req.dma;
	u32	fifo = io_v2p ((u32)ep->reg_uddr);

	/* caller guarantees there's a packet or more remaining
	 *  - IN may end with a short packet (TSP set separately),
	 *  - OUT is always full length
	 */
	buf += req->req.actual;
	dcmd -= req->req.actual;
	ep->dma_fixup = 0;

	/* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
	DCSR(ep->dma) = DCSR_NODESC;
	if (is_in) {
		DSADR(ep->dma) = buf;
		DTADR(ep->dma) = fifo;
		if (dcmd > MAX_IN_DMA)
			dcmd = MAX_IN_DMA;
		else
			ep->dma_fixup = (dcmd % ep->ep.maxpacket) != 0;
		dcmd |= DCMD_BURST32 | DCMD_WIDTH1
			| DCMD_FLOWTRG | DCMD_INCSRCADDR;
	} else {
#ifdef USE_OUT_DMA
		DSADR(ep->dma) = fifo;
		DTADR(ep->dma) = buf;
		if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
			dcmd = ep->ep.maxpacket;
		dcmd |= DCMD_BURST32 | DCMD_WIDTH1
			| DCMD_FLOWSRC | DCMD_INCTRGADDR;
#endif
	}
	DCMD(ep->dma) = dcmd;
	DCSR(ep->dma) = DCSR_RUN | DCSR_NODESC
		| (unlikely(is_in)
			? DCSR_STOPIRQEN	/* use dma_nodesc_handler() */
			: 0);			/* use handle_ep() */
}

static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	int	is_in = ep->bEndpointAddress & USB_DIR_IN;

	if (is_in) {
		/* unaligned tx buffers and zlps only work with PIO */
		if ((req->req.dma & 0x0f) != 0
				|| unlikely((req->req.length - req->req.actual)
						== 0)) {
			pio_irq_enable(ep->bEndpointAddress);
			if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
				(void) write_fifo(ep, req);
		} else {
			start_dma_nodesc(ep, req, USB_DIR_IN);
		}
	} else {
		if ((req->req.length - req->req.actual) < ep->ep.maxpacket) {
			DMSG("%s short dma read...\n", ep->ep.name);
			/* we're always set up for pio out */
			read_fifo (ep, req);
		} else {
			*ep->reg_udccs = UDCCS_BO_DME
				| (*ep->reg_udccs & UDCCS_BO_FST);
			start_dma_nodesc(ep, req, USB_DIR_OUT);
		}
	}
}

static void cancel_dma(struct pxa2xx_ep *ep)
{
	struct pxa2xx_request	*req;
	u32			tmp;

	if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
		return;

	DCSR(ep->dma) = 0;
	while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
		cpu_relax();

	req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
	tmp = DCMD(ep->dma) & DCMD_LENGTH;
	req->req.actual = req->req.length - (tmp & DCMD_LENGTH);

	/* the last tx packet may be incomplete, so flush the fifo.
	 * FIXME correct req.actual if we can
	 */
	if (ep->bEndpointAddress & USB_DIR_IN)
		*ep->reg_udccs = UDCCS_BI_FTF;
}

/* dma channel stopped ... normal tx end (IN), or on error (IN/OUT) */
static void dma_nodesc_handler(int dmach, void *_ep, struct pt_regs *r)
{
	struct pxa2xx_ep	*ep = _ep;
	struct pxa2xx_request	*req;
	u32			tmp, completed;

	local_irq_disable();

	req = list_entry(ep->queue.next, struct pxa2xx_request, queue);

	ep->dma_irqs++;
	ep->dev->stats.irqs++;
	HEX_DISPLAY(ep->dev->stats.irqs);

	/* ack/clear */
	tmp = DCSR(ep->dma);
	DCSR(ep->dma) = tmp;
	if ((tmp & DCSR_STOPSTATE) == 0
			|| (DDADR(ep->dma) & DDADR_STOP) != 0) {
		DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
			ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
		goto done;
	}
	DCSR(ep->dma) = 0;	/* clear DCSR_STOPSTATE */

	/* update transfer status */
	completed = tmp & DCSR_BUSERR;
	if (ep->bEndpointAddress & USB_DIR_IN)
		tmp = DSADR(ep->dma);
	else
		tmp = DTADR(ep->dma);
	req->req.actual = tmp - req->req.dma;

	/* FIXME seems we sometimes see partial transfers... */

	if (unlikely(completed != 0))
		req->req.status = -EIO;
	else if (req->req.actual) {
		/* these registers have zeroes in low bits; they miscount
		 * some (end-of-transfer) short packets: tx 14 as tx 12
		 */
		if (ep->dma_fixup)
			req->req.actual = min(req->req.actual + 3,
						req->req.length);

		tmp = (req->req.length - req->req.actual);
		completed = (tmp == 0);
		if (completed && (ep->bEndpointAddress & USB_DIR_IN)) {

			/* maybe validate final short packet ... */
			if ((req->req.actual % ep->ep.maxpacket) != 0)
				*ep->reg_udccs = UDCCS_BI_TSP/*|UDCCS_BI_TPC*/;

			/* ... or zlp, using pio fallback */
			else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
					&& req->req.zero) {
				DMSG("%s zlp terminate ...\n", ep->ep.name);
				completed = 0;
			}
		}
	}

	if (likely(completed)) {
		done(ep, req, 0);

		/* maybe re-activate after completion */
		if (ep->stopped || list_empty(&ep->queue))
			goto done;
		req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
	}
	kick_dma(ep, req);
done:
	local_irq_enable();
}

#endif

/*-------------------------------------------------------------------------*/

static int
pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct pxa2xx_request	*req;
	struct pxa2xx_ep	*ep;
	struct pxa2xx_udc	*dev;
	unsigned long		flags;

	req = container_of(_req, struct pxa2xx_request, req);
	if (unlikely (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))) {
		DMSG("%s, bad params\n", __FUNCTION__);
		return -EINVAL;
	}

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return -EINVAL;
	}

	dev = ep->dev;
	if (unlikely (!dev->driver
			|| dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		DMSG("%s, bogus device state\n", __FUNCTION__);
		return -ESHUTDOWN;
	}

	/* iso is always one packet per request, that's the only way
	 * we can report per-packet status.  that also helps with dma.
	 */
	if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
			&& req->req.length > le16_to_cpu
						(ep->desc->wMaxPacketSize)))
		return -EMSGSIZE;

#ifdef	USE_DMA
	// FIXME caller may already have done the dma mapping
	if (ep->dma >= 0) {
		_req->dma = dma_map_single(dev->dev,
			_req->buf, _req->length,
			((ep->bEndpointAddress & USB_DIR_IN) != 0)
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
#endif

	DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
		_ep->name, _req, _req->length, _req->buf);

	local_irq_save(flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		if (ep->desc == 0 /* ep0 */) {
			unsigned	length = _req->length;

			switch (dev->ep0state) {
			case EP0_IN_DATA_PHASE:
				dev->stats.write.ops++;
				if (write_ep0_fifo(ep, req))
					req = NULL;
				break;

			case EP0_OUT_DATA_PHASE:
				dev->stats.read.ops++;
				/* messy ... */
				if (dev->req_config) {
					DBG(DBG_VERBOSE, "ep0 config ack%s\n",
						dev->has_cfr ?  "" : " raced");
					if (dev->has_cfr)
						UDCCFR = UDCCFR_AREN|UDCCFR_ACM
							|UDCCFR_MB1;
					done(ep, req, 0);
					dev->ep0state = EP0_END_XFER;
					local_irq_restore (flags);
					return 0;
				}
				if (dev->req_pending)
					ep0start(dev, UDCCS0_IPR, "OUT");
				if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
						&& read_ep0_fifo(ep, req))) {
					ep0_idle(dev);
					done(ep, req, 0);
					req = NULL;
				}
				break;

			default:
				DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
				local_irq_restore (flags);
				return -EL2HLT;
			}
#ifdef	USE_DMA
		/* either start dma or prime pio pump */
		} else if (ep->dma >= 0) {
			kick_dma(ep, req);
#endif
		/* can the FIFO satisfy the request immediately? */
		} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
			if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
					&& write_fifo(ep, req))
				req = NULL;
		} else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
				&& read_fifo(ep, req)) {
			req = NULL;
		}

		if (likely (req && ep->desc) && ep->dma < 0)
			pio_irq_enable(ep->bEndpointAddress);
	}

	/* pio or dma irq handler advances the queue. */
	if (likely (req != 0))
		list_add_tail(&req->queue, &ep->queue);
	local_irq_restore(flags);

	return 0;
}


/*
 *	nuke - dequeue ALL requests
 */
static void nuke(struct pxa2xx_ep *ep, int status)
{
	struct pxa2xx_request *req;

	/* called with irqs blocked */
#ifdef	USE_DMA
	if (ep->dma >= 0 && !ep->stopped)
		cancel_dma(ep);
#endif
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct pxa2xx_request,
				queue);
		done(ep, req, status);
	}
	if (ep->desc)
		pio_irq_disable (ep->bEndpointAddress);
}


/* dequeue JUST ONE request */
static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa2xx_ep	*ep;
	struct pxa2xx_request	*req;
	unsigned long		flags;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (!_ep || ep->ep.name == ep0name)
		return -EINVAL;

	local_irq_save(flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		local_irq_restore(flags);
		return -EINVAL;
	}

#ifdef	USE_DMA
	if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
		cancel_dma(ep);
		done(ep, req, -ECONNRESET);
		/* restart i/o */
		if (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct pxa2xx_request, queue);
			kick_dma(ep, req);
		}
	} else
#endif
		done(ep, req, -ECONNRESET);

	local_irq_restore(flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct pxa2xx_ep	*ep;
	unsigned long		flags;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (unlikely (!_ep
			|| (!ep->desc && ep->ep.name != ep0name))
			|| ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return -EINVAL;
	}
	if (value == 0) {
		/* this path (reset toggle+halt) is needed to implement
		 * SET_INTERFACE on normal hardware.  but it can't be
		 * done from software on the PXA UDC, and the hardware
		 * forgets to do it as part of SET_INTERFACE automagic.
		 */
		DMSG("only host can clear %s halt\n", _ep->name);
		return -EROFS;
	}

	local_irq_save(flags);

	if ((ep->bEndpointAddress & USB_DIR_IN) != 0
			&& ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
			   || !list_empty(&ep->queue))) {
		local_irq_restore(flags);
		return -EAGAIN;
	}

	/* FST bit is the same for control, bulk in, bulk out, interrupt in */
	*ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;

	/* ep0 needs special care */
	if (!ep->desc) {
		start_watchdog(ep->dev);
		ep->dev->req_pending = 0;
		ep->dev->ep0state = EP0_STALL;

	/* and bulk/intr endpoints like dropping stalls too */
	} else {
		unsigned i;
		for (i = 0; i < 1000; i += 20) {
			if (*ep->reg_udccs & UDCCS_BI_SST)
				break;
			udelay(20);
		}
	}
	local_irq_restore(flags);

	DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
	return 0;
}

static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
{
	struct pxa2xx_ep	*ep;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (!_ep) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return -ENODEV;
	}
	/* pxa can't report unclaimed bytes from IN fifos */
	if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
		return -EOPNOTSUPP;
	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
			|| (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
		return 0;
	else
		return (*ep->reg_ubcr & 0xfff) + 1;
}

static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
{
	struct pxa2xx_ep	*ep;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return;
	}

	/* toggle and halt bits stay unchanged */

	/* for OUT, just read and discard the FIFO contents. */
	if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
		while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
			(void) *ep->reg_uddr;
		return;
	}

	/* most IN status is the same, but ISO can't stall */
	*ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
		| ((ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
			? 0 : UDCCS_BI_SST);
}


static struct usb_ep_ops pxa2xx_ep_ops = {
	.enable		= pxa2xx_ep_enable,
	.disable	= pxa2xx_ep_disable,

	.alloc_request	= pxa2xx_ep_alloc_request,
	.free_request	= pxa2xx_ep_free_request,

	.alloc_buffer	= pxa2xx_ep_alloc_buffer,
	.free_buffer	= pxa2xx_ep_free_buffer,

	.queue		= pxa2xx_ep_queue,
	.dequeue	= pxa2xx_ep_dequeue,

	.set_halt	= pxa2xx_ep_set_halt,
	.fifo_status	= pxa2xx_ep_fifo_status,
	.fifo_flush	= pxa2xx_ep_fifo_flush,
};


/* ---------------------------------------------------------------------------
 * 	device-scoped parts of the api to the usb controller hardware
 * ---------------------------------------------------------------------------
 */

static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget)
{
	return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
}

static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
{
	/* host may not have enabled remote wakeup */
	if ((UDCCS0 & UDCCS0_DRWF) == 0)
		return -EHOSTUNREACH;
	udc_set_mask_UDCCR(UDCCR_RSM);
	return 0;
}

static void stop_activity(struct pxa2xx_udc *, struct usb_gadget_driver *);
static void udc_enable (struct pxa2xx_udc *);
static void udc_disable(struct pxa2xx_udc *);

/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
 * in active use.
 */
static int pullup(struct pxa2xx_udc *udc, int is_active)
{
	is_active = is_active && udc->vbus && udc->pullup;
	DMSG("%s\n", is_active ? "active" : "inactive");
	if (is_active)
		udc_enable(udc);
	else {
		if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
			DMSG("disconnect %s\n", udc->driver
				? udc->driver->driver.name
				: "(no driver)");
			stop_activity(udc, udc->driver);
		}
		udc_disable(udc);
	}
	return 0;
}

/* VBUS reporting logically comes from a transceiver */
static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct pxa2xx_udc	*udc;

	udc = container_of(_gadget, struct pxa2xx_udc, gadget);
	udc->vbus = is_active = (is_active != 0);
	DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
	pullup(udc, is_active);
	return 0;
}

/* drivers may have software control over D+ pullup */
static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
{
	struct pxa2xx_udc	*udc;

	udc = container_of(_gadget, struct pxa2xx_udc, gadget);

	/* not all boards support pullup control */
	if (!udc->mach->udc_command)
		return -EOPNOTSUPP;

	is_active = (is_active != 0);
	udc->pullup = is_active;
	pullup(udc, is_active);
	return 0;
}

static const struct usb_gadget_ops pxa2xx_udc_ops = {
	.get_frame	= pxa2xx_udc_get_frame,
	.wakeup		= pxa2xx_udc_wakeup,
	.vbus_session	= pxa2xx_udc_vbus_session,
	.pullup		= pxa2xx_udc_pullup,

	// .vbus_draw ... boards may consume current from VBUS, up to
	// 100-500mA based on config.  the 500uA suspend ceiling means
	// that exclusively vbus-powered PXA designs violate USB specs.
};

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_GADGET_DEBUG_FILES

static const char proc_node_name [] = "driver/udc";

static int
udc_proc_read(char *page, char **start, off_t off, int count,
		int *eof, void *_dev)
{
	char			*buf = page;
	struct pxa2xx_udc	*dev = _dev;
	char			*next = buf;
	unsigned		size = count;
	unsigned long		flags;
	int			i, t;
	u32			tmp;

	if (off != 0)
		return 0;

	local_irq_save(flags);

	/* basic device status */
	t = scnprintf(next, size, DRIVER_DESC "\n"
		"%s version: %s\nGadget driver: %s\nHost %s\n\n",
		driver_name, DRIVER_VERSION SIZE_STR DMASTR,
		dev->driver ? dev->driver->driver.name : "(none)",
		is_vbus_present() ? "full speed" : "disconnected");
	size -= t;
	next += t;

	/* registers for device and ep0 */
	t = scnprintf(next, size,
		"uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
		UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
	size -= t;
	next += t;

	tmp = UDCCR;
	t = scnprintf(next, size,
		"udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
		(tmp & UDCCR_REM) ? " rem" : "",
		(tmp & UDCCR_RSTIR) ? " rstir" : "",
		(tmp & UDCCR_SRM) ? " srm" : "",
		(tmp & UDCCR_SUSIR) ? " susir" : "",
		(tmp & UDCCR_RESIR) ? " resir" : "",
		(tmp & UDCCR_RSM) ? " rsm" : "",
		(tmp & UDCCR_UDA) ? " uda" : "",
		(tmp & UDCCR_UDE) ? " ude" : "");
	size -= t;
	next += t;

	tmp = UDCCS0;
	t = scnprintf(next, size,
		"udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
		(tmp & UDCCS0_SA) ? " sa" : "",
		(tmp & UDCCS0_RNE) ? " rne" : "",
		(tmp & UDCCS0_FST) ? " fst" : "",
		(tmp & UDCCS0_SST) ? " sst" : "",
		(tmp & UDCCS0_DRWF) ? " dwrf" : "",
		(tmp & UDCCS0_FTF) ? " ftf" : "",
		(tmp & UDCCS0_IPR) ? " ipr" : "",
		(tmp & UDCCS0_OPR) ? " opr" : "");
	size -= t;
	next += t;

	if (dev->has_cfr) {
		tmp = UDCCFR;
		t = scnprintf(next, size,
			"udccfr %02X =%s%s\n", tmp,
			(tmp & UDCCFR_AREN) ? " aren" : "",
			(tmp & UDCCFR_ACM) ? " acm" : "");
		size -= t;
		next += t;
	}

	if (!is_vbus_present() || !dev->driver)
		goto done;

	t = scnprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
		dev->stats.write.bytes, dev->stats.write.ops,
		dev->stats.read.bytes, dev->stats.read.ops,
		dev->stats.irqs);
	size -= t;
	next += t;

	/* dump endpoint queues */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa2xx_ep	*ep = &dev->ep [i];
		struct pxa2xx_request	*req;
		int			t;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*d;

			d = ep->desc;
			if (!d)
				continue;
			tmp = *dev->ep [i].reg_udccs;
			t = scnprintf(next, size,
				"%s max %d %s udccs %02x irqs %lu/%lu\n",
				ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
				(ep->dma >= 0) ? "dma" : "pio", tmp,
				ep->pio_irqs, ep->dma_irqs);
			/* TODO translate all five groups of udccs bits! */

		} else /* ep0 should only have one transfer queued */
			t = scnprintf(next, size, "ep0 max 16 pio irqs %lu\n",
				ep->pio_irqs);
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
#ifdef	USE_DMA
			if (ep->dma >= 0 && req->queue.prev == &ep->queue)
				t = scnprintf(next, size,
					"\treq %p len %d/%d "
					"buf %p (dma%d dcmd %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					ep->dma, DCMD(ep->dma)
					// low 13 bits == bytes-to-go
					);
			else
#endif
				t = scnprintf(next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
		}
	}

done:
	local_irq_restore(flags);
	*eof = 1;
	return count - size;
}

#define create_proc_files() \
	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
#define remove_proc_files() \
	remove_proc_entry(proc_node_name, NULL)

#else	/* !CONFIG_USB_GADGET_DEBUG_FILES */

#define create_proc_files() do {} while (0)
#define remove_proc_files() do {} while (0)

#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */

/* "function" sysfs attribute */
static ssize_t
show_function (struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct pxa2xx_udc	*dev = dev_get_drvdata (_dev);

	if (!dev->driver
			|| !dev->driver->function
			|| strlen (dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);

/*-------------------------------------------------------------------------*/

/*
 *	udc_disable - disable USB device controller
 */
static void udc_disable(struct pxa2xx_udc *dev)
{
	/* block all irqs */
	udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
	UICR0 = UICR1 = 0xff;
	UFNRH = UFNRH_SIM;

	/* if hardware supports it, disconnect from usb */
	pullup_off();

	udc_clear_mask_UDCCR(UDCCR_UDE);

#ifdef	CONFIG_ARCH_PXA
	/* Disable clock for USB device */
	pxa_set_cken(CKEN11_USB, 0);
#endif

	ep0_idle (dev);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	LED_CONNECTED_OFF;
}


/*
 *	udc_reinit - initialize software state
 */
static void udc_reinit(struct pxa2xx_udc *dev)
{
	u32	i;

	/* device/ep0 records init */
	INIT_LIST_HEAD (&dev->gadget.ep_list);
	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
	dev->ep0state = EP0_IDLE;

	/* basic endpoint records init */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa2xx_ep *ep = &dev->ep[i];

		if (i != 0)
			list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->desc = NULL;
		ep->stopped = 0;
		INIT_LIST_HEAD (&ep->queue);
		ep->pio_irqs = ep->dma_irqs = 0;
	}

	/* the rest was statically initialized, and is read-only */
}

/* until it's enabled, this UDC should be completely invisible
 * to any USB host.
 */
static void udc_enable (struct pxa2xx_udc *dev)
{
	udc_clear_mask_UDCCR(UDCCR_UDE);

#ifdef	CONFIG_ARCH_PXA
	/* Enable clock for USB device */
	pxa_set_cken(CKEN11_USB, 1);
	udelay(5);
#endif

	/* try to clear these bits before we enable the udc */
	udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);

	ep0_idle(dev);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->stats.irqs = 0;

	/*
	 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
	 * - enable UDC
	 * - if RESET is already in progress, ack interrupt
	 * - unmask reset interrupt
	 */
	udc_set_mask_UDCCR(UDCCR_UDE);
	if (!(UDCCR & UDCCR_UDA))
		udc_ack_int_UDCCR(UDCCR_RSTIR);

	if (dev->has_cfr /* UDC_RES2 is defined */) {
		/* pxa255 (a0+) can avoid a set_config race that could
		 * prevent gadget drivers from configuring correctly
		 */
		UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
	} else {
		/* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
		 * which could result in missing packets and interrupts.
		 * supposedly one bit per endpoint, controlling whether it
		 * double buffers or not; ACM/AREN bits fit into the holes.
		 * zero bits (like USIR0_IRx) disable double buffering.
		 */
		UDC_RES1 = 0x00;
		UDC_RES2 = 0x00;
	}

#ifdef	DISABLE_TEST_MODE
	/* "test mode" seems to have become the default in later chip
	 * revs, preventing double buffering (and invalidating docs).
	 * this EXPERIMENT enables it for bulk endpoints by tweaking
	 * undefined/reserved register bits (that other drivers clear).
	 * Belcarra code comments noted this usage.
	 */
	if (fifo_mode & 1) {	/* IN endpoints */
		UDC_RES1 |= USIR0_IR1|USIR0_IR6;
		UDC_RES2 |= USIR1_IR11;
	}
	if (fifo_mode & 2) {	/* OUT endpoints */
		UDC_RES1 |= USIR0_IR2|USIR0_IR7;
		UDC_RES2 |= USIR1_IR12;
	}
#endif

	/* enable suspend/resume and reset irqs */
	udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);

	/* enable ep0 irqs */
	UICR0 &= ~UICR0_IM0;

	/* if hardware supports it, pullup D+ and wait for reset */
	pullup_on();
}


/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct pxa2xx_udc	*dev = the_controller;
	int			retval;

	if (!driver
			|| driver->speed < USB_SPEED_FULL
			|| !driver->bind
			|| !driver->unbind
			|| !driver->disconnect
			|| !driver->setup)
		return -EINVAL;
	if (!dev)
		return -ENODEV;
	if (dev->driver)
		return -EBUSY;

	/* first hook up the driver ... */
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;
	dev->pullup = 1;

	device_add (&dev->gadget.dev);
	retval = driver->bind(&dev->gadget);
	if (retval) {
		DMSG("bind to driver %s --> error %d\n",
				driver->driver.name, retval);
		device_del (&dev->gadget.dev);

		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return retval;
	}
	device_create_file(dev->dev, &dev_attr_function);

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	DMSG("registered gadget driver '%s'\n", driver->driver.name);
	pullup(dev, 1);
	dump_state(dev);
	return 0;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
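
/* A minimal caller sketch (illustrative only, not a driver shipped with this
 * file): a gadget driver must populate at least the fields checked above
 * before registering, roughly
 *
 *	static struct usb_gadget_driver my_gadget = {
 *		.speed		= USB_SPEED_FULL,
 *		.bind		= my_bind,
 *		.unbind		= my_unbind,
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *		.driver		= { .name = "my_gadget" },
 *	};
 *	...
 *	status = usb_gadget_register_driver(&my_gadget);
 *
 * The my_* callbacks and names are placeholders.
 */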

static void
stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect drivers more than once */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* prevent new request submissions, kill any outstanding requests  */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa2xx_ep *ep = &dev->ep[i];

		ep->stopped = 1;
		nuke(ep, -ESHUTDOWN);
	}
	del_timer_sync(&dev->timer);

	/* report disconnect; the driver is already quiesced */
	LED_CONNECTED_OFF;
	if (driver)
		driver->disconnect(&dev->gadget);

	/* re-init driver-visible data structures */
	udc_reinit(dev);
}

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct pxa2xx_udc	*dev = the_controller;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver)
		return -EINVAL;

	local_irq_disable();
	pullup(dev, 0);
	stop_activity(dev, driver);
	local_irq_enable();

	driver->unbind(&dev->gadget);
	dev->driver = NULL;

	device_del (&dev->gadget.dev);
	device_remove_file(dev->dev, &dev_attr_function);

	DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
	dump_state(dev);
	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);


/*-------------------------------------------------------------------------*/

#ifdef CONFIG_ARCH_LUBBOCK

/* Lubbock has separate connect and disconnect irqs.  More typical designs
 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
 */

static irqreturn_t
lubbock_vbus_irq(int irq, void *_dev, struct pt_regs *r)
{
	struct pxa2xx_udc	*dev = _dev;
	int			vbus;

	dev->stats.irqs++;
	HEX_DISPLAY(dev->stats.irqs);
	switch (irq) {
	case LUBBOCK_USB_IRQ:
		LED_CONNECTED_ON;
		vbus = 1;
		disable_irq(LUBBOCK_USB_IRQ);
		enable_irq(LUBBOCK_USB_DISC_IRQ);
		break;
	case LUBBOCK_USB_DISC_IRQ:
		LED_CONNECTED_OFF;
		vbus = 0;
		disable_irq(LUBBOCK_USB_DISC_IRQ);
		enable_irq(LUBBOCK_USB_IRQ);
		break;
	default:
		return IRQ_NONE;
	}

	pxa2xx_udc_vbus_session(&dev->gadget, vbus);
	return IRQ_HANDLED;
}

#endif


/*-------------------------------------------------------------------------*/

static inline void clear_ep_state (struct pxa2xx_udc *dev)
{
	unsigned i;

	/* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
	 * fifos, and pending transactions mustn't be continued in any case.
	 */
	for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
		nuke(&dev->ep[i], -ECONNABORTED);
}

static void udc_watchdog(unsigned long _dev)
{
	struct pxa2xx_udc	*dev = (void *)_dev;

	local_irq_disable();
	if (dev->ep0state == EP0_STALL
			&& (UDCCS0 & UDCCS0_FST) == 0
			&& (UDCCS0 & UDCCS0_SST) == 0) {
		UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
		DBG(DBG_VERBOSE, "ep0 re-stall\n");
		start_watchdog(dev);
	}
	local_irq_enable();
}

static void handle_ep0 (struct pxa2xx_udc *dev)
{
	u32			udccs0 = UDCCS0;
	struct pxa2xx_ep	*ep = &dev->ep [0];
	struct pxa2xx_request	*req;
	union {
		struct usb_ctrlrequest	r;
		u8			raw [8];
		u32			word [2];
	} u;

	if (list_empty(&ep->queue))
		req = NULL;
	else
		req = list_entry(ep->queue.next, struct pxa2xx_request, queue);

	/* clear stall status */
	if (udccs0 & UDCCS0_SST) {
		nuke(ep, -EPIPE);
		UDCCS0 = UDCCS0_SST;
		del_timer(&dev->timer);
		ep0_idle(dev);
	}

	/* previous request unfinished?  non-error iff back-to-back ... */
	if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
		nuke(ep, 0);
		del_timer(&dev->timer);
		ep0_idle(dev);
	}

	switch (dev->ep0state) {
	case EP0_IDLE:
		/* late-breaking status? */
		udccs0 = UDCCS0;

		/* start control request? */
		if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
				== (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
			int i;

			nuke (ep, -EPROTO);

			/* read SETUP packet */
			for (i = 0; i < 8; i++) {
				if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
bad_setup:
					DMSG("SETUP %d!\n", i);
					goto stall;
				}
				u.raw [i] = (u8) UDDR0;
			}
			if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
				goto bad_setup;

got_setup:
			DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
				u.r.bRequestType, u.r.bRequest,
				le16_to_cpu(u.r.wValue),
				le16_to_cpu(u.r.wIndex),
				le16_to_cpu(u.r.wLength));

			/* cope with automagic for some standard requests. */
			dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
						== USB_TYPE_STANDARD;
			dev->req_config = 0;
			dev->req_pending = 1;
			switch (u.r.bRequest) {
			/* hardware restricts gadget drivers here! */
			case USB_REQ_SET_CONFIGURATION:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					/* reflect hardware's automagic
					 * up to the gadget driver.
					 */
config_change:
					dev->req_config = 1;
					clear_ep_state(dev);
					/* if !has_cfr, there's no synch
					 * else use AREN (later) not SA|OPR
					 * USIR0_IR0 acts edge sensitive
					 */
				}
				break;
			/* ... and here, even more ... */
			case USB_REQ_SET_INTERFACE:
				if (u.r.bRequestType == USB_RECIP_INTERFACE) {
					/* udc hardware is broken by design:
					 *  - altsetting may only be zero;
					 *  - hw resets all interfaces' eps;
					 *  - ep reset doesn't include halt(?).
					 */
					DMSG("broken set_interface (%d/%d)\n",
						le16_to_cpu(u.r.wIndex),
						le16_to_cpu(u.r.wValue));
					goto config_change;
				}
				break;
			/* hardware was supposed to hide this */
			case USB_REQ_SET_ADDRESS:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					ep0start(dev, 0, "address");
					return;
				}
				break;
			}

			if (u.r.bRequestType & USB_DIR_IN)
				dev->ep0state = EP0_IN_DATA_PHASE;
			else
				dev->ep0state = EP0_OUT_DATA_PHASE;

			i = dev->driver->setup(&dev->gadget, &u.r);
			if (i < 0) {
				/* hardware automagic preventing STALL... */
				if (dev->req_config) {
					/* hardware sometimes neglects to tell
					 * us about config change events,
					 * so later ones may fail...
1871 */
1872 WARN("config change %02x fail %d?\n",
1873 u.r.bRequest, i);
1874 return;
1875 /* TODO experiment: if has_cfr,
1876 * hardware didn't ACK; maybe we
1877 * could actually STALL!
1878 */
1879 }
1880 DBG(DBG_VERBOSE, "protocol STALL, "
1881 "%02x err %d\n", UDCCS0, i);
1882stall:
1883 /* the watchdog timer helps deal with cases
1884 * where udc seems to clear FST wrongly, and
1885 * then NAKs instead of STALLing.
1886 */
1887 ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
1888 start_watchdog(dev);
1889 dev->ep0state = EP0_STALL;
1890
1891 /* deferred i/o == no response yet */
1892 } else if (dev->req_pending) {
1893 if (likely(dev->ep0state == EP0_IN_DATA_PHASE
1894 || dev->req_std || u.r.wLength))
1895 ep0start(dev, 0, "defer");
1896 else
1897 ep0start(dev, UDCCS0_IPR, "defer/IPR");
1898 }
1899
1900 /* expect at least one data or status stage irq */
1901 return;
1902
1903 } else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
1904 == (UDCCS0_OPR|UDCCS0_SA))) {
1905 unsigned i;
1906
1907 /* pxa210/250 erratum 131 for B0/B1 says RNE lies.
1908 * still observed on a pxa255 a0.
1909 */
1910 DBG(DBG_VERBOSE, "e131\n");
1911 nuke(ep, -EPROTO);
1912
1913 /* read SETUP data, but don't trust it too much */
1914 for (i = 0; i < 8; i++)
1915 u.raw [i] = (u8) UDDR0;
1916 if ((u.r.bRequestType & USB_RECIP_MASK)
1917 > USB_RECIP_OTHER)
1918 goto stall;
1919 if (u.word [0] == 0 && u.word [1] == 0)
1920 goto stall;
1921 goto got_setup;
1922 } else {
1923 /* some random early IRQ:
1924 * - we acked FST
1925 * - IPR cleared
1926 * - OPR got set, without SA (likely status stage)
1927 */
1928 UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
1929 }
1930 break;
1931 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
1932 if (udccs0 & UDCCS0_OPR) {
1933 UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
1934 DBG(DBG_VERBOSE, "ep0in premature status\n");
1935 if (req)
1936 done(ep, req, 0);
1937 ep0_idle(dev);
1938 } else /* irq was IPR clearing */ {
1939 if (req) {
1940 /* this IN packet might finish the request */
1941 (void) write_ep0_fifo(ep, req);
1942 } /* else IN token before response was written */
1943 }
1944 break;
1945 case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
1946 if (udccs0 & UDCCS0_OPR) {
1947 if (req) {
1948 /* this OUT packet might finish the request */
1949 if (read_ep0_fifo(ep, req))
1950 done(ep, req, 0);
1951 /* else more OUT packets expected */
1952 } /* else OUT token before read was issued */
1953 } else /* irq was IPR clearing */ {
1954 DBG(DBG_VERBOSE, "ep0out premature status\n");
1955 if (req)
1956 done(ep, req, 0);
1957 ep0_idle(dev);
1958 }
1959 break;
1960 case EP0_END_XFER:
1961 if (req)
1962 done(ep, req, 0);
1963 /* ack control-IN status (maybe in-zlp was skipped)
1964		 * this status also appears after some config change events.
1965 */
1966 if (udccs0 & UDCCS0_OPR)
1967 UDCCS0 = UDCCS0_OPR;
1968 ep0_idle(dev);
1969 break;
1970 case EP0_STALL:
1971 UDCCS0 = UDCCS0_FST;
1972 break;
1973 }
1974 USIR0 = USIR0_IR0;
1975}
1976
1977static void handle_ep(struct pxa2xx_ep *ep)
1978{
1979 struct pxa2xx_request *req;
1980 int is_in = ep->bEndpointAddress & USB_DIR_IN;
1981 int completed;
1982 u32 udccs, tmp;
1983
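	/* service the fifo until nothing more completes; finishing one
	 * request lets the next queued one start in the same pass
	 */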
1984 do {
1985 completed = 0;
1986 if (likely (!list_empty(&ep->queue)))
1987 req = list_entry(ep->queue.next,
1988 struct pxa2xx_request, queue);
1989 else
1990 req = NULL;
1991
1992 // TODO check FST handling
1993
1994 udccs = *ep->reg_udccs;
1995 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
1996 tmp = UDCCS_BI_TUR;
1997 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
1998 tmp |= UDCCS_BI_SST;
1999 tmp &= udccs;
2000 if (likely (tmp))
2001 *ep->reg_udccs = tmp;
2002 if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
2003 completed = write_fifo(ep, req);
2004
2005 } else { /* irq from RPC (or for ISO, ROF) */
2006 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
2007 tmp = UDCCS_BO_SST | UDCCS_BO_DME;
2008 else
2009 tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
2010 tmp &= udccs;
2011 if (likely(tmp))
2012 *ep->reg_udccs = tmp;
2013
2014 /* fifos can hold packets, ready for reading... */
2015 if (likely(req)) {
2016#ifdef USE_OUT_DMA
2017// TODO didn't yet debug out-dma. this approach assumes
2018// the worst about short packets and RPC; a better approach may be possible.
2019
2020 if (likely(ep->dma >= 0)) {
2021 if (!(udccs & UDCCS_BO_RSP)) {
2022 *ep->reg_udccs = UDCCS_BO_RPC;
2023 ep->dma_irqs++;
2024 return;
2025 }
2026 }
2027#endif
2028 completed = read_fifo(ep, req);
2029 } else
2030 pio_irq_disable (ep->bEndpointAddress);
2031 }
2032 ep->pio_irqs++;
2033 } while (completed);
2034}
2035
2036/*
2037 * pxa2xx_udc_irq - interrupt handler
2038 *
2039 * avoid delays in ep0 processing. the control handshaking isn't always
2040 * under software control (pxa250c0 and the pxa255 are better), and delays
2041 * could cause usb protocol errors.
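 *
 * the handler loops until no interrupt source remains pending: each pass
 * re-reads UDCCR for suspend/resume/reset events, then dispatches the
 * USIR0/USIR1 bits to the ep0 and data-endpoint service routines.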
2042 */
2043static irqreturn_t
2044pxa2xx_udc_irq(int irq, void *_dev, struct pt_regs *r)
2045{
2046 struct pxa2xx_udc *dev = _dev;
2047 int handled;
2048
2049 dev->stats.irqs++;
2050 HEX_DISPLAY(dev->stats.irqs);
2051 do {
2052 u32 udccr = UDCCR;
2053
2054 handled = 0;
2055
2056 /* SUSpend Interrupt Request */
2057 if (unlikely(udccr & UDCCR_SUSIR)) {
2058 udc_ack_int_UDCCR(UDCCR_SUSIR);
2059 handled = 1;
David Brownell91987692005-05-07 13:20:19 -07002060 DBG(DBG_VERBOSE, "USB suspend%s\n", is_vbus_present()
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 ? "" : "+disconnect");
2062
David Brownell91987692005-05-07 13:20:19 -07002063 if (!is_vbus_present())
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 stop_activity(dev, dev->driver);
2065 else if (dev->gadget.speed != USB_SPEED_UNKNOWN
2066 && dev->driver
2067 && dev->driver->suspend)
2068 dev->driver->suspend(&dev->gadget);
2069 ep0_idle (dev);
2070 }
2071
2072 /* RESume Interrupt Request */
2073 if (unlikely(udccr & UDCCR_RESIR)) {
2074 udc_ack_int_UDCCR(UDCCR_RESIR);
2075 handled = 1;
2076 DBG(DBG_VERBOSE, "USB resume\n");
2077
2078 if (dev->gadget.speed != USB_SPEED_UNKNOWN
2079 && dev->driver
2080 && dev->driver->resume
David Brownell91987692005-05-07 13:20:19 -07002081 && is_vbus_present())
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 dev->driver->resume(&dev->gadget);
2083 }
2084
2085 /* ReSeT Interrupt Request - USB reset */
2086 if (unlikely(udccr & UDCCR_RSTIR)) {
2087 udc_ack_int_UDCCR(UDCCR_RSTIR);
2088 handled = 1;
2089
2090 if ((UDCCR & UDCCR_UDA) == 0) {
2091 DBG(DBG_VERBOSE, "USB reset start\n");
2092
2093 /* reset driver and endpoints,
2094 * in case that's not yet done
2095 */
2096 stop_activity (dev, dev->driver);
2097
2098 } else {
2099 DBG(DBG_VERBOSE, "USB reset end\n");
2100 dev->gadget.speed = USB_SPEED_FULL;
2101 LED_CONNECTED_ON;
2102 memset(&dev->stats, 0, sizeof dev->stats);
2103 /* driver and endpoints are still reset */
2104 }
2105
2106 } else {
2107 u32 usir0 = USIR0 & ~UICR0;
2108 u32 usir1 = USIR1 & ~UICR1;
2109 int i;
2110
2111 if (unlikely (!usir0 && !usir1))
2112 continue;
2113
2114 DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
2115
2116 /* control traffic */
2117 if (usir0 & USIR0_IR0) {
2118 dev->ep[0].pio_irqs++;
2119 handle_ep0(dev);
2120 handled = 1;
2121 }
2122
2123 /* endpoint data transfers */
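			/* USIR0 bits cover ep1-ep7 here (bit 0 is ep0, handled
			 * above) and USIR1 bits cover ep8-ep15; each serviced
			 * bit is written back to clear it
			 */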
2124 for (i = 0; i < 8; i++) {
2125 u32 tmp = 1 << i;
2126
2127 if (i && (usir0 & tmp)) {
2128 handle_ep(&dev->ep[i]);
2129 USIR0 |= tmp;
2130 handled = 1;
2131 }
2132 if (usir1 & tmp) {
2133 handle_ep(&dev->ep[i+8]);
2134 USIR1 |= tmp;
2135 handled = 1;
2136 }
2137 }
2138 }
2139
2140 /* we could also ask for 1 msec SOF (SIR) interrupts */
2141
2142 } while (handled);
2143 return IRQ_HANDLED;
2144}
2145
2146/*-------------------------------------------------------------------------*/
2147
2148static void nop_release (struct device *dev)
2149{
2150 DMSG("%s %s\n", __FUNCTION__, dev->bus_id);
2151}
2152
2153/* this uses load-time allocation and initialization (instead of
2154 * doing it at run-time) to save code, eliminate fault paths, and
2155 * be more obviously correct.
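 *
 * endpoints follow the fixed PXA25x layout: ep0 plus three groups of
 * { bulk-in, bulk-out, iso-in, iso-out, interrupt-in }; building with
 * CONFIG_USB_PXA2XX_SMALL keeps only ep0 and the first two bulk endpoints.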
2156 */
2157static struct pxa2xx_udc memory = {
2158 .gadget = {
2159 .ops = &pxa2xx_udc_ops,
2160 .ep0 = &memory.ep[0].ep,
2161 .name = driver_name,
2162 .dev = {
2163 .bus_id = "gadget",
2164 .release = nop_release,
2165 },
2166 },
2167
2168 /* control endpoint */
2169 .ep[0] = {
2170 .ep = {
2171 .name = ep0name,
2172 .ops = &pxa2xx_ep_ops,
2173 .maxpacket = EP0_FIFO_SIZE,
2174 },
2175 .dev = &memory,
2176 .reg_udccs = &UDCCS0,
2177 .reg_uddr = &UDDR0,
2178 },
2179
2180 /* first group of endpoints */
2181 .ep[1] = {
2182 .ep = {
2183 .name = "ep1in-bulk",
2184 .ops = &pxa2xx_ep_ops,
2185 .maxpacket = BULK_FIFO_SIZE,
2186 },
2187 .dev = &memory,
2188 .fifo_size = BULK_FIFO_SIZE,
2189 .bEndpointAddress = USB_DIR_IN | 1,
2190 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2191 .reg_udccs = &UDCCS1,
2192 .reg_uddr = &UDDR1,
2193 drcmr (25)
2194 },
2195 .ep[2] = {
2196 .ep = {
2197 .name = "ep2out-bulk",
2198 .ops = &pxa2xx_ep_ops,
2199 .maxpacket = BULK_FIFO_SIZE,
2200 },
2201 .dev = &memory,
2202 .fifo_size = BULK_FIFO_SIZE,
2203 .bEndpointAddress = 2,
2204 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2205 .reg_udccs = &UDCCS2,
2206 .reg_ubcr = &UBCR2,
2207 .reg_uddr = &UDDR2,
2208 drcmr (26)
2209 },
2210#ifndef CONFIG_USB_PXA2XX_SMALL
2211 .ep[3] = {
2212 .ep = {
2213 .name = "ep3in-iso",
2214 .ops = &pxa2xx_ep_ops,
2215 .maxpacket = ISO_FIFO_SIZE,
2216 },
2217 .dev = &memory,
2218 .fifo_size = ISO_FIFO_SIZE,
2219 .bEndpointAddress = USB_DIR_IN | 3,
2220 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2221 .reg_udccs = &UDCCS3,
2222 .reg_uddr = &UDDR3,
2223 drcmr (27)
2224 },
2225 .ep[4] = {
2226 .ep = {
2227 .name = "ep4out-iso",
2228 .ops = &pxa2xx_ep_ops,
2229 .maxpacket = ISO_FIFO_SIZE,
2230 },
2231 .dev = &memory,
2232 .fifo_size = ISO_FIFO_SIZE,
2233 .bEndpointAddress = 4,
2234 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2235 .reg_udccs = &UDCCS4,
2236 .reg_ubcr = &UBCR4,
2237 .reg_uddr = &UDDR4,
2238 drcmr (28)
2239 },
2240 .ep[5] = {
2241 .ep = {
2242 .name = "ep5in-int",
2243 .ops = &pxa2xx_ep_ops,
2244 .maxpacket = INT_FIFO_SIZE,
2245 },
2246 .dev = &memory,
2247 .fifo_size = INT_FIFO_SIZE,
2248 .bEndpointAddress = USB_DIR_IN | 5,
2249 .bmAttributes = USB_ENDPOINT_XFER_INT,
2250 .reg_udccs = &UDCCS5,
2251 .reg_uddr = &UDDR5,
2252 },
2253
2254 /* second group of endpoints */
2255 .ep[6] = {
2256 .ep = {
2257 .name = "ep6in-bulk",
2258 .ops = &pxa2xx_ep_ops,
2259 .maxpacket = BULK_FIFO_SIZE,
2260 },
2261 .dev = &memory,
2262 .fifo_size = BULK_FIFO_SIZE,
2263 .bEndpointAddress = USB_DIR_IN | 6,
2264 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2265 .reg_udccs = &UDCCS6,
2266 .reg_uddr = &UDDR6,
2267 drcmr (30)
2268 },
2269 .ep[7] = {
2270 .ep = {
2271 .name = "ep7out-bulk",
2272 .ops = &pxa2xx_ep_ops,
2273 .maxpacket = BULK_FIFO_SIZE,
2274 },
2275 .dev = &memory,
2276 .fifo_size = BULK_FIFO_SIZE,
2277 .bEndpointAddress = 7,
2278 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2279 .reg_udccs = &UDCCS7,
2280 .reg_ubcr = &UBCR7,
2281 .reg_uddr = &UDDR7,
2282 drcmr (31)
2283 },
2284 .ep[8] = {
2285 .ep = {
2286 .name = "ep8in-iso",
2287 .ops = &pxa2xx_ep_ops,
2288 .maxpacket = ISO_FIFO_SIZE,
2289 },
2290 .dev = &memory,
2291 .fifo_size = ISO_FIFO_SIZE,
2292 .bEndpointAddress = USB_DIR_IN | 8,
2293 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2294 .reg_udccs = &UDCCS8,
2295 .reg_uddr = &UDDR8,
2296 drcmr (32)
2297 },
2298 .ep[9] = {
2299 .ep = {
2300 .name = "ep9out-iso",
2301 .ops = &pxa2xx_ep_ops,
2302 .maxpacket = ISO_FIFO_SIZE,
2303 },
2304 .dev = &memory,
2305 .fifo_size = ISO_FIFO_SIZE,
2306 .bEndpointAddress = 9,
2307 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2308 .reg_udccs = &UDCCS9,
2309 .reg_ubcr = &UBCR9,
2310 .reg_uddr = &UDDR9,
2311 drcmr (33)
2312 },
2313 .ep[10] = {
2314 .ep = {
2315 .name = "ep10in-int",
2316 .ops = &pxa2xx_ep_ops,
2317 .maxpacket = INT_FIFO_SIZE,
2318 },
2319 .dev = &memory,
2320 .fifo_size = INT_FIFO_SIZE,
2321 .bEndpointAddress = USB_DIR_IN | 10,
2322 .bmAttributes = USB_ENDPOINT_XFER_INT,
2323 .reg_udccs = &UDCCS10,
2324 .reg_uddr = &UDDR10,
2325 },
2326
2327 /* third group of endpoints */
2328 .ep[11] = {
2329 .ep = {
2330 .name = "ep11in-bulk",
2331 .ops = &pxa2xx_ep_ops,
2332 .maxpacket = BULK_FIFO_SIZE,
2333 },
2334 .dev = &memory,
2335 .fifo_size = BULK_FIFO_SIZE,
2336 .bEndpointAddress = USB_DIR_IN | 11,
2337 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2338 .reg_udccs = &UDCCS11,
2339 .reg_uddr = &UDDR11,
2340 drcmr (35)
2341 },
2342 .ep[12] = {
2343 .ep = {
2344 .name = "ep12out-bulk",
2345 .ops = &pxa2xx_ep_ops,
2346 .maxpacket = BULK_FIFO_SIZE,
2347 },
2348 .dev = &memory,
2349 .fifo_size = BULK_FIFO_SIZE,
2350 .bEndpointAddress = 12,
2351 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2352 .reg_udccs = &UDCCS12,
2353 .reg_ubcr = &UBCR12,
2354 .reg_uddr = &UDDR12,
2355 drcmr (36)
2356 },
2357 .ep[13] = {
2358 .ep = {
2359 .name = "ep13in-iso",
2360 .ops = &pxa2xx_ep_ops,
2361 .maxpacket = ISO_FIFO_SIZE,
2362 },
2363 .dev = &memory,
2364 .fifo_size = ISO_FIFO_SIZE,
2365 .bEndpointAddress = USB_DIR_IN | 13,
2366 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2367 .reg_udccs = &UDCCS13,
2368 .reg_uddr = &UDDR13,
2369 drcmr (37)
2370 },
2371 .ep[14] = {
2372 .ep = {
2373 .name = "ep14out-iso",
2374 .ops = &pxa2xx_ep_ops,
2375 .maxpacket = ISO_FIFO_SIZE,
2376 },
2377 .dev = &memory,
2378 .fifo_size = ISO_FIFO_SIZE,
2379 .bEndpointAddress = 14,
2380 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2381 .reg_udccs = &UDCCS14,
2382 .reg_ubcr = &UBCR14,
2383 .reg_uddr = &UDDR14,
2384 drcmr (38)
2385 },
2386 .ep[15] = {
2387 .ep = {
2388 .name = "ep15in-int",
2389 .ops = &pxa2xx_ep_ops,
2390 .maxpacket = INT_FIFO_SIZE,
2391 },
2392 .dev = &memory,
2393 .fifo_size = INT_FIFO_SIZE,
2394 .bEndpointAddress = USB_DIR_IN | 15,
2395 .bmAttributes = USB_ENDPOINT_XFER_INT,
2396 .reg_udccs = &UDCCS15,
2397 .reg_uddr = &UDDR15,
2398 },
2399#endif /* !CONFIG_USB_PXA2XX_SMALL */
2400};
2401
2402#define CP15R0_VENDOR_MASK 0xffffe000
2403
2404#if defined(CONFIG_ARCH_PXA)
2405#define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */
2406
2407#elif defined(CONFIG_ARCH_IXP4XX)
2408#define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */
2409
2410#endif
2411
2412#define CP15R0_PROD_MASK 0x000003f0
2413#define PXA25x 0x00000100 /* and PXA26x */
2414#define PXA210 0x00000120
2415
2416#define CP15R0_REV_MASK 0x0000000f
2417
2418#define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK)
2419
2420#define PXA255_A0 0x00000106 /* or PXA260_B1 */
2421#define PXA250_C0 0x00000105 /* or PXA26x_B0 */
2422#define PXA250_B2 0x00000104
2423#define PXA250_B1 0x00000103 /* or PXA260_A0 */
2424#define PXA250_B0 0x00000102
2425#define PXA250_A1 0x00000101
2426#define PXA250_A0 0x00000100
2427
2428#define PXA210_C0 0x00000125
2429#define PXA210_B2 0x00000124
2430#define PXA210_B1 0x00000123
2431#define PXA210_B0 0x00000122
2432#define IXP425_A0 0x000001c1
Milan Svoboda043ea182006-05-29 03:34:00 -07002433#define IXP465_AD 0x00000200
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434
2435/*
2436 * probe - binds to the platform device
2437 */
Russell King3ae5eae2005-11-09 22:32:44 +00002438static int __init pxa2xx_udc_probe(struct platform_device *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439{
2440 struct pxa2xx_udc *dev = &memory;
2441 int retval, out_dma = 1;
2442 u32 chiprev;
2443
2444 /* insist on Intel/ARM/XScale */
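	/* mrc p15, 0, rX, c0, c0 reads the ARM main ID register; its vendor
	 * and product/revision fields identify the exact PXA/IXP stepping
	 */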
2445 asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
2446 if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
2447 printk(KERN_ERR "%s: not XScale!\n", driver_name);
2448 return -ENODEV;
2449 }
2450
2451 /* trigger chiprev-specific logic */
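	/* has_cfr marks steppings with the UDC control function register;
	 * out_dma stays set only where the OUT-path DMA engine is usable
	 */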
2452 switch (chiprev & CP15R0_PRODREV_MASK) {
2453#if defined(CONFIG_ARCH_PXA)
2454 case PXA255_A0:
2455 dev->has_cfr = 1;
2456 break;
2457 case PXA250_A0:
2458 case PXA250_A1:
2459 /* A0/A1 "not released"; ep 13, 15 unusable */
2460 /* fall through */
2461 case PXA250_B2: case PXA210_B2:
2462 case PXA250_B1: case PXA210_B1:
2463 case PXA250_B0: case PXA210_B0:
2464 out_dma = 0;
2465 /* fall through */
2466 case PXA250_C0: case PXA210_C0:
2467 break;
2468#elif defined(CONFIG_ARCH_IXP4XX)
2469 case IXP425_A0:
Milan Svoboda043ea182006-05-29 03:34:00 -07002470 case IXP465_AD:
2471 dev->has_cfr = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 out_dma = 0;
2473 break;
2474#endif
2475 default:
2476 out_dma = 0;
2477 printk(KERN_ERR "%s: unrecognized processor: %08x\n",
2478 driver_name, chiprev);
2479 /* iop3xx, ixp4xx, ... */
2480 return -ENODEV;
2481 }
2482
2483 pr_debug("%s: IRQ %d%s%s%s\n", driver_name, IRQ_USB,
2484 dev->has_cfr ? "" : " (!cfr)",
2485 out_dma ? "" : " (broken dma-out)",
2486 SIZE_STR DMASTR
2487 );
2488
2489#ifdef USE_DMA
2490#ifndef USE_OUT_DMA
2491 out_dma = 0;
2492#endif
2493	/* pxa250 erratum 130 prevents using OUT dma (fixed in C0) */
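	/* with reg_drcmr cleared these OUT endpoints never get a DMA
	 * channel, so they fall back to PIO servicing in handle_ep()
	 */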
2494 if (!out_dma) {
2495 DMSG("disabled OUT dma\n");
2496 dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
2497 dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
2498 dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
2499 }
2500#endif
2501
2502 /* other non-static parts of init */
Russell King3ae5eae2005-11-09 22:32:44 +00002503 dev->dev = &pdev->dev;
2504 dev->mach = pdev->dev.platform_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505
2506 init_timer(&dev->timer);
2507 dev->timer.function = udc_watchdog;
2508 dev->timer.data = (unsigned long) dev;
2509
2510 device_initialize(&dev->gadget.dev);
Russell King3ae5eae2005-11-09 22:32:44 +00002511 dev->gadget.dev.parent = &pdev->dev;
2512 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513
2514 the_controller = dev;
Russell King3ae5eae2005-11-09 22:32:44 +00002515 platform_set_drvdata(pdev, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516
2517 udc_disable(dev);
2518 udc_reinit(dev);
2519
David Brownell91987692005-05-07 13:20:19 -07002520 dev->vbus = is_vbus_present();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521
2522 /* irq setup after old hardware state is cleaned up */
2523 retval = request_irq(IRQ_USB, pxa2xx_udc_irq,
Thomas Gleixnerd54b5ca2006-07-01 19:29:44 -07002524 IRQF_DISABLED, driver_name, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 if (retval != 0) {
2526 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2527 driver_name, IRQ_USB, retval);
2528 return -EBUSY;
2529 }
2530 dev->got_irq = 1;
2531
2532#ifdef CONFIG_ARCH_LUBBOCK
2533 if (machine_is_lubbock()) {
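		/* Lubbock signals VBUS events on two board-specific IRQs;
		 * both are routed to lubbock_vbus_irq()
		 */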
2534 retval = request_irq(LUBBOCK_USB_DISC_IRQ,
2535 lubbock_vbus_irq,
Thomas Gleixnerd54b5ca2006-07-01 19:29:44 -07002536 IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 driver_name, dev);
2538 if (retval != 0) {
2539 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2540 driver_name, LUBBOCK_USB_DISC_IRQ, retval);
2541lubbock_fail0:
2542 free_irq(IRQ_USB, dev);
2543 return -EBUSY;
2544 }
2545 retval = request_irq(LUBBOCK_USB_IRQ,
2546 lubbock_vbus_irq,
Thomas Gleixnerd54b5ca2006-07-01 19:29:44 -07002547 IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 driver_name, dev);
2549 if (retval != 0) {
2550 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2551 driver_name, LUBBOCK_USB_IRQ, retval);
2552 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2553 goto lubbock_fail0;
2554 }
2555#ifdef DEBUG
2556 /* with U-Boot (but not BLOB), hex is off by default */
2557 HEX_DISPLAY(dev->stats.irqs);
2558 LUB_DISC_BLNK_LED &= 0xff;
2559#endif
2560 }
2561#endif
2562 create_proc_files();
2563
2564 return 0;
2565}
David Brownell91987692005-05-07 13:20:19 -07002566
Russell King3ae5eae2005-11-09 22:32:44 +00002567static void pxa2xx_udc_shutdown(struct platform_device *_dev)
David Brownell91987692005-05-07 13:20:19 -07002568{
2569 pullup_off();
2570}
2571
Russell King3ae5eae2005-11-09 22:32:44 +00002572static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573{
Russell King3ae5eae2005-11-09 22:32:44 +00002574 struct pxa2xx_udc *dev = platform_get_drvdata(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575
2576 udc_disable(dev);
2577 remove_proc_files();
2578 usb_gadget_unregister_driver(dev->driver);
2579
2580 if (dev->got_irq) {
2581 free_irq(IRQ_USB, dev);
2582 dev->got_irq = 0;
2583 }
Milan Svoboda44df45a2006-05-29 03:34:00 -07002584#ifdef CONFIG_ARCH_LUBBOCK
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 if (machine_is_lubbock()) {
2586 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2587 free_irq(LUBBOCK_USB_IRQ, dev);
2588 }
Milan Svoboda44df45a2006-05-29 03:34:00 -07002589#endif
Russell King3ae5eae2005-11-09 22:32:44 +00002590 platform_set_drvdata(pdev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 the_controller = NULL;
2592 return 0;
2593}
2594
2595/*-------------------------------------------------------------------------*/
2596
2597#ifdef CONFIG_PM
2598
2599/* USB suspend (controlled by the host) and system suspend (controlled
2600 * by the PXA) don't necessarily work well together. If USB is active,
2601 * the 48 MHz clock is required, so the system can't enter 33 MHz idle
2602 * mode or any deeper power-saving state.
2603 *
2604 * For now, we punt and forcibly disconnect from the USB host when PXA
2605 * enters any suspend state. While we're disconnected, we always disable
2606 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
2607 * Boards without software pullup control shouldn't use those states.
2608 * VBUS IRQs should probably be ignored so that the PXA device just acts
2609 * "dead" to USB hosts until system resume.
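 *
 * Accordingly, suspend just drops the D+ pullup (warning if the board has
 * no udc_command hook to do so) and resume turns it back on.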
2610 */
Russell King3ae5eae2005-11-09 22:32:44 +00002611static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612{
Russell King3ae5eae2005-11-09 22:32:44 +00002613 struct pxa2xx_udc *udc = platform_get_drvdata(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
Russell King9480e302005-10-28 09:52:56 -07002615 if (!udc->mach->udc_command)
2616 WARN("USB host won't detect disconnect!\n");
2617 pullup(udc, 0);
2618
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 return 0;
2620}
2621
Russell King3ae5eae2005-11-09 22:32:44 +00002622static int pxa2xx_udc_resume(struct platform_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623{
Russell King3ae5eae2005-11-09 22:32:44 +00002624 struct pxa2xx_udc *udc = platform_get_drvdata(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625
Russell King9480e302005-10-28 09:52:56 -07002626 pullup(udc, 1);
2627
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628 return 0;
2629}
2630
2631#else
2632#define pxa2xx_udc_suspend NULL
2633#define pxa2xx_udc_resume NULL
2634#endif
2635
2636/*-------------------------------------------------------------------------*/
2637
Russell King3ae5eae2005-11-09 22:32:44 +00002638static struct platform_driver udc_driver = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 .probe = pxa2xx_udc_probe,
David Brownell91987692005-05-07 13:20:19 -07002640 .shutdown = pxa2xx_udc_shutdown,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641 .remove = __exit_p(pxa2xx_udc_remove),
2642 .suspend = pxa2xx_udc_suspend,
2643 .resume = pxa2xx_udc_resume,
Russell King3ae5eae2005-11-09 22:32:44 +00002644 .driver = {
2645 .owner = THIS_MODULE,
2646 .name = "pxa2xx-udc",
2647 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648};
2649
2650static int __init udc_init(void)
2651{
2652 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
Russell King3ae5eae2005-11-09 22:32:44 +00002653 return platform_driver_register(&udc_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654}
2655module_init(udc_init);
2656
2657static void __exit udc_exit(void)
2658{
Russell King3ae5eae2005-11-09 22:32:44 +00002659 platform_driver_unregister(&udc_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660}
2661module_exit(udc_exit);
2662
2663MODULE_DESCRIPTION(DRIVER_DESC);
2664MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
2665MODULE_LICENSE("GPL");
2666