/*
 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
 * Author: Chao Xie <chao.xie@marvell.com>
 *         Neil Zhang <zhangwm@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/platform_data/mv_usb.h>
#include <asm/system.h>
#include <asm/unaligned.h>

#include "mv_udc.h"

#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
#define DRIVER_VERSION		"8 Nov 2010"

#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
			((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000
#define DTD_TIMEOUT		1000

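/*
 * Busy-wait helpers: LOOPS(t) converts a timeout given in microseconds
 * into a number of polling iterations, each of which waits LOOPS_USEC.
 */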
#define LOOPS_USEC_SHIFT	4
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)

static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

/* controller device global variable */
static struct mv_udc *the_controller;
int mv_usb_otgsc;

static void nuke(struct mv_ep *ep, int status);

/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
        .bLength		= USB_DT_ENDPOINT_SIZE,
        .bDescriptorType	= USB_DT_ENDPOINT,
        .bEndpointAddress	= 0,
        .bmAttributes		= USB_ENDPOINT_XFER_CONTROL,
        .wMaxPacketSize		= EP0_MAX_PKT_SIZE,
};

static void ep0_reset(struct mv_udc *udc)
{
        struct mv_ep *ep;
        u32 epctrlx;
        int i = 0;

        /* ep0 in and out */
        for (i = 0; i < 2; i++) {
                ep = &udc->eps[i];
                ep->udc = udc;

                /* ep0 dQH */
                ep->dqh = &udc->ep_dqh[i];

                /* configure ep0 endpoint capabilities in dQH */
                ep->dqh->max_packet_length =
                        (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
                        | EP_QUEUE_HEAD_IOS;

                ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

                epctrlx = readl(&udc->op_regs->epctrlx[0]);
                if (i) {	/* TX */
                        epctrlx |= EPCTRL_TX_ENABLE
                                | (USB_ENDPOINT_XFER_CONTROL
                                        << EPCTRL_TX_EP_TYPE_SHIFT);

                } else {	/* RX */
                        epctrlx |= EPCTRL_RX_ENABLE
                                | (USB_ENDPOINT_XFER_CONTROL
                                        << EPCTRL_RX_EP_TYPE_SHIFT);
                }

                writel(epctrlx, &udc->op_regs->epctrlx[0]);
        }
}

/* protocol ep0 stall, will automatically be cleared on new transaction */
static void ep0_stall(struct mv_udc *udc)
{
        u32 epctrlx;

        /* set TX and RX to stall */
        epctrlx = readl(&udc->op_regs->epctrlx[0]);
        epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
        writel(epctrlx, &udc->op_regs->epctrlx[0]);

        /* update ep0 state */
        udc->ep0_state = WAIT_FOR_SETUP;
        udc->ep0_dir = EP_DIR_OUT;
}

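/*
 * Walk the dTD chain of a request and fold the per-dTD completion status
 * into req->actual; returns 0 on success, 1 if a dTD is still active,
 * or a negative error code.
 */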
static int process_ep_req(struct mv_udc *udc, int index,
        struct mv_req *curr_req)
{
        struct mv_dtd *curr_dtd;
        struct mv_dqh *curr_dqh;
        int td_complete, actual, remaining_length = 0;
        int i, direction;
        int retval = 0;
        u32 errors;

        curr_dqh = &udc->ep_dqh[index];
        direction = index % 2;

        curr_dtd = curr_req->head;
        td_complete = 0;
        actual = curr_req->req.length;

        for (i = 0; i < curr_req->dtd_count; i++) {
                if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
                        dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
                                udc->eps[index].name);
                        return 1;
                }

                errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
                if (!errors) {
                        remaining_length +=
                                (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
                                        >> DTD_LENGTH_BIT_POS;
                        actual -= remaining_length;
                } else {
                        dev_info(&udc->dev->dev,
                                "complete_tr error: ep=%d %s: error = 0x%x\n",
                                index >> 1, direction ? "SEND" : "RECV",
                                errors);
                        if (errors & DTD_STATUS_HALTED) {
                                /* Clear the errors and Halt condition */
                                curr_dqh->size_ioc_int_sts &= ~errors;
                                retval = -EPIPE;
                        } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
                                retval = -EPROTO;
                        } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
                                retval = -EILSEQ;
                        }
                }
                if (i != curr_req->dtd_count - 1)
                        curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
        }
        if (retval)
                return retval;

        curr_req->req.actual = actual;

        return 0;
}

/*
 * done() - retire a request; the caller must have blocked irqs
 * @status : request status to be set; only takes effect while the
 * request is still in progress.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
{
        struct mv_udc *udc = NULL;
        unsigned char stopped = ep->stopped;
        struct mv_dtd *curr_td, *next_td;
        int j;

        udc = (struct mv_udc *)ep->udc;
        /* Remove the req from ep->queue */
        list_del_init(&req->queue);

        /* req.status should be set as -EINPROGRESS in ep_queue() */
        if (req->req.status == -EINPROGRESS)
                req->req.status = status;
        else
                status = req->req.status;

        /* Free dtd for the request */
        next_td = req->head;
        for (j = 0; j < req->dtd_count; j++) {
                curr_td = next_td;
                if (j != req->dtd_count - 1)
                        next_td = curr_td->next_dtd_virt;
                dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
        }

        if (req->mapped) {
                dma_unmap_single(ep->udc->gadget.dev.parent,
                        req->req.dma, req->req.length,
                        ((ep_dir(ep) == EP_DIR_IN) ?
                                DMA_TO_DEVICE : DMA_FROM_DEVICE));
                req->req.dma = DMA_ADDR_INVALID;
                req->mapped = 0;
        } else
                dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
                        req->req.dma, req->req.length,
                        ((ep_dir(ep) == EP_DIR_IN) ?
                                DMA_TO_DEVICE : DMA_FROM_DEVICE));

        if (status && (status != -ESHUTDOWN))
                dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
                        ep->ep.name, &req->req, status,
                        req->req.actual, req->req.length);

        ep->stopped = 1;

        spin_unlock(&ep->udc->lock);
        /*
         * complete() is from gadget layer,
         * eg fsg->bulk_in_complete()
         */
        if (req->req.complete)
                req->req.complete(&ep->ep, &req->req);

        spin_lock(&ep->udc->lock);
        ep->stopped = stopped;
}

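/*
 * Append a request's dTD chain to the endpoint's hardware queue and,
 * if the endpoint is idle, prime it. The ATDTW tripwire semaphore is
 * used so the chain can be added to a live queue without racing the
 * controller.
 */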
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
        u32 tmp, epstatus, bit_pos, direction;
        struct mv_udc *udc;
        struct mv_dqh *dqh;
        unsigned int loops;
        int readsafe, retval = 0;

        udc = ep->udc;
        direction = ep_dir(ep);
        dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
        bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

        /* check if the pipe is empty */
        if (!(list_empty(&ep->queue))) {
                struct mv_req *lastreq;
                lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
                lastreq->tail->dtd_next =
                        req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
                if (readl(&udc->op_regs->epprime) & bit_pos) {
                        loops = LOOPS(PRIME_TIMEOUT);
                        while (readl(&udc->op_regs->epprime) & bit_pos) {
                                if (loops == 0) {
                                        retval = -ETIME;
                                        goto done;
                                }
                                udelay(LOOPS_USEC);
                                loops--;
                        }
                        if (readl(&udc->op_regs->epstatus) & bit_pos)
                                goto done;
                }
                readsafe = 0;
                loops = LOOPS(READSAFE_TIMEOUT);
                while (readsafe == 0) {
                        if (loops == 0) {
                                retval = -ETIME;
                                goto done;
                        }
                        /* start with setting the semaphores */
                        tmp = readl(&udc->op_regs->usbcmd);
                        tmp |= USBCMD_ATDTW_TRIPWIRE_SET;
                        writel(tmp, &udc->op_regs->usbcmd);

                        /* read the endpoint status */
                        epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

                        /*
                         * Reread the ATDTW semaphore bit to check if it is
                         * cleared. When the hardware sees a hazard, it clears
                         * the bit; otherwise it remains set and we can
                         * proceed with priming the endpoint if it is not
                         * already primed.
                         */
                        if (readl(&udc->op_regs->usbcmd)
                                & USBCMD_ATDTW_TRIPWIRE_SET) {
                                readsafe = 1;
                        }
                        loops--;
                        udelay(LOOPS_USEC);
                }

                /* Clear the semaphore */
                tmp = readl(&udc->op_regs->usbcmd);
                tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
                writel(tmp, &udc->op_regs->usbcmd);

                /* If endpoint is not active, we activate it now. */
                if (!epstatus) {
                        if (direction == EP_DIR_IN) {
                                struct mv_dtd *curr_dtd = dma_to_virt(
                                        &udc->dev->dev, dqh->curr_dtd_ptr);

                                loops = LOOPS(DTD_TIMEOUT);
                                while (curr_dtd->size_ioc_sts
                                        & DTD_STATUS_ACTIVE) {
                                        if (loops == 0) {
                                                retval = -ETIME;
                                                goto done;
                                        }
                                        loops--;
                                        udelay(LOOPS_USEC);
                                }
                        }
                        /* No other transfers on the queue */

                        /* Write dQH next pointer and terminate bit to 0 */
                        dqh->next_dtd_ptr = req->head->td_dma
                                & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
                        dqh->size_ioc_int_sts = 0;

                        /*
                         * Ensure that updates to the QH will
                         * occur before priming.
                         */
                        wmb();

                        /* Prime the Endpoint */
                        writel(bit_pos, &udc->op_regs->epprime);
                }
        } else {
                /* Write dQH next pointer and terminate bit to 0 */
                dqh->next_dtd_ptr = req->head->td_dma
                        & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
                dqh->size_ioc_int_sts = 0;

                /* Ensure that updates to the QH will occur before priming. */
                wmb();

                /* Prime the Endpoint */
                writel(bit_pos, &udc->op_regs->epprime);

                if (direction == EP_DIR_IN) {
                        /* FIXME add status check after prime the IN ep */
                        int prime_again;
                        u32 curr_dtd_ptr = dqh->curr_dtd_ptr;

                        loops = LOOPS(DTD_TIMEOUT);
                        prime_again = 0;
                        while ((curr_dtd_ptr != req->head->td_dma)) {
                                curr_dtd_ptr = dqh->curr_dtd_ptr;
                                if (loops == 0) {
                                        dev_err(&udc->dev->dev,
                                                "failed to prime %s\n",
                                                ep->name);
                                        retval = -ETIME;
                                        goto done;
                                }
                                loops--;
                                udelay(LOOPS_USEC);

                                if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) {
                                        if (prime_again)
                                                goto done;
                                        dev_info(&udc->dev->dev,
                                                "prime again\n");
                                        writel(bit_pos,
                                                &udc->op_regs->epprime);
                                        prime_again = 1;
                                }
                        }
                }
        }
done:
        return retval;
}

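/*
 * Allocate one dTD from the dma_pool and fill it with the next chunk of
 * the request (up to EP_MAX_LENGTH_TRANSFER bytes), advancing
 * req->req.actual and reporting whether this is the request's last dTD.
 */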
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
                dma_addr_t *dma, int *is_last)
{
        u32 temp;
        struct mv_dtd *dtd;
        struct mv_udc *udc;

        /* how big will this transfer be? */
        *length = min(req->req.length - req->req.actual,
                        (unsigned)EP_MAX_LENGTH_TRANSFER);

        udc = req->ep->udc;

        /*
         * Be careful that no __GFP_HIGHMEM is set,
         * or we cannot use dma_to_virt
         */
        dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
        if (dtd == NULL)
                return dtd;

        dtd->td_dma = *dma;
        /* initialize buffer page pointers */
        temp = (u32)(req->req.dma + req->req.actual);
        dtd->buff_ptr0 = cpu_to_le32(temp);
        temp &= ~0xFFF;
        dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
        dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
        dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
        dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

        req->req.actual += *length;

        /* zlp is needed if req->req.zero is set */
        if (req->req.zero) {
                if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
                        *is_last = 1;
                else
                        *is_last = 0;
        } else if (req->req.length == req->req.actual)
                *is_last = 1;
        else
                *is_last = 0;

        /* Fill in the transfer size; set active bit */
        temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

        /* Enable interrupt for the last dtd of a request */
        if (*is_last && !req->req.no_interrupt)
                temp |= DTD_IOC;

        dtd->size_ioc_sts = temp;

        mb();

        return dtd;
}

/* generate dTD linked list for a request */
static int req_to_dtd(struct mv_req *req)
{
        unsigned count;
        int is_last, is_first = 1;
        struct mv_dtd *dtd, *last_dtd = NULL;
        struct mv_udc *udc;
        dma_addr_t dma;

        udc = req->ep->udc;

        do {
                dtd = build_dtd(req, &count, &dma, &is_last);
                if (dtd == NULL)
                        return -ENOMEM;

                if (is_first) {
                        is_first = 0;
                        req->head = dtd;
                } else {
                        last_dtd->dtd_next = dma;
                        last_dtd->next_dtd_virt = dtd;
                }
                last_dtd = dtd;
                req->dtd_count++;
        } while (!is_last);

        /* set terminate bit to 1 for the last dTD */
        dtd->dtd_next = DTD_NEXT_TERMINATE;

        req->tail = dtd;

        return 0;
}

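/*
 * Configure an endpoint from its descriptor: program the dQH (max packet
 * length, zlt, mult, ios) and enable the matching half of ENDPTCTRL.
 */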
static int mv_ep_enable(struct usb_ep *_ep,
                const struct usb_endpoint_descriptor *desc)
{
        struct mv_udc *udc;
        struct mv_ep *ep;
        struct mv_dqh *dqh;
        u16 max = 0;
        u32 bit_pos, epctrlx, direction;
        unsigned char zlt = 0, ios = 0, mult = 0;
        unsigned long flags;

        ep = container_of(_ep, struct mv_ep, ep);
        udc = ep->udc;

        if (!_ep || !desc || ep->desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;

        if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        direction = ep_dir(ep);
        max = usb_endpoint_maxp(desc);

        /*
         * disable HW zero length termination select
         * driver handles zero length packet through req->req.zero
         */
        zlt = 1;

        bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

        /* Check if the Endpoint is Primed */
        if ((readl(&udc->op_regs->epprime) & bit_pos)
                || (readl(&udc->op_regs->epstatus) & bit_pos)) {
                dev_info(&udc->dev->dev,
                        "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
                        " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
                        (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
                        (unsigned)readl(&udc->op_regs->epprime),
                        (unsigned)readl(&udc->op_regs->epstatus),
                        (unsigned)bit_pos);
                goto en_done;
        }
        /* Set the max packet length, interrupt on Setup and Mult fields */
        switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
        case USB_ENDPOINT_XFER_BULK:
                zlt = 1;
                mult = 0;
                break;
        case USB_ENDPOINT_XFER_CONTROL:
                ios = 1;
                /* fall through */
        case USB_ENDPOINT_XFER_INT:
                mult = 0;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                /* Calculate transactions needed for high bandwidth iso */
                mult = (unsigned char)(1 + ((max >> 11) & 0x03));
                max = max & 0x7ff;	/* bit 0~10 */
                /* 3 transactions at most */
                if (mult > 3)
                        goto en_done;
                break;
        default:
                goto en_done;
        }

        spin_lock_irqsave(&udc->lock, flags);
        /* Get the endpoint queue head address */
        dqh = ep->dqh;
        dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
                | (mult << EP_QUEUE_HEAD_MULT_POS)
                | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
                | (ios ? EP_QUEUE_HEAD_IOS : 0);
        dqh->next_dtd_ptr = 1;
        dqh->size_ioc_int_sts = 0;

        ep->ep.maxpacket = max;
        ep->desc = desc;
        ep->stopped = 0;

        /* Enable the endpoint for Rx or Tx and set the endpoint type */
        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
        if (direction == EP_DIR_IN) {
                epctrlx &= ~EPCTRL_TX_ALL_MASK;
                epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
                        | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
                                << EPCTRL_TX_EP_TYPE_SHIFT);
        } else {
                epctrlx &= ~EPCTRL_RX_ALL_MASK;
                epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
                        | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
                                << EPCTRL_RX_EP_TYPE_SHIFT);
        }
        writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

        /*
         * Implement Guideline (GL# USB-7) The unused endpoint type must
         * be programmed to bulk.
         */
        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
        if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
                epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
                        << EPCTRL_RX_EP_TYPE_SHIFT);
                writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
        }

        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
        if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
                epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
                        << EPCTRL_TX_EP_TYPE_SHIFT);
                writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
        }

        spin_unlock_irqrestore(&udc->lock, flags);

        return 0;
en_done:
        return -EINVAL;
}

static int mv_ep_disable(struct usb_ep *_ep)
{
        struct mv_udc *udc;
        struct mv_ep *ep;
        struct mv_dqh *dqh;
        u32 bit_pos, epctrlx, direction;
        unsigned long flags;

        ep = container_of(_ep, struct mv_ep, ep);
        if ((_ep == NULL) || !ep->desc)
                return -EINVAL;

        udc = ep->udc;

        /* Get the endpoint queue head address */
        dqh = ep->dqh;

        spin_lock_irqsave(&udc->lock, flags);

        direction = ep_dir(ep);
        bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

        /* Reset the max packet length and the interrupt on Setup */
        dqh->max_packet_length = 0;

        /* Disable the endpoint for Rx or Tx and reset the endpoint type */
        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
        epctrlx &= ~((direction == EP_DIR_IN)
                        ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
                        : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
        writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

        /* nuke all pending requests (does flush) */
        nuke(ep, -ESHUTDOWN);

        ep->desc = NULL;
        ep->stopped = 1;

        spin_unlock_irqrestore(&udc->lock, flags);

        return 0;
}

static struct usb_request *
mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
        struct mv_req *req = NULL;

        req = kzalloc(sizeof *req, gfp_flags);
        if (!req)
                return NULL;

        req->req.dma = DMA_ADDR_INVALID;
        INIT_LIST_HEAD(&req->queue);

        return &req->req;
}

static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
        struct mv_req *req = NULL;

        req = container_of(_req, struct mv_req, req);

        if (_req)
                kfree(req);
}

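/*
 * Flush an endpoint's primed transfer: write its bit to ENDPTFLUSH and
 * poll until both the flush and endpoint status bits clear, re-issuing
 * the flush if the controller re-primes the endpoint behind us.
 */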
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
        struct mv_udc *udc;
        u32 bit_pos, direction;
        struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
        unsigned int loops;

        udc = ep->udc;
        direction = ep_dir(ep);
        bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
        /*
         * Flushing will halt the pipe
         * Write 1 to the Flush register
         */
        writel(bit_pos, &udc->op_regs->epflush);

        /* Wait until flushing completed */
        loops = LOOPS(FLUSH_TIMEOUT);
        while (readl(&udc->op_regs->epflush) & bit_pos) {
                /*
                 * ENDPTFLUSH bit should be cleared to indicate this
                 * operation is complete
                 */
                if (loops == 0) {
                        dev_err(&udc->dev->dev,
                                "TIMEOUT for ENDPTFLUSH=0x%x, bit_pos=0x%x\n",
                                (unsigned)readl(&udc->op_regs->epflush),
                                (unsigned)bit_pos);
                        return;
                }
                loops--;
                udelay(LOOPS_USEC);
        }
        loops = LOOPS(EPSTATUS_TIMEOUT);
        while (readl(&udc->op_regs->epstatus) & bit_pos) {
                unsigned int inter_loops;

                if (loops == 0) {
                        dev_err(&udc->dev->dev,
                                "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
                                (unsigned)readl(&udc->op_regs->epstatus),
                                (unsigned)bit_pos);
                        return;
                }
                /* Write 1 to the Flush register */
                writel(bit_pos, &udc->op_regs->epflush);

                /* Wait until flushing completed */
                inter_loops = LOOPS(FLUSH_TIMEOUT);
                while (readl(&udc->op_regs->epflush) & bit_pos) {
                        /*
                         * ENDPTFLUSH bit should be cleared to indicate this
                         * operation is complete
                         */
                        if (inter_loops == 0) {
                                dev_err(&udc->dev->dev,
                                        "TIMEOUT for ENDPTFLUSH=0x%x,"
                                        " bit_pos=0x%x\n",
                                        (unsigned)readl(&udc->op_regs->epflush),
                                        (unsigned)bit_pos);
                                return;
                        }
                        inter_loops--;
                        udelay(LOOPS_USEC);
                }
                loops--;
        }
}

/* queues (submits) an I/O request to an endpoint */
static int
mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
        struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
        struct mv_req *req = container_of(_req, struct mv_req, req);
        struct mv_udc *udc = ep->udc;
        unsigned long flags;

        /* catch various bogus parameters */
        if (!_req || !req->req.complete || !req->req.buf
                        || !list_empty(&req->queue)) {
                dev_err(&udc->dev->dev, "%s, bad params", __func__);
                return -EINVAL;
        }
        if (unlikely(!_ep || !ep->desc)) {
                dev_err(&udc->dev->dev, "%s, bad ep", __func__);
                return -EINVAL;
        }
        if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
                if (req->req.length > ep->ep.maxpacket)
                        return -EMSGSIZE;
        }

        udc = ep->udc;
        if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        req->ep = ep;

        /* map virtual address to hardware */
        if (req->req.dma == DMA_ADDR_INVALID) {
                req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
                                        req->req.buf,
                                        req->req.length, ep_dir(ep)
                                                ? DMA_TO_DEVICE
                                                : DMA_FROM_DEVICE);
                req->mapped = 1;
        } else {
                dma_sync_single_for_device(ep->udc->gadget.dev.parent,
                                        req->req.dma, req->req.length,
                                        ep_dir(ep)
                                                ? DMA_TO_DEVICE
                                                : DMA_FROM_DEVICE);
                req->mapped = 0;
        }

        req->req.status = -EINPROGRESS;
        req->req.actual = 0;
        req->dtd_count = 0;

        spin_lock_irqsave(&udc->lock, flags);

        /* build dtds and push them to device queue */
        if (!req_to_dtd(req)) {
                int retval;
                retval = queue_dtd(ep, req);
                if (retval) {
                        spin_unlock_irqrestore(&udc->lock, flags);
                        return retval;
                }
        } else {
                spin_unlock_irqrestore(&udc->lock, flags);
                return -ENOMEM;
        }

        /* Update ep0 state */
        if (ep->ep_num == 0)
                udc->ep0_state = DATA_STATE_XMIT;

        /* irq handler advances the queue */
        if (req != NULL)
                list_add_tail(&req->queue, &ep->queue);
        spin_unlock_irqrestore(&udc->lock, flags);

        return 0;
}

/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
        struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
        struct mv_req *req;
        struct mv_udc *udc = ep->udc;
        unsigned long flags;
        int stopped, ret = 0;
        u32 epctrlx;

        if (!_ep || !_req)
                return -EINVAL;

        spin_lock_irqsave(&ep->udc->lock, flags);
        stopped = ep->stopped;

        /* Stop the ep before we deal with the queue */
        ep->stopped = 1;
        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
        if (ep_dir(ep) == EP_DIR_IN)
                epctrlx &= ~EPCTRL_TX_ENABLE;
        else
                epctrlx &= ~EPCTRL_RX_ENABLE;
        writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

        /* make sure it's actually queued on this endpoint */
        list_for_each_entry(req, &ep->queue, queue) {
                if (&req->req == _req)
                        break;
        }
        if (&req->req != _req) {
                ret = -EINVAL;
                goto out;
        }

        /* The request is in progress, or completed but not dequeued */
        if (ep->queue.next == &req->queue) {
                _req->status = -ECONNRESET;
                mv_ep_fifo_flush(_ep);	/* flush current transfer */

                /* The request isn't the last request in this ep queue */
                if (req->queue.next != &ep->queue) {
                        struct mv_dqh *qh;
                        struct mv_req *next_req;

                        qh = ep->dqh;
                        next_req = list_entry(req->queue.next, struct mv_req,
                                        queue);

                        /* Point the QH to the first TD of next request */
                        writel((u32) next_req->head, &qh->curr_dtd_ptr);
                } else {
                        struct mv_dqh *qh;

                        qh = ep->dqh;
                        qh->next_dtd_ptr = 1;
                        qh->size_ioc_int_sts = 0;
                }

                /* The request hasn't been processed, patch up the TD chain */
        } else {
                struct mv_req *prev_req;

                prev_req = list_entry(req->queue.prev, struct mv_req, queue);
                writel(readl(&req->tail->dtd_next),
                                &prev_req->tail->dtd_next);

        }

        done(ep, req, -ECONNRESET);

        /* Enable EP */
out:
        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
        if (ep_dir(ep) == EP_DIR_IN)
                epctrlx |= EPCTRL_TX_ENABLE;
        else
                epctrlx |= EPCTRL_RX_ENABLE;
        writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
        ep->stopped = stopped;

        spin_unlock_irqrestore(&ep->udc->lock, flags);
        return ret;
}

static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
{
        u32 epctrlx;

        epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

        if (stall) {
                if (direction == EP_DIR_IN)
                        epctrlx |= EPCTRL_TX_EP_STALL;
                else
                        epctrlx |= EPCTRL_RX_EP_STALL;
        } else {
                if (direction == EP_DIR_IN) {
                        epctrlx &= ~EPCTRL_TX_EP_STALL;
                        epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
                } else {
                        epctrlx &= ~EPCTRL_RX_EP_STALL;
                        epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
                }
        }
        writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
}

static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
{
        u32 epctrlx;

        epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

        if (direction == EP_DIR_OUT)
                return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
        else
                return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
}

static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
        struct mv_ep *ep;
        unsigned long flags = 0;
        int status = 0;
        struct mv_udc *udc;

        ep = container_of(_ep, struct mv_ep, ep);
        udc = ep->udc;
        if (!_ep || !ep->desc) {
                status = -EINVAL;
                goto out;
        }

        if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
                status = -EOPNOTSUPP;
                goto out;
        }

        /*
         * An attempt to halt an IN endpoint will fail if any transfer
         * requests are still queued.
         */
        if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
                status = -EAGAIN;
                goto out;
        }

        spin_lock_irqsave(&ep->udc->lock, flags);
        ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
        if (halt && wedge)
                ep->wedge = 1;
        else if (!halt)
                ep->wedge = 0;
        spin_unlock_irqrestore(&ep->udc->lock, flags);

        if (ep->ep_num == 0) {
                udc->ep0_state = WAIT_FOR_SETUP;
                udc->ep0_dir = EP_DIR_OUT;
        }
out:
        return status;
}

static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
        return mv_ep_set_halt_wedge(_ep, halt, 0);
}

static int mv_ep_set_wedge(struct usb_ep *_ep)
{
        return mv_ep_set_halt_wedge(_ep, 1, 1);
}

static struct usb_ep_ops mv_ep_ops = {
        .enable		= mv_ep_enable,
        .disable	= mv_ep_disable,

        .alloc_request	= mv_alloc_request,
        .free_request	= mv_free_request,

        .queue		= mv_ep_queue,
        .dequeue	= mv_ep_dequeue,

        .set_wedge	= mv_ep_set_wedge,
        .set_halt	= mv_ep_set_halt,
        .fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};

static void udc_clock_enable(struct mv_udc *udc)
{
        unsigned int i;

        for (i = 0; i < udc->clknum; i++)
                clk_enable(udc->clk[i]);
}

static void udc_clock_disable(struct mv_udc *udc)
{
        unsigned int i;

        for (i = 0; i < udc->clknum; i++)
                clk_disable(udc->clk[i]);
}

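/* Mask the device interrupts and clear the Run/Stop bit to halt the controller. */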
static void udc_stop(struct mv_udc *udc)
{
        u32 tmp;

        /* Disable interrupts */
        tmp = readl(&udc->op_regs->usbintr);
        tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
                USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
        writel(tmp, &udc->op_regs->usbintr);

        /* Clear the Run/Stop bit in the command register to stop the controller */
        tmp = readl(&udc->op_regs->usbcmd);
        tmp &= ~USBCMD_RUN_STOP;
        writel(tmp, &udc->op_regs->usbcmd);
}

static void udc_start(struct mv_udc *udc)
{
        u32 usbintr;

        usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
                | USBINTR_PORT_CHANGE_DETECT_EN
                | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
        /* Enable interrupts */
        writel(usbintr, &udc->op_regs->usbintr);

        /* Set the Run bit in the command register */
        writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}

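/*
 * Hard-reset the controller, then restore device mode, the dQH list
 * address and the port settings that the reset cleared.
 */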
static int udc_reset(struct mv_udc *udc)
{
        unsigned int loops;
        u32 tmp, portsc;

        /* Stop the controller */
        tmp = readl(&udc->op_regs->usbcmd);
        tmp &= ~USBCMD_RUN_STOP;
        writel(tmp, &udc->op_regs->usbcmd);

        /* Reset the controller to get default values */
        writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

        /* wait for reset to complete */
        loops = LOOPS(RESET_TIMEOUT);
        while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
                if (loops == 0) {
                        dev_err(&udc->dev->dev,
                                "Wait for RESET completed TIMEOUT\n");
                        return -ETIMEDOUT;
                }
                loops--;
                udelay(LOOPS_USEC);
        }

        /* set controller to device mode */
        tmp = readl(&udc->op_regs->usbmode);
        tmp |= USBMODE_CTRL_MODE_DEVICE;

        /* turn setup lockout off, require setup tripwire in usbcmd */
        tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;

        writel(tmp, &udc->op_regs->usbmode);

        writel(0x0, &udc->op_regs->epsetupstat);

        /* Configure the Endpoint List Address */
        writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
                &udc->op_regs->eplistaddr);

        portsc = readl(&udc->op_regs->portsc[0]);
        if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
                portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);

        if (udc->force_fs)
                portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
        else
                portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

        writel(portsc, &udc->op_regs->portsc[0]);

        tmp = readl(&udc->op_regs->epctrlx[0]);
        tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
        writel(tmp, &udc->op_regs->epctrlx[0]);

        return 0;
}

static int mv_udc_get_frame(struct usb_gadget *gadget)
{
        struct mv_udc *udc;
        u16 retval;

        if (!gadget)
                return -ENODEV;

        udc = container_of(gadget, struct mv_udc, gadget);

        retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;

        return retval;
}

/* Tries to wake up the host connected to this gadget */
static int mv_udc_wakeup(struct usb_gadget *gadget)
{
        struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
        u32 portsc;

        /* Remote wakeup feature not enabled by host */
        if (!udc->remote_wakeup)
                return -ENOTSUPP;

        portsc = readl(&udc->op_regs->portsc[0]);
        /* not suspended? */
        if (!(portsc & PORTSCX_PORT_SUSPEND))
                return 0;
        /* trigger force resume */
        portsc |= PORTSCX_PORT_FORCE_RESUME;
        writel(portsc, &udc->op_regs->portsc[0]);
        return 0;
}

static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
        struct mv_udc *udc;
        unsigned long flags;

        udc = container_of(gadget, struct mv_udc, gadget);
        spin_lock_irqsave(&udc->lock, flags);

        udc->softconnect = (is_on != 0);
        if (udc->driver && udc->softconnect)
                udc_start(udc);
        else
                udc_stop(udc);

        spin_unlock_irqrestore(&udc->lock, flags);
        return 0;
}

static int mv_udc_start(struct usb_gadget_driver *driver,
                int (*bind)(struct usb_gadget *));
static int mv_udc_stop(struct usb_gadget_driver *driver);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

        /* returns the current frame number */
        .get_frame	= mv_udc_get_frame,

        /* tries to wake up the host connected to this gadget */
        .wakeup		= mv_udc_wakeup,

        /* D+ pullup, software-controlled connect/disconnect to USB host */
        .pullup		= mv_udc_pullup,
        .start		= mv_udc_start,
        .stop		= mv_udc_stop,
};

static void mv_udc_testmode(struct mv_udc *udc, u16 index, bool enter)
{
        dev_info(&udc->dev->dev, "Test mode is not supported yet\n");
}

static int eps_init(struct mv_udc *udc)
{
        struct mv_ep *ep;
        char name[14];
        int i;

        /* initialize ep0 */
        ep = &udc->eps[0];
        ep->udc = udc;
        strncpy(ep->name, "ep0", sizeof(ep->name));
        ep->ep.name = ep->name;
        ep->ep.ops = &mv_ep_ops;
        ep->wedge = 0;
        ep->stopped = 0;
        ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
        ep->ep_num = 0;
        ep->desc = &mv_ep0_desc;
        INIT_LIST_HEAD(&ep->queue);

        ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

        /* initialize other endpoints */
        for (i = 2; i < udc->max_eps * 2; i++) {
                ep = &udc->eps[i];
                if (i % 2) {
                        snprintf(name, sizeof(name), "ep%din", i / 2);
                        ep->direction = EP_DIR_IN;
                } else {
                        snprintf(name, sizeof(name), "ep%dout", i / 2);
                        ep->direction = EP_DIR_OUT;
                }
                ep->udc = udc;
                strncpy(ep->name, name, sizeof(ep->name));
                ep->ep.name = ep->name;

                ep->ep.ops = &mv_ep_ops;
                ep->stopped = 0;
                ep->ep.maxpacket = (unsigned short) ~0;
                ep->ep_num = i / 2;

                INIT_LIST_HEAD(&ep->queue);
                list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

                ep->dqh = &udc->ep_dqh[i];
        }

        return 0;
}

/* delete all endpoint requests, called with spinlock held */
static void nuke(struct mv_ep *ep, int status)
{
        ep->stopped = 1;

        /* endpoint fifo flush */
        mv_ep_fifo_flush(&ep->ep);

        while (!list_empty(&ep->queue)) {
                struct mv_req *req = NULL;
                req = list_entry(ep->queue.next, struct mv_req, queue);
                done(ep, req, status);
        }
}

/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
        struct mv_ep *ep;

        nuke(&udc->eps[0], -ESHUTDOWN);

        list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
                nuke(ep, -ESHUTDOWN);
        }

        /* report disconnect; the driver is already quiesced */
        if (driver) {
                spin_unlock(&udc->lock);
                driver->disconnect(&udc->gadget);
                spin_lock(&udc->lock);
        }
}

static int mv_udc_start(struct usb_gadget_driver *driver,
                int (*bind)(struct usb_gadget *))
{
        struct mv_udc *udc = the_controller;
        int retval = 0;
        unsigned long flags;

        if (!udc)
                return -ENODEV;

        if (udc->driver)
                return -EBUSY;

        spin_lock_irqsave(&udc->lock, flags);

        /* hook up the driver ... */
        driver->driver.bus = NULL;
        udc->driver = driver;
        udc->gadget.dev.driver = &driver->driver;

        udc->usb_state = USB_STATE_ATTACHED;
        udc->ep0_state = WAIT_FOR_SETUP;
        udc->ep0_dir = USB_DIR_OUT;

        spin_unlock_irqrestore(&udc->lock, flags);

        retval = bind(&udc->gadget);
        if (retval) {
                dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
                                driver->driver.name, retval);
                udc->driver = NULL;
                udc->gadget.dev.driver = NULL;
                return retval;
        }
        udc_reset(udc);
        ep0_reset(udc);
        udc_start(udc);

        return 0;
}

static int mv_udc_stop(struct usb_gadget_driver *driver)
{
        struct mv_udc *udc = the_controller;
        unsigned long flags;

        if (!udc)
                return -ENODEV;

        udc_stop(udc);

        spin_lock_irqsave(&udc->lock, flags);

        /* stop all usb activities */
        udc->gadget.speed = USB_SPEED_UNKNOWN;
        stop_activity(udc, driver);
        spin_unlock_irqrestore(&udc->lock, flags);

        /* unbind gadget driver */
        driver->unbind(&udc->gadget);
        udc->gadget.dev.driver = NULL;
        udc->driver = NULL;

        return 0;
}

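/*
 * Queue the ep0 handshake: a zero-length packet, or the two-byte status
 * value when answering GET_STATUS.
 */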
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
        int retval = 0;
        struct mv_req *req;
        struct mv_ep *ep;

        ep = &udc->eps[0];
        udc->ep0_dir = direction;

        req = udc->status_req;

        /* fill in the request structure */
        if (empty == false) {
                *((u16 *) req->req.buf) = cpu_to_le16(status);
                req->req.length = 2;
        } else
                req->req.length = 0;

        req->ep = ep;
        req->req.status = -EINPROGRESS;
        req->req.actual = 0;
        req->req.complete = NULL;
        req->dtd_count = 0;

        /* prime the data phase */
        if (!req_to_dtd(req)) {
                retval = queue_dtd(ep, req);
        } else {	/* no mem */
                retval = -ENOMEM;
                goto out;
        }

        if (retval) {
                dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
                goto out;
        }

        list_add_tail(&req->queue, &ep->queue);

        return 0;
out:
        return retval;
}

static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
        udc->dev_addr = (u8)setup->wValue;

        /* update usb state */
        udc->usb_state = USB_STATE_ADDRESS;

        if (udc_prime_status(udc, EP_DIR_IN, 0, true))
                ep0_stall(udc);
}

static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
        struct usb_ctrlrequest *setup)
{
        u16 status;
        int retval;

        if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
                != (USB_DIR_IN | USB_TYPE_STANDARD))
                return;

        if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
                status = 1 << USB_DEVICE_SELF_POWERED;
                status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
        } else if ((setup->bRequestType & USB_RECIP_MASK)
                        == USB_RECIP_INTERFACE) {
                /* get interface status */
                status = 0;
        } else if ((setup->bRequestType & USB_RECIP_MASK)
                        == USB_RECIP_ENDPOINT) {
                u8 ep_num, direction;

                ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
                direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
                                ? EP_DIR_IN : EP_DIR_OUT;
                status = ep_is_stall(udc, ep_num, direction)
                                << USB_ENDPOINT_HALT;
        }

        retval = udc_prime_status(udc, EP_DIR_IN, status, false);
        if (retval)
                ep0_stall(udc);
}

static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
        u8 ep_num;
        u8 direction;
        struct mv_ep *ep;

        if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
                == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
                switch (setup->wValue) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        udc->remote_wakeup = 0;
                        break;
                case USB_DEVICE_TEST_MODE:
                        mv_udc_testmode(udc, 0, false);
                        break;
                default:
                        goto out;
                }
        } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
                == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
                switch (setup->wValue) {
                case USB_ENDPOINT_HALT:
                        ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
                        direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
                                ? EP_DIR_IN : EP_DIR_OUT;
                        if (setup->wValue != 0 || setup->wLength != 0
                                || ep_num > udc->max_eps)
                                goto out;
                        ep = &udc->eps[ep_num * 2 + direction];
                        if (ep->wedge == 1)
                                break;
                        spin_unlock(&udc->lock);
                        ep_set_stall(udc, ep_num, direction, 0);
                        spin_lock(&udc->lock);
                        break;
                default:
                        goto out;
                }
        } else
                goto out;

        if (udc_prime_status(udc, EP_DIR_IN, 0, true))
                ep0_stall(udc);
        else
                udc->ep0_state = DATA_STATE_XMIT;
out:
        return;
}

static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
        u8 ep_num;
        u8 direction;

        if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
                == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
                switch (setup->wValue) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        udc->remote_wakeup = 1;
                        break;
                case USB_DEVICE_TEST_MODE:
                        if (setup->wIndex & 0xFF
                                && udc->gadget.speed != USB_SPEED_HIGH)
                                goto out;
                        if (udc->usb_state == USB_STATE_CONFIGURED
                                || udc->usb_state == USB_STATE_ADDRESS
                                || udc->usb_state == USB_STATE_DEFAULT)
                                mv_udc_testmode(udc,
                                        setup->wIndex & 0xFF00, true);
                        else
                                goto out;
                        break;
                default:
                        goto out;
                }
        } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
                == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
                switch (setup->wValue) {
                case USB_ENDPOINT_HALT:
                        ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
                        direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
                                ? EP_DIR_IN : EP_DIR_OUT;
                        if (setup->wValue != 0 || setup->wLength != 0
                                || ep_num > udc->max_eps)
                                goto out;
                        spin_unlock(&udc->lock);
                        ep_set_stall(udc, ep_num, direction, 1);
                        spin_lock(&udc->lock);
                        break;
                default:
                        goto out;
                }
        } else
                goto out;

        if (udc_prime_status(udc, EP_DIR_IN, 0, true))
                ep0_stall(udc);
out:
        return;
}

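/*
 * Dispatch a SETUP packet: the ch9 standard requests that the UDC must
 * answer itself are handled here; everything else is delegated to the
 * gadget driver's setup() callback.
 */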
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
        struct usb_ctrlrequest *setup)
{
        bool delegate = false;

        nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

        dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
                        setup->bRequestType, setup->bRequest,
                        setup->wValue, setup->wIndex, setup->wLength);
        /* We process some standard setup requests here */
        if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
                switch (setup->bRequest) {
                case USB_REQ_GET_STATUS:
                        ch9getstatus(udc, ep_num, setup);
                        break;

                case USB_REQ_SET_ADDRESS:
                        ch9setaddress(udc, setup);
                        break;

                case USB_REQ_CLEAR_FEATURE:
                        ch9clearfeature(udc, setup);
                        break;

                case USB_REQ_SET_FEATURE:
                        ch9setfeature(udc, setup);
                        break;

                default:
                        delegate = true;
                }
        } else
                delegate = true;

        /* delegate USB standard requests to the gadget driver */
        if (delegate == true) {
                /* USB requests handled by gadget */
                if (setup->wLength) {
                        /* DATA phase from gadget, STATUS phase from udc */
                        udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
                                        ? EP_DIR_IN : EP_DIR_OUT;
                        spin_unlock(&udc->lock);
                        if (udc->driver->setup(&udc->gadget,
                                        &udc->local_setup_buff) < 0)
                                ep0_stall(udc);
                        spin_lock(&udc->lock);
                        udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
                                        ? DATA_STATE_XMIT : DATA_STATE_RECV;
                } else {
                        /* no DATA phase, IN STATUS phase from gadget */
                        udc->ep0_dir = EP_DIR_IN;
                        spin_unlock(&udc->lock);
                        if (udc->driver->setup(&udc->gadget,
                                        &udc->local_setup_buff) < 0)
                                ep0_stall(udc);
                        spin_lock(&udc->lock);
                        udc->ep0_state = WAIT_FOR_OUT_STATUS;
                }
        }
}

/* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
static void ep0_req_complete(struct mv_udc *udc,
        struct mv_ep *ep0, struct mv_req *req)
{
        u32 new_addr;

        if (udc->usb_state == USB_STATE_ADDRESS) {
                /* set the new address */
                new_addr = (u32)udc->dev_addr;
                writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
                        &udc->op_regs->deviceaddr);
        }

        done(ep0, req, 0);

        switch (udc->ep0_state) {
        case DATA_STATE_XMIT:
                /* receive status phase */
                if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
                        ep0_stall(udc);
                break;
        case DATA_STATE_RECV:
                /* send status phase */
                if (udc_prime_status(udc, EP_DIR_IN, 0, true))
                        ep0_stall(udc);
                break;
        case WAIT_FOR_OUT_STATUS:
                udc->ep0_state = WAIT_FOR_SETUP;
                break;
        case WAIT_FOR_SETUP:
                dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
                break;
        default:
                ep0_stall(udc);
                break;
        }
}

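/*
 * Copy a SETUP packet out of the dQH buffer. The setup tripwire detects
 * a new packet arriving mid-copy, in which case the copy is retried.
 */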
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
        u32 temp;
        struct mv_dqh *dqh;

        dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

        /* Clear bit in ENDPTSETUPSTAT */
        writel((1 << ep_num), &udc->op_regs->epsetupstat);

        /* a hazard exists if a new setup packet arrives during the copy */
        do {
                /* Set Setup Tripwire */
                temp = readl(&udc->op_regs->usbcmd);
                writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

                /* Copy the setup packet to local buffer */
                memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
        } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

        /* Clear Setup Tripwire */
        temp = readl(&udc->op_regs->usbcmd);
        writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}

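/*
 * Handle transaction-complete interrupts: read out pending SETUP packets
 * first, then retire completed dTDs on every endpoint.
 */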
static void irq_process_tr_complete(struct mv_udc *udc)
{
        u32 tmp, bit_pos;
        int i, ep_num = 0, direction = 0;
        struct mv_ep *curr_ep;
        struct mv_req *curr_req, *temp_req;
        int status;

        /*
         * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
         * because the setup packets are to be read ASAP
         */

        /* Process all Setup packet received interrupts */
        tmp = readl(&udc->op_regs->epsetupstat);

        if (tmp) {
                for (i = 0; i < udc->max_eps; i++) {
                        if (tmp & (1 << i)) {
                                get_setup_data(udc, i,
                                        (u8 *)(&udc->local_setup_buff));
                                handle_setup_packet(udc, i,
                                        &udc->local_setup_buff);
                        }
                }
        }

        /* Don't clear the endpoint setup status register here.
         * It is cleared as a setup packet is read out of the buffer
         */

        /* Process non-setup transaction complete interrupts */
        tmp = readl(&udc->op_regs->epcomplete);

        if (!tmp)
                return;

        writel(tmp, &udc->op_regs->epcomplete);

        for (i = 0; i < udc->max_eps * 2; i++) {
                ep_num = i >> 1;
                direction = i % 2;

                bit_pos = 1 << (ep_num + 16 * direction);

                if (!(bit_pos & tmp))
                        continue;

                if (i == 1)
                        curr_ep = &udc->eps[0];
                else
                        curr_ep = &udc->eps[i];
                /* process the req queue, stopping at the first incomplete request */
                list_for_each_entry_safe(curr_req, temp_req,
                        &curr_ep->queue, queue) {
                        status = process_ep_req(udc, i, curr_req);
                        if (status)
                                break;

                        /* write back status to req */
                        curr_req->req.status = status;

                        /* ep0 request completion */
                        if (ep_num == 0) {
                                ep0_req_complete(udc, curr_ep, curr_req);
                                break;
                        } else {
                                done(curr_ep, curr_req, status);
                        }
                }
        }
}

void irq_process_reset(struct mv_udc *udc)
{
        u32 tmp;
        unsigned int loops;

        udc->ep0_dir = EP_DIR_OUT;
        udc->ep0_state = WAIT_FOR_SETUP;
        udc->remote_wakeup = 0;		/* default to 0 on reset */

        /* The device address occupies bits 25-31; clear it */
        tmp = readl(&udc->op_regs->deviceaddr);
        tmp &= ~(USB_DEVICE_ADDRESS_MASK);
        writel(tmp, &udc->op_regs->deviceaddr);

        /* Clear all the setup token semaphores */
        tmp = readl(&udc->op_regs->epsetupstat);
        writel(tmp, &udc->op_regs->epsetupstat);

        /* Clear all the endpoint complete status bits */
        tmp = readl(&udc->op_regs->epcomplete);
        writel(tmp, &udc->op_regs->epcomplete);

        /* wait until all endptprime bits cleared */
        loops = LOOPS(PRIME_TIMEOUT);
        while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
                if (loops == 0) {
                        dev_err(&udc->dev->dev,
                                "Timeout for ENDPTPRIME = 0x%x\n",
                                readl(&udc->op_regs->epprime));
                        break;
                }
                loops--;
                udelay(LOOPS_USEC);
        }

        /* Write 1s to the Flush register */
        writel((u32)~0, &udc->op_regs->epflush);

        if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
                dev_info(&udc->dev->dev, "usb bus reset\n");
                udc->usb_state = USB_STATE_DEFAULT;
                /* reset all the queues, stop all USB activities */
                stop_activity(udc, udc->driver);
        } else {
                dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
                        readl(&udc->op_regs->portsc[0]));

                /*
                 * re-initialize
                 * controller reset
                 */
                udc_reset(udc);

                /* reset all the queues, stop all USB activities */
                stop_activity(udc, udc->driver);

                /* reset ep0 dQH and endptctrl */
                ep0_reset(udc);

                /* enable interrupt and set controller to run state */
                udc_start(udc);

                udc->usb_state = USB_STATE_ATTACHED;
        }
}

static void handle_bus_resume(struct mv_udc *udc)
{
        udc->usb_state = udc->resume_state;
        udc->resume_state = 0;

        /* report resume to the driver */
        if (udc->driver) {
                if (udc->driver->resume) {
                        spin_unlock(&udc->lock);
                        udc->driver->resume(&udc->gadget);
                        spin_lock(&udc->lock);
                }
        }
}

static void irq_process_suspend(struct mv_udc *udc)
{
        udc->resume_state = udc->usb_state;
        udc->usb_state = USB_STATE_SUSPENDED;

        if (udc->driver->suspend) {
                spin_unlock(&udc->lock);
                udc->driver->suspend(&udc->gadget);
                spin_lock(&udc->lock);
        }
}

static void irq_process_port_change(struct mv_udc *udc)
{
        u32 portsc;

        portsc = readl(&udc->op_regs->portsc[0]);
        if (!(portsc & PORTSCX_PORT_RESET)) {
                /* Get the speed */
                u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
                switch (speed) {
                case PORTSCX_PORT_SPEED_HIGH:
                        udc->gadget.speed = USB_SPEED_HIGH;
                        break;
                case PORTSCX_PORT_SPEED_FULL:
                        udc->gadget.speed = USB_SPEED_FULL;
                        break;
                case PORTSCX_PORT_SPEED_LOW:
                        udc->gadget.speed = USB_SPEED_LOW;
                        break;
                default:
                        udc->gadget.speed = USB_SPEED_UNKNOWN;
                        break;
                }
        }

        if (portsc & PORTSCX_PORT_SUSPEND) {
                udc->resume_state = udc->usb_state;
                udc->usb_state = USB_STATE_SUSPENDED;
                if (udc->driver->suspend) {
                        spin_unlock(&udc->lock);
                        udc->driver->suspend(&udc->gadget);
                        spin_lock(&udc->lock);
                }
        }

        if (!(portsc & PORTSCX_PORT_SUSPEND)
                && udc->usb_state == USB_STATE_SUSPENDED) {
                handle_bus_resume(udc);
        }

        if (!udc->resume_state)
                udc->usb_state = USB_STATE_DEFAULT;
}

static void irq_process_error(struct mv_udc *udc)
{
        /* Increment the error count */
        udc->errors++;
}

static irqreturn_t mv_udc_irq(int irq, void *dev)
{
        struct mv_udc *udc = (struct mv_udc *)dev;
        u32 status, intr;

        spin_lock(&udc->lock);

        status = readl(&udc->op_regs->usbsts);
        intr = readl(&udc->op_regs->usbintr);
        status &= intr;

        if (status == 0) {
                spin_unlock(&udc->lock);
                return IRQ_NONE;
        }

        /* Clear all the interrupts that occurred */
        writel(status, &udc->op_regs->usbsts);

        if (status & USBSTS_ERR)
                irq_process_error(udc);

        if (status & USBSTS_RESET)
                irq_process_reset(udc);

        if (status & USBSTS_PORT_CHANGE)
                irq_process_port_change(udc);

        if (status & USBSTS_INT)
                irq_process_tr_complete(udc);

        if (status & USBSTS_SUSPEND)
                irq_process_suspend(udc);

        spin_unlock(&udc->lock);

        return IRQ_HANDLED;
}

/* release device structure */
static void gadget_release(struct device *_dev)
{
        struct mv_udc *udc = the_controller;

        complete(udc->done);
}

static int __devexit mv_udc_remove(struct platform_device *dev)
{
        struct mv_udc *udc = the_controller;
        int clk_i;

        usb_del_gadget_udc(&udc->gadget);

        /* free memory allocated in probe */
        if (udc->dtd_pool)
                dma_pool_destroy(udc->dtd_pool);

        if (udc->ep_dqh)
                dma_free_coherent(&dev->dev, udc->ep_dqh_size,
                        udc->ep_dqh, udc->ep_dqh_dma);

        kfree(udc->eps);

        if (udc->irq)
                free_irq(udc->irq, &dev->dev);

        if (udc->cap_regs)
                iounmap(udc->cap_regs);
        udc->cap_regs = NULL;

        if (udc->phy_regs)
                iounmap((void *)udc->phy_regs);
        udc->phy_regs = 0;

        if (udc->status_req) {
                kfree(udc->status_req->req.buf);
                kfree(udc->status_req);
        }

        for (clk_i = 0; clk_i < udc->clknum; clk_i++)
                clk_put(udc->clk[clk_i]);

        device_unregister(&udc->gadget.dev);

        /* free dev, wait for the release() finished */
        wait_for_completion(udc->done);
        kfree(udc);

        the_controller = NULL;

        return 0;
}
1969
Neil Zhang5d0b8d02011-10-12 16:49:25 +08001970static int __devinit mv_udc_probe(struct platform_device *dev)
cxie4e7cddda2010-11-30 13:35:15 +08001971{
Neil Zhangdde34cc2011-10-12 16:49:24 +08001972 struct mv_usb_platform_data *pdata = dev->dev.platform_data;
cxie4e7cddda2010-11-30 13:35:15 +08001973 struct mv_udc *udc;
1974 int retval = 0;
Neil Zhangdde34cc2011-10-12 16:49:24 +08001975 int clk_i = 0;
cxie4e7cddda2010-11-30 13:35:15 +08001976 struct resource *r;
1977 size_t size;
1978
Neil Zhangdde34cc2011-10-12 16:49:24 +08001979 if (pdata == NULL) {
1980 dev_err(&dev->dev, "missing platform_data\n");
1981 return -ENODEV;
cxie4e7cddda2010-11-30 13:35:15 +08001982 }
1983
Neil Zhangdde34cc2011-10-12 16:49:24 +08001984 size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
1985 udc = kzalloc(size, GFP_KERNEL);
1986 if (udc == NULL) {
1987 dev_err(&dev->dev, "failed to allocate memory for udc\n");
1988 return -ENOMEM;
1989 }
1990
1991 the_controller = udc;
1992 udc->done = &release_done;
1993 udc->pdata = dev->dev.platform_data;
cxie4e7cddda2010-11-30 13:35:15 +08001994 spin_lock_init(&udc->lock);
1995
1996 udc->dev = dev;
1997
Neil Zhangdde34cc2011-10-12 16:49:24 +08001998 udc->clknum = pdata->clknum;
1999 for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
2000 udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
2001 if (IS_ERR(udc->clk[clk_i])) {
2002 retval = PTR_ERR(udc->clk[clk_i]);
2003 goto err_put_clk;
2004 }
cxie4e7cddda2010-11-30 13:35:15 +08002005 }
2006
	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
	if (r == NULL) {
		dev_err(&dev->dev, "no I/O memory resource defined\n");
		retval = -ENODEV;
		goto err_put_clk;
	}

	udc->cap_regs = (struct mv_cap_regs __iomem *)
		ioremap(r->start, resource_size(r));
	if (udc->cap_regs == NULL) {
		dev_err(&dev->dev, "failed to map I/O memory\n");
		retval = -EBUSY;
		goto err_put_clk;
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
	if (r == NULL) {
		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
		retval = -ENODEV;
		goto err_iounmap_capreg;
	}

	udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
	if (udc->phy_regs == 0) {
		dev_err(&dev->dev, "failed to map phy I/O memory\n");
		retval = -EBUSY;
		goto err_iounmap_capreg;
	}

	/* enable the clock before touching the controller registers */
	udc_clock_enable(udc);
	if (pdata->phy_init) {
		retval = pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&dev->dev, "phy init error %d\n", retval);
			goto err_iounmap_phyreg;
		}
	}

	udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
		+ (readl(&udc->cap_regs->caplength_hciversion)
			& CAPLENGTH_MASK));
	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;

	/*
	 * Some platforms use USB to download the boot image and may not
	 * disconnect the USB gadget before loading the kernel, so stop
	 * the UDC first.
	 */
	udc_stop(udc);
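	/* usbsts is write-1-to-clear; acknowledge any stale status bits */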
	writel(0xFFFFFFFF, &udc->op_regs->usbsts);

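	/*
	 * One RX and one TX queue head per endpoint; round the total up
	 * to the controller's dQH alignment boundary.
	 */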
	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
		&udc->ep_dqh_dma, GFP_KERNEL);

	if (udc->ep_dqh == NULL) {
		dev_err(&dev->dev, "allocate dQH memory failed\n");
		retval = -ENOMEM;
		goto err_disable_clock;
	}
	udc->ep_dqh_size = size;

	/* create dTD dma_pool resource */
	udc->dtd_pool = dma_pool_create("mv_dtd",
			&dev->dev,
			sizeof(struct mv_dtd),
			DTD_ALIGNMENT,
			DMA_BOUNDARY);

	if (!udc->dtd_pool) {
		retval = -ENOMEM;
		goto err_free_dma;
	}

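	/* likewise, one mv_ep per direction for each hardware endpoint */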
	size = udc->max_eps * sizeof(struct mv_ep) * 2;
	udc->eps = kzalloc(size, GFP_KERNEL);
	if (udc->eps == NULL) {
		dev_err(&dev->dev, "allocate ep memory failed\n");
		retval = -ENOMEM;
		goto err_destroy_dma;
	}

	/* initialize ep0 status request structure */
	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
	if (!udc->status_req) {
		dev_err(&dev->dev, "allocate status_req memory failed\n");
		retval = -ENOMEM;
		goto err_free_eps;
	}
	INIT_LIST_HEAD(&udc->status_req->queue);

	/* allocate a small buffer so the status request has a valid address */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);

	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = EP_DIR_OUT;
	udc->remote_wakeup = 0;

	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		dev_err(&dev->dev, "no IRQ resource defined\n");
		retval = -ENODEV;
		goto err_free_status_req;
	}
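	/*
	 * The line may be shared (e.g. with the OTG/host side of the same
	 * controller), hence IRQF_SHARED with udc as the dev_id cookie.
	 */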
	udc->irq = r->start;
	if (request_irq(udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc)) {
		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
			udc->irq);
		retval = -ENODEV;
		goto err_free_status_req;
	}

	/* initialize gadget structure */
	udc->gadget.ops = &mv_ops;		/* usb_gadget_ops */
	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	udc->gadget.is_dualspeed = 1;		/* support dual speed */

	/* the "gadget" abstracts/virtualizes the controller */
	dev_set_name(&udc->gadget.dev, "gadget");
	udc->gadget.dev.parent = &dev->dev;
	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
	udc->gadget.dev.release = gadget_release;
	udc->gadget.name = driver_name;		/* gadget name */

	retval = device_register(&udc->gadget.dev);
	if (retval)
		goto err_free_irq;

	eps_init(udc);

	retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
	if (retval)
		goto err_unregister;

	return 0;

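	/* error unwinding releases resources in reverse order of acquisition */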
err_unregister:
	device_unregister(&udc->gadget.dev);
err_free_irq:
	free_irq(udc->irq, udc);
err_free_status_req:
	kfree(udc->status_req->req.buf);
	kfree(udc->status_req);
err_free_eps:
	kfree(udc->eps);
err_destroy_dma:
	dma_pool_destroy(udc->dtd_pool);
err_free_dma:
	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
		udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
	if (udc->pdata->phy_deinit)
		udc->pdata->phy_deinit(udc->phy_regs);
	udc_clock_disable(udc);
err_iounmap_phyreg:
	iounmap((void *)udc->phy_regs);
err_iounmap_capreg:
	iounmap(udc->cap_regs);
err_put_clk:
	for (clk_i--; clk_i >= 0; clk_i--)
		clk_put(udc->clk[clk_i]);
	the_controller = NULL;
	kfree(udc);
	return retval;
}

#ifdef CONFIG_PM
static int mv_udc_suspend(struct device *_dev)
{
	struct mv_udc *udc = the_controller;

	udc_stop(udc);

	return 0;
}

static int mv_udc_resume(struct device *_dev)
{
	struct mv_udc *udc = the_controller;
	int retval;

	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&udc->dev->dev,
				"phy init error %d on resume\n",
				retval);
			return retval;
		}
	}

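	/* re-run the full reset/start sequence in case controller state was lost */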
	udc_reset(udc);
	ep0_reset(udc);
	udc_start(udc);

	return 0;
}

static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend = mv_udc_suspend,
	.resume = mv_udc_resume,
};
#endif

static struct platform_driver udc_driver = {
	.probe = mv_udc_probe,
	.remove = __devexit_p(mv_udc_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "pxa-u2o",
#ifdef CONFIG_PM
		.pm = &mv_udc_pm_ops,
#endif
	},
};
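/* let udev/modprobe autoload this driver when a "pxa-u2o" device appears */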
MODULE_ALIAS("platform:pxa-u2o");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int __init init(void)
{
	return platform_driver_register(&udc_driver);
}
module_init(init);

static void __exit cleanup(void)
{
	platform_driver_unregister(&udc_driver);
}
module_exit(cleanup);