blob: 8cfd5b028dbdab755a8d7ea66b676acc93fbb3d0 [file] [log] [blame]
Yu Xu3d4eb9d2012-06-15 21:45:08 +08001/*
2 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 */
8
9#include <linux/module.h>
10#include <linux/dma-mapping.h>
11#include <linux/dmapool.h>
12#include <linux/kernel.h>
13#include <linux/delay.h>
14#include <linux/ioport.h>
15#include <linux/sched.h>
16#include <linux/slab.h>
17#include <linux/errno.h>
18#include <linux/init.h>
19#include <linux/timer.h>
20#include <linux/list.h>
21#include <linux/notifier.h>
22#include <linux/interrupt.h>
23#include <linux/moduleparam.h>
24#include <linux/device.h>
25#include <linux/usb/ch9.h>
26#include <linux/usb/gadget.h>
27#include <linux/pm.h>
28#include <linux/io.h>
29#include <linux/irq.h>
30#include <linux/platform_device.h>
31#include <linux/platform_data/mv_usb.h>
32#include <linux/clk.h>
33#include <asm/system.h>
34#include <asm/unaligned.h>
35#include <asm/byteorder.h>
36
37#include "mv_u3d.h"
38
#define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"

/* identification strings reported to the gadget/UDC framework */
static const char driver_name[] = "mv_u3d";
static const char driver_desc[] = DRIVER_DESC;

/* forward declarations for helpers defined later in this file */
static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status);
static void mv_u3d_stop_activity(struct mv_u3d *u3d,
			struct usb_gadget_driver *driver);
47
/* for endpoint 0 operations */
/* Fixed descriptor used internally when (re)configuring the control ep. */
static const struct usb_endpoint_descriptor mv_u3d_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = MV_U3D_EP0_MAX_PKT_SIZE,
};
56
/*
 * mv_u3d_ep0_reset() - re-initialize endpoint 0 hardware state.
 *
 * Rebinds the two ep0 software endpoints (IN and OUT) to their shared
 * endpoint context, then pulses the EP_INIT bit and reprograms max packet
 * size, burst size, enable bit and control type for both directions.
 */
static void mv_u3d_ep0_reset(struct mv_u3d *u3d)
{
	struct mv_u3d_ep *ep;
	u32 epxcr;
	int i;

	for (i = 0; i < 2; i++) {
		ep = &u3d->eps[i];
		ep->u3d = u3d;

		/* ep0 ep context, ep0 in and out share the same ep context */
		ep->ep_context = &u3d->ep_context[1];
	}

	/* reset ep state machine */
	/* reset ep0 out: pulse EP_INIT (set, short delay, clear) */
	epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
	epxcr |= MV_U3D_EPXCR_EP_INIT;
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
	udelay(5);
	epxcr &= ~MV_U3D_EPXCR_EP_INIT;
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);

	/* program ep0-out: max packet, burst of 1, enable, control type */
	epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
		<< MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
		| (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
		| (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		| MV_U3D_EPXCR_EP_TYPE_CONTROL);
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1);

	/* reset ep0 in: same EP_INIT pulse on the IN control register */
	epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
	epxcr |= MV_U3D_EPXCR_EP_INIT;
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
	udelay(5);
	epxcr &= ~MV_U3D_EPXCR_EP_INIT;
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);

	/* program ep0-in identically to ep0-out */
	epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
		<< MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
		| (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
		| (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		| MV_U3D_EPXCR_EP_TYPE_CONTROL);
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1);
}
102
103static void mv_u3d_ep0_stall(struct mv_u3d *u3d)
104{
105 u32 tmp;
106 dev_dbg(u3d->dev, "%s\n", __func__);
107
108 /* set TX and RX to stall */
109 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
110 tmp |= MV_U3D_EPXCR_EP_HALT;
111 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
112
113 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
114 tmp |= MV_U3D_EPXCR_EP_HALT;
115 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
116
117 /* update ep0 state */
118 u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
119 u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
120}
121
122static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
123 struct mv_u3d_req *curr_req)
124{
125 struct mv_u3d_trb *curr_trb;
126 dma_addr_t cur_deq_lo;
127 struct mv_u3d_ep_context *curr_ep_context;
128 int trb_complete, actual, remaining_length;
129 int direction, ep_num;
130 int retval = 0;
131 u32 tmp, status, length;
132
133 curr_ep_context = &u3d->ep_context[index];
134 direction = index % 2;
135 ep_num = index / 2;
136
137 trb_complete = 0;
138 actual = curr_req->req.length;
139
140 while (!list_empty(&curr_req->trb_list)) {
141 curr_trb = list_entry(curr_req->trb_list.next,
142 struct mv_u3d_trb, trb_list);
143 if (!curr_trb->trb_hw->ctrl.own) {
144 dev_err(u3d->dev, "%s, TRB own error!\n",
145 u3d->eps[index].name);
146 return 1;
147 }
148
149 curr_trb->trb_hw->ctrl.own = 0;
150 if (direction == MV_U3D_EP_DIR_OUT) {
151 tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
152 cur_deq_lo =
153 ioread32(&u3d->vuc_regs->rxst[ep_num].curdeqlo);
154 } else {
155 tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
156 cur_deq_lo =
157 ioread32(&u3d->vuc_regs->txst[ep_num].curdeqlo);
158 }
159
160 status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
161 length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
162
163 if (status == MV_U3D_COMPLETE_SUCCESS ||
164 (status == MV_U3D_COMPLETE_SHORT_PACKET &&
165 direction == MV_U3D_EP_DIR_OUT)) {
166 remaining_length += length;
167 actual -= remaining_length;
168 } else {
169 dev_err(u3d->dev,
170 "complete_tr error: ep=%d %s: error = 0x%x\n",
171 index >> 1, direction ? "SEND" : "RECV",
172 status);
173 retval = -EPROTO;
174 }
175
176 list_del_init(&curr_trb->trb_list);
177 }
178 if (retval)
179 return retval;
180
181 curr_req->req.actual = actual;
182 return 0;
183}
184
/*
 * mv_u3d_done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 * request is still in progress.
 *
 * Unlinks the request, releases its TRB storage (dma-pool for a single
 * TRB, streaming unmap + kfree for a chained array), unmaps the request
 * buffer and invokes the gadget completion callback.  The u3d lock is
 * dropped around the callback and re-taken afterwards, so the caller
 * must hold u3d->lock on entry.
 */
static
void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
{
	struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d;

	dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n");
	/* Removed the req from ep queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free trb for the request */
	if (!req->chain)
		/* single TRB came from the coherent dma pool */
		dma_pool_free(u3d->trb_pool,
			req->trb_head->trb_hw, req->trb_head->trb_dma);
	else {
		/* chained TRB array was kcalloc'd and streaming-mapped */
		dma_unmap_single(ep->u3d->gadget.dev.parent,
			(dma_addr_t)req->trb_head->trb_dma,
			req->trb_count * sizeof(struct mv_u3d_trb_hw),
			DMA_BIDIRECTIONAL);
		kfree(req->trb_head->trb_hw);
	}
	kfree(req->trb_head);

	usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));

	if (status && (status != -ESHUTDOWN)) {
		dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);
	}

	/* drop the lock while calling into the gadget driver */
	spin_unlock(&ep->u3d->lock);
	/*
	 * complete() is from gadget layer,
	 * eg fsg->bulk_in_complete()
	 */
	if (req->req.complete)
		req->req.complete(&ep->ep, &req->req);

	spin_lock(&ep->u3d->lock);
}
236
/*
 * mv_u3d_queue_trb() - hand a request's TRB chain to the hardware.
 *
 * Writes the first TRB's DMA address (with the DCS bit) into the
 * endpoint context and rings the doorbell for that endpoint.  Fails
 * with -ENOMEM if the endpoint queue is not empty — the hardware is
 * only given one request at a time per endpoint.
 *
 * Returns 0 on success.
 */
static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req)
{
	u32 tmp, direction;
	struct mv_u3d *u3d;
	struct mv_u3d_ep_context *ep_context;
	int retval = 0;

	u3d = ep->u3d;
	direction = mv_u3d_ep_dir(ep);

	/* ep0 in and out share the same ep context slot 1*/
	if (ep->ep_num == 0)
		ep_context = &(u3d->ep_context[1]);
	else
		ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]);

	/* check if the pipe is empty or not */
	if (!list_empty(&ep->queue)) {
		dev_err(u3d->dev, "add trb to non-empty queue!\n");
		retval = -ENOMEM;
		WARN_ON(1);
	} else {
		ep_context->rsvd0 = cpu_to_le32(1);
		ep_context->rsvd1 = 0;

		/* Configure the trb address and set the DCS bit.
		 * Both DCS bit and own bit in trb should be set.
		 */
		ep_context->trb_addr_lo =
			cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE);
		ep_context->trb_addr_hi = 0;

		/* Ensure that updates to the EP Context will
		 * occure before Ring Bell.
		 */
		wmb();

		/* ring bell the ep */
		if (ep->ep_num == 0)
			tmp = 0x1;
		else
			/* doorbell id: ep_num * 2, +1 for the IN direction */
			tmp = ep->ep_num * 2
				+ ((direction == MV_U3D_EP_DIR_OUT) ? 0 : 1);

		iowrite32(tmp, &u3d->op_regs->doorbell);
	}
	return retval;
}
285
286static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req,
287 unsigned *length, dma_addr_t *dma)
288{
289 u32 temp;
290 unsigned int direction;
291 struct mv_u3d_trb *trb;
292 struct mv_u3d_trb_hw *trb_hw;
293 struct mv_u3d *u3d;
294
295 /* how big will this transfer be? */
296 *length = req->req.length - req->req.actual;
297 BUG_ON(*length > (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
298
299 u3d = req->ep->u3d;
300
301 trb = kzalloc(sizeof(*trb), GFP_ATOMIC);
302 if (!trb) {
303 dev_err(u3d->dev, "%s, trb alloc fail\n", __func__);
304 return NULL;
305 }
306
307 /*
308 * Be careful that no _GFP_HIGHMEM is set,
309 * or we can not use dma_to_virt
310 * cannot use GFP_KERNEL in spin lock
311 */
312 trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
313 if (!trb_hw) {
314 dev_err(u3d->dev,
315 "%s, dma_pool_alloc fail\n", __func__);
316 return NULL;
317 }
318 trb->trb_dma = *dma;
319 trb->trb_hw = trb_hw;
320
321 /* initialize buffer page pointers */
322 temp = (u32)(req->req.dma + req->req.actual);
323
324 trb_hw->buf_addr_lo = cpu_to_le32(temp);
325 trb_hw->buf_addr_hi = 0;
326 trb_hw->trb_len = cpu_to_le32(*length);
327 trb_hw->ctrl.own = 1;
328
329 if (req->ep->ep_num == 0)
330 trb_hw->ctrl.type = TYPE_DATA;
331 else
332 trb_hw->ctrl.type = TYPE_NORMAL;
333
334 req->req.actual += *length;
335
336 direction = mv_u3d_ep_dir(req->ep);
337 if (direction == MV_U3D_EP_DIR_IN)
338 trb_hw->ctrl.dir = 1;
339 else
340 trb_hw->ctrl.dir = 0;
341
342 /* Enable interrupt for the last trb of a request */
343 if (!req->req.no_interrupt)
344 trb_hw->ctrl.ioc = 1;
345
346 trb_hw->ctrl.chain = 0;
347
348 wmb();
349 return trb;
350}
351
/*
 * mv_u3d_build_trb_chain() - fill one TRB of a multi-TRB chain.
 * @req:     request being mapped; req->req.actual tracks progress
 * @length:  out — bytes covered by this TRB (capped at the max per-TRB size)
 * @trb:     software TRB whose trb_hw is already pointed at its slot
 * @is_last: out — set when this TRB completes the request (including the
 *           zero-length-packet rule when req->req.zero is set)
 *
 * Returns 0 (the current implementation cannot fail).
 */
static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length,
		struct mv_u3d_trb *trb, int *is_last)
{
	u32 temp;
	unsigned int direction;
	struct mv_u3d *u3d;

	/* how big will this transfer be? */
	*length = min(req->req.length - req->req.actual,
			(unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);

	u3d = req->ep->u3d;

	/* chained hw TRBs are mapped as one block later; no per-TRB dma */
	trb->trb_dma = 0;

	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);

	trb->trb_hw->buf_addr_lo = cpu_to_le32(temp);
	trb->trb_hw->buf_addr_hi = 0;
	trb->trb_hw->trb_len = cpu_to_le32(*length);
	trb->trb_hw->ctrl.own = 1;

	if (req->ep->ep_num == 0)
		trb->trb_hw->ctrl.type = TYPE_DATA;
	else
		trb->trb_hw->ctrl.type = TYPE_NORMAL;

	req->req.actual += *length;

	direction = mv_u3d_ep_dir(req->ep);
	if (direction == MV_U3D_EP_DIR_IN)
		trb->trb_hw->ctrl.dir = 1;
	else
		trb->trb_hw->ctrl.dir = 0;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Enable interrupt for the last trb of a request */
	if (*is_last && !req->req.no_interrupt)
		trb->trb_hw->ctrl.ioc = 1;

	if (*is_last)
		trb->trb_hw->ctrl.chain = 0;
	else {
		trb->trb_hw->ctrl.chain = 1;
		dev_dbg(u3d->dev, "chain trb\n");
	}

	/* publish TRB contents before the chain is handed to hardware */
	wmb();

	return 0;
}
414
415/* generate TRB linked list for a request
416 * usb controller only supports continous trb chain,
417 * that trb structure physical address should be continous.
418 */
419static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
420{
421 unsigned count;
422 int is_last;
423 struct mv_u3d_trb *trb;
424 struct mv_u3d_trb_hw *trb_hw;
425 struct mv_u3d *u3d;
426 dma_addr_t dma;
427 unsigned length;
428 unsigned trb_num;
429
430 u3d = req->ep->u3d;
431
432 INIT_LIST_HEAD(&req->trb_list);
433
434 length = req->req.length - req->req.actual;
435 /* normally the request transfer length is less than 16KB.
436 * we use buil_trb_one() to optimize it.
437 */
438 if (length <= (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER) {
439 trb = mv_u3d_build_trb_one(req, &count, &dma);
440 list_add_tail(&trb->trb_list, &req->trb_list);
441 req->trb_head = trb;
442 req->trb_count = 1;
443 req->chain = 0;
444 } else {
445 trb_num = length / MV_U3D_EP_MAX_LENGTH_TRANSFER;
446 if (length % MV_U3D_EP_MAX_LENGTH_TRANSFER)
447 trb_num++;
448
449 trb = kcalloc(trb_num, sizeof(*trb), GFP_ATOMIC);
450 if (!trb) {
451 dev_err(u3d->dev,
452 "%s, trb alloc fail\n", __func__);
453 return -ENOMEM;
454 }
455
456 trb_hw = kcalloc(trb_num, sizeof(*trb_hw), GFP_ATOMIC);
457 if (!trb_hw) {
458 dev_err(u3d->dev,
459 "%s, trb_hw alloc fail\n", __func__);
460 return -ENOMEM;
461 }
462
463 do {
464 trb->trb_hw = trb_hw;
465 if (mv_u3d_build_trb_chain(req, &count,
466 trb, &is_last)) {
467 dev_err(u3d->dev,
468 "%s, mv_u3d_build_trb_chain fail\n",
469 __func__);
470 return -EIO;
471 }
472
473 list_add_tail(&trb->trb_list, &req->trb_list);
474 req->trb_count++;
475 trb++;
476 trb_hw++;
477 } while (!is_last);
478
479 req->trb_head = list_entry(req->trb_list.next,
480 struct mv_u3d_trb, trb_list);
481 req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
482 req->trb_head->trb_hw,
483 trb_num * sizeof(*trb_hw),
484 DMA_BIDIRECTIONAL);
485
486 req->chain = 1;
487 }
488
489 return 0;
490}
491
492static int
493mv_u3d_start_queue(struct mv_u3d_ep *ep)
494{
495 struct mv_u3d *u3d = ep->u3d;
496 struct mv_u3d_req *req;
497 int ret;
498
499 if (!list_empty(&ep->req_list) && !ep->processing)
500 req = list_entry(ep->req_list.next, struct mv_u3d_req, list);
501 else
502 return 0;
503
504 ep->processing = 1;
505
506 /* set up dma mapping */
507 ret = usb_gadget_map_request(&u3d->gadget, &req->req,
508 mv_u3d_ep_dir(ep));
509 if (ret)
510 return ret;
511
512 req->req.status = -EINPROGRESS;
513 req->req.actual = 0;
514 req->trb_count = 0;
515
516 /* build trbs and push them to device queue */
517 if (!mv_u3d_req_to_trb(req)) {
518 ret = mv_u3d_queue_trb(ep, req);
519 if (ret) {
520 ep->processing = 0;
521 return ret;
522 }
523 } else {
524 ep->processing = 0;
525 dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
526 return -ENOMEM;
527 }
528
529 /* irq handler advances the queue */
530 if (req)
531 list_add_tail(&req->queue, &ep->queue);
532
533 return 0;
534}
535
/*
 * mv_u3d_ep_enable() - usb_ep_ops.enable: configure a hardware endpoint
 * from its descriptor.
 *
 * Validates the arguments, clamps maxburst per transfer type, then
 * pulses EP_INIT and programs max packet size, burst, enable bit and
 * transfer type into the direction-specific control registers.
 *
 * Returns 0 on success, -EINVAL for bad arguments or unknown transfer
 * type, -ESHUTDOWN when no gadget driver is bound.
 */
static int mv_u3d_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct mv_u3d *u3d;
	struct mv_u3d_ep *ep;
	struct mv_u3d_ep_context *ep_context;
	u16 max = 0;
	unsigned maxburst = 0;
	u32 epxcr, direction;

	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(_ep, struct mv_u3d_ep, ep);
	u3d = ep->u3d;

	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	direction = mv_u3d_ep_dir(ep);
	max = le16_to_cpu(desc->wMaxPacketSize);

	if (!_ep->maxburst)
		_ep->maxburst = 1;
	maxburst = _ep->maxburst;

	/* Get the endpoint context address */
	/* NOTE(review): ep_context is fetched here but never used below */
	ep_context = (struct mv_u3d_ep_context *)ep->ep_context;

	/* Set the max burst size */
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		if (maxburst > 16) {
			dev_dbg(u3d->dev,
				"max burst should not be greater "
				"than 16 on bulk ep\n");
			maxburst = 1;
			_ep->maxburst = maxburst;
		}
		dev_dbg(u3d->dev,
			"maxburst: %d on bulk %s\n", maxburst, ep->name);
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfer only supports maxburst as one */
		maxburst = 1;
		_ep->maxburst = maxburst;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (maxburst != 1) {
			dev_dbg(u3d->dev,
				"max burst should be 1 on int ep "
				"if transfer size is not 1024\n");
			maxburst = 1;
			_ep->maxburst = maxburst;
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (maxburst != 1) {
			dev_dbg(u3d->dev,
				"max burst should be 1 on isoc ep "
				"if transfer size is not 1024\n");
			maxburst = 1;
			_ep->maxburst = maxburst;
		}
		break;
	default:
		goto en_done;
	}

	ep->ep.maxpacket = max;
	ep->ep.desc = desc;
	ep->enabled = 1;

	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	if (direction == MV_U3D_EP_DIR_OUT) {
		/* pulse EP_INIT to reset the OUT endpoint state machine */
		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
		epxcr |= MV_U3D_EPXCR_EP_INIT;
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
		udelay(5);
		epxcr &= ~MV_U3D_EPXCR_EP_INIT;
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);

		epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
		      | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
		      | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
	} else {
		/* pulse EP_INIT to reset the IN endpoint state machine */
		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
		epxcr |= MV_U3D_EPXCR_EP_INIT;
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
		udelay(5);
		epxcr &= ~MV_U3D_EPXCR_EP_INIT;
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);

		epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
		      | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
		      | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
	}

	return 0;
en_done:
	return -EINVAL;
}
642
/*
 * mv_u3d_ep_disable() - usb_ep_ops.disable: shut an endpoint down.
 *
 * Retires all pending requests with -ESHUTDOWN, then clears the enable
 * bit and transfer type in the direction-specific control register.
 *
 * Returns 0 on success, -EINVAL if the endpoint was never enabled.
 */
static int mv_u3d_ep_disable(struct usb_ep *_ep)
{
	struct mv_u3d *u3d;
	struct mv_u3d_ep *ep;
	struct mv_u3d_ep_context *ep_context;
	u32 epxcr, direction;

	if (!_ep)
		return -EINVAL;

	ep = container_of(_ep, struct mv_u3d_ep, ep);
	if (!ep->ep.desc)
		return -EINVAL;

	u3d = ep->u3d;

	/* Get the endpoint context address */
	/* NOTE(review): ep_context is fetched here but never used below */
	ep_context = ep->ep_context;

	direction = mv_u3d_ep_dir(ep);

	/* nuke all pending requests (does flush) */
	mv_u3d_nuke(ep, -ESHUTDOWN);

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	if (direction == MV_U3D_EP_DIR_OUT) {
		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
		epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		      | USB_ENDPOINT_XFERTYPE_MASK);
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
	} else {
		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
		epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		      | USB_ENDPOINT_XFERTYPE_MASK);
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
	}

	ep->enabled = 0;

	ep->ep.desc = NULL;
	return 0;
}
685
686static struct usb_request *
687mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
688{
689 struct mv_u3d_req *req = NULL;
690
691 req = kzalloc(sizeof *req, gfp_flags);
692 if (!req)
693 return NULL;
694
695 INIT_LIST_HEAD(&req->queue);
696
697 return &req->req;
698}
699
700static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)
701{
702 struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req);
703
704 kfree(req);
705}
706
/*
 * mv_u3d_ep_fifo_flush() - usb_ep_ops.fifo_flush.
 *
 * For ep0 the EP_FLUSH bit is pulsed manually (set, 10us delay, clear).
 * For other endpoints the bit is set and then polled until the hardware
 * clears it, bounded by MV_U3D_FLUSH_TIMEOUT.
 */
static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_u3d *u3d;
	u32 direction;
	struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep);
	unsigned int loops;
	u32 tmp;

	/* if endpoint is not enabled, cannot flush endpoint */
	if (!ep->enabled)
		return;

	u3d = ep->u3d;
	direction = mv_u3d_ep_dir(ep);

	/* ep0 need clear bit after flushing fifo. */
	if (!ep->ep_num) {
		if (direction == MV_U3D_EP_DIR_OUT) {
			tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
			tmp |= MV_U3D_EPXCR_EP_FLUSH;
			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
			udelay(10);
			tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
		} else {
			tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
			tmp |= MV_U3D_EPXCR_EP_FLUSH;
			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
			udelay(10);
			tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
		}
		return;
	}

	if (direction == MV_U3D_EP_DIR_OUT) {
		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
		tmp |= MV_U3D_EPXCR_EP_FLUSH;
		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);

		/* Wait until flushing completed */
		loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
		while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) &
			MV_U3D_EPXCR_EP_FLUSH) {
			/*
			 * EP_FLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (loops == 0) {
				dev_dbg(u3d->dev,
					"EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
					direction ? "in" : "out");
				return;
			}
			loops--;
			udelay(LOOPS_USEC);
		}
	} else {	/* EP_DIR_IN */
		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
		tmp |= MV_U3D_EPXCR_EP_FLUSH;
		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);

		/* Wait until flushing completed */
		loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
		while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) &
			MV_U3D_EPXCR_EP_FLUSH) {
			/*
			 * EP_FLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (loops == 0) {
				dev_dbg(u3d->dev,
					"EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
					direction ? "in" : "out");
				return;
			}
			loops--;
			udelay(LOOPS_USEC);
		}
	}
}
788
/* queues (submits) an I/O request to an endpoint */
/*
 * Validates the request, appends it to the endpoint's software list
 * under ep->req_lock, and — only if the list was empty — kicks the
 * hardware via mv_u3d_start_queue() under u3d->lock.  A zero-length
 * queue on ep0 during the status stage just advances the ep0 state
 * machine.
 */
static int
mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct mv_u3d_ep *ep;
	struct mv_u3d_req *req;
	struct mv_u3d *u3d;
	unsigned long flags;
	int is_first_req = 0;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct mv_u3d_ep, ep);
	u3d = ep->u3d;

	req = container_of(_req, struct mv_u3d_req, req);

	/* zero-length ep0 request in status stage: no transfer needed */
	if (!ep->ep_num
		&& u3d->ep0_state == MV_U3D_STATUS_STAGE
		&& !_req->length) {
		dev_dbg(u3d->dev, "ep0 status stage\n");
		u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
		return 0;
	}

	dev_dbg(u3d->dev, "%s: %s, req: 0x%x\n",
			__func__, _ep->name, (u32)req);

	/* catch various bogus parameters */
	if (!req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(u3d->dev,
			"%s, bad params, _req: 0x%x,"
			"req->req.complete: 0x%x, req->req.buf: 0x%x,"
			"list_empty: 0x%x\n",
			__func__, (u32)_req,
			(u32)req->req.complete, (u32)req->req.buf,
			(u32)list_empty(&req->queue));
		return -EINVAL;
	}
	if (unlikely(!ep->ep.desc)) {
		dev_err(u3d->dev, "%s, bad ep\n", __func__);
		return -EINVAL;
	}
	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
	}

	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) {
		dev_err(u3d->dev,
			"bad params of driver/speed\n");
		return -ESHUTDOWN;
	}

	req->ep = ep;

	/* Software list handles usb request. */
	spin_lock_irqsave(&ep->req_lock, flags);
	is_first_req = list_empty(&ep->req_list);
	list_add_tail(&req->list, &ep->req_list);
	spin_unlock_irqrestore(&ep->req_lock, flags);
	if (!is_first_req) {
		/* a transfer is already in flight; irq path will pick it up */
		dev_dbg(u3d->dev, "list is not empty\n");
		return 0;
	}

	dev_dbg(u3d->dev, "call mv_u3d_start_queue from usb_ep_queue\n");
	spin_lock_irqsave(&u3d->lock, flags);
	mv_u3d_start_queue(ep);
	spin_unlock_irqrestore(&u3d->lock, flags);
	return 0;
}
863
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
/*
 * Finds _req on the endpoint's hardware queue, flushes the fifo if it
 * is the in-flight request, relinks (or clears) the endpoint context's
 * TRB pointer, retires the request with -ECONNRESET, and removes it
 * from the software req_list.  All under u3d->lock.
 */
static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_u3d_ep *ep;
	struct mv_u3d_req *req;
	struct mv_u3d *u3d;
	struct mv_u3d_ep_context *ep_context;
	struct mv_u3d_req *next_req;

	unsigned long flags;
	int ret = 0;

	if (!_ep || !_req)
		return -EINVAL;

	ep = container_of(_ep, struct mv_u3d_ep, ep);
	u3d = ep->u3d;

	spin_lock_irqsave(&ep->u3d->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_u3d_ep_fifo_flush(_ep);

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			/* NOTE(review): this debug text looks inverted
			 * relative to the branch condition — this branch
			 * handles the NOT-last case; confirm before relying
			 * on the log message.
			 */
			dev_dbg(u3d->dev,
				"it is the last request in this ep queue\n");
			ep_context = ep->ep_context;
			next_req = list_entry(req->queue.next,
					struct mv_u3d_req, queue);

			/* Point first TRB of next request to the EP context. */
			/* NOTE(review): this writes the virtual address of
			 * next_req->trb_head, not a DMA address — verify
			 * against the hardware's expectation.
			 */
			iowrite32((u32) next_req->trb_head,
					&ep_context->trb_addr_lo);
		} else {
			struct mv_u3d_ep_context *ep_context;
			ep_context = ep->ep_context;
			ep_context->trb_addr_lo = 0;
			ep_context->trb_addr_hi = 0;
		}

	} else
		WARN_ON(1);

	mv_u3d_done(ep, req, -ECONNRESET);

	/* remove the req from the ep req list */
	if (!list_empty(&ep->req_list)) {
		struct mv_u3d_req *curr_req;
		curr_req = list_entry(ep->req_list.next,
					struct mv_u3d_req, list);
		if (curr_req == req) {
			list_del_init(&req->list);
			ep->processing = 0;
		}
	}

out:
	spin_unlock_irqrestore(&ep->u3d->lock, flags);
	return ret;
}
937
938static void
939mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
940{
941 u32 tmp;
942 struct mv_u3d_ep *ep = u3d->eps;
943
944 dev_dbg(u3d->dev, "%s\n", __func__);
945 if (direction == MV_U3D_EP_DIR_OUT) {
946 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
947 if (stall)
948 tmp |= MV_U3D_EPXCR_EP_HALT;
949 else
950 tmp &= ~MV_U3D_EPXCR_EP_HALT;
951 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
952 } else {
953 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
954 if (stall)
955 tmp |= MV_U3D_EPXCR_EP_HALT;
956 else
957 tmp &= ~MV_U3D_EPXCR_EP_HALT;
958 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
959 }
960}
961
/*
 * mv_u3d_ep_set_halt_wedge() - shared implementation for set_halt and
 * set_wedge.
 * @halt:  nonzero to stall the endpoint, zero to clear the stall
 * @wedge: nonzero (with halt) to also mark the endpoint wedged
 *
 * Returns 0 on success, -EINVAL if the ep is not configured,
 * -EOPNOTSUPP for isoc endpoints, -EAGAIN when halting an IN ep that
 * still has queued transfers.
 */
static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct mv_u3d_ep *ep;
	unsigned long flags = 0;
	int status = 0;
	struct mv_u3d *u3d;

	ep = container_of(_ep, struct mv_u3d_ep, ep);
	u3d = ep->u3d;
	if (!ep->ep.desc) {
		status = -EINVAL;
		goto out;
	}

	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * Attempt to halt IN ep will fail if any transfer requests
	 * are still queue
	 */
	if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN)
			&& !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	spin_lock_irqsave(&ep->u3d->lock, flags);
	mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt);
	/* wedge sticks until the halt is cleared */
	if (halt && wedge)
		ep->wedge = 1;
	else if (!halt)
		ep->wedge = 0;
	spin_unlock_irqrestore(&ep->u3d->lock, flags);

	/* halting ep0 restarts the control state machine in OUT direction */
	if (ep->ep_num == 0)
		u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
out:
	return status;
}
1004
/* usb_ep_ops.set_halt: stall/unstall without wedging the endpoint */
static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_u3d_ep_set_halt_wedge(_ep, halt, 0);
}
1009
/* usb_ep_ops.set_wedge: stall and wedge (only clearable via clear-halt) */
static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_u3d_ep_set_halt_wedge(_ep, 1, 1);
}
1014
1015static struct usb_ep_ops mv_u3d_ep_ops = {
1016 .enable = mv_u3d_ep_enable,
1017 .disable = mv_u3d_ep_disable,
1018
1019 .alloc_request = mv_u3d_alloc_request,
1020 .free_request = mv_u3d_free_request,
1021
1022 .queue = mv_u3d_ep_queue,
1023 .dequeue = mv_u3d_ep_dequeue,
1024
1025 .set_wedge = mv_u3d_ep_set_wedge,
1026 .set_halt = mv_u3d_ep_set_halt,
1027 .fifo_flush = mv_u3d_ep_fifo_flush,
1028};
1029
/*
 * mv_u3d_controller_stop() - quiesce the controller.
 *
 * Masks interrupts (keeping only VBUS-valid detection when clocks stay
 * on), acknowledges all pending event registers, releases the setup
 * lock and clears the RUN bit.
 */
static void mv_u3d_controller_stop(struct mv_u3d *u3d)
{
	u32 tmp;

	/* keep VBUS detection alive if clocks are never gated */
	if (!u3d->clock_gating && u3d->vbus_valid_detect)
		iowrite32(MV_U3D_INTR_ENABLE_VBUS_VALID,
				&u3d->vuc_regs->intrenable);
	else
		iowrite32(0, &u3d->vuc_regs->intrenable);
	/* write-1-to-clear all latched event bits */
	iowrite32(~0x0, &u3d->vuc_regs->endcomplete);
	iowrite32(~0x0, &u3d->vuc_regs->trbunderrun);
	iowrite32(~0x0, &u3d->vuc_regs->trbcomplete);
	iowrite32(~0x0, &u3d->vuc_regs->linkchange);
	iowrite32(0x1, &u3d->vuc_regs->setuplock);

	/* Reset the RUN bit in the command register to stop USB */
	tmp = ioread32(&u3d->op_regs->usbcmd);
	tmp &= ~MV_U3D_CMD_RUN_STOP;
	iowrite32(tmp, &u3d->op_regs->usbcmd);
	dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n",
		ioread32(&u3d->op_regs->usbcmd));
}
1052
/*
 * mv_u3d_controller_start() - bring the controller online.
 *
 * Releases the LTSSM link state machine, unmasks the interrupt sources
 * the driver handles, enables the control endpoint and sets the RUN bit.
 */
static void mv_u3d_controller_start(struct mv_u3d *u3d)
{
	u32 usbintr;
	u32 temp;

	/* enable link LTSSM state machine */
	temp = ioread32(&u3d->vuc_regs->ltssm);
	temp |= MV_U3D_LTSSM_PHY_INIT_DONE;
	iowrite32(temp, &u3d->vuc_regs->ltssm);

	/* Enable interrupts */
	usbintr = MV_U3D_INTR_ENABLE_LINK_CHG | MV_U3D_INTR_ENABLE_TXDESC_ERR |
		MV_U3D_INTR_ENABLE_RXDESC_ERR | MV_U3D_INTR_ENABLE_TX_COMPLETE |
		MV_U3D_INTR_ENABLE_RX_COMPLETE | MV_U3D_INTR_ENABLE_SETUP |
		(u3d->vbus_valid_detect ? MV_U3D_INTR_ENABLE_VBUS_VALID : 0);
	iowrite32(usbintr, &u3d->vuc_regs->intrenable);

	/* Enable ctrl ep */
	iowrite32(0x1, &u3d->vuc_regs->ctrlepenable);

	/* Set the Run bit in the command register */
	iowrite32(MV_U3D_CMD_RUN_STOP, &u3d->op_regs->usbcmd);
	dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n",
		ioread32(&u3d->op_regs->usbcmd));
}
1078
/*
 * mv_u3d_controller_reset() - hard-reset the controller.
 *
 * Stops the controller, triggers CTRL_RESET and polls (bounded by
 * MV_U3D_RESET_TIMEOUT) until hardware clears the bit, then programs
 * the endpoint context base address registers.
 *
 * Returns 0 on success or -ETIMEDOUT if the reset never completes.
 */
static int mv_u3d_controller_reset(struct mv_u3d *u3d)
{
	unsigned int loops;
	u32 tmp;

	/* Stop the controller */
	tmp = ioread32(&u3d->op_regs->usbcmd);
	tmp &= ~MV_U3D_CMD_RUN_STOP;
	iowrite32(tmp, &u3d->op_regs->usbcmd);

	/* Reset the controller to get default values */
	iowrite32(MV_U3D_CMD_CTRL_RESET, &u3d->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(MV_U3D_RESET_TIMEOUT);
	while (ioread32(&u3d->op_regs->usbcmd) & MV_U3D_CMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(u3d->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Configure the Endpoint Context Address */
	iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl);
	iowrite32(0, &u3d->op_regs->dcbaaph);

	return 0;
}
1110
/*
 * mv_u3d_enable() - power up the controller (clock + PHY).
 *
 * No-op when already active, or when clock gating is unsupported (in
 * that case the clock is assumed permanently on and only the flag is
 * set).  Otherwise enables the clock and runs the platform PHY init
 * hook, disabling the clock again if PHY init fails.
 *
 * Returns 0 on success or the PHY init error code.
 */
static int mv_u3d_enable(struct mv_u3d *u3d)
{
	struct mv_usb_platform_data *pdata = u3d->dev->platform_data;
	int retval;

	if (u3d->active)
		return 0;

	if (!u3d->clock_gating) {
		u3d->active = 1;
		return 0;
	}

	dev_dbg(u3d->dev, "enable u3d\n");
	clk_enable(u3d->clk);
	if (pdata->phy_init) {
		retval = pdata->phy_init(u3d->phy_regs);
		if (retval) {
			dev_err(u3d->dev,
				"init phy error %d\n", retval);
			/* roll back the clock enable on PHY failure */
			clk_disable(u3d->clk);
			return retval;
		}
	}
	u3d->active = 1;

	return 0;
}
1139
/*
 * mv_u3d_disable() - power down the controller (PHY + clock).
 *
 * Only acts when clock gating is supported and the controller is
 * currently active; mirrors mv_u3d_enable().
 */
static void mv_u3d_disable(struct mv_u3d *u3d)
{
	struct mv_usb_platform_data *pdata = u3d->dev->platform_data;
	if (u3d->clock_gating && u3d->active) {
		dev_dbg(u3d->dev, "disable u3d\n");
		if (pdata->phy_deinit)
			pdata->phy_deinit(u3d->phy_regs);
		clk_disable(u3d->clk);
		u3d->active = 0;
	}
}
1151
/*
 * mv_u3d_vbus_session() - usb_gadget_ops.vbus_session.
 * @is_active: nonzero when VBUS is present.
 *
 * On VBUS appearing (with a bound driver and soft-connect on) powers
 * the controller up and re-initializes it; on VBUS disappearing stops
 * all activity and powers it down.  Returns 0 or the enable error.
 */
static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_u3d *u3d;
	unsigned long flags;
	int retval = 0;

	u3d = container_of(gadget, struct mv_u3d, gadget);

	spin_lock_irqsave(&u3d->lock, flags);

	u3d->vbus_active = (is_active != 0);
	dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, u3d->softconnect, u3d->vbus_active);
	/*
	 * 1. external VBUS detect: we can disable/enable clock on demand.
	 * 2. UDC VBUS detect: we have to enable clock all the time.
	 * 3. No VBUS detect: we have to enable clock all the time.
	 */
	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
		retval = mv_u3d_enable(u3d);
		if (retval == 0) {
			/*
			 * after clock is disabled, we lost all the register
			 * context. We have to re-init registers
			 */
			mv_u3d_controller_reset(u3d);
			mv_u3d_ep0_reset(u3d);
			mv_u3d_controller_start(u3d);
		}
	} else if (u3d->driver && u3d->softconnect) {
		/* VBUS went away: tear down unless already powered off */
		if (!u3d->active)
			goto out;

		/* stop all the transfer in queue*/
		mv_u3d_stop_activity(u3d, u3d->driver);
		mv_u3d_controller_stop(u3d);
		mv_u3d_disable(u3d);
	}

out:
	spin_unlock_irqrestore(&u3d->lock, flags);
	return retval;
}
1195
1196/* constrain controller's VBUS power usage
1197 * This call is used by gadget drivers during SET_CONFIGURATION calls,
1198 * reporting how much power the device may consume. For example, this
1199 * could affect how quickly batteries are recharged.
1200 *
1201 * Returns zero on success, else negative errno.
1202 */
1203static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1204{
1205 struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
1206
1207 u3d->power = mA;
1208
1209 return 0;
1210}
1211
/* usb_gadget_ops.pullup: software connect/disconnect.  Brings the
 * controller up when a driver is bound, VBUS is present and is_on is
 * set; quiesces it when is_on is cleared while VBUS remains active.
 * Returns 0, or a negative errno from mv_u3d_enable().
 */
static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&u3d->lock, flags);

	dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, u3d->softconnect, u3d->vbus_active);
	u3d->softconnect = (is_on != 0);
	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
		retval = mv_u3d_enable(u3d);
		if (retval == 0) {
			/*
			 * after clock is disabled, we lost all the register
			 * context. We have to re-init registers
			 */
			mv_u3d_controller_reset(u3d);
			mv_u3d_ep0_reset(u3d);
			mv_u3d_controller_start(u3d);
		}
	} else if (u3d->driver && u3d->vbus_active) {
		/* soft-disconnect: stop all the transfer in queue*/
		mv_u3d_stop_activity(u3d, u3d->driver);
		mv_u3d_controller_stop(u3d);
		mv_u3d_disable(u3d);
	}

	spin_unlock_irqrestore(&u3d->lock, flags);

	return retval;
}
1245
/* usb_gadget_ops.udc_start: bind a gadget driver to this UDC.
 * Without clock gating the clock/PHY are switched on here and stay
 * on; with clock gating they come up later on VBUS detection.
 * Returns 0, or -EBUSY if a driver is already bound.
 */
static int mv_u3d_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
	struct mv_usb_platform_data *pdata = u3d->dev->platform_data;
	unsigned long flags;

	if (u3d->driver)
		return -EBUSY;

	spin_lock_irqsave(&u3d->lock, flags);

	if (!u3d->clock_gating) {
		/* keep the hardware powered for the whole bind lifetime */
		clk_enable(u3d->clk);
		if (pdata->phy_init)
			pdata->phy_init(u3d->phy_regs);
	}

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	u3d->driver = driver;
	u3d->gadget.dev.driver = &driver->driver;

	u3d->ep0_dir = USB_DIR_OUT;

	spin_unlock_irqrestore(&u3d->lock, flags);

	/* arm controller-side VBUS-valid detection */
	u3d->vbus_valid_detect = 1;

	return 0;
}
1277
/* usb_gadget_ops.udc_stop: unbind the gadget driver.  The clock/PHY
 * are forced on so the controller registers can be touched during
 * teardown, then switched off again.  Always returns 0.
 */
static int mv_u3d_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
	struct mv_usb_platform_data *pdata = u3d->dev->platform_data;
	unsigned long flags;

	u3d->vbus_valid_detect = 0;
	spin_lock_irqsave(&u3d->lock, flags);

	/* enable clock to access controller register */
	clk_enable(u3d->clk);
	if (pdata->phy_init)
		pdata->phy_init(u3d->phy_regs);

	mv_u3d_controller_stop(u3d);
	/* stop all usb activities */
	u3d->gadget.speed = USB_SPEED_UNKNOWN;
	mv_u3d_stop_activity(u3d, driver);
	/* mv_u3d_disable() only acts in the clock-gating case */
	mv_u3d_disable(u3d);

	/* balance the explicit clk_enable()/phy_init() above */
	if (pdata->phy_deinit)
		pdata->phy_deinit(u3d->phy_regs);
	clk_disable(u3d->clk);

	spin_unlock_irqrestore(&u3d->lock, flags);

	u3d->gadget.dev.driver = NULL;
	u3d->driver = NULL;

	return 0;
}
1310
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_u3d_ops = {
	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_u3d_vbus_session,

	/* constrain controller's VBUS power usage */
	.vbus_draw	= mv_u3d_vbus_draw,

	/* software-controlled connect/disconnect */
	.pullup		= mv_u3d_pullup,
	/* gadget driver bind/unbind */
	.udc_start	= mv_u3d_start,
	.udc_stop	= mv_u3d_stop,
};
1323
/* Initialize endpoint bookkeeping.  The layout is two mv_u3d_ep
 * entries per hardware endpoint number: even index = OUT, odd = IN,
 * with eps[1] doing double duty as ep0 for both directions.  Each ep
 * is paired with its hardware context at the same index.
 * Always returns 0.
 */
static int mv_u3d_eps_init(struct mv_u3d *u3d)
{
	struct mv_u3d_ep *ep;
	char name[14];
	int i;

	/* initialize ep0, ep0 in/out use eps[1] */
	ep = &u3d->eps[1];
	ep->u3d = u3d;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_u3d_ep_ops;
	ep->wedge = 0;
	ep->ep.maxpacket = MV_U3D_EP0_MAX_PKT_SIZE;
	ep->ep_num = 0;
	ep->ep.desc = &mv_u3d_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);
	INIT_LIST_HEAD(&ep->req_list);
	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* add ep0 ep_context */
	ep->ep_context = &u3d->ep_context[1];

	/* initialize other endpoints */
	for (i = 2; i < u3d->max_eps * 2; i++) {
		ep = &u3d->eps[i];
		if (i & 1) {
			snprintf(name, sizeof(name), "ep%din", i >> 1);
			ep->direction = MV_U3D_EP_DIR_IN;
		} else {
			snprintf(name, sizeof(name), "ep%dout", i >> 1);
			ep->direction = MV_U3D_EP_DIR_OUT;
		}
		ep->u3d = u3d;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.ops = &mv_u3d_ep_ops;
		/* placeholder; presumably replaced by the descriptor's
		 * wMaxPacketSize at ep_enable time -- TODO confirm */
		ep->ep.maxpacket = (unsigned short) ~0;
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list);

		INIT_LIST_HEAD(&ep->req_list);
		spin_lock_init(&ep->req_lock);
		ep->ep_context = &u3d->ep_context[i];
	}

	return 0;
}
1375
1376/* delete all endpoint requests, called with spinlock held */
1377static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status)
1378{
1379 /* endpoint fifo flush */
1380 mv_u3d_ep_fifo_flush(&ep->ep);
1381
1382 while (!list_empty(&ep->queue)) {
1383 struct mv_u3d_req *req = NULL;
1384 req = list_entry(ep->queue.next, struct mv_u3d_req, queue);
1385 mv_u3d_done(ep, req, status);
1386 }
1387}
1388
/* stop all USB activities: retire every queued request with
 * -ESHUTDOWN and tell the gadget driver it has been disconnected.
 * Called with u3d->lock held.
 */
static
void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver)
{
	struct mv_u3d_ep *ep;

	/* ep0 (eps[1]) is not on gadget.ep_list, nuke it explicitly */
	mv_u3d_nuke(&u3d->eps[1], -ESHUTDOWN);

	list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) {
		mv_u3d_nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		/* drop the lock around the callback so the gadget driver
		 * may call back into this UDC */
		spin_unlock(&u3d->lock);
		driver->disconnect(&u3d->gadget);
		spin_lock(&u3d->lock);
	}
}
1408
1409static void mv_u3d_irq_process_error(struct mv_u3d *u3d)
1410{
1411 /* Increment the error count */
1412 u3d->errors++;
1413 dev_err(u3d->dev, "%s\n", __func__);
1414}
1415
/* Handle link-change interrupts: read the linkchange status register,
 * acknowledge it by writing the value back, and update the USB device
 * state machine accordingly.
 */
static void mv_u3d_irq_process_link_change(struct mv_u3d *u3d)
{
	u32 linkchange;

	linkchange = ioread32(&u3d->vuc_regs->linkchange);
	/* write the read value back to acknowledge/clear the events */
	iowrite32(linkchange, &u3d->vuc_regs->linkchange);

	dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange);

	if (linkchange & MV_U3D_LINK_CHANGE_LINK_UP) {
		dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n",
			ioread32(&u3d->vuc_regs->ltssmstate));

		u3d->usb_state = USB_STATE_DEFAULT;
		u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
		u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;

		/* set speed: this controller only links up at SuperSpeed */
		u3d->gadget.speed = USB_SPEED_SUPER;
	}

	if (linkchange & MV_U3D_LINK_CHANGE_SUSPEND) {
		dev_dbg(u3d->dev, "link suspend\n");
		/* remember where we were so resume can restore it */
		u3d->resume_state = u3d->usb_state;
		u3d->usb_state = USB_STATE_SUSPENDED;
	}

	if (linkchange & MV_U3D_LINK_CHANGE_RESUME) {
		dev_dbg(u3d->dev, "link resume\n");
		u3d->usb_state = u3d->resume_state;
		u3d->resume_state = 0;
	}

	if (linkchange & MV_U3D_LINK_CHANGE_WRESET) {
		dev_dbg(u3d->dev, "warm reset\n");
		u3d->usb_state = USB_STATE_POWERED;
	}

	if (linkchange & MV_U3D_LINK_CHANGE_HRESET) {
		dev_dbg(u3d->dev, "hot reset\n");
		u3d->usb_state = USB_STATE_DEFAULT;
	}

	if (linkchange & MV_U3D_LINK_CHANGE_INACT)
		dev_dbg(u3d->dev, "inactive\n");

	if (linkchange & MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0)
		dev_dbg(u3d->dev, "ss.disabled\n");

	if (linkchange & MV_U3D_LINK_CHANGE_VBUS_INVALID) {
		dev_dbg(u3d->dev, "vbus invalid\n");
		u3d->usb_state = USB_STATE_ATTACHED;
		u3d->vbus_valid_detect = 1;
		/* if external vbus detect is not supported,
		 * we handle it here.
		 */
		if (!u3d->vbus) {
			/* vbus_session takes the lock itself */
			spin_unlock(&u3d->lock);
			mv_u3d_vbus_session(&u3d->gadget, 0);
			spin_lock(&u3d->lock);
		}
	}
}
1479
/* Handle a USB SET_ADDRESS request locally (not delegated to the
 * gadget driver): validate the device state and the address range,
 * then program the low 7 bits of the devaddrtiebrkr register.
 * Stalls ep0 on any error.
 */
static void mv_u3d_ch9setaddress(struct mv_u3d *u3d,
	struct usb_ctrlrequest *setup)
{
	u32 tmp;

	/* SET_ADDRESS is only legal in the Default state (USB ch. 9) */
	if (u3d->usb_state != USB_STATE_DEFAULT) {
		dev_err(u3d->dev,
			"%s, cannot setaddr in this state (%d)\n",
			__func__, u3d->usb_state);
		goto err;
	}

	u3d->dev_addr = (u8)setup->wValue;

	dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr);

	/* USB device addresses are 7 bits wide */
	if (u3d->dev_addr > 127) {
		dev_err(u3d->dev,
			"%s, u3d address is wrong (out of range)\n", __func__);
		u3d->dev_addr = 0;
		goto err;
	}

	/* update usb state */
	u3d->usb_state = USB_STATE_ADDRESS;

	/* set the new address into the low 7 bits of the register */
	tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr);
	tmp &= ~0x7F;
	tmp |= (u32)u3d->dev_addr;
	iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr);

	return;
err:
	mv_u3d_ep0_stall(u3d);
}
1516
1517static int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup)
1518{
1519 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
1520 if (setup->bRequest == USB_REQ_SET_CONFIGURATION)
1521 return 1;
1522
1523 return 0;
1524}
1525
/* Dispatch a received SETUP packet on endpoint ep_num.  SET_ADDRESS
 * is handled inside the driver; everything else is delegated to the
 * gadget driver's setup() callback with the lock dropped.  ep0 is
 * stalled when the callback rejects the request.
 * Called with u3d->lock held.
 */
static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	bool delegate = false;

	/* cancel anything still pending on this endpoint's IN side
	 * before starting a new control transfer */
	mv_u3d_nuke(&u3d->eps[ep_num * 2 + MV_U3D_EP_DIR_IN], -ESHUTDOWN);

	dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
		setup->bRequestType, setup->bRequest,
		setup->wValue, setup->wIndex, setup->wLength);

	/* We process some stardard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			delegate = true;
			break;

		case USB_REQ_SET_ADDRESS:
			/* only request handled entirely in the driver */
			mv_u3d_ch9setaddress(u3d, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			delegate = true;
			break;

		case USB_REQ_SET_FEATURE:
			delegate = true;
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from u3d */
			u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? MV_U3D_EP_DIR_IN : MV_U3D_EP_DIR_OUT;
			/* drop the lock around the gadget callback */
			spin_unlock(&u3d->lock);
			if (u3d->driver->setup(&u3d->gadget,
				&u3d->local_setup_buff) < 0) {
				dev_err(u3d->dev, "setup error!\n");
				mv_u3d_ep0_stall(u3d);
			}
			spin_lock(&u3d->lock);
		} else {
			/* no DATA phase, STATUS phase from gadget */
			u3d->ep0_dir = MV_U3D_EP_DIR_IN;
			u3d->ep0_state = MV_U3D_STATUS_STAGE;
			spin_unlock(&u3d->lock);
			if (u3d->driver->setup(&u3d->gadget,
				&u3d->local_setup_buff) < 0)
				mv_u3d_ep0_stall(u3d);
			spin_lock(&u3d->lock);
		}

		if (mv_u3d_is_set_configuration(setup)) {
			dev_dbg(u3d->dev, "u3d configured\n");
			u3d->usb_state = USB_STATE_CONFIGURED;
		}
	}
}
1593
1594static void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr)
1595{
1596 struct mv_u3d_ep_context *epcontext;
1597
1598 epcontext = &u3d->ep_context[ep_num * 2 + MV_U3D_EP_DIR_IN];
1599
1600 /* Copy the setup packet to local buffer */
1601 memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8);
1602}
1603
/* Handle "setup packet received" interrupts.  The setuplock register
 * holds one bit per endpoint with a pending setup packet; each one is
 * copied into the local buffer and dispatched.  The read value is
 * then written back (presumably to release the hardware setup lock
 * -- NOTE(review): confirm against the controller datasheet).
 */
static void mv_u3d_irq_process_setup(struct mv_u3d *u3d)
{
	u32 tmp, i;
	/* Process all Setup packet received interrupts */
	tmp = ioread32(&u3d->vuc_regs->setuplock);
	if (tmp) {
		for (i = 0; i < u3d->max_eps; i++) {
			if (tmp & (1 << i)) {
				mv_u3d_get_setup_data(u3d, i,
					(u8 *)(&u3d->local_setup_buff));
				mv_u3d_handle_setup_packet(u3d, i,
					&u3d->local_setup_buff);
			}
		}
	}

	iowrite32(tmp, &u3d->vuc_regs->setuplock);
}
1622
/* Handle transfer-completion interrupts.  The endcomplete register is
 * a bitmap with one bit per endpoint/direction (bit = ep_num + 16 *
 * direction, i.e. low half OUT, high half IN).  For each set bit,
 * retire the head of the pending request list, complete finished
 * requests on the queue, and kick the next queued transfer.
 * Called with u3d->lock held.
 */
static void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_u3d_ep *curr_ep;
	struct mv_u3d_req *curr_req, *temp_req;
	int status;

	tmp = ioread32(&u3d->vuc_regs->endcomplete);

	dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp);
	if (!tmp)
		return;
	/* acknowledge the completion bits we are about to handle */
	iowrite32(tmp, &u3d->vuc_regs->endcomplete);

	for (i = 0; i < u3d->max_eps * 2; i++) {
		/* even i = OUT, odd i = IN (matches eps[] layout) */
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		/* ep0 is represented by eps[1] for both directions */
		if (i == 0)
			curr_ep = &u3d->eps[1];
		else
			curr_ep = &u3d->eps[i];

		/* remove req out of ep request list after completion */
		dev_dbg(u3d->dev, "tr comp: check req_list\n");
		spin_lock(&curr_ep->req_lock);
		if (!list_empty(&curr_ep->req_list)) {
			struct mv_u3d_req *req;
			req = list_entry(curr_ep->req_list.next,
				struct mv_u3d_req, list);
			list_del_init(&req->list);
			curr_ep->processing = 0;
		}
		spin_unlock(&curr_ep->req_lock);

		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = mv_u3d_process_ep_req(u3d, i, curr_req);
			if (status)
				break;
			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion: only one request is
			 * retired per interrupt */
			if (ep_num == 0) {
				mv_u3d_done(curr_ep, curr_req, 0);
				break;
			} else {
				mv_u3d_done(curr_ep, curr_req, status);
			}
		}

		dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n");
		mv_u3d_start_queue(curr_ep);
	}
}
1686
/* Top-level interrupt handler (shared line).  Reads the masked cause
 * bits and dispatches to the per-event handlers.  Returns IRQ_NONE
 * when no enabled cause bit is set (the line belongs to another
 * device), IRQ_HANDLED otherwise.
 */
static irqreturn_t mv_u3d_irq(int irq, void *dev)
{
	struct mv_u3d *u3d = (struct mv_u3d *)dev;
	u32 status, intr;
	u32 bridgesetting;
	u32 trbunderrun;

	spin_lock(&u3d->lock);

	/* only consider causes that are actually enabled */
	status = ioread32(&u3d->vuc_regs->intrcause);
	intr = ioread32(&u3d->vuc_regs->intrenable);
	status &= intr;

	if (status == 0) {
		spin_unlock(&u3d->lock);
		dev_err(u3d->dev, "irq error!\n");
		return IRQ_NONE;
	}

	if (status & MV_U3D_USBINT_VBUS_VALID) {
		bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting);
		if (bridgesetting & MV_U3D_BRIDGE_SETTING_VBUS_VALID) {
			/* write vbus valid bit of bridge setting to clear */
			bridgesetting = MV_U3D_BRIDGE_SETTING_VBUS_VALID;
			iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting);
			dev_dbg(u3d->dev, "vbus valid\n");

			u3d->usb_state = USB_STATE_POWERED;
			u3d->vbus_valid_detect = 0;
			/* if external vbus detect is not supported,
			 * we handle it here.
			 */
			if (!u3d->vbus) {
				/* vbus_session takes the lock itself */
				spin_unlock(&u3d->lock);
				mv_u3d_vbus_session(&u3d->gadget, 1);
				spin_lock(&u3d->lock);
			}
		} else
			dev_err(u3d->dev, "vbus bit is not set\n");
	}

	/* RX data is already in the 16KB FIFO.*/
	if (status & MV_U3D_USBINT_UNDER_RUN) {
		trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun);
		dev_err(u3d->dev, "under run, ep%d\n", trbunderrun);
		iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun);
		mv_u3d_irq_process_error(u3d);
	}

	if (status & (MV_U3D_USBINT_RXDESC_ERR | MV_U3D_USBINT_TXDESC_ERR)) {
		/* write one to clear */
		iowrite32(status & (MV_U3D_USBINT_RXDESC_ERR
			| MV_U3D_USBINT_TXDESC_ERR),
			&u3d->vuc_regs->intrcause);
		dev_err(u3d->dev, "desc err 0x%x\n", status);
		mv_u3d_irq_process_error(u3d);
	}

	if (status & MV_U3D_USBINT_LINK_CHG)
		mv_u3d_irq_process_link_change(u3d);

	/* TX and RX completions share one handler; it scans both halves
	 * of the endcomplete bitmap */
	if (status & MV_U3D_USBINT_TX_COMPLETE)
		mv_u3d_irq_process_tr_complete(u3d);

	if (status & MV_U3D_USBINT_RX_COMPLETE)
		mv_u3d_irq_process_tr_complete(u3d);

	if (status & MV_U3D_USBINT_SETUP)
		mv_u3d_irq_process_setup(u3d);

	spin_unlock(&u3d->lock);
	return IRQ_HANDLED;
}
1760
static void mv_u3d_gadget_release(struct device *dev)
{
	/* nothing to free here: the mv_u3d struct is kfree'd in remove() */
	dev_dbg(dev, "%s\n", __func__);
}
1765
1766static __devexit int mv_u3d_remove(struct platform_device *dev)
1767{
1768 struct mv_u3d *u3d = platform_get_drvdata(dev);
1769
1770 BUG_ON(u3d == NULL);
1771
1772 usb_del_gadget_udc(&u3d->gadget);
1773
1774 /* free memory allocated in probe */
1775 if (u3d->trb_pool)
1776 dma_pool_destroy(u3d->trb_pool);
1777
1778 if (u3d->ep_context)
1779 dma_free_coherent(&dev->dev, u3d->ep_context_size,
1780 u3d->ep_context, u3d->ep_context_dma);
1781
1782 kfree(u3d->eps);
1783
1784 if (u3d->irq)
1785 free_irq(u3d->irq, &dev->dev);
1786
1787 if (u3d->cap_regs)
1788 iounmap(u3d->cap_regs);
1789 u3d->cap_regs = NULL;
1790
1791 kfree(u3d->status_req);
1792
1793 clk_put(u3d->clk);
1794
1795 device_unregister(&u3d->gadget.dev);
1796
1797 platform_set_drvdata(dev, NULL);
1798
1799 kfree(u3d);
1800
1801 return 0;
1802}
1803
1804static int mv_u3d_probe(struct platform_device *dev)
1805{
1806 struct mv_u3d *u3d = NULL;
1807 struct mv_usb_platform_data *pdata = dev->dev.platform_data;
1808 int retval = 0;
1809 struct resource *r;
1810 size_t size;
1811
1812 if (!dev->dev.platform_data) {
1813 dev_err(&dev->dev, "missing platform_data\n");
1814 retval = -ENODEV;
1815 goto err_pdata;
1816 }
1817
1818 u3d = kzalloc(sizeof(*u3d), GFP_KERNEL);
1819 if (!u3d) {
1820 dev_err(&dev->dev, "failed to allocate memory for u3d\n");
1821 retval = -ENOMEM;
1822 goto err_alloc_private;
1823 }
1824
1825 spin_lock_init(&u3d->lock);
1826
1827 platform_set_drvdata(dev, u3d);
1828
1829 u3d->dev = &dev->dev;
1830 u3d->vbus = pdata->vbus;
1831
1832 u3d->clk = clk_get(&dev->dev, pdata->clkname[0]);
1833 if (IS_ERR(u3d->clk)) {
1834 retval = PTR_ERR(u3d->clk);
1835 goto err_get_clk;
1836 }
1837
1838 r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs");
1839 if (!r) {
1840 dev_err(&dev->dev, "no I/O memory resource defined\n");
1841 retval = -ENODEV;
1842 goto err_get_cap_regs;
1843 }
1844
1845 u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *)
1846 ioremap(r->start, resource_size(r));
1847 if (!u3d->cap_regs) {
1848 dev_err(&dev->dev, "failed to map I/O memory\n");
1849 retval = -EBUSY;
1850 goto err_map_cap_regs;
1851 } else {
1852 dev_dbg(&dev->dev, "cap_regs address: 0x%x/0x%x\n",
1853 (unsigned int)r->start, (unsigned int)u3d->cap_regs);
1854 }
1855
1856 /* we will access controller register, so enable the u3d controller */
1857 clk_enable(u3d->clk);
1858
1859 if (pdata->phy_init) {
1860 retval = pdata->phy_init(u3d->phy_regs);
1861 if (retval) {
1862 dev_err(&dev->dev, "init phy error %d\n", retval);
1863 goto err_u3d_enable;
1864 }
1865 }
1866
1867 u3d->op_regs = (struct mv_u3d_op_regs __iomem *)((u32)u3d->cap_regs
1868 + MV_U3D_USB3_OP_REGS_OFFSET);
1869
1870 u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)((u32)u3d->cap_regs
1871 + ioread32(&u3d->cap_regs->vuoff));
1872
1873 u3d->max_eps = 16;
1874
1875 /*
1876 * some platform will use usb to download image, it may not disconnect
1877 * usb gadget before loading kernel. So first stop u3d here.
1878 */
1879 mv_u3d_controller_stop(u3d);
1880 iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause);
1881
1882 if (pdata->phy_deinit)
1883 pdata->phy_deinit(u3d->phy_regs);
1884 clk_disable(u3d->clk);
1885
1886 size = u3d->max_eps * sizeof(struct mv_u3d_ep_context) * 2;
1887 size = (size + MV_U3D_EP_CONTEXT_ALIGNMENT - 1)
1888 & ~(MV_U3D_EP_CONTEXT_ALIGNMENT - 1);
1889 u3d->ep_context = dma_alloc_coherent(&dev->dev, size,
1890 &u3d->ep_context_dma, GFP_KERNEL);
1891 if (!u3d->ep_context) {
1892 dev_err(&dev->dev, "allocate ep context memory failed\n");
1893 retval = -ENOMEM;
1894 goto err_alloc_ep_context;
1895 }
1896 u3d->ep_context_size = size;
1897
1898 /* create TRB dma_pool resource */
1899 u3d->trb_pool = dma_pool_create("u3d_trb",
1900 &dev->dev,
1901 sizeof(struct mv_u3d_trb_hw),
1902 MV_U3D_TRB_ALIGNMENT,
1903 MV_U3D_DMA_BOUNDARY);
1904
1905 if (!u3d->trb_pool) {
1906 retval = -ENOMEM;
1907 goto err_alloc_trb_pool;
1908 }
1909
1910 size = u3d->max_eps * sizeof(struct mv_u3d_ep) * 2;
1911 u3d->eps = kzalloc(size, GFP_KERNEL);
1912 if (!u3d->eps) {
1913 dev_err(&dev->dev, "allocate ep memory failed\n");
1914 retval = -ENOMEM;
1915 goto err_alloc_eps;
1916 }
1917
1918 /* initialize ep0 status request structure */
1919 u3d->status_req = kzalloc(sizeof(struct mv_u3d_req) + 8, GFP_KERNEL);
1920 if (!u3d->status_req) {
1921 dev_err(&dev->dev, "allocate status_req memory failed\n");
1922 retval = -ENOMEM;
1923 goto err_alloc_status_req;
1924 }
1925 INIT_LIST_HEAD(&u3d->status_req->queue);
1926
1927 /* allocate a small amount of memory to get valid address */
1928 u3d->status_req->req.buf = (char *)u3d->status_req
1929 + sizeof(struct mv_u3d_req);
1930 u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
1931
1932 u3d->resume_state = USB_STATE_NOTATTACHED;
1933 u3d->usb_state = USB_STATE_ATTACHED;
1934 u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
1935 u3d->remote_wakeup = 0;
1936
1937 r = platform_get_resource(dev, IORESOURCE_IRQ, 0);
1938 if (!r) {
1939 dev_err(&dev->dev, "no IRQ resource defined\n");
1940 retval = -ENODEV;
1941 goto err_get_irq;
1942 }
1943 u3d->irq = r->start;
1944 if (request_irq(u3d->irq, mv_u3d_irq,
1945 IRQF_DISABLED | IRQF_SHARED, driver_name, u3d)) {
1946 u3d->irq = 0;
1947 dev_err(&dev->dev, "Request irq %d for u3d failed\n",
1948 u3d->irq);
1949 retval = -ENODEV;
1950 goto err_request_irq;
1951 }
1952
1953 /* initialize gadget structure */
1954 u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */
1955 u3d->gadget.ep0 = &u3d->eps[1].ep; /* gadget ep0 */
1956 INIT_LIST_HEAD(&u3d->gadget.ep_list); /* ep_list */
1957 u3d->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
1958
1959 /* the "gadget" abstracts/virtualizes the controller */
1960 dev_set_name(&u3d->gadget.dev, "gadget");
1961 u3d->gadget.dev.parent = &dev->dev;
1962 u3d->gadget.dev.dma_mask = dev->dev.dma_mask;
1963 u3d->gadget.dev.release = mv_u3d_gadget_release;
1964 u3d->gadget.name = driver_name; /* gadget name */
1965
1966 retval = device_register(&u3d->gadget.dev);
1967 if (retval)
1968 goto err_register_gadget_device;
1969
1970 mv_u3d_eps_init(u3d);
1971
1972 /* external vbus detection */
1973 if (u3d->vbus) {
1974 u3d->clock_gating = 1;
1975 dev_err(&dev->dev, "external vbus detection\n");
1976 }
1977
1978 if (!u3d->clock_gating)
1979 u3d->vbus_active = 1;
1980
1981 /* enable usb3 controller vbus detection */
1982 u3d->vbus_valid_detect = 1;
1983
1984 retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget);
1985 if (retval)
1986 goto err_unregister;
1987
1988 dev_dbg(&dev->dev, "successful probe usb3 device %s clock gating.\n",
1989 u3d->clock_gating ? "with" : "without");
1990
1991 return 0;
1992
1993err_unregister:
1994 device_unregister(&u3d->gadget.dev);
1995err_register_gadget_device:
1996 free_irq(u3d->irq, &dev->dev);
1997err_request_irq:
1998err_get_irq:
1999 kfree(u3d->status_req);
2000err_alloc_status_req:
2001 kfree(u3d->eps);
2002err_alloc_eps:
2003 dma_pool_destroy(u3d->trb_pool);
2004err_alloc_trb_pool:
2005 dma_free_coherent(&dev->dev, u3d->ep_context_size,
2006 u3d->ep_context, u3d->ep_context_dma);
2007err_alloc_ep_context:
2008 if (pdata->phy_deinit)
2009 pdata->phy_deinit(u3d->phy_regs);
2010 clk_disable(u3d->clk);
2011err_u3d_enable:
2012 iounmap(u3d->cap_regs);
2013err_map_cap_regs:
2014err_get_cap_regs:
2015err_get_clk:
2016 clk_put(u3d->clk);
2017 platform_set_drvdata(dev, NULL);
2018 kfree(u3d);
2019err_alloc_private:
2020err_pdata:
2021 return retval;
2022}
2023
2024#ifdef CONFIG_PM
/* System-suspend callback.  Always returns 0.
 *
 * With clock gating the hardware is already managed by the VBUS
 * session (it is only powered while a cable is attached), so nothing
 * is done here; otherwise the controller is quiesced and powered off.
 */
static int mv_u3d_suspend(struct device *dev)
{
	struct mv_u3d *u3d = dev_get_drvdata(dev);

	/*
	 * only cable is unplugged, usb can suspend.
	 * So do not care about clock_gating == 1, it is handled by
	 * vbus session.
	 */
	if (!u3d->clock_gating) {
		mv_u3d_controller_stop(u3d);

		spin_lock_irq(&u3d->lock);
		/* stop all usb activities */
		mv_u3d_stop_activity(u3d, u3d->driver);
		spin_unlock_irq(&u3d->lock);

		mv_u3d_disable(u3d);
	}

	return 0;
}
2047
/* System-resume callback: re-enable the controller and, if a gadget
 * driver is bound and soft-connected, re-initialize the (lost)
 * register context and restart.  The clock-gating case is handled by
 * the VBUS session instead.  Returns 0 or a negative errno from
 * mv_u3d_enable().
 */
static int mv_u3d_resume(struct device *dev)
{
	struct mv_u3d *u3d = dev_get_drvdata(dev);
	int retval;

	if (!u3d->clock_gating) {
		retval = mv_u3d_enable(u3d);
		if (retval)
			return retval;

		if (u3d->driver && u3d->softconnect) {
			/* register context was lost while powered down */
			mv_u3d_controller_reset(u3d);
			mv_u3d_ep0_reset(u3d);
			mv_u3d_controller_start(u3d);
		}
	}

	return 0;
}
2067
/* static: the pm_ops table is only referenced from this file */
static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume);
2069#endif
2070
2071static void mv_u3d_shutdown(struct platform_device *dev)
2072{
2073 struct mv_u3d *u3d = dev_get_drvdata(&dev->dev);
2074 u32 tmp;
2075
2076 tmp = ioread32(&u3d->op_regs->usbcmd);
2077 tmp &= ~MV_U3D_CMD_RUN_STOP;
2078 iowrite32(tmp, &u3d->op_regs->usbcmd);
2079}
2080
2081static struct platform_driver mv_u3d_driver = {
2082 .probe = mv_u3d_probe,
2083 .remove = __exit_p(mv_u3d_remove),
2084 .shutdown = mv_u3d_shutdown,
2085 .driver = {
2086 .owner = THIS_MODULE,
2087 .name = "mv-u3d",
2088#ifdef CONFIG_PM
2089 .pm = &mv_u3d_pm_ops,
2090#endif
2091 },
2092};
2093
2094module_platform_driver(mv_u3d_driver);
2095MODULE_ALIAS("platform:mv-u3d");
2096MODULE_DESCRIPTION(DRIVER_DESC);
2097MODULE_AUTHOR("Yu Xu <yuxu@marvell.com>");
2098MODULE_LICENSE("GPL");