// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ... has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses. (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */

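/* hcd_priv stores a pointer back to our struct musb, set up when the HCD is created */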
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * FIXME: the TX FIFO flush sometimes fails; this has been
		 * observed during device disconnect on AM335x.
		 *
		 * To reproduce the issue, make sure TX urb(s) are queued
		 * when unplugging the USB device connected to the AM335x
		 * USB host port.
		 *
		 * Using a usb-ethernet device and running iperf (with the
		 * client on the AM335x) has a very high chance of
		 * triggering it.
		 *
		 * It helps to turn on musb_dbg() in musb_cleanup_urb() with
		 * CPPI enabled to see the issue when aborting the TX channel.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

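/*
 * With a shared FIFO the same hardware endpoint serves both directions,
 * so mirror the qh into both in_qh and out_qh.
 */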
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}

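/*
 * Capture the data toggle from the endpoint's CSR and mirror it into
 * usbcore's toggle state, so the next URB queued to this device endpoint
 * starts with the right DATA0/DATA1 PID.
 */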
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	/*
	 * The pipe must be broken if current urb->status is set, so don't
	 * start next urb.
	 * TODO: to minimize the risk of regression, only check urb->status
	 * for RX, until we have a test case to understand the behavior of TX.
	 */
	if ((!status || !is_in) && qh && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
			hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
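/*
 * Returns true once this URB has been fully serviced (all data unloaded or
 * the transfer terminated), so the caller can advance the endpoint's queue.
 */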
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int status = 0;
		struct usb_iso_packet_descriptor *d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);
	}

	/* scrub all previous state, clearing toggle */
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset,
		u32 *length, u8 *mode)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table below
		 *	bulk_split	hb_mult	Autoset_Enable
		 *	0		1	Yes (Normal)
		 *	0		>1	No  (High BW ISO)
		 *	1		1	Yes (HS bulk)
		 *	1		>1	Yes (FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}

static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
					   struct musb_hw_ep *hw_ep,
					   struct musb_qh *qh,
					   struct urb *urb,
					   u32 offset,
					   u32 *length,
					   u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}

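/*
 * Returns true when a DMA transfer was successfully programmed; on false
 * the caller falls back to loading the FIFO with PIO.
 */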
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	u16			pkt_size = qh->maxpacket;
	u8			mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
					       &length, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the data has reached main memory before starting the
	 * DMA transfer.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

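	/*
	 * A zero-length OUT transfer is sent by PIO: clear DMAENAB and the
	 * endpoint's tx_channel pointer so DMA is not used for it.
	 */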
	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid packets in
			 * double buffering case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered) {
				if (usb_gettoggle(urb->dev, qh->epnum, 1))
					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
						| MUSB_TXCSR_H_DATATOGGLE;
				else
					csr |= MUSB_TXCSR_CLRDATATOG;
			}

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok) {
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			} else if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
						/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
						"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
					hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/*
		 * Need to stop the transaction by clearing REQPKT first
		 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
		 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
		 */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, rx_csr);
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}
		musb_start_urb(musb, is_in, next_qh);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			musb_dbg(musb, "start no-DATA");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			musb_dbg(musb, "start IN-DATA");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			musb_dbg(musb, "start OUT-DATA");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		musb_dbg(musb, "STALLING ENDPOINT");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		musb_dbg(musb, "no response, csr0 %04x", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		musb_dbg(musb, "control NAK timeout");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		musb_dbg(musb, "aborting");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase */
			csr |= MUSB_CSR0_H_DIS_PING;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	trace_musb_urb_tx(musb, urb);
	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		musb_dbg(musb, "TX end %d stall", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			musb_dbg(musb, "TX ep%d device not responding", epnum);
			/* NOTE: this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			musb_dbg(musb,
				"DMA complete but FIFO not empty, CSR %04x",
				tx_csr);
			return;
		}
	}

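	/*
	 * Update the transfer bookkeeping: account for the data that just
	 * completed and decide whether this URB is done or needs more packets.
	 */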
	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		musb_dbg(musb, "not complete, but DMA enabled?");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer)
		qh->use_sg = true;

	if (qh->use_sg) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		if (offset + length >= urb->transfer_buffer_length)
Virupax Sadashivpetimathed74df12013-04-24 08:38:48 +02001498 qh->use_sg = false;
Virupax Sadashivpetimath8e8a5512012-08-07 14:46:20 +05301499 }
1500
Sergei Shtylyov6b6e9712009-03-26 18:29:19 -07001501 musb_ep_select(mbase, epnum);
1502 musb_writew(epio, MUSB_TXCSR,
1503 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
Felipe Balbi550a7372008-07-24 12:27:36 +03001504}
1505
Tony Lindgren069a3fd2015-05-01 12:29:33 -07001506#ifdef CONFIG_USB_TI_CPPI41_DMA
1507/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
1508static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1509 struct musb_hw_ep *hw_ep,
1510 struct musb_qh *qh,
1511 struct urb *urb,
1512 size_t len)
1513{
Bin Liu04471eb2016-05-31 10:05:25 -05001514 struct dma_channel *channel = hw_ep->rx_channel;
Tony Lindgren069a3fd2015-05-01 12:29:33 -07001515 void __iomem *epio = hw_ep->regs;
1516 dma_addr_t *buf;
Gustavo A. R. Silvac68bb0e2017-06-21 09:22:15 -05001517 u32 length;
Tony Lindgren069a3fd2015-05-01 12:29:33 -07001518 u16 val;
1519
1520 buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
1521 (u32)urb->transfer_dma;
1522
1523 length = urb->iso_frame_desc[qh->iso_idx].length;
1524
1525 val = musb_readw(epio, MUSB_RXCSR);
1526 val |= MUSB_RXCSR_DMAENAB;
1527 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1528
Gustavo A. R. Silvac68bb0e2017-06-21 09:22:15 -05001529 return dma->channel_program(channel, qh->maxpacket, 0,
Tony Lindgren069a3fd2015-05-01 12:29:33 -07001530 (u32)buf, length);
Tony Lindgren069a3fd2015-05-01 12:29:33 -07001531}
1532#else
1533static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1534 struct musb_hw_ep *hw_ep,
1535 struct musb_qh *qh,
1536 struct urb *urb,
1537 size_t len)
1538{
1539 return false;
1540}
1541#endif
Felipe Balbi550a7372008-07-24 12:27:36 +03001542
Tony Lindgrencff84bd2015-05-01 12:29:35 -07001543#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
1544 defined(CONFIG_USB_TI_CPPI41_DMA)
Felipe Balbi550a7372008-07-24 12:27:36 +03001545/* Host side RX (IN) using Mentor DMA works as follows:
1546 submit_urb ->
1547 - if queue was empty, ProgramEndpoint
1548 - first IN token is sent out (by setting ReqPkt)
1549 LinuxIsr -> RxReady()
1550 /\ => first packet is received
1551 | - Set in mode 0 (DmaEnab, ~ReqPkt)
1552 | -> DMA Isr (transfer complete) -> RxReady()
1553 | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1554 | - if urb not complete, send next IN token (ReqPkt)
1555 | | else complete urb.
1556 | |
1557 ---------------------------
1558 *
1559 * Nuances of mode 1:
1560 * For short packets, no ack (+RxPktRdy) is sent automatically
1561 * (even if AutoClear is ON)
1562 * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) are sent
1563 * automatically => major problem, as collecting the next packet becomes
1564 * difficult. Hence mode 1 is not used.
1565 *
1566 * REVISIT
1567 * All we care about at this driver level is that
1568 * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1569 * (b) termination conditions are: short RX, or buffer full;
1570 * (c) fault modes include
1571 * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1572 * (and that endpoint's dma queue stops immediately)
1573 * - overflow (full, PLUS more bytes in the terminal packet)
1574 *
1575 * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1576 * thus be a great candidate for using mode 1 ... for all but the
1577 * last packet of one URB's transfer.
1578 */
Tony Lindgrencff84bd2015-05-01 12:29:35 -07001579static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1580 struct musb_hw_ep *hw_ep,
1581 struct musb_qh *qh,
1582 struct urb *urb,
1583 size_t len)
1584{
1585 struct dma_channel *channel = hw_ep->rx_channel;
1586 void __iomem *epio = hw_ep->regs;
1587 u16 val;
1588 int pipe;
1589 bool done;
Felipe Balbi550a7372008-07-24 12:27:36 +03001590
Tony Lindgrencff84bd2015-05-01 12:29:35 -07001591 pipe = urb->pipe;
1592
1593 if (usb_pipeisoc(pipe)) {
1594 struct usb_iso_packet_descriptor *d;
1595
1596 d = urb->iso_frame_desc + qh->iso_idx;
1597 d->actual_length = len;
1598
1599 /* even if there was an error, we did the dma
1600 * for iso_frame_desc->length
1601 */
1602 if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1603 d->status = 0;
1604
1605 if (++qh->iso_idx >= urb->number_of_packets) {
1606 done = true;
1607 } else {
1608 /* REVISIT: Why ignore return value here? */
1609 if (musb_dma_cppi41(hw_ep->musb))
1610 done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
1611 urb, len);
1612 done = false;
1613 }
1614
1615 } else {
1616 /* done if urb buffer is full or short packet is recd */
1617 done = (urb->actual_length + len >=
1618 urb->transfer_buffer_length
1619 || channel->actual_len < qh->maxpacket
1620 || channel->rx_packet_done);
1621 }
1622
1623 /* send IN token for next packet, without AUTOREQ */
1624 if (!done) {
1625 val = musb_readw(epio, MUSB_RXCSR);
1626 val |= MUSB_RXCSR_H_REQPKT;
1627 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1628 }
1629
1630 return done;
1631}
Tony Lindgrenac33cdb2015-05-01 12:29:37 -07001632
1633/* Disadvantage of using mode 1:
1634 * It's basically usable only for mass storage class; essentially all
1635 * other protocols also terminate transfers on short packets.
1636 *
1637 * Details:
1638 * An extra IN token is sent at the end of the transfer (due to AUTOREQ)
1639 * If you try to use mode 1 for (transfer_buffer_length - 512), and try
1640 * to use the extra IN token to grab the last packet using mode 0, then
1641 * the problem is that you cannot be sure when the device will send the
1642 * last packet and set RxPktRdy. Sometimes the packet is received too soon
1643 * and gets lost when RxCSR is re-set at the end of the mode 1 transfer,
1644 * while sometimes it arrives just a little late, so that if you try to
1645 * configure for mode 0 soon after the mode 1 transfer completes, you
1646 * will find RXCOUNT 0. You might think of waiting for an interrupt when
1647 * the packet is received -- but none will be generated!
1648 */
1649static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1650 struct musb_hw_ep *hw_ep,
1651 struct musb_qh *qh,
1652 struct urb *urb,
1653 size_t len,
1654 u8 iso_err)
1655{
1656 struct musb *musb = hw_ep->musb;
1657 void __iomem *epio = hw_ep->regs;
1658 struct dma_channel *channel = hw_ep->rx_channel;
1659 u16 rx_count, val;
1660 int length, pipe, done;
1661 dma_addr_t buf;
1662
1663 rx_count = musb_readw(epio, MUSB_RXCOUNT);
1664 pipe = urb->pipe;
1665
1666 if (usb_pipeisoc(pipe)) {
1667 int d_status = 0;
1668 struct usb_iso_packet_descriptor *d;
1669
1670 d = urb->iso_frame_desc + qh->iso_idx;
1671
1672 if (iso_err) {
1673 d_status = -EILSEQ;
1674 urb->error_count++;
1675 }
1676 if (rx_count > d->length) {
1677 if (d_status == 0) {
1678 d_status = -EOVERFLOW;
1679 urb->error_count++;
1680 }
Bin Liub99d3652016-06-30 12:12:22 -05001681 musb_dbg(musb, "** OVERFLOW %d into %d",
Tony Lindgrenac33cdb2015-05-01 12:29:37 -07001682 rx_count, d->length);
1683
1684 length = d->length;
1685 } else
1686 length = rx_count;
1687 d->status = d_status;
1688 buf = urb->transfer_dma + d->offset;
1689 } else {
1690 length = rx_count;
1691 buf = urb->transfer_dma + urb->actual_length;
1692 }
1693
1694 channel->desired_mode = 0;
1695#ifdef USE_MODE1
1696 /* because of the issue below, mode 1 will
1697 * only rarely behave with correct semantics.
1698 */
1699 if ((urb->transfer_flags & URB_SHORT_NOT_OK)
1700 && (urb->transfer_buffer_length - urb->actual_length)
1701 > qh->maxpacket)
1702 channel->desired_mode = 1;
1703 if (rx_count < hw_ep->max_packet_sz_rx) {
1704 length = rx_count;
1705 channel->desired_mode = 0;
1706 } else {
1707 length = urb->transfer_buffer_length;
1708 }
1709#endif
1710
1711 /* See comments above on disadvantages of using mode 1 */
1712 val = musb_readw(epio, MUSB_RXCSR);
1713 val &= ~MUSB_RXCSR_H_REQPKT;
1714
1715 if (channel->desired_mode == 0)
1716 val &= ~MUSB_RXCSR_H_AUTOREQ;
1717 else
1718 val |= MUSB_RXCSR_H_AUTOREQ;
1719 val |= MUSB_RXCSR_DMAENAB;
1720
1721 /* autoclear shouldn't be set in high bandwidth */
1722 if (qh->hb_mult == 1)
1723 val |= MUSB_RXCSR_AUTOCLEAR;
1724
1725 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1726
1727 /* REVISIT if when actual_length != 0,
1728 * transfer_buffer_length needs to be
1729 * adjusted first...
1730 */
1731 done = dma->channel_program(channel, qh->maxpacket,
1732 channel->desired_mode,
1733 buf, length);
1734
1735 if (!done) {
1736 dma->channel_release(channel);
1737 hw_ep->rx_channel = NULL;
1738 channel = NULL;
1739 val = musb_readw(epio, MUSB_RXCSR);
1740 val &= ~(MUSB_RXCSR_DMAENAB
1741 | MUSB_RXCSR_H_AUTOREQ
1742 | MUSB_RXCSR_AUTOCLEAR);
1743 musb_writew(epio, MUSB_RXCSR, val);
1744 }
1745
1746 return done;
1747}
Tony Lindgrencff84bd2015-05-01 12:29:35 -07001748#else
1749static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1750 struct musb_hw_ep *hw_ep,
1751 struct musb_qh *qh,
1752 struct urb *urb,
1753 size_t len)
1754{
1755 return false;
1756}
Tony Lindgrenac33cdb2015-05-01 12:29:37 -07001757
1758static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1759 struct musb_hw_ep *hw_ep,
1760 struct musb_qh *qh,
1761 struct urb *urb,
1762 size_t len,
1763 u8 iso_err)
1764{
1765 return false;
1766}
Felipe Balbi550a7372008-07-24 12:27:36 +03001767#endif
1768
1769/*
1770 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1771 * and high-bandwidth IN transfer cases.
1772 */
1773void musb_host_rx(struct musb *musb, u8 epnum)
1774{
1775 struct urb *urb;
1776 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
Tony Lindgrencff84bd2015-05-01 12:29:35 -07001777 struct dma_controller *c = musb->dma_controller;
Felipe Balbi550a7372008-07-24 12:27:36 +03001778 void __iomem *epio = hw_ep->regs;
1779 struct musb_qh *qh = hw_ep->in_qh;
1780 size_t xfer_len;
1781 void __iomem *mbase = musb->mregs;
Felipe Balbi550a7372008-07-24 12:27:36 +03001782 u16 rx_csr, val;
1783 bool iso_err = false;
1784 bool done = false;
1785 u32 status;
1786 struct dma_channel *dma;
Virupax Sadashivpetimath8e8a5512012-08-07 14:46:20 +05301787 unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
Felipe Balbi550a7372008-07-24 12:27:36 +03001788
1789 musb_ep_select(mbase, epnum);
1790
1791 urb = next_urb(qh);
1792 dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1793 status = 0;
1794 xfer_len = 0;
1795
1796 rx_csr = musb_readw(epio, MUSB_RXCSR);
1797 val = rx_csr;
1798
1799 if (unlikely(!urb)) {
1800 /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1801 * usbtest #11 (unlinks) triggers it regularly, sometimes
1802 * with fifo full. (Only with DMA??)
1803 */
Bin Liub99d3652016-06-30 12:12:22 -05001804 musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
1805 epnum, val, musb_readw(epio, MUSB_RXCOUNT));
Felipe Balbi550a7372008-07-24 12:27:36 +03001806 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1807 return;
1808 }
1809
Bin Liu19ca6822016-06-30 12:12:26 -05001810 trace_musb_urb_rx(musb, urb);
Felipe Balbi550a7372008-07-24 12:27:36 +03001811
1812 /* check for errors, concurrent stall & unlink is not really
1813 * handled yet! */
1814 if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
Bin Liub99d3652016-06-30 12:12:22 -05001815 musb_dbg(musb, "RX end %d STALL", epnum);
Felipe Balbi550a7372008-07-24 12:27:36 +03001816
1817 /* stall; record URB status */
1818 status = -EPIPE;
1819
1820 } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
Bin Liub99d3652016-06-30 12:12:22 -05001821 musb_dbg(musb, "end %d RX proto error", epnum);
Felipe Balbi550a7372008-07-24 12:27:36 +03001822
1823 status = -EPROTO;
1824 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1825
Bin Liub5801212016-05-31 10:05:03 -05001826 rx_csr &= ~MUSB_RXCSR_H_ERROR;
1827 musb_writew(epio, MUSB_RXCSR, rx_csr);
1828
Felipe Balbi550a7372008-07-24 12:27:36 +03001829 } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1830
1831 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
Bin Liub99d3652016-06-30 12:12:22 -05001832 musb_dbg(musb, "RX end %d NAK timeout", epnum);
Ajay Kumar Gupta1e0320f2009-02-24 15:26:13 -08001833
1834 /* NOTE: NAKing is *NOT* an error, so we want to
1835 * continue. Except ... if there's a request for
1836 * another QH, use that instead of starving it.
1837 *
1838 * Devices like Ethernet and serial adapters keep
1839 * reads posted at all times, which will starve
1840 * other devices without this logic.
1841 */
1842 if (usb_pipebulk(urb->pipe)
1843 && qh->mux == 1
1844 && !list_is_singular(&musb->in_bulk)) {
Ajay Kumar Guptaf2838622012-07-19 13:41:59 +05301845 musb_bulk_nak_timeout(musb, hw_ep, 1);
Ajay Kumar Gupta1e0320f2009-02-24 15:26:13 -08001846 return;
1847 }
Felipe Balbi550a7372008-07-24 12:27:36 +03001848 musb_ep_select(mbase, epnum);
Ajay Kumar Gupta1e0320f2009-02-24 15:26:13 -08001849 rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1850 rx_csr &= ~MUSB_RXCSR_DATAERROR;
1851 musb_writew(epio, MUSB_RXCSR, rx_csr);
Felipe Balbi550a7372008-07-24 12:27:36 +03001852
1853 goto finish;
1854 } else {
Bin Liub99d3652016-06-30 12:12:22 -05001855 musb_dbg(musb, "RX end %d ISO data error", epnum);
Felipe Balbi550a7372008-07-24 12:27:36 +03001856 /* packet error reported later */
1857 iso_err = true;
1858 }
Ajay Kumar Guptaa483d702009-04-03 16:16:17 -07001859 } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
Bin Liub99d3652016-06-30 12:12:22 -05001860 musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
Ajay Kumar Guptaa483d702009-04-03 16:16:17 -07001861 epnum);
1862 status = -EPROTO;
Felipe Balbi550a7372008-07-24 12:27:36 +03001863 }
1864
1865 /* faults abort the transfer */
1866 if (status) {
1867 /* clean up dma and collect transfer count */
1868 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1869 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
Daniel Mack9c547692014-05-26 14:52:35 +02001870 musb->dma_controller->channel_abort(dma);
Felipe Balbi550a7372008-07-24 12:27:36 +03001871 xfer_len = dma->actual_len;
1872 }
1873 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1874 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1875 done = true;
1876 goto finish;
1877 }
1878
1879 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1880 /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1881 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1882 goto finish;
1883 }
1884
1885 /* thorough shutdown for now ... given more precise fault handling
1886 * and better queueing support, we might keep a DMA pipeline going
1887 * while processing this irq for earlier completions.
1888 */
1889
1890 /* FIXME this is _way_ too much in-line logic for Mentor DMA */
Tony Lindgren557d5432015-05-01 12:29:34 -07001891 if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
1892 (rx_csr & MUSB_RXCSR_H_REQPKT)) {
Felipe Balbi550a7372008-07-24 12:27:36 +03001893 /* REVISIT this happened for a while on some short reads...
1894 * the cleanup still needs investigation... looks bad...
1895 * and also duplicates dma cleanup code above ... plus,
1896 * shouldn't this be the "half full" double buffer case?
1897 */
1898 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1899 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
Daniel Mack9c547692014-05-26 14:52:35 +02001900 musb->dma_controller->channel_abort(dma);
Felipe Balbi550a7372008-07-24 12:27:36 +03001901 xfer_len = dma->actual_len;
1902 done = true;
1903 }
1904
Bin Liub99d3652016-06-30 12:12:22 -05001905 musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
Felipe Balbi550a7372008-07-24 12:27:36 +03001906 xfer_len, dma ? ", dma" : "");
1907 rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1908
1909 musb_ep_select(mbase, epnum);
1910 musb_writew(epio, MUSB_RXCSR,
1911 MUSB_RXCSR_H_WZC_BITS | rx_csr);
1912 }
Tony Lindgren557d5432015-05-01 12:29:34 -07001913
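	/*
	 * DMA completion path: record how much the channel transferred,
	 * clear the DMA-related RXCSR bits, and let the controller-specific
	 * helper decide whether another IN token must be requested.
	 */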
Felipe Balbi550a7372008-07-24 12:27:36 +03001914 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1915 xfer_len = dma->actual_len;
1916
1917 val &= ~(MUSB_RXCSR_DMAENAB
1918 | MUSB_RXCSR_H_AUTOREQ
1919 | MUSB_RXCSR_AUTOCLEAR
1920 | MUSB_RXCSR_RXPKTRDY);
1921 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1922
Tony Lindgrencff84bd2015-05-01 12:29:35 -07001923 if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1924 musb_dma_cppi41(musb)) {
1925 done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
Bin Liub99d3652016-06-30 12:12:22 -05001926 musb_dbg(hw_ep->musb,
1927 "ep %d dma %s, rxcsr %04x, rxcount %d",
Tony Lindgrencff84bd2015-05-01 12:29:35 -07001928 epnum, done ? "off" : "reset",
1929 musb_readw(epio, MUSB_RXCSR),
1930 musb_readw(epio, MUSB_RXCOUNT));
1931 } else {
1932 done = true;
Ajay Kumar Guptaf82a6892008-10-29 15:10:31 +02001933 }
Felipe Balbi550a7372008-07-24 12:27:36 +03001934
Felipe Balbi550a7372008-07-24 12:27:36 +03001935 } else if (urb->status == -EINPROGRESS) {
1936 /* if no errors, be sure a packet is ready for unloading */
1937 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1938 status = -EPROTO;
1939 ERR("Rx interrupt with no errors or packet!\n");
1940
1941 /* FIXME this is another "SHOULD NEVER HAPPEN" */
1942
1943/* SCRUB (RX) */
1944 /* do the proper sequence to abort the transfer */
1945 musb_ep_select(mbase, epnum);
1946 val &= ~MUSB_RXCSR_H_REQPKT;
1947 musb_writew(epio, MUSB_RXCSR, val);
1948 goto finish;
1949 }
1950
1951 /* we are expecting IN packets */
Tony Lindgrene530bb82015-05-01 12:29:36 -07001952 if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1953 musb_dma_cppi41(musb)) && dma) {
Bin Liub99d3652016-06-30 12:12:22 -05001954 musb_dbg(hw_ep->musb,
1955 "RX%d count %d, buffer 0x%llx len %d/%d",
Tony Lindgrenac33cdb2015-05-01 12:29:37 -07001956 epnum, musb_readw(epio, MUSB_RXCOUNT),
1957 (unsigned long long) urb->transfer_dma
1958 + urb->actual_length,
1959 qh->offset,
1960 urb->transfer_buffer_length);
Felipe Balbi550a7372008-07-24 12:27:36 +03001961
Cristian Birsan4c2ba0c2016-02-19 10:11:56 +02001962 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
1963 xfer_len, iso_err))
Tony Lindgrenac33cdb2015-05-01 12:29:37 -07001964 goto finish;
Felipe Balbi550a7372008-07-24 12:27:36 +03001965 else
Tony Lindgrenac33cdb2015-05-01 12:29:37 -07001966 dev_err(musb->controller, "error: rx_dma failed\n");
Felipe Balbi550a7372008-07-24 12:27:36 +03001967 }
Felipe Balbi550a7372008-07-24 12:27:36 +03001968
1969 if (!dma) {
Virupax Sadashivpetimath8e8a5512012-08-07 14:46:20 +05301970 unsigned int received_len;
1971
Maulik Mankad496dda72010-09-24 13:44:06 +03001972 /* Unmap the buffer so that CPU can use it */
Daniel Mack8b125df2013-04-10 21:55:50 +02001973 usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
Virupax Sadashivpetimath8e8a5512012-08-07 14:46:20 +05301974
1975 /*
1976 * We need to map sg if the transfer_buffer is
1977 * NULL.
1978 */
1979 if (!urb->transfer_buffer) {
Virupax Sadashivpetimathed74df12013-04-24 08:38:48 +02001980 qh->use_sg = true;
Virupax Sadashivpetimath8e8a5512012-08-07 14:46:20 +05301981 sg_miter_start(&qh->sg_miter, urb->sg, 1,
1982 sg_flags);
1983 }
1984
Virupax Sadashivpetimathed74df12013-04-24 08:38:48 +02001985 if (qh->use_sg) {
Virupax Sadashivpetimath8e8a5512012-08-07 14:46:20 +05301986 if (!sg_miter_next(&qh->sg_miter)) {
1987 dev_err(musb->controller, "error: sg list empty\n");
1988 sg_miter_stop(&qh->sg_miter);
1989 status = -EINVAL;
1990 done = true;
1991 goto finish;
1992 }
1993 urb->transfer_buffer = qh->sg_miter.addr;
1994 received_len = urb->actual_length;
1995 qh->offset = 0x0;
1996 done = musb_host_packet_rx(musb, urb, epnum,
1997 iso_err);
1998 /* Calculate the number of bytes received */
1999 received_len = urb->actual_length -
2000 received_len;
2001 qh->sg_miter.consumed = received_len;
2002 sg_miter_stop(&qh->sg_miter);
2003 } else {
2004 done = musb_host_packet_rx(musb, urb,
2005 epnum, iso_err);
2006 }
Bin Liub99d3652016-06-30 12:12:22 -05002007 musb_dbg(musb, "read %spacket", done ? "last " : "");
Felipe Balbi550a7372008-07-24 12:27:36 +03002008 }
2009 }
2010
Felipe Balbi550a7372008-07-24 12:27:36 +03002011finish:
2012 urb->actual_length += xfer_len;
2013 qh->offset += xfer_len;
2014 if (done) {
Virupax Sadashivpetimathed74df12013-04-24 08:38:48 +02002015 if (qh->use_sg)
2016 qh->use_sg = false;
Virupax Sadashivpetimath8e8a5512012-08-07 14:46:20 +05302017
Felipe Balbi550a7372008-07-24 12:27:36 +03002018 if (urb->status == -EINPROGRESS)
2019 urb->status = status;
2020 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
2021 }
2022}
2023
2024/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
2025 * the software schedule associates multiple such nodes with a given
2026 * host side hardware endpoint + direction; scheduling may activate
2027 * that hardware endpoint.
2028 */
2029static int musb_schedule(
2030 struct musb *musb,
2031 struct musb_qh *qh,
2032 int is_in)
2033{
Rickard Strandqvisteac44dc2014-06-01 15:48:12 +02002034 int idle = 0;
Felipe Balbi550a7372008-07-24 12:27:36 +03002035 int best_diff;
2036 int best_end, epnum;
2037 struct musb_hw_ep *hw_ep = NULL;
2038 struct list_head *head = NULL;
Swaminathan S5274dab2009-12-28 13:40:37 +02002039 u8 toggle;
2040 u8 txtype;
2041 struct urb *urb = next_urb(qh);
Felipe Balbi550a7372008-07-24 12:27:36 +03002042
2043 /* use fixed hardware for control and bulk */
Ajay Kumar Gupta23d15e02008-10-29 15:10:35 +02002044 if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002045 head = &musb->control;
2046 hw_ep = musb->control_ep;
Felipe Balbi550a7372008-07-24 12:27:36 +03002047 goto success;
2048 }
2049
2050 /* else, periodic transfers get muxed to other endpoints */
2051
Sergei Shtylyov5d67a852009-02-24 15:23:34 -08002052 /*
2053 * We know this qh hasn't been scheduled, so all we need to do
Felipe Balbi550a7372008-07-24 12:27:36 +03002054 * is choose which hardware endpoint to put it on ...
2055 *
2056 * REVISIT what we really want here is a regular schedule tree
Sergei Shtylyov5d67a852009-02-24 15:23:34 -08002057 * like e.g. OHCI uses.
Felipe Balbi550a7372008-07-24 12:27:36 +03002058 */
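	/*
	 * Best-fit search: for each free hardware endpoint, "diff" is the
	 * FIFO space left over after this qh's (maxpacket * hb_mult); the
	 * endpoint with the smallest non-negative leftover wins.
	 */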
2059 best_diff = 4096;
2060 best_end = -1;
2061
Sergei Shtylyov5d67a852009-02-24 15:23:34 -08002062 for (epnum = 1, hw_ep = musb->endpoints + 1;
2063 epnum < musb->nr_endpoints;
2064 epnum++, hw_ep++) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002065 int diff;
2066
Sergei Shtylyov3e5c6dc2009-03-27 12:55:16 -07002067 if (musb_ep_get_qh(hw_ep, is_in) != NULL)
Felipe Balbi550a7372008-07-24 12:27:36 +03002068 continue;
Sergei Shtylyov5d67a852009-02-24 15:23:34 -08002069
Felipe Balbi550a7372008-07-24 12:27:36 +03002070 if (hw_ep == musb->bulk_ep)
2071 continue;
2072
2073 if (is_in)
Ajay Kumar Guptaa483d702009-04-03 16:16:17 -07002074 diff = hw_ep->max_packet_sz_rx;
Felipe Balbi550a7372008-07-24 12:27:36 +03002075 else
Ajay Kumar Guptaa483d702009-04-03 16:16:17 -07002076 diff = hw_ep->max_packet_sz_tx;
2077 diff -= (qh->maxpacket * qh->hb_mult);
Felipe Balbi550a7372008-07-24 12:27:36 +03002078
Ajay Kumar Gupta23d15e02008-10-29 15:10:35 +02002079 if (diff >= 0 && best_diff > diff) {
Swaminathan S5274dab2009-12-28 13:40:37 +02002080
2081 /*
2082 * Mentor controller has a bug in that if we schedule
2083 * a BULK Tx transfer on an endpoint that had earlier
2084 * handled ISOC then the BULK transfer has to start on
2085 * a zero toggle. If the BULK transfer starts on a 1
2086 * toggle then this transfer will fail as the mentor
2087 * controller starts the Bulk transfer on a 0 toggle
2088 * irrespective of the programming of the toggle bits
2089 * in the TXCSR register. Check for this condition
2090 * while allocating the EP for a Tx Bulk transfer. If
2091 * so skip this EP.
2092 */
2093 hw_ep = musb->endpoints + epnum;
2094 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
2095 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
2096 >> 4) & 0x3;
2097 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
2098 toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
2099 continue;
2100
Felipe Balbi550a7372008-07-24 12:27:36 +03002101 best_diff = diff;
2102 best_end = epnum;
2103 }
2104 }
Ajay Kumar Gupta23d15e02008-10-29 15:10:35 +02002105 /* use bulk reserved ep1 if no other ep is free */
Felipe Balbiaa5cbbe2008-11-17 09:08:16 +02002106 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
Ajay Kumar Gupta23d15e02008-10-29 15:10:35 +02002107 hw_ep = musb->bulk_ep;
2108 if (is_in)
2109 head = &musb->in_bulk;
2110 else
2111 head = &musb->out_bulk;
Ajay Kumar Gupta1e0320f2009-02-24 15:26:13 -08002112
Ajay Kumar Guptaf2838622012-07-19 13:41:59 +05302113 /* Enable bulk RX/TX NAK timeout scheme when bulk requests are
Rahul Bedarkar5ae477b2014-01-02 19:27:47 +05302114 * multiplexed. This scheme does not work in high speed to full
Ajay Kumar Gupta1e0320f2009-02-24 15:26:13 -08002115 * speed scenario as NAK interrupts are not coming from a
2116 * full speed device connected to a high speed device.
2117 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
2118 * 4 (8 frame or 8ms) for FS device.
2119 */
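		/*
		 * The bulk NAK-limit field presumably follows the usual MUSB
		 * encoding of 2^(m-1) (micro)frames: m = 8 gives 128 uframes
		 * of 125 us = 16 ms at HS, and m = 4 gives 8 frames of
		 * 1 ms = 8 ms at FS, matching the figures quoted above.
		 */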
Ajay Kumar Guptaf2838622012-07-19 13:41:59 +05302120 if (qh->dev)
Ajay Kumar Gupta1e0320f2009-02-24 15:26:13 -08002121 qh->intv_reg =
2122 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
Ajay Kumar Gupta23d15e02008-10-29 15:10:35 +02002123 goto success;
2124 } else if (best_end < 0) {
Bin Liua2f65602017-08-24 11:38:33 -05002125 dev_err(musb->controller,
2126 "%s hwep alloc failed for %dx%d\n",
2127 musb_ep_xfertype_string(qh->type),
2128 qh->hb_mult, qh->maxpacket);
Felipe Balbi550a7372008-07-24 12:27:36 +03002129 return -ENOSPC;
Ajay Kumar Gupta23d15e02008-10-29 15:10:35 +02002130 }
Felipe Balbi550a7372008-07-24 12:27:36 +03002131
2132 idle = 1;
Ajay Kumar Gupta23d15e02008-10-29 15:10:35 +02002133 qh->mux = 0;
Felipe Balbi550a7372008-07-24 12:27:36 +03002134 hw_ep = musb->endpoints + best_end;
Bin Liub99d3652016-06-30 12:12:22 -05002135 musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
Felipe Balbi550a7372008-07-24 12:27:36 +03002136success:
Ajay Kumar Gupta23d15e02008-10-29 15:10:35 +02002137 if (head) {
2138 idle = list_empty(head);
2139 list_add_tail(&qh->ring, head);
2140 qh->mux = 1;
2141 }
Felipe Balbi550a7372008-07-24 12:27:36 +03002142 qh->hw_ep = hw_ep;
2143 qh->hep->hcpriv = qh;
2144 if (idle)
2145 musb_start_urb(musb, is_in, qh);
2146 return 0;
2147}
2148
2149static int musb_urb_enqueue(
2150 struct usb_hcd *hcd,
2151 struct urb *urb,
2152 gfp_t mem_flags)
2153{
2154 unsigned long flags;
2155 struct musb *musb = hcd_to_musb(hcd);
2156 struct usb_host_endpoint *hep = urb->ep;
David Brownell74bb3502009-03-26 17:36:57 -07002157 struct musb_qh *qh;
Felipe Balbi550a7372008-07-24 12:27:36 +03002158 struct usb_endpoint_descriptor *epd = &hep->desc;
2159 int ret;
2160 unsigned type_reg;
2161 unsigned interval;
2162
2163 /* host role must be active */
2164 if (!is_host_active(musb) || !musb->is_active)
2165 return -ENODEV;
2166
Bin Liu19ca6822016-06-30 12:12:26 -05002167 trace_musb_urb_enq(musb, urb);
2168
Felipe Balbi550a7372008-07-24 12:27:36 +03002169 spin_lock_irqsave(&musb->lock, flags);
2170 ret = usb_hcd_link_urb_to_ep(hcd, urb);
David Brownell74bb3502009-03-26 17:36:57 -07002171 qh = ret ? NULL : hep->hcpriv;
2172 if (qh)
2173 urb->hcpriv = qh;
Felipe Balbi550a7372008-07-24 12:27:36 +03002174 spin_unlock_irqrestore(&musb->lock, flags);
Felipe Balbi550a7372008-07-24 12:27:36 +03002175
2176 /* DMA mapping was already done, if needed, and this urb is on
David Brownell74bb3502009-03-26 17:36:57 -07002177 * hep->urb_list now ... so we're done, unless hep wasn't yet
2178 * scheduled onto a live qh.
Felipe Balbi550a7372008-07-24 12:27:36 +03002179 *
2180 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
2181 * disabled, testing for empty qh->ring and avoiding qh setup costs
2182 * except for the first urb queued after a config change.
2183 */
David Brownell74bb3502009-03-26 17:36:57 -07002184 if (qh || ret)
2185 return ret;
Felipe Balbi550a7372008-07-24 12:27:36 +03002186
2187 /* Allocate and initialize qh, minimizing the work done each time
2188 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
2189 *
2190 * REVISIT consider a dedicated qh kmem_cache, so it's harder
2191 * for bugs in other kernel code to break this driver...
2192 */
2193 qh = kzalloc(sizeof *qh, mem_flags);
2194 if (!qh) {
Ajay Kumar Gupta2492e672008-09-11 11:53:21 +03002195 spin_lock_irqsave(&musb->lock, flags);
Felipe Balbi550a7372008-07-24 12:27:36 +03002196 usb_hcd_unlink_urb_from_ep(hcd, urb);
Ajay Kumar Gupta2492e672008-09-11 11:53:21 +03002197 spin_unlock_irqrestore(&musb->lock, flags);
Felipe Balbi550a7372008-07-24 12:27:36 +03002198 return -ENOMEM;
2199 }
2200
2201 qh->hep = hep;
2202 qh->dev = urb->dev;
2203 INIT_LIST_HEAD(&qh->ring);
2204 qh->is_ready = 1;
2205
Kuninori Morimoto29cc8892011-08-23 03:12:03 -07002206 qh->maxpacket = usb_endpoint_maxp(epd);
Ajay Kumar Guptaa483d702009-04-03 16:16:17 -07002207 qh->type = usb_endpoint_type(epd);
Felipe Balbi550a7372008-07-24 12:27:36 +03002208
Ajay Kumar Guptaa483d702009-04-03 16:16:17 -07002209 /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
2210 * Some musb cores don't support high bandwidth ISO transfers; and
2211 * we don't (yet!) support high bandwidth interrupt transfers.
2212 */
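	/*
	 * Worked example: wMaxPacketSize 0x1400 has bits 12:11 = 2 (two extra
	 * transactions per microframe), so usb_endpoint_maxp_mult() yields
	 * hb_mult = 3, and qh->maxpacket (after the 0x7ff mask below) is
	 * 0x400, i.e. 1024 bytes per transaction.
	 */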
Felipe Balbi6ddcabc2016-09-28 13:40:40 +03002213 qh->hb_mult = usb_endpoint_maxp_mult(epd);
Ajay Kumar Guptaa483d702009-04-03 16:16:17 -07002214 if (qh->hb_mult > 1) {
2215 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2216
2217 if (ok)
2218 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2219 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2220 if (!ok) {
Bin Liu1bff25e2017-08-24 11:38:34 -05002221 dev_err(musb->controller,
2222 "high bandwidth %s (%dx%d) not supported\n",
2223 musb_ep_xfertype_string(qh->type),
2224 qh->hb_mult, qh->maxpacket & 0x7ff);
Ajay Kumar Guptaa483d702009-04-03 16:16:17 -07002225 ret = -EMSGSIZE;
2226 goto done;
2227 }
2228 qh->maxpacket &= 0x7ff;
Felipe Balbi550a7372008-07-24 12:27:36 +03002229 }
2230
Julia Lawall96bcd092009-01-24 17:57:24 -08002231 qh->epnum = usb_endpoint_num(epd);
Felipe Balbi550a7372008-07-24 12:27:36 +03002232
2233 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
2234 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2235
2236 /* precompute rxtype/txtype/type0 register */
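	/*
	 * Encoding used below: bits 7:6 select the target speed (0x40 high,
	 * 0x80 full, 0xc0 low), bits 5:4 the transfer type, and bits 3:0 the
	 * target endpoint number.
	 */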
2237 type_reg = (qh->type << 4) | qh->epnum;
2238 switch (urb->dev->speed) {
2239 case USB_SPEED_LOW:
2240 type_reg |= 0xc0;
2241 break;
2242 case USB_SPEED_FULL:
2243 type_reg |= 0x80;
2244 break;
2245 default:
2246 type_reg |= 0x40;
2247 }
2248 qh->type_reg = type_reg;
2249
Sergei Shtylyov136733d2009-02-21 15:31:35 -08002250 /* Precompute RXINTERVAL/TXINTERVAL register */
Felipe Balbi550a7372008-07-24 12:27:36 +03002251 switch (qh->type) {
2252 case USB_ENDPOINT_XFER_INT:
Sergei Shtylyov136733d2009-02-21 15:31:35 -08002253 /*
2254 * Full/low speeds use the linear encoding,
2255 * high speed uses the logarithmic encoding.
2256 */
2257 if (urb->dev->speed <= USB_SPEED_FULL) {
2258 interval = max_t(u8, epd->bInterval, 1);
2259 break;
Felipe Balbi550a7372008-07-24 12:27:36 +03002260 }
2261 /* FALLTHROUGH */
2262 case USB_ENDPOINT_XFER_ISOC:
Sergei Shtylyov136733d2009-02-21 15:31:35 -08002263 /* ISO always uses logarithmic encoding */
2264 interval = min_t(u8, epd->bInterval, 16);
Felipe Balbi550a7372008-07-24 12:27:36 +03002265 break;
2266 default:
2267 /* REVISIT we actually want to use NAK limits, hinting to the
2268 * transfer scheduling logic to try some other qh, e.g. try
2269 * for 2 msec first:
2270 *
2271 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
2272 *
2273 * The downside of disabling this is that transfer scheduling
2274 * gets VERY unfair for nonperiodic transfers; a misbehaving
Ajay Kumar Gupta1e0320f2009-02-24 15:26:13 -08002275 * peripheral could make that hurt. That's perfectly normal
2276 * for reads from network or serial adapters ... so we have
2277 * partial NAKlimit support for bulk RX.
Felipe Balbi550a7372008-07-24 12:27:36 +03002278 *
Ajay Kumar Gupta1e0320f2009-02-24 15:26:13 -08002279 * The upside of disabling it is simpler transfer scheduling.
Felipe Balbi550a7372008-07-24 12:27:36 +03002280 */
2281 interval = 0;
2282 }
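	/*
	 * Examples: a high-speed interrupt endpoint with bInterval 4 is
	 * polled every 2^(4-1) = 8 microframes (1 ms), while a full-speed
	 * interrupt endpoint with bInterval 10 uses the linear encoding,
	 * i.e. every 10 frames (10 ms).
	 */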
2283 qh->intv_reg = interval;
2284
2285 /* precompute addressing for external hub/tt ports */
2286 if (musb->is_multipoint) {
2287 struct usb_device *parent = urb->dev->parent;
2288
2289 if (parent != hcd->self.root_hub) {
2290 qh->h_addr_reg = (u8) parent->devnum;
2291
2292 /* set up tt info if needed */
2293 if (urb->dev->tt) {
2294 qh->h_port_reg = (u8) urb->dev->ttport;
Ajay Kumar Guptaae5ad292008-09-11 11:53:20 +03002295 if (urb->dev->tt->hub)
2296 qh->h_addr_reg =
2297 (u8) urb->dev->tt->hub->devnum;
2298 if (urb->dev->tt->multi)
2299 qh->h_addr_reg |= 0x80;
Felipe Balbi550a7372008-07-24 12:27:36 +03002300 }
2301 }
2302 }
2303
2304 /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
2305 * until we get real dma queues (with an entry for each urb/buffer),
2306 * we only have work to do in the former case.
2307 */
2308 spin_lock_irqsave(&musb->lock, flags);
yuzheng ma30677792012-08-15 16:11:40 +08002309 if (hep->hcpriv || !next_urb(qh)) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002310 /* some concurrent activity submitted another urb to hep...
2311 * odd, rare, error prone, but legal.
2312 */
2313 kfree(qh);
Dan Carpenter714bc5e2010-03-25 13:14:27 +02002314 qh = NULL;
Felipe Balbi550a7372008-07-24 12:27:36 +03002315 ret = 0;
2316 } else
2317 ret = musb_schedule(musb, qh,
2318 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2319
2320 if (ret == 0) {
2321 urb->hcpriv = qh;
2322 /* FIXME set urb->start_frame for iso/intr, it's tested in
2323 * musb_start_urb(), but otherwise only konicawc cares ...
2324 */
2325 }
2326 spin_unlock_irqrestore(&musb->lock, flags);
2327
2328done:
2329 if (ret != 0) {
Ajay Kumar Gupta2492e672008-09-11 11:53:21 +03002330 spin_lock_irqsave(&musb->lock, flags);
Felipe Balbi550a7372008-07-24 12:27:36 +03002331 usb_hcd_unlink_urb_from_ep(hcd, urb);
Ajay Kumar Gupta2492e672008-09-11 11:53:21 +03002332 spin_unlock_irqrestore(&musb->lock, flags);
Felipe Balbi550a7372008-07-24 12:27:36 +03002333 kfree(qh);
2334 }
2335 return ret;
2336}
2337
2338
2339/*
2340 * abort a transfer that's at the head of a hardware queue.
2341 * called with controller locked, irqs blocked;
2342 * the hardware queue then advances to the next transfer, unless prevented
2343 */
Sergei Shtylyov81ec4e42009-03-27 12:57:50 -07002344static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
Felipe Balbi550a7372008-07-24 12:27:36 +03002345{
2346 struct musb_hw_ep *ep = qh->hw_ep;
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03002347 struct musb *musb = ep->musb;
Felipe Balbi550a7372008-07-24 12:27:36 +03002348 void __iomem *epio = ep->regs;
2349 unsigned hw_end = ep->epnum;
2350 void __iomem *regs = ep->musb->mregs;
Sergei Shtylyov81ec4e42009-03-27 12:57:50 -07002351 int is_in = usb_pipein(urb->pipe);
Felipe Balbi550a7372008-07-24 12:27:36 +03002352 int status = 0;
Sergei Shtylyov81ec4e42009-03-27 12:57:50 -07002353 u16 csr;
Bin Liu6def85a2017-01-03 18:13:46 -06002354 struct dma_channel *dma = NULL;
Felipe Balbi550a7372008-07-24 12:27:36 +03002355
2356 musb_ep_select(regs, hw_end);
2357
2358 if (is_dma_capable()) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002359 dma = is_in ? ep->rx_channel : ep->tx_channel;
2360 if (dma) {
2361 status = ep->musb->dma_controller->channel_abort(dma);
Bin Liub99d3652016-06-30 12:12:22 -05002362 musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
Felipe Balbi550a7372008-07-24 12:27:36 +03002363 is_in ? 'R' : 'T', ep->epnum,
2364 urb, status);
2365 urb->actual_length += dma->actual_len;
2366 }
2367 }
2368
2369 /* turn off DMA requests, discard state, stop polling ... */
Ajay Kumar Gupta692933b2012-03-14 17:33:35 +05302370 if (ep->epnum && is_in) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002371 /* giveback saves bulk toggle */
2372 csr = musb_h_flush_rxfifo(ep, 0);
2373
Bin Liu6def85a2017-01-03 18:13:46 -06002374 /* clear the endpoint's irq status here to avoid bogus irqs */
2375 if (is_dma_capable() && dma)
2376 musb_platform_clear_ep_rxintr(musb, ep->epnum);
David Brownell78322c12009-03-26 17:38:30 -07002377 } else if (ep->epnum) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002378 musb_h_tx_flush_fifo(ep);
2379 csr = musb_readw(epio, MUSB_TXCSR);
2380 csr &= ~(MUSB_TXCSR_AUTOSET
2381 | MUSB_TXCSR_DMAENAB
2382 | MUSB_TXCSR_H_RXSTALL
2383 | MUSB_TXCSR_H_NAKTIMEOUT
2384 | MUSB_TXCSR_H_ERROR
2385 | MUSB_TXCSR_TXPKTRDY);
2386 musb_writew(epio, MUSB_TXCSR, csr);
2387 /* REVISIT may need to clear FLUSHFIFO ... */
2388 musb_writew(epio, MUSB_TXCSR, csr);
2389 /* flush cpu writebuffer */
2390 csr = musb_readw(epio, MUSB_TXCSR);
David Brownell78322c12009-03-26 17:38:30 -07002391 } else {
2392 musb_h_ep0_flush_fifo(ep);
Felipe Balbi550a7372008-07-24 12:27:36 +03002393 }
2394 if (status == 0)
2395 musb_advance_schedule(ep->musb, urb, ep, is_in);
2396 return status;
2397}
2398
2399static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2400{
2401 struct musb *musb = hcd_to_musb(hcd);
2402 struct musb_qh *qh;
Felipe Balbi550a7372008-07-24 12:27:36 +03002403 unsigned long flags;
Sergei Shtylyov22a0d6f2009-03-27 12:56:26 -07002404 int is_in = usb_pipein(urb->pipe);
Felipe Balbi550a7372008-07-24 12:27:36 +03002405 int ret;
2406
Bin Liu19ca6822016-06-30 12:12:26 -05002407 trace_musb_urb_deq(musb, urb);
Felipe Balbi550a7372008-07-24 12:27:36 +03002408
2409 spin_lock_irqsave(&musb->lock, flags);
2410 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2411 if (ret)
2412 goto done;
2413
2414 qh = urb->hcpriv;
2415 if (!qh)
2416 goto done;
2417
Sergei Shtylyov22a0d6f2009-03-27 12:56:26 -07002418 /*
2419 * Any URB not actively programmed into endpoint hardware can be
Sergei Shtylyova2fd8142009-02-21 15:30:45 -08002420 * immediately given back; that's any URB not at the head of an
Felipe Balbi550a7372008-07-24 12:27:36 +03002421 * endpoint queue, unless someday we get real DMA queues. And even
Sergei Shtylyova2fd8142009-02-21 15:30:45 -08002422 * if it's at the head, it might not be known to the hardware...
Felipe Balbi550a7372008-07-24 12:27:36 +03002423 *
Sergei Shtylyov22a0d6f2009-03-27 12:56:26 -07002424 * Otherwise abort current transfer, pending DMA, etc.; urb->status
Felipe Balbi550a7372008-07-24 12:27:36 +03002425 * has already been updated. This is a synchronous abort; it'd be
2426 * OK to hold off until after some IRQ, though.
Sergei Shtylyov22a0d6f2009-03-27 12:56:26 -07002427 *
2428 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
Felipe Balbi550a7372008-07-24 12:27:36 +03002429 */
Sergei Shtylyov22a0d6f2009-03-27 12:56:26 -07002430 if (!qh->is_ready
2431 || urb->urb_list.prev != &qh->hep->urb_list
2432 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002433 int ready = qh->is_ready;
2434
Felipe Balbi550a7372008-07-24 12:27:36 +03002435 qh->is_ready = 0;
Sergei Shtylyovc9cd06b2009-03-27 12:58:31 -07002436 musb_giveback(musb, urb, 0);
Felipe Balbi550a7372008-07-24 12:27:36 +03002437 qh->is_ready = ready;
Sergei Shtylyova2fd8142009-02-21 15:30:45 -08002438
2439 /* If nothing else (usually musb_giveback) is using it
2440 * and its URB list has emptied, recycle this qh.
2441 */
2442 if (ready && list_empty(&qh->hep->urb_list)) {
2443 qh->hep->hcpriv = NULL;
2444 list_del(&qh->ring);
2445 kfree(qh);
2446 }
Felipe Balbi550a7372008-07-24 12:27:36 +03002447 } else
Sergei Shtylyov81ec4e42009-03-27 12:57:50 -07002448 ret = musb_cleanup_urb(urb, qh);
Felipe Balbi550a7372008-07-24 12:27:36 +03002449done:
2450 spin_unlock_irqrestore(&musb->lock, flags);
2451 return ret;
2452}
2453
2454/* disable an endpoint */
2455static void
2456musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2457{
Sergei Shtylyov22a0d6f2009-03-27 12:56:26 -07002458 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
Felipe Balbi550a7372008-07-24 12:27:36 +03002459 unsigned long flags;
2460 struct musb *musb = hcd_to_musb(hcd);
Sergei Shtylyovdc61d232009-02-21 15:31:01 -08002461 struct musb_qh *qh;
2462 struct urb *urb;
Felipe Balbi550a7372008-07-24 12:27:36 +03002463
Felipe Balbi550a7372008-07-24 12:27:36 +03002464 spin_lock_irqsave(&musb->lock, flags);
2465
Sergei Shtylyovdc61d232009-02-21 15:31:01 -08002466 qh = hep->hcpriv;
2467 if (qh == NULL)
2468 goto exit;
2469
Sergei Shtylyov22a0d6f2009-03-27 12:56:26 -07002470 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
Felipe Balbi550a7372008-07-24 12:27:36 +03002471
Sergei Shtylyov22a0d6f2009-03-27 12:56:26 -07002472 /* Kick the first URB off the hardware, if needed */
Felipe Balbi550a7372008-07-24 12:27:36 +03002473 qh->is_ready = 0;
Sergei Shtylyov22a0d6f2009-03-27 12:56:26 -07002474 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002475 urb = next_urb(qh);
2476
2477 /* make software (then hardware) stop ASAP */
2478 if (!urb->unlinked)
2479 urb->status = -ESHUTDOWN;
2480
2481 /* cleanup */
Sergei Shtylyov81ec4e42009-03-27 12:57:50 -07002482 musb_cleanup_urb(urb, qh);
Felipe Balbi550a7372008-07-24 12:27:36 +03002483
Sergei Shtylyovdc61d232009-02-21 15:31:01 -08002484 /* Then nuke all the others ... and advance the
2485 * queue on hw_ep (e.g. bulk ring) when we're done.
2486 */
2487 while (!list_empty(&hep->urb_list)) {
2488 urb = next_urb(qh);
2489 urb->status = -ESHUTDOWN;
2490 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2491 }
2492 } else {
2493 /* Just empty the queue; the hardware is busy with
2494 * other transfers, and since !qh->is_ready nothing
2495 * will activate any of these as it advances.
2496 */
2497 while (!list_empty(&hep->urb_list))
Sergei Shtylyovc9cd06b2009-03-27 12:58:31 -07002498 musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
Felipe Balbi550a7372008-07-24 12:27:36 +03002499
Sergei Shtylyovdc61d232009-02-21 15:31:01 -08002500 hep->hcpriv = NULL;
2501 list_del(&qh->ring);
2502 kfree(qh);
2503 }
2504exit:
Felipe Balbi550a7372008-07-24 12:27:36 +03002505 spin_unlock_irqrestore(&musb->lock, flags);
2506}
2507
2508static int musb_h_get_frame_number(struct usb_hcd *hcd)
2509{
2510 struct musb *musb = hcd_to_musb(hcd);
2511
2512 return musb_readw(musb->mregs, MUSB_FRAME);
2513}
2514
2515static int musb_h_start(struct usb_hcd *hcd)
2516{
2517 struct musb *musb = hcd_to_musb(hcd);
2518
2519 /* NOTE: musb_start() is called when the hub driver turns
2520 * on port power, or when (OTG) peripheral starts.
2521 */
2522 hcd->state = HC_STATE_RUNNING;
2523 musb->port1_status = 0;
2524 return 0;
2525}
2526
2527static void musb_h_stop(struct usb_hcd *hcd)
2528{
2529 musb_stop(hcd_to_musb(hcd));
2530 hcd->state = HC_STATE_HALT;
2531}
2532
2533static int musb_bus_suspend(struct usb_hcd *hcd)
2534{
2535 struct musb *musb = hcd_to_musb(hcd);
David Brownell89368d32009-07-01 03:36:16 -07002536 u8 devctl;
Felipe Balbi550a7372008-07-24 12:27:36 +03002537
Daniel Mack94f72132013-11-25 22:26:41 +01002538 musb_port_suspend(musb, true);
2539
David Brownell89368d32009-07-01 03:36:16 -07002540 if (!is_host_active(musb))
Felipe Balbi550a7372008-07-24 12:27:36 +03002541 return 0;
2542
Antoine Tenarte47d9252014-10-30 18:41:13 +01002543 switch (musb->xceiv->otg->state) {
David Brownell89368d32009-07-01 03:36:16 -07002544 case OTG_STATE_A_SUSPEND:
2545 return 0;
2546 case OTG_STATE_A_WAIT_VRISE:
2547 /* ID could be grounded even if there's no device
2548 * on the other end of the cable. NOTE that the
2549 * A_WAIT_VRISE timers are messy with MUSB...
2550 */
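		/*
		 * The two-bit VBUS field reads back as 11 (== MUSB_DEVCTL_VBUS)
		 * only once VBUS is above the VBus Valid threshold, which is
		 * presumably why full equality is checked below.
		 */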
2551 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2552 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
Antoine Tenarte47d9252014-10-30 18:41:13 +01002553 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
David Brownell89368d32009-07-01 03:36:16 -07002554 break;
2555 default:
2556 break;
2557 }
2558
2559 if (musb->is_active) {
2560 WARNING("trying to suspend as %s while active\n",
Antoine Tenarte47d9252014-10-30 18:41:13 +01002561 usb_otg_state_string(musb->xceiv->otg->state));
Felipe Balbi550a7372008-07-24 12:27:36 +03002562 return -EBUSY;
2563 } else
2564 return 0;
2565}
2566
2567static int musb_bus_resume(struct usb_hcd *hcd)
2568{
Daniel Mack869c5972013-11-26 13:31:14 +01002569 struct musb *musb = hcd_to_musb(hcd);
2570
2571 if (musb->config &&
2572 musb->config->host_port_deassert_reset_at_resume)
2573 musb_port_reset(musb, false);
2574
Felipe Balbi550a7372008-07-24 12:27:36 +03002575 return 0;
2576}
2577
Ruslan Bilovol8408fd12013-03-29 19:15:21 +02002578#ifndef CONFIG_MUSB_PIO_ONLY
2579
2580#define MUSB_USB_DMA_ALIGN 4
2581
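/*
 * Bounce buffer used by the (un)map_urb_for_dma hooks below when a URB's
 * transfer_buffer is not aligned to MUSB_USB_DMA_ALIGN bytes: data[] holds
 * the aligned copy, old_xfer_buffer the caller's original pointer.
 */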
2582struct musb_temp_buffer {
2583 void *kmalloc_ptr;
2584 void *old_xfer_buffer;
2585 u8 data[0];
2586};
2587
2588static void musb_free_temp_buffer(struct urb *urb)
2589{
2590 enum dma_data_direction dir;
2591 struct musb_temp_buffer *temp;
Johan Hovoldd72348f2015-04-23 16:06:50 +02002592 size_t length;
Ruslan Bilovol8408fd12013-03-29 19:15:21 +02002593
2594 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2595 return;
2596
2597 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2598
2599 temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
2600 data);
2601
2602 if (dir == DMA_FROM_DEVICE) {
Johan Hovoldd72348f2015-04-23 16:06:50 +02002603 if (usb_pipeisoc(urb->pipe))
2604 length = urb->transfer_buffer_length;
2605 else
2606 length = urb->actual_length;
2607
2608 memcpy(temp->old_xfer_buffer, temp->data, length);
Ruslan Bilovol8408fd12013-03-29 19:15:21 +02002609 }
2610 urb->transfer_buffer = temp->old_xfer_buffer;
2611 kfree(temp->kmalloc_ptr);
2612
2613 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2614}
2615
2616static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
2617{
2618 enum dma_data_direction dir;
2619 struct musb_temp_buffer *temp;
2620 void *kmalloc_ptr;
2621 size_t kmalloc_size;
2622
2623 if (urb->num_sgs || urb->sg ||
2624 urb->transfer_buffer_length == 0 ||
2625 !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
2626 return 0;
2627
2628 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2629
2630 /* Allocate a buffer with enough padding for alignment */
2631 kmalloc_size = urb->transfer_buffer_length +
2632 sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
2633
2634 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2635 if (!kmalloc_ptr)
2636 return -ENOMEM;
2637
2638 /* Position our struct temp_buffer such that data is aligned */
2639 temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
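	/*
	 * Aligning the struct pointer is enough: data[] sits after two
	 * pointer members, so its offset is itself a multiple of
	 * MUSB_USB_DMA_ALIGN and the payload ends up 4-byte aligned.
	 */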
2640
2641
2642 temp->kmalloc_ptr = kmalloc_ptr;
2643 temp->old_xfer_buffer = urb->transfer_buffer;
2644 if (dir == DMA_TO_DEVICE)
2645 memcpy(temp->data, urb->transfer_buffer,
2646 urb->transfer_buffer_length);
2647 urb->transfer_buffer = temp->data;
2648
2649 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2650
2651 return 0;
2652}
2653
2654static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2655 gfp_t mem_flags)
2656{
2657 struct musb *musb = hcd_to_musb(hcd);
2658 int ret;
2659
2660 /*
2661 * The DMA engine in RTL1.8 and above cannot handle
2662 * DMA addresses that are not aligned to a 4 byte boundary.
2663 * For such engines we implement the (un)map_urb_for_dma hooks.
2664 * Do not use these hooks for RTL<1.8
2665 */
2666 if (musb->hwvers < MUSB_HWVERS_1800)
2667 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2668
2669 ret = musb_alloc_temp_buffer(urb, mem_flags);
2670 if (ret)
2671 return ret;
2672
2673 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2674 if (ret)
2675 musb_free_temp_buffer(urb);
2676
2677 return ret;
2678}
2679
2680static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2681{
2682 struct musb *musb = hcd_to_musb(hcd);
2683
2684 usb_hcd_unmap_urb_for_dma(hcd, urb);
2685
2686 /* Do not use this hook for RTL<1.8 (see description above) */
2687 if (musb->hwvers < MUSB_HWVERS_1800)
2688 return;
2689
2690 musb_free_temp_buffer(urb);
2691}
2692#endif /* !CONFIG_MUSB_PIO_ONLY */
2693
Daniel Mack74c2e932013-04-10 21:55:45 +02002694static const struct hc_driver musb_hc_driver = {
Felipe Balbi550a7372008-07-24 12:27:36 +03002695 .description = "musb-hcd",
2696 .product_desc = "MUSB HDRC host driver",
Daniel Mack74c2e932013-04-10 21:55:45 +02002697 .hcd_priv_size = sizeof(struct musb *),
Bin Liuf551e132016-04-25 15:53:30 -05002698 .flags = HCD_USB2 | HCD_MEMORY,
Felipe Balbi550a7372008-07-24 12:27:36 +03002699
2700 /* not using irq handler or reset hooks from usbcore, since
2701 * those must be shared with peripheral code for OTG configs
2702 */
2703
2704 .start = musb_h_start,
2705 .stop = musb_h_stop,
2706
2707 .get_frame_number = musb_h_get_frame_number,
2708
2709 .urb_enqueue = musb_urb_enqueue,
2710 .urb_dequeue = musb_urb_dequeue,
2711 .endpoint_disable = musb_h_disable,
2712
Ruslan Bilovol8408fd12013-03-29 19:15:21 +02002713#ifndef CONFIG_MUSB_PIO_ONLY
2714 .map_urb_for_dma = musb_map_urb_for_dma,
2715 .unmap_urb_for_dma = musb_unmap_urb_for_dma,
2716#endif
2717
Felipe Balbi550a7372008-07-24 12:27:36 +03002718 .hub_status_data = musb_hub_status_data,
2719 .hub_control = musb_hub_control,
2720 .bus_suspend = musb_bus_suspend,
2721 .bus_resume = musb_bus_resume,
2722 /* .start_port_reset = NULL, */
2723 /* .hub_irq_enable = NULL, */
2724};
Daniel Mack0b3eba42013-04-10 21:55:42 +02002725
Daniel Mack74c2e932013-04-10 21:55:45 +02002726int musb_host_alloc(struct musb *musb)
2727{
2728 struct device *dev = musb->controller;
2729
2730 /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
2731 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2732 if (!musb->hcd)
2733 return -EINVAL;
2734
2735 *musb->hcd->hcd_priv = (unsigned long) musb;
2736 musb->hcd->self.uses_pio_for_control = 1;
2737 musb->hcd->uses_new_polling = 1;
2738 musb->hcd->has_tt = 1;
2739
2740 return 0;
2741}
2742
2743void musb_host_cleanup(struct musb *musb)
2744{
Sebastian Andrzej Siewior90474282013-08-20 18:35:44 +02002745 if (musb->port_mode == MUSB_PORT_MODE_GADGET)
2746 return;
Daniel Mack74c2e932013-04-10 21:55:45 +02002747 usb_remove_hcd(musb->hcd);
Daniel Mack74c2e932013-04-10 21:55:45 +02002748}
2749
2750void musb_host_free(struct musb *musb)
2751{
2752 usb_put_hcd(musb->hcd);
2753}
2754
Daniel Mack2cc65fe2013-04-10 21:55:47 +02002755int musb_host_setup(struct musb *musb, int power_budget)
2756{
2757 int ret;
2758 struct usb_hcd *hcd = musb->hcd;
2759
Tony Lindgren3c50ffe2017-05-17 11:23:10 -05002760 if (musb->port_mode == MUSB_PORT_MODE_HOST) {
2761 MUSB_HST_MODE(musb);
2762 musb->xceiv->otg->default_a = 1;
2763 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2764 }
Daniel Mack2cc65fe2013-04-10 21:55:47 +02002765 otg_set_host(musb->xceiv->otg, &hcd->self);
2766 hcd->self.otg_port = 1;
2767 musb->xceiv->otg->host = &hcd->self;
2768 hcd->power_budget = 2 * (power_budget ? : 250);
2769
2770 ret = usb_add_hcd(hcd, 0, 0);
2771 if (ret < 0)
2772 return ret;
2773
Peter Chen3c9740a2013-11-05 10:46:02 +08002774 device_wakeup_enable(hcd->self.controller);
Daniel Mack2cc65fe2013-04-10 21:55:47 +02002775 return 0;
2776}
2777
Daniel Mack0b3eba42013-04-10 21:55:42 +02002778void musb_host_resume_root_hub(struct musb *musb)
2779{
Daniel Mack74c2e932013-04-10 21:55:45 +02002780 usb_hcd_resume_root_hub(musb->hcd);
Daniel Mack0b3eba42013-04-10 21:55:42 +02002781}
2782
2783void musb_host_poke_root_hub(struct musb *musb)
2784{
2785 MUSB_HST_MODE(musb);
Daniel Mack74c2e932013-04-10 21:55:45 +02002786 if (musb->hcd->status_urb)
2787 usb_hcd_poll_rh_status(musb->hcd);
Daniel Mack0b3eba42013-04-10 21:55:42 +02002788 else
Daniel Mack74c2e932013-04-10 21:55:45 +02002789 usb_hcd_resume_root_hub(musb->hcd);
Daniel Mack0b3eba42013-04-10 21:55:42 +02002790}