/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"

/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
					(req->map_state != UN_MAPPED))

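/*
 * map_state tracks who owns the DMA mapping for a request: UN_MAPPED
 * means there is no mapping; MUSB_MAPPED means map_dma_buffer() created
 * it and unmap_dma_buffer() must undo it; PRE_MAPPED means the gadget
 * driver supplied request.dma itself, so we only sync the buffer between
 * CPU and device.
 */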
/* Maps the buffer to dma  */

static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dma_addr_t dma_addr;
		int ret;

		dma_addr = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		ret = dma_mapping_error(musb->controller, dma_addr);
		if (ret)
			return;

		request->request.dma = dma_addr;
		request->map_state = MUSB_MAPPED;
	} else {
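		/*
		 * The gadget driver mapped the buffer already (request.dma
		 * is valid), so just hand ownership to the device.
		 */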
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}

/* Unmap the buffer from DMA and map it back to the CPU */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	struct musb_ep *musb_ep = request->ep;

	if (!is_buffer_mapped(request) || !musb_ep->dma)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}

/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
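/*
 * The __releases/__acquires annotations below tell sparse that the
 * controller lock is dropped around the gadget driver's completion
 * callback and re-acquired before returning.
 */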
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);

	if (!dma_mapping_error(&musb->g.dev, request->dma))
		unmap_dma_buffer(req, musb);

	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	usb_gadget_giveback_request(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * Caller has locked the controller, blocked irqs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
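			/*
			 * Write FLUSHFIFO twice, presumably to cover a
			 * second packet queued in a double-buffered FIFO.
			 */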
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef	CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		{
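			/*
			 * Mode 0 moves at most one packet per
			 * channel_program() and leaves TXPKTRDY to
			 * software; mode 1 lets the controller split a
			 * large transfer into max-packet chunks and set
			 * TXPKTRDY itself (see the mode 0/1 discussion
			 * in rxstate() below).
			 */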
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					/*
					 * Enable Autoset according to table
					 * below
					 * bulk_split hb_mult	Autoset_Enable
					 *	0	0	Yes (normal)
					 *	0	>0	No (high-bandwidth ISO)
					 *	1	0	Yes (HS bulk)
					 *	1	>0	Yes (FS bulk)
					 */
					if (!musb_ep->hb_mult ||
						(musb_ep->hb_mult &&
						 can_bulk_split(musb,
							musb_ep->type)))
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#endif
		if (is_cppi_enabled(musb)) {
			/* program endpoint CSR first, then setup DMA */
			csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
			csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
				MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
						~MUSB_TXCSR_P_UNDERRUN) | csr);

			/* ensure writebuffer is empty */
			csr = musb_readw(epio, MUSB_TXCSR);

			/*
			 * NOTE host side sets DMAENAB later than this; both are
			 * OK since the transfer dma glue (between CPPI and
			 * Mentor fifos) just tells CPPI it could start. Data
			 * only moves to the USB TX fifo when both fifos are
			 * ready.
			 */
			/*
			 * "mode" is irrelevant here; handle terminating ZLPs
			 * like PIO does, since the hardware RNDIS mode seems
			 * unreliable except for the
			 * last-packet-is-already-short case.
			 */
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					0,
					request->dma + request->actual,
					request_size);
			if (!use_dma) {
				c->channel_release(musb_ep->dma);
				musb_ep->dma = NULL;
				csr &= ~MUSB_TXCSR_DMAENAB;
				musb_writew(epio, MUSB_TXCSR, csr);
				/* invariant: request->buf is non-null */
			}
		} else if (tusb_dma_omap(musb))
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					request->zero,
					request->dma + request->actual,
					request_size);
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;
		bool	short_packet = false;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length)
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
			short_packet = true;

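		/*
		 * With Inventra/UX500 DMA, mode 0 leaves TXPKTRDY to
		 * software, and mode 1 can leave a trailing short packet
		 * in the FIFO; both cases need the explicit TXPKTRDY
		 * write below.
		 */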
		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
			(is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1)))))
			short_packet = true;

		if (short_packet) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			dev_dbg(musb->controller, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and acquired after some time. During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers.
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				dev_dbg(musb->controller, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}

/* ------------------------------------------------------------ */

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		len = 0;
	u16			fifo_count;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	fifo_count = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		fifo_count = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */

		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_buffer_mapped(req)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;
				unsigned int transfer_size;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work. But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, the end is signified either by a short
	 * packet, or filling the last byte of the buffer. (Sending extra
	 * data in that last packet should trigger an overflow fault.) But
	 * in mode 1, we don't get DMA completion interrupt for short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
					musb_ep->dma->desired_mode = 1;
				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min(request->length - request->actual,
							(unsigned)fifo_count);
					musb_ep->dma->desired_mode = 0;
				}

				use_dma = c->channel_program(
						channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						request->dma
						+ request->actual,
						transfer_size);

				if (use_dma)
					return;
			}
#elif defined(CONFIG_USB_UX500_DMA)
			if ((is_buffer_mapped(req)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				unsigned int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (fifo_count < musb_ep->packet_sz)
					transfer_size = fifo_count;
				else if (request->short_not_ok)
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							(unsigned)fifo_count);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))

					return;
			}
#endif	/* Mentor's DMA */

			len = request->length - request->actual;
			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					fifo_count, len,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef	CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap(musb) && is_buffer_mapped(req)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif
			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reach the end or short packet detected */
	if (request->actual == request->length ||
	    fifo_count < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		dev_dbg(musb->controller, "%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

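	/*
	 * A DMA transfer completed (or was stopped by a short packet):
	 * turn the channel off, credit the bytes it moved, then decide
	 * whether the request is done.
	 */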
	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and acquired after some time. During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers.
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp(desc);
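	/*
	 * Bits 10:0 of wMaxPacketSize are the packet size; bits 12:11
	 * encode up to two additional transactions per microframe for
	 * high-bandwidth isochronous/interrupt endpoints.
	 */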
	if (tmp & ~0x07ff) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
			goto fail;
		}
		musb_ep->hb_mult = (tmp >> 11) & 3;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = tmp & 0x7ff;
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		musb->intrtxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok) {
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		} else {
			if (can_bulk_split(musb, musb_ep->type))
				musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
							musb_ep->packet_sz) - 1;
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));
		}

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
			& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		musb->intrrxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			} s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Disable an endpoint flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		musb->intrtxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		musb->intrrxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	musb_ep->desc = NULL;
	musb_ep->end_point.desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

	return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb		*musb = musb_ep->musb;
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (!request) {
		dev_dbg(musb->controller, "not enough memory\n");
		return NULL;
	}

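	/* DMA_ADDR_INVALID tells map_dma_buffer() no mapping exists yet */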
	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}

static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
		req->tx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

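	/*
	 * Map (or sync) the buffer for DMA before taking the lock;
	 * map_dma_buffer() is a no-op on PIO-only builds.
	 */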
	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		unmap_dma_buffer(request, musb);
		goto unlock;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

unlock:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}

static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*req = to_musb_request(request);
	struct musb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
		return -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

1350/*
1351 * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
1352 * data but will queue requests.
1353 *
1354 * exported to ep0 code
1355 */
Felipe Balbi1b6c3b02009-12-04 15:47:46 +02001356static int musb_gadget_set_halt(struct usb_ep *ep, int value)
Felipe Balbi550a7372008-07-24 12:27:36 +03001357{
1358 struct musb_ep *musb_ep = to_musb_ep(ep);
1359 u8 epnum = musb_ep->current_epnum;
1360 struct musb *musb = musb_ep->musb;
1361 void __iomem *epio = musb->endpoints[epnum].regs;
1362 void __iomem *mbase;
1363 unsigned long flags;
1364 u16 csr;
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001365 struct musb_request *request;
Felipe Balbi550a7372008-07-24 12:27:36 +03001366 int status = 0;
1367
1368 if (!ep)
1369 return -EINVAL;
1370 mbase = musb->mregs;
1371
1372 spin_lock_irqsave(&musb->lock, flags);
1373
1374 if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
1375 status = -EINVAL;
1376 goto done;
1377 }
1378
1379 musb_ep_select(mbase, epnum);
1380
Felipe Balbiad1adb82011-02-16 12:40:05 +02001381 request = next_request(musb_ep);
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001382 if (value) {
1383 if (request) {
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001384 dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001385 ep->name);
1386 status = -EAGAIN;
1387 goto done;
Felipe Balbi550a7372008-07-24 12:27:36 +03001388 }
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001389 /* Cannot portably stall with non-empty FIFO */
1390 if (musb_ep->is_in) {
1391 csr = musb_readw(epio, MUSB_TXCSR);
1392 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001393 dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001394 status = -EAGAIN;
1395 goto done;
1396 }
1397 }
Sergei Shtylyov47e97602009-11-18 22:51:51 +03001398 } else
1399 musb_ep->wedged = 0;
Felipe Balbi550a7372008-07-24 12:27:36 +03001400
1401 /* set/clear the stall and toggle bits */
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001402 dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
Felipe Balbi550a7372008-07-24 12:27:36 +03001403 if (musb_ep->is_in) {
1404 csr = musb_readw(epio, MUSB_TXCSR);
Felipe Balbi550a7372008-07-24 12:27:36 +03001405 csr |= MUSB_TXCSR_P_WZC_BITS
1406 | MUSB_TXCSR_CLRDATATOG;
1407 if (value)
1408 csr |= MUSB_TXCSR_P_SENDSTALL;
1409 else
1410 csr &= ~(MUSB_TXCSR_P_SENDSTALL
1411 | MUSB_TXCSR_P_SENTSTALL);
1412 csr &= ~MUSB_TXCSR_TXPKTRDY;
1413 musb_writew(epio, MUSB_TXCSR, csr);
1414 } else {
1415 csr = musb_readw(epio, MUSB_RXCSR);
1416 csr |= MUSB_RXCSR_P_WZC_BITS
1417 | MUSB_RXCSR_FLUSHFIFO
1418 | MUSB_RXCSR_CLRDATATOG;
1419 if (value)
1420 csr |= MUSB_RXCSR_P_SENDSTALL;
1421 else
1422 csr &= ~(MUSB_RXCSR_P_SENDSTALL
1423 | MUSB_RXCSR_P_SENTSTALL);
1424 musb_writew(epio, MUSB_RXCSR, csr);
1425 }
1426
Felipe Balbi550a7372008-07-24 12:27:36 +03001427 /* maybe start the first request in the queue */
1428 if (!musb_ep->busy && !value && request) {
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001429 dev_dbg(musb->controller, "restarting the request\n");
Felipe Balbi550a7372008-07-24 12:27:36 +03001430 musb_ep_restart(musb, request);
1431 }
1432
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001433done:
Felipe Balbi550a7372008-07-24 12:27:36 +03001434 spin_unlock_irqrestore(&musb->lock, flags);
1435 return status;
1436}
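/*
 * Illustrative sketch (hypothetical usage; my_ep is a placeholder): a
 * function driver stalls an endpoint to signal a protocol error, then
 * clears the stall once it has recovered. Both calls reach
 * musb_gadget_set_halt() through the ep_ops table below; the -EAGAIN
 * paths above explain why stalling can fail while I/O is in flight.
 *
 *	if (usb_ep_set_halt(my_ep) == -EAGAIN)
 *		pr_debug("%s busy, stall deferred\n", my_ep->name);
 *
 *	... and later, once recovered:
 *
 *	usb_ep_clear_halt(my_ep);
 */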
1437
Sergei Shtylyov47e97602009-11-18 22:51:51 +03001438/*
1439 * Sets the halt feature; host clear-halt requests are ignored while wedged
1440 */
Felipe Balbi1b6c3b02009-12-04 15:47:46 +02001441static int musb_gadget_set_wedge(struct usb_ep *ep)
Sergei Shtylyov47e97602009-11-18 22:51:51 +03001442{
1443 struct musb_ep *musb_ep = to_musb_ep(ep);
1444
1445 if (!ep)
1446 return -EINVAL;
1447
1448 musb_ep->wedged = 1;
1449
1450 return usb_ep_set_halt(ep);
1451}
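/*
 * Illustrative sketch (hypothetical): mass-storage style function
 * drivers wedge a bulk endpoint so that a host-issued
 * CLEAR_FEATURE(ENDPOINT_HALT) cannot unstall it behind their back.
 * As the set_halt() path above shows, a usb_ep_clear_halt() issued by
 * the gadget driver itself clears both the wedge and the stall.
 *
 *	usb_ep_set_wedge(my_ep);
 *	... run the function's own reset/recovery protocol ...
 *	usb_ep_clear_halt(my_ep);
 */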
1452
Felipe Balbi550a7372008-07-24 12:27:36 +03001453static int musb_gadget_fifo_status(struct usb_ep *ep)
1454{
1455 struct musb_ep *musb_ep = to_musb_ep(ep);
1456 void __iomem *epio = musb_ep->hw_ep->regs;
1457 int retval = -EINVAL;
1458
1459 if (musb_ep->desc && !musb_ep->is_in) {
1460 struct musb *musb = musb_ep->musb;
1461 int epnum = musb_ep->current_epnum;
1462 void __iomem *mbase = musb->mregs;
1463 unsigned long flags;
1464
1465 spin_lock_irqsave(&musb->lock, flags);
1466
1467 musb_ep_select(mbase, epnum);
1468 /* FIXME return zero unless RXPKTRDY is set */
1469 retval = musb_readw(epio, MUSB_RXCOUNT);
1470
1471 spin_unlock_irqrestore(&musb->lock, flags);
1472 }
1473 return retval;
1474}
1475
1476static void musb_gadget_fifo_flush(struct usb_ep *ep)
1477{
1478 struct musb_ep *musb_ep = to_musb_ep(ep);
1479 struct musb *musb = musb_ep->musb;
1480 u8 epnum = musb_ep->current_epnum;
1481 void __iomem *epio = musb->endpoints[epnum].regs;
1482 void __iomem *mbase;
1483 unsigned long flags;
Sebastian Andrzej Siewiorb18d26f2012-10-30 19:52:26 +01001484 u16 csr;
Felipe Balbi550a7372008-07-24 12:27:36 +03001485
1486 mbase = musb->mregs;
1487
1488 spin_lock_irqsave(&musb->lock, flags);
1489 musb_ep_select(mbase, (u8) epnum);
1490
1491 /* disable interrupts */
Sebastian Andrzej Siewiorb18d26f2012-10-30 19:52:26 +01001492 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
Felipe Balbi550a7372008-07-24 12:27:36 +03001493
1494 if (musb_ep->is_in) {
1495 csr = musb_readw(epio, MUSB_TXCSR);
1496 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1497 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
Yauheni Kaliuta4858f062011-06-08 17:12:02 +03001498 /*
1499			 * Setting both TXPKTRDY and FLUSHFIFO makes the controller
1500			 * interrupt the current FIFO load without flushing the
1501			 * already loaded packets.
1502 */
1503 csr &= ~MUSB_TXCSR_TXPKTRDY;
Felipe Balbi550a7372008-07-24 12:27:36 +03001504 musb_writew(epio, MUSB_TXCSR, csr);
1505 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1506 musb_writew(epio, MUSB_TXCSR, csr);
1507 }
1508 } else {
1509 csr = musb_readw(epio, MUSB_RXCSR);
1510 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1511 musb_writew(epio, MUSB_RXCSR, csr);
1512 musb_writew(epio, MUSB_RXCSR, csr);
1513 }
1514
1515 /* re-enable interrupt */
Sebastian Andrzej Siewiorb18d26f2012-10-30 19:52:26 +01001516 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
Felipe Balbi550a7372008-07-24 12:27:36 +03001517 spin_unlock_irqrestore(&musb->lock, flags);
1518}
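/*
 * Illustrative sketch (hypothetical; my_out_ep is a placeholder): a
 * function driver discarding stale OUT data before reconfiguring,
 * using the two hooks above. Note that fifo_status is only supported
 * for OUT endpoints in musb_gadget_fifo_status().
 *
 *	if (usb_ep_fifo_status(my_out_ep) > 0)
 *		usb_ep_fifo_flush(my_out_ep);
 */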
1519
1520static const struct usb_ep_ops musb_ep_ops = {
1521 .enable = musb_gadget_enable,
1522 .disable = musb_gadget_disable,
1523 .alloc_request = musb_alloc_request,
1524 .free_request = musb_free_request,
1525 .queue = musb_gadget_queue,
1526 .dequeue = musb_gadget_dequeue,
1527 .set_halt = musb_gadget_set_halt,
Sergei Shtylyov47e97602009-11-18 22:51:51 +03001528 .set_wedge = musb_gadget_set_wedge,
Felipe Balbi550a7372008-07-24 12:27:36 +03001529 .fifo_status = musb_gadget_fifo_status,
1530 .fifo_flush = musb_gadget_fifo_flush
1531};
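/*
 * Function drivers never call these ops directly; the gadget core
 * dispatches through this table. A rough sketch of the indirection,
 * assuming the usual include/linux/usb/gadget.h wrappers:
 *
 *	usb_ep_queue(ep, req, gfp)  ->  ep->ops->queue(ep, req, gfp)
 *	usb_ep_set_halt(ep)         ->  ep->ops->set_halt(ep, 1)
 *	usb_ep_clear_halt(ep)       ->  ep->ops->set_halt(ep, 0)
 */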
1532
1533/* ----------------------------------------------------------------------- */
1534
1535static int musb_gadget_get_frame(struct usb_gadget *gadget)
1536{
1537 struct musb *musb = gadget_to_musb(gadget);
1538
1539 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1540}
1541
1542static int musb_gadget_wakeup(struct usb_gadget *gadget)
1543{
1544 struct musb *musb = gadget_to_musb(gadget);
1545 void __iomem *mregs = musb->mregs;
1546 unsigned long flags;
1547 int status = -EINVAL;
1548 u8 power, devctl;
1549 int retries;
1550
1551 spin_lock_irqsave(&musb->lock, flags);
1552
Antoine Tenarte47d9252014-10-30 18:41:13 +01001553 switch (musb->xceiv->otg->state) {
Felipe Balbi550a7372008-07-24 12:27:36 +03001554 case OTG_STATE_B_PERIPHERAL:
1555 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1556 * that's part of the standard usb 1.1 state machine, and
1557 * doesn't affect OTG transitions.
1558 */
1559 if (musb->may_wakeup && musb->is_suspended)
1560 break;
1561 goto done;
1562 case OTG_STATE_B_IDLE:
1563 /* Start SRP ... OTG not required. */
1564 devctl = musb_readb(mregs, MUSB_DEVCTL);
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001565 dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
Felipe Balbi550a7372008-07-24 12:27:36 +03001566 devctl |= MUSB_DEVCTL_SESSION;
1567 musb_writeb(mregs, MUSB_DEVCTL, devctl);
1568 devctl = musb_readb(mregs, MUSB_DEVCTL);
1569 retries = 100;
1570 while (!(devctl & MUSB_DEVCTL_SESSION)) {
1571 devctl = musb_readb(mregs, MUSB_DEVCTL);
1572 if (retries-- < 1)
1573 break;
1574 }
1575 retries = 10000;
1576 while (devctl & MUSB_DEVCTL_SESSION) {
1577 devctl = musb_readb(mregs, MUSB_DEVCTL);
1578 if (retries-- < 1)
1579 break;
1580 }
1581
Hema HK86205432011-03-22 16:54:22 +05301582 spin_unlock_irqrestore(&musb->lock, flags);
Heikki Krogerus6e13c652012-02-13 13:24:20 +02001583 otg_start_srp(musb->xceiv->otg);
Hema HK86205432011-03-22 16:54:22 +05301584 spin_lock_irqsave(&musb->lock, flags);
1585
Felipe Balbi550a7372008-07-24 12:27:36 +03001586 /* Block idling for at least 1s */
1587 musb_platform_try_idle(musb,
1588 jiffies + msecs_to_jiffies(1 * HZ));
1589
1590 status = 0;
1591 goto done;
1592 default:
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001593 dev_dbg(musb->controller, "Unhandled wake: %s\n",
Antoine Tenarte47d9252014-10-30 18:41:13 +01001594 usb_otg_state_string(musb->xceiv->otg->state));
Felipe Balbi550a7372008-07-24 12:27:36 +03001595 goto done;
1596 }
1597
1598 status = 0;
1599
1600 power = musb_readb(mregs, MUSB_POWER);
1601 power |= MUSB_POWER_RESUME;
1602 musb_writeb(mregs, MUSB_POWER, power);
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001603 dev_dbg(musb->controller, "issue wakeup\n");
Felipe Balbi550a7372008-07-24 12:27:36 +03001604
1605	/* FIXME do this next chunk in a timer callback, no mdelay */
1606 mdelay(2);
1607
1608 power = musb_readb(mregs, MUSB_POWER);
1609 power &= ~MUSB_POWER_RESUME;
1610 musb_writeb(mregs, MUSB_POWER, power);
1611done:
1612 spin_unlock_irqrestore(&musb->lock, flags);
1613 return status;
1614}
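/*
 * Illustrative sketch (hypothetical; my_gadget is a placeholder): a
 * function driver requesting remote wakeup after the host suspended
 * the bus. This lands in musb_gadget_wakeup() above, which only acts
 * in B_PERIPHERAL (resume signalling) and B_IDLE (SRP).
 *
 *	if (usb_gadget_wakeup(my_gadget) < 0)
 *		pr_debug("remote wakeup not possible in this state\n");
 */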
1615
1616static int
1617musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1618{
Peter Chendadac982015-01-28 16:32:41 +08001619 gadget->is_selfpowered = !!is_selfpowered;
Felipe Balbi550a7372008-07-24 12:27:36 +03001620 return 0;
1621}
1622
1623static void musb_pullup(struct musb *musb, int is_on)
1624{
1625 u8 power;
1626
1627 power = musb_readb(musb->mregs, MUSB_POWER);
1628 if (is_on)
1629 power |= MUSB_POWER_SOFTCONN;
1630 else
1631 power &= ~MUSB_POWER_SOFTCONN;
1632
1633 /* FIXME if on, HdrcStart; if off, HdrcStop */
1634
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001635 dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1636 is_on ? "on" : "off");
Felipe Balbi550a7372008-07-24 12:27:36 +03001637 musb_writeb(musb->mregs, MUSB_POWER, power);
1638}
1639
1640#if 0
1641static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1642{
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001643 dev_dbg(musb->controller, "<= %s =>\n", __func__);
Felipe Balbi550a7372008-07-24 12:27:36 +03001644
1645 /*
1646 * FIXME iff driver's softconnect flag is set (as it is during probe,
1647 * though that can clear it), just musb_pullup().
1648 */
1649
1650 return -EINVAL;
1651}
1652#endif
1653
1654static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1655{
1656 struct musb *musb = gadget_to_musb(gadget);
1657
David Brownell84e250f2009-03-31 12:30:04 -07001658 if (!musb->xceiv->set_power)
Felipe Balbi550a7372008-07-24 12:27:36 +03001659 return -EOPNOTSUPP;
Heikki Krogerusb96d3b02012-02-13 13:24:18 +02001660 return usb_phy_set_power(musb->xceiv, mA);
Felipe Balbi550a7372008-07-24 12:27:36 +03001661}
1662
1663static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1664{
1665 struct musb *musb = gadget_to_musb(gadget);
1666 unsigned long flags;
1667
1668 is_on = !!is_on;
1669
John Stultz93e098a2011-07-20 17:09:34 -07001670 pm_runtime_get_sync(musb->controller);
1671
Felipe Balbi550a7372008-07-24 12:27:36 +03001672 /* NOTE: this assumes we are sensing vbus; we'd rather
1673 * not pullup unless the B-session is active.
1674 */
1675 spin_lock_irqsave(&musb->lock, flags);
1676 if (is_on != musb->softconnect) {
1677 musb->softconnect = is_on;
1678 musb_pullup(musb, is_on);
1679 }
1680 spin_unlock_irqrestore(&musb->lock, flags);
John Stultz93e098a2011-07-20 17:09:34 -07001681
1682 pm_runtime_put(musb->controller);
1683
Felipe Balbi550a7372008-07-24 12:27:36 +03001684 return 0;
1685}
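/*
 * Illustrative sketch (hypothetical): soft connect/disconnect as seen
 * by a gadget driver. usb_gadget_connect() and usb_gadget_disconnect()
 * invoke the pullup op above with is_on = 1/0, so a driver can drop
 * off the bus and reappear without being unloaded:
 *
 *	usb_gadget_disconnect(my_gadget);
 *	... change configuration, descriptors, etc ...
 *	usb_gadget_connect(my_gadget);
 */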
1686
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001687static int musb_gadget_start(struct usb_gadget *g,
1688 struct usb_gadget_driver *driver);
Felipe Balbi22835b82014-10-17 12:05:12 -05001689static int musb_gadget_stop(struct usb_gadget *g);
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03001690
Felipe Balbi550a7372008-07-24 12:27:36 +03001691static const struct usb_gadget_ops musb_gadget_operations = {
1692 .get_frame = musb_gadget_get_frame,
1693 .wakeup = musb_gadget_wakeup,
1694 .set_selfpowered = musb_gadget_set_self_powered,
1695 /* .vbus_session = musb_gadget_vbus_session, */
1696 .vbus_draw = musb_gadget_vbus_draw,
1697 .pullup = musb_gadget_pullup,
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001698 .udc_start = musb_gadget_start,
1699 .udc_stop = musb_gadget_stop,
Felipe Balbi550a7372008-07-24 12:27:36 +03001700};
1701
1702/* ----------------------------------------------------------------------- */
1703
1704/* Registration */
1705
1706/* Only this registration code "knows" the rule (from USB standards)
1707 * about there being only one external upstream port. It assumes
1708 * all peripheral ports are external...
1709 */
Felipe Balbi550a7372008-07-24 12:27:36 +03001710
Bill Pemberton41ac7b32012-11-19 13:21:48 -05001711static void
Felipe Balbi550a7372008-07-24 12:27:36 +03001712init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1713{
1714 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1715
1716 memset(ep, 0, sizeof *ep);
1717
1718 ep->current_epnum = epnum;
1719 ep->musb = musb;
1720 ep->hw_ep = hw_ep;
1721 ep->is_in = is_in;
1722
1723 INIT_LIST_HEAD(&ep->req_list);
1724
1725 sprintf(ep->name, "ep%d%s", epnum,
1726 (!epnum || hw_ep->is_shared_fifo) ? "" : (
1727 is_in ? "in" : "out"));
1728 ep->end_point.name = ep->name;
1729 INIT_LIST_HEAD(&ep->end_point.ep_list);
1730 if (!epnum) {
Robert Baldygae117e742013-12-13 12:23:38 +01001731 usb_ep_set_maxpacket_limit(&ep->end_point, 64);
Felipe Balbi550a7372008-07-24 12:27:36 +03001732 ep->end_point.ops = &musb_g_ep0_ops;
1733 musb->g.ep0 = &ep->end_point;
1734 } else {
1735 if (is_in)
Robert Baldygae117e742013-12-13 12:23:38 +01001736 usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
Felipe Balbi550a7372008-07-24 12:27:36 +03001737 else
Robert Baldygae117e742013-12-13 12:23:38 +01001738 usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
Felipe Balbi550a7372008-07-24 12:27:36 +03001739 ep->end_point.ops = &musb_ep_ops;
1740 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1741 }
1742}
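/*
 * For reference, the sprintf() above produces the conventional MUSB
 * endpoint names that function drivers and usb_ep_autoconfig() see,
 * e.g. on a controller with split TX/RX FIFOs:
 *
 *	ep0		shared control endpoint
 *	ep1in, ep1out	separate IN and OUT endpoints
 *	ep2		a shared-FIFO endpoint, no direction suffix
 */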
1743
1744/*
1745 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1746 * to the rest of the driver state.
1747 */
Bill Pemberton41ac7b32012-11-19 13:21:48 -05001748static inline void musb_g_init_endpoints(struct musb *musb)
Felipe Balbi550a7372008-07-24 12:27:36 +03001749{
1750 u8 epnum;
1751 struct musb_hw_ep *hw_ep;
1752 unsigned count = 0;
1753
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001754 /* initialize endpoint list just once */
Felipe Balbi550a7372008-07-24 12:27:36 +03001755 INIT_LIST_HEAD(&(musb->g.ep_list));
1756
1757 for (epnum = 0, hw_ep = musb->endpoints;
1758 epnum < musb->nr_endpoints;
1759 epnum++, hw_ep++) {
1760 if (hw_ep->is_shared_fifo /* || !epnum */) {
1761 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1762 count++;
1763 } else {
1764 if (hw_ep->max_packet_sz_tx) {
1765 init_peripheral_ep(musb, &hw_ep->ep_in,
1766 epnum, 1);
1767 count++;
1768 }
1769 if (hw_ep->max_packet_sz_rx) {
1770 init_peripheral_ep(musb, &hw_ep->ep_out,
1771 epnum, 0);
1772 count++;
1773 }
1774 }
1775 }
1776}
1777
1778/* called once during driver setup to initialize and link into
1779 * the driver model; memory is zeroed.
1780 */
Bill Pemberton41ac7b32012-11-19 13:21:48 -05001781int musb_gadget_setup(struct musb *musb)
Felipe Balbi550a7372008-07-24 12:27:36 +03001782{
1783 int status;
1784
1785 /* REVISIT minor race: if (erroneously) setting up two
1786 * musb peripherals at the same time, only the bus lock
1787 * is probably held.
1788 */
Felipe Balbi550a7372008-07-24 12:27:36 +03001789
1790 musb->g.ops = &musb_gadget_operations;
Michal Nazarewiczd327ab52011-11-19 18:27:37 +01001791 musb->g.max_speed = USB_SPEED_HIGH;
Felipe Balbi550a7372008-07-24 12:27:36 +03001792 musb->g.speed = USB_SPEED_UNKNOWN;
1793
Bin Liu1374a4302013-09-17 12:43:13 -05001794 MUSB_DEV_MODE(musb);
1795 musb->xceiv->otg->default_a = 0;
Antoine Tenarte47d9252014-10-30 18:41:13 +01001796 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
Bin Liu1374a4302013-09-17 12:43:13 -05001797
Felipe Balbi550a7372008-07-24 12:27:36 +03001798 /* this "gadget" abstracts/virtualizes the controller */
Felipe Balbi550a7372008-07-24 12:27:36 +03001799 musb->g.name = musb_driver_name;
Apelete Seketelifd3923a2013-11-19 23:18:20 +01001800#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
Felipe Balbi032ec492011-11-24 15:46:26 +02001801 musb->g.is_otg = 1;
Apelete Seketelifd3923a2013-11-19 23:18:20 +01001802#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
1803 musb->g.is_otg = 0;
1804#endif
Felipe Balbi550a7372008-07-24 12:27:36 +03001805
1806 musb_g_init_endpoints(musb);
1807
1808 musb->is_active = 0;
1809 musb_platform_try_idle(musb, 0);
1810
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03001811 status = usb_add_gadget_udc(musb->controller, &musb->g);
1812 if (status)
1813 goto err;
1814
1815 return 0;
1816err:
Sebastian Andrzej Siewior6193d692011-08-10 11:01:57 +02001817 musb->g.dev.parent = NULL;
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03001818 device_unregister(&musb->g.dev);
Felipe Balbi550a7372008-07-24 12:27:36 +03001819 return status;
1820}
1821
1822void musb_gadget_cleanup(struct musb *musb)
1823{
Sebastian Andrzej Siewior90474282013-08-20 18:35:44 +02001824 if (musb->port_mode == MUSB_PORT_MODE_HOST)
1825 return;
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03001826 usb_del_gadget_udc(&musb->g);
Felipe Balbi550a7372008-07-24 12:27:36 +03001827}
1828
1829/*
1830 * Register the gadget driver. Invoked via the UDC core when a
1831 * gadget driver is bound to this controller.
1832 *
1833 * -EINVAL the driver is invalid (e.g. its max_speed is below high speed)
1834 * -EBUSY another gadget is already using the controller
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001835 * -ENOMEM no memory to perform the operation
Felipe Balbi550a7372008-07-24 12:27:36 +03001836 *
1837 * @param driver the gadget driver
1838 * @return <0 if error, 0 if everything is fine
1839 */
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001840static int musb_gadget_start(struct usb_gadget *g,
1841 struct usb_gadget_driver *driver)
Felipe Balbi550a7372008-07-24 12:27:36 +03001842{
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001843 struct musb *musb = gadget_to_musb(g);
Heikki Krogerusd445b6d2012-02-13 13:24:15 +02001844 struct usb_otg *otg = musb->xceiv->otg;
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001845 unsigned long flags;
Felipe Balbi032ec492011-11-24 15:46:26 +02001846 int retval = 0;
Felipe Balbi550a7372008-07-24 12:27:36 +03001847
Felipe Balbi032ec492011-11-24 15:46:26 +02001848 if (driver->max_speed < USB_SPEED_HIGH) {
1849 retval = -EINVAL;
1850 goto err;
1851 }
Felipe Balbi550a7372008-07-24 12:27:36 +03001852
Hema HK7acc6192011-02-28 14:19:34 +05301853 pm_runtime_get_sync(musb->controller);
1854
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001855 musb->softconnect = 0;
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001856 musb->gadget_driver = driver;
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001857
1858 spin_lock_irqsave(&musb->lock, flags);
Greg Kroah-Hartman43e699c2013-10-14 13:06:15 -07001859 musb->is_active = 1;
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001860
Heikki Krogerus6e13c652012-02-13 13:24:20 +02001861 otg_set_peripheral(otg, &musb->g);
Antoine Tenarte47d9252014-10-30 18:41:13 +01001862 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
Felipe Balbi550a7372008-07-24 12:27:36 +03001863 spin_unlock_irqrestore(&musb->lock, flags);
1864
Sebastian Andrzej Siewior001dd842013-10-11 10:38:13 +02001865 musb_start(musb);
1866
Felipe Balbi032ec492011-11-24 15:46:26 +02001867 /* REVISIT: funcall to other code, which also
1868 * handles power budgeting ... this way also
1869 * ensures HdrcStart is indirectly called.
1870 */
Grazvydas Ignotasb65ae0f2013-03-24 17:36:55 +02001871 if (musb->xceiv->last_event == USB_EVENT_ID)
1872 musb_platform_set_vbus(musb, 1);
Felipe Balbi032ec492011-11-24 15:46:26 +02001873
Jarkko Nikulacdefce12011-04-29 16:17:35 +03001874 if (musb->xceiv->last_event == USB_EVENT_NONE)
1875 pm_runtime_put(musb->controller);
Felipe Balbi550a7372008-07-24 12:27:36 +03001876
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001877 return 0;
1878
Felipe Balbi032ec492011-11-24 15:46:26 +02001879err:
Felipe Balbi550a7372008-07-24 12:27:36 +03001880 return retval;
1881}
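/*
 * Illustrative sketch (hypothetical; all names are placeholders):
 * function drivers do not call musb_gadget_start() directly. They
 * register with the UDC core, which binds them to an available
 * controller and then invokes the udc_start hook above, roughly:
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.max_speed = USB_SPEED_HIGH,	(musb rejects anything less)
 *		.bind	   = my_bind,
 *		.unbind	   = my_unbind,
 *		.setup	   = my_setup,
 *	};
 *
 *	ret = usb_gadget_probe_driver(&my_driver);
 */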
Felipe Balbi550a7372008-07-24 12:27:36 +03001882
Felipe Balbi550a7372008-07-24 12:27:36 +03001883/*
1884 * Unregister the gadget driver. Invoked via the UDC core when a
1885 * gadget driver is unbound from this controller.
1886 *
1887 * @param g the gadget whose driver is being unregistered
1888 */
Felipe Balbi22835b82014-10-17 12:05:12 -05001889static int musb_gadget_stop(struct usb_gadget *g)
Felipe Balbi550a7372008-07-24 12:27:36 +03001890{
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001891 struct musb *musb = gadget_to_musb(g);
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001892 unsigned long flags;
Felipe Balbi550a7372008-07-24 12:27:36 +03001893
Hema HK7acc6192011-02-28 14:19:34 +05301894 if (musb->xceiv->last_event == USB_EVENT_NONE)
1895 pm_runtime_get_sync(musb->controller);
1896
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001897 /*
1898 * REVISIT always use otg_set_peripheral() here too;
Felipe Balbi550a7372008-07-24 12:27:36 +03001899 * this needs to shut down the OTG engine.
1900 */
1901
1902 spin_lock_irqsave(&musb->lock, flags);
1903
Felipe Balbi550a7372008-07-24 12:27:36 +03001904 musb_hnp_stop(musb);
Felipe Balbi550a7372008-07-24 12:27:36 +03001905
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001906 (void) musb_gadget_vbus_draw(&musb->g, 0);
Felipe Balbi550a7372008-07-24 12:27:36 +03001907
Antoine Tenarte47d9252014-10-30 18:41:13 +01001908 musb->xceiv->otg->state = OTG_STATE_UNDEFINED;
Felipe Balbid5638fc2015-02-02 17:14:12 -06001909 musb_stop(musb);
Heikki Krogerus6e13c652012-02-13 13:24:20 +02001910 otg_set_peripheral(musb->xceiv->otg, NULL);
Felipe Balbi550a7372008-07-24 12:27:36 +03001911
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001912 musb->is_active = 0;
Grazvydas Ignotase21de102013-03-10 02:49:14 +02001913 musb->gadget_driver = NULL;
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001914 musb_platform_try_idle(musb, 0);
Felipe Balbi550a7372008-07-24 12:27:36 +03001915 spin_unlock_irqrestore(&musb->lock, flags);
1916
Felipe Balbi032ec492011-11-24 15:46:26 +02001917 /*
1918 * FIXME we need to be able to register another
1919 * gadget driver here and have everything work;
1920 * that currently misbehaves.
1921 */
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001922
Hema HK7acc6192011-02-28 14:19:34 +05301923 pm_runtime_put(musb->controller);
1924
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001925 return 0;
Felipe Balbi550a7372008-07-24 12:27:36 +03001926}
Felipe Balbi550a7372008-07-24 12:27:36 +03001927
1928/* ----------------------------------------------------------------------- */
1929
1930/* lifecycle operations called through plat_uds.c */
1931
1932void musb_g_resume(struct musb *musb)
1933{
1934 musb->is_suspended = 0;
Antoine Tenarte47d9252014-10-30 18:41:13 +01001935 switch (musb->xceiv->otg->state) {
Felipe Balbi550a7372008-07-24 12:27:36 +03001936 case OTG_STATE_B_IDLE:
1937 break;
1938 case OTG_STATE_B_WAIT_ACON:
1939 case OTG_STATE_B_PERIPHERAL:
1940 musb->is_active = 1;
1941 if (musb->gadget_driver && musb->gadget_driver->resume) {
1942 spin_unlock(&musb->lock);
1943 musb->gadget_driver->resume(&musb->g);
1944 spin_lock(&musb->lock);
1945 }
1946 break;
1947 default:
1948 WARNING("unhandled RESUME transition (%s)\n",
Antoine Tenarte47d9252014-10-30 18:41:13 +01001949 usb_otg_state_string(musb->xceiv->otg->state));
Felipe Balbi550a7372008-07-24 12:27:36 +03001950 }
1951}
1952
1953/* called when SOF packets stop for 3+ msec */
1954void musb_g_suspend(struct musb *musb)
1955{
1956 u8 devctl;
1957
1958 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001959 dev_dbg(musb->controller, "devctl %02x\n", devctl);
Felipe Balbi550a7372008-07-24 12:27:36 +03001960
Antoine Tenarte47d9252014-10-30 18:41:13 +01001961 switch (musb->xceiv->otg->state) {
Felipe Balbi550a7372008-07-24 12:27:36 +03001962 case OTG_STATE_B_IDLE:
1963 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
Antoine Tenarte47d9252014-10-30 18:41:13 +01001964 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
Felipe Balbi550a7372008-07-24 12:27:36 +03001965 break;
1966 case OTG_STATE_B_PERIPHERAL:
1967 musb->is_suspended = 1;
1968 if (musb->gadget_driver && musb->gadget_driver->suspend) {
1969 spin_unlock(&musb->lock);
1970 musb->gadget_driver->suspend(&musb->g);
1971 spin_lock(&musb->lock);
1972 }
1973 break;
1974 default:
1975 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
1976 * A_PERIPHERAL may need care too
1977 */
1978 WARNING("unhandled SUSPEND transition (%s)\n",
Antoine Tenarte47d9252014-10-30 18:41:13 +01001979 usb_otg_state_string(musb->xceiv->otg->state));
Felipe Balbi550a7372008-07-24 12:27:36 +03001980 }
1981}
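/*
 * Hedged note with a hypothetical sketch: the suspend/resume hooks
 * above are invoked with musb->lock dropped but still from the
 * controller's interrupt path, so a gadget driver's callbacks must
 * not sleep:
 *
 *	static void my_suspend(struct usb_gadget *g)
 *	{
 *		struct my_dev *dev = get_gadget_data(g);
 *
 *		atomic_set(&dev->suspended, 1);	(no mutexes, no msleep)
 *	}
 */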
1982
1983/* Called during SRP */
1984void musb_g_wakeup(struct musb *musb)
1985{
1986 musb_gadget_wakeup(&musb->g);
1987}
1988
1989/* called when VBUS drops below session threshold, and in other cases */
1990void musb_g_disconnect(struct musb *musb)
1991{
1992 void __iomem *mregs = musb->mregs;
1993 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
1994
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001995 dev_dbg(musb->controller, "devctl %02x\n", devctl);
Felipe Balbi550a7372008-07-24 12:27:36 +03001996
1997 /* clear HR */
1998 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
1999
2000 /* don't draw vbus until new b-default session */
2001 (void) musb_gadget_vbus_draw(&musb->g, 0);
2002
2003 musb->g.speed = USB_SPEED_UNKNOWN;
2004 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2005 spin_unlock(&musb->lock);
2006 musb->gadget_driver->disconnect(&musb->g);
2007 spin_lock(&musb->lock);
2008 }
2009
Antoine Tenarte47d9252014-10-30 18:41:13 +01002010 switch (musb->xceiv->otg->state) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002011 default:
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03002012 dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
Antoine Tenarte47d9252014-10-30 18:41:13 +01002013 usb_otg_state_string(musb->xceiv->otg->state));
2014 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
David Brownellab983f2a2009-03-31 12:35:09 -07002015 MUSB_HST_MODE(musb);
Felipe Balbi550a7372008-07-24 12:27:36 +03002016 break;
2017 case OTG_STATE_A_PERIPHERAL:
Antoine Tenarte47d9252014-10-30 18:41:13 +01002018 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
David Brownellab983f2a2009-03-31 12:35:09 -07002019 MUSB_HST_MODE(musb);
Felipe Balbi550a7372008-07-24 12:27:36 +03002020 break;
2021 case OTG_STATE_B_WAIT_ACON:
2022 case OTG_STATE_B_HOST:
Felipe Balbi550a7372008-07-24 12:27:36 +03002023 case OTG_STATE_B_PERIPHERAL:
2024 case OTG_STATE_B_IDLE:
Antoine Tenarte47d9252014-10-30 18:41:13 +01002025 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
Felipe Balbi550a7372008-07-24 12:27:36 +03002026 break;
2027 case OTG_STATE_B_SRP_INIT:
2028 break;
2029 }
2030
2031 musb->is_active = 0;
2032}
2033
2034void musb_g_reset(struct musb *musb)
2035__releases(musb->lock)
2036__acquires(musb->lock)
2037{
2038 void __iomem *mbase = musb->mregs;
2039 u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
2040 u8 power;
2041
Sebastian Andrzej Siewior515ba292012-10-30 19:52:24 +01002042 dev_dbg(musb->controller, "<== %s driver '%s'\n",
Felipe Balbi550a7372008-07-24 12:27:36 +03002043 (devctl & MUSB_DEVCTL_BDEVICE)
2044 ? "B-Device" : "A-Device",
Felipe Balbi550a7372008-07-24 12:27:36 +03002045 musb->gadget_driver
2046 ? musb->gadget_driver->driver.name
2047 : NULL
2048 );
2049
Felipe Balbi1189f7f2014-11-06 14:27:54 +08002050 /* report reset, if we didn't already (flushing EP state) */
2051 if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
2052 spin_unlock(&musb->lock);
2053 usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
2054 spin_lock(&musb->lock);
2055 }
Felipe Balbi550a7372008-07-24 12:27:36 +03002056
2057 /* clear HR */
2058 else if (devctl & MUSB_DEVCTL_HR)
2059 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2060
2061
2062 /* what speed did we negotiate? */
2063 power = musb_readb(mbase, MUSB_POWER);
2064 musb->g.speed = (power & MUSB_POWER_HSMODE)
2065 ? USB_SPEED_HIGH : USB_SPEED_FULL;
2066
2067 /* start in USB_STATE_DEFAULT */
2068 musb->is_active = 1;
2069 musb->is_suspended = 0;
2070 MUSB_DEV_MODE(musb);
2071 musb->address = 0;
2072 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2073
2074 musb->may_wakeup = 0;
2075 musb->g.b_hnp_enable = 0;
2076 musb->g.a_alt_hnp_support = 0;
2077 musb->g.a_hnp_support = 0;
Robert Baldygaca1023c2015-07-28 07:20:00 +02002078 musb->g.quirk_zlp_not_supp = 1;
Felipe Balbi550a7372008-07-24 12:27:36 +03002079
2080 /* Normal reset, as B-Device;
2081 * or else after HNP, as A-Device
2082 */
Apelete Seketeli23db9fd2013-12-19 21:42:27 +01002083 if (!musb->g.is_otg) {
2084 /* USB device controllers that are not OTG compatible
2085 * may not have DEVCTL register in silicon.
2086 * In that case, do not rely on devctl for setting
2087 * peripheral mode.
2088 */
Antoine Tenarte47d9252014-10-30 18:41:13 +01002089 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
Apelete Seketeli23db9fd2013-12-19 21:42:27 +01002090 musb->g.is_a_peripheral = 0;
2091 } else if (devctl & MUSB_DEVCTL_BDEVICE) {
Antoine Tenarte47d9252014-10-30 18:41:13 +01002092 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
Felipe Balbi550a7372008-07-24 12:27:36 +03002093 musb->g.is_a_peripheral = 0;
Felipe Balbi032ec492011-11-24 15:46:26 +02002094 } else {
Antoine Tenarte47d9252014-10-30 18:41:13 +01002095 musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
Felipe Balbi550a7372008-07-24 12:27:36 +03002096 musb->g.is_a_peripheral = 1;
Felipe Balbi032ec492011-11-24 15:46:26 +02002097 }
Felipe Balbi550a7372008-07-24 12:27:36 +03002098
2099 /* start with default limits on VBUS power draw */
Felipe Balbi032ec492011-11-24 15:46:26 +02002100 (void) musb_gadget_vbus_draw(&musb->g, 8);
Felipe Balbi550a7372008-07-24 12:27:36 +03002101}