#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "musb_core.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREG_NONE		0
#define EP_MODE_AUTOREG_ALL_NEOP	1
#define EP_MODE_AUTOREG_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

struct cppi41_dma_channel {
	struct dma_channel channel;
	struct cppi41_dma_controller *controller;
	struct musb_hw_ep *hw_ep;
	struct dma_chan *dc;
	dma_cookie_t cookie;
	u8 port_num;
	u8 is_tx;
	u8 is_allocated;
	u8 usb_toggle;

	dma_addr_t buf_addr;
	u32 total_len;
	u32 prog_len;
	u32 transferred;
	u32 packet_sz;
	struct list_head tx_check;
	struct work_struct dma_completion;
};

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

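/*
 * Remember the current RX data toggle (MUSB_RXCSR_H_DATATOGGLE) so that
 * update_rx_toggle() can detect an unexpected toggle reset once the DMA
 * transfer completes. Only relevant for RX transfers in host mode.
 */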
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		dev_dbg(cppi41_channel->controller->musb->controller,
				"Restoring DATA1 toggle.\n");
	}

	cppi41_channel->usb_toggle = toggle;
}

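/* Check whether the endpoint's TX FIFO has been drained (TXPKTRDY cleared). */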
static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

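/*
 * Return true if the queue head for the given direction describes an
 * isochronous endpoint.
 */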
static bool is_isoc(struct musb_hw_ep *hw_ep, bool in)
{
	if (in && hw_ep->in_qh) {
		if (hw_ep->in_qh->type == USB_ENDPOINT_XFER_ISOC)
			return true;
	} else if (hw_ep->out_qh) {
		if (hw_ep->out_qh->type == USB_ENDPOINT_XFER_ISOC)
			return true;
	}
	return false;
}

static void cppi41_dma_callback(void *private_data);

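/*
 * Complete the current transfer if everything has been programmed, or
 * program the DMA engine with the next chunk (at most one packet, see
 * AM335x Advisory 1.0.13) and re-arm REQPKT for host RX.
 * Must be called with musb->lock held.
 */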
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u16 csr;
		u32 remain_bytes;
		void __iomem *epio = cppi41_channel->hw_ep->regs;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

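/*
 * Deferred completion for isochronous endpoints: RX completes right away,
 * TX waits (rescheduling itself if needed) until the FIFO has actually
 * drained.
 */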
static void cppi_trans_done_work(struct work_struct *work)
{
	unsigned long flags;
	struct cppi41_dma_channel *cppi41_channel =
		container_of(work, struct cppi41_dma_channel, dma_completion);
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	bool empty;

	if (!cppi41_channel->is_tx && is_isoc(hw_ep, 1)) {
		spin_lock_irqsave(&musb->lock, flags);
		cppi41_trans_done(cppi41_channel);
		spin_unlock_irqrestore(&musb->lock, flags);
	} else {
		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			spin_lock_irqsave(&musb->lock, flags);
			cppi41_trans_done(cppi41_channel);
			spin_unlock_irqrestore(&musb->lock, flags);
		} else {
			schedule_work(&cppi41_channel->dma_completion);
		}
	}
}

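/*
 * hrtimer callback: walk early_tx_list and complete every TX channel whose
 * FIFO has drained since the DMA interrupt fired. Restart the timer as long
 * as channels are still waiting.
 */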
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx,
				ktime_set(0, 150 * NSEC_PER_USEC));
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

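/*
 * dmaengine completion callback. Updates the transfer bookkeeping and then
 * decides how to finish: RX completes immediately (isochronous RX via the
 * workqueue), while TX completes only once the FIFO is empty, either by
 * briefly busy-waiting on high speed or by deferring to the early_tx
 * hrtimer (or the workqueue for isochronous TX).
 */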
static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (!cppi41_channel->is_tx) {
		if (is_isoc(hw_ep, 1))
			schedule_work(&cppi41_channel->dma_completion);
		else
			cppi41_trans_done(cppi41_channel);
		goto out;
	}

	empty = musb_is_tx_fifo_empty(hw_ep);
	if (empty) {
		cppi41_trans_done(cppi41_channel);
	} else {
		struct cppi41_dma_controller *controller;
		/*
		 * On AM335x it has been observed that the TX interrupt fires
		 * too early, i.e. the TX FIFO is not yet empty even though
		 * the DMA engine reports that it is done with the transfer.
		 * We don't get a FIFO-empty interrupt, so the only thing we
		 * can do is poll for the bit. On HS it usually takes 2us, on
		 * FS around 110us - 150us depending on the transfer size.
		 * We spin on HS (no longer than 25us) and set up a timer on
		 * FS to check for the bit and complete the transfer.
		 */
		controller = cppi41_channel->controller;

		if (musb->g.speed == USB_SPEED_HIGH) {
			unsigned wait = 25;

			do {
				empty = musb_is_tx_fifo_empty(hw_ep);
				if (empty)
					break;
				wait--;
				if (!wait)
					break;
				udelay(1);
			} while (1);

			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
		}
		if (is_isoc(hw_ep, 0)) {
			schedule_work(&cppi41_channel->dma_completion);
			goto out;
		}
		list_add_tail(&cppi41_channel->tx_check,
				&controller->early_tx_list);
		if (!hrtimer_active(&controller->early_tx)) {
			hrtimer_start_range_ns(&controller->early_tx,
				ktime_set(0, 140 * NSEC_PER_USEC),
				40 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
		}
	}
out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

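/*
 * Replace the two mode bits for the given endpoint in a packed per-endpoint
 * mode register value (two bits per endpoint, endpoint 1 at bits 1:0).
 */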
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
			new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
			new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

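/*
 * Program one DMA transfer: pick the DMA/autoreq mode (generic RNDIS for
 * multi-packet TX, transparent otherwise), prepare and submit the dmaengine
 * descriptor and kick it off. Returns false if no descriptor could be
 * prepared.
 */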
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	dev_dbg(musb->controller,
		"configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
		packet_sz, mode, (unsigned long long) dma_addr,
		len, cppi41_channel->is_tx);

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;

	/*
	 * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

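/* Hand out the pre-allocated channel matching the hw endpoint and direction. */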
static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

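/*
 * Tell the MUSB core whether this buffer may be handed to the DMA engine.
 * In gadget mode only bulk TX is accepted; bulk RX stays on PIO because of
 * AM335x Advisory 1.0.13.
 */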
static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

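/*
 * Abort a programmed transfer: disable DMA at the endpoint, flush stale
 * packets from the FIFO, and tear the channel down via the USB_TDOWN
 * register together with dmaengine_terminate_all() (retried on -EAGAIN).
 */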
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

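/*
 * Parse the "dma-names" DT property ("tx1", "rx1", ...) and request the
 * matching dmaengine slave channel for every MUSB endpoint. Returns
 * -EPROBE_DEFER when the CPPI 4.1 DMA engine is not yet available.
 */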
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (!strncmp(str, "tx", 2))
			is_tx = 1;
		else if (!strncmp(str, "rx", 2))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			ret = -EINVAL;
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);
		INIT_WORK(&cppi41_channel->dma_completion,
				cppi_trans_done_work);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}

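/*
 * Allocate and initialise the CPPI 4.1 DMA controller for this MUSB
 * instance. Requires a device-tree node describing the DMA channels;
 * returns ERR_PTR(-EPROBE_DEFER) if the DMA engine has not been probed yet
 * and NULL on any other failure.
 */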
struct dma_controller *dma_controller_create(struct musb *musb,
					void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}