blob: 1fe7eaeb097b924f01f5662452809a84dd6c93d9 [file] [log] [blame]
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001#include <linux/device.h>
2#include <linux/dma-mapping.h>
3#include <linux/dmaengine.h>
4#include <linux/sizes.h>
5#include <linux/platform_device.h>
6#include <linux/of.h>
7
Bin Liu239d2212016-06-30 12:12:29 -05008#include "cppi_dma.h"
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02009#include "musb_core.h"
Bin Liu8ccb49d2016-06-30 12:12:30 -050010#include "musb_trace.h"
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +020011
/*
 * Per-endpoint RNDIS size register in the glue layer's register space:
 * 0x80 for endpoint 1, 0x84 for endpoint 2, ... up to endpoint 15.
 * The argument is fully parenthesized so expression arguments expand
 * correctly (macro hygiene fix: was "(x - 1)").
 */
#define RNDIS_REG(x)		(0x80 + (((x) - 1) * 4))

/* Per-endpoint auto-request modes packed into USB_CTRL_AUTOREQ */
#define EP_MODE_AUTOREQ_NONE		0
#define EP_MODE_AUTOREQ_ALL_NEOP	1
#define EP_MODE_AUTOREQ_ALWAYS		3

/* Per-endpoint DMA modes packed into USB_CTRL_TX_MODE/USB_CTRL_RX_MODE */
#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

/* Register offsets within the controller's ctrl_base region */
#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

/* One DMA channel per hardware endpoint (epnum 1..15) and direction */
#define MUSB_DMA_NUM_CHANNELS 15
28
struct cppi41_dma_controller {
	/* musb-facing interface; must be first so container_of() works */
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	/* polls for TX FIFO drain when DMA completes before the FIFO empties */
	struct hrtimer early_tx;
	/* TX channels waiting for the early_tx timer to see an empty FIFO */
	struct list_head early_tx_list;
	u32 rx_mode;	/* cached USB_CTRL_RX_MODE register value */
	u32 tx_mode;	/* cached USB_CTRL_TX_MODE register value */
	u32 auto_req;	/* cached USB_CTRL_AUTOREQ register value */
};
40
41static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
42{
43 u16 csr;
44 u8 toggle;
45
46 if (cppi41_channel->is_tx)
47 return;
48 if (!is_host_active(cppi41_channel->controller->musb))
49 return;
50
51 csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
52 toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
53
54 cppi41_channel->usb_toggle = toggle;
55}
56
/*
 * Restore the host RX data toggle after a DMA transfer completes.
 *
 * Per AM335x Advisory 1.0.13 the toggle may spuriously reset from DATA1
 * to DATA0 while receiving from more than one endpoint.  If the toggle
 * reads 0 now and was also saved as 0 before the transfer started (see
 * save_rx_toggle()), force it back to DATA1.  No-op for TX channels and
 * in peripheral mode.
 */
static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to internal synchronisation error the
	 * data toggle may reset from DATA1 to DATA0 during receiving data from
	 * more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		musb_dbg(cppi41_channel->controller->musb,
				"Restoring DATA1 toggle.");
	}

	cppi41_channel->usb_toggle = toggle;
}
87
Sebastian Andrzej Siewiora655f482013-11-12 16:37:47 +010088static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
89{
90 u8 epnum = hw_ep->epnum;
91 struct musb *musb = hw_ep->musb;
92 void __iomem *epio = musb->endpoints[epnum].regs;
93 u16 csr;
94
Daniel Mackf50e6782014-05-26 14:52:39 +020095 musb_ep_select(musb->mregs, hw_ep->epnum);
Sebastian Andrzej Siewiora655f482013-11-12 16:37:47 +010096 csr = musb_readw(epio, MUSB_TXCSR);
97 if (csr & MUSB_TXCSR_TXPKTRDY)
98 return false;
99 return true;
100}
101
Alexandre Bailoned232c02017-02-06 22:53:52 -0600102static void cppi41_dma_callback(void *private_data,
103 const struct dmaengine_result *result);
Sebastian Andrzej Siewiord373a852013-11-12 16:37:46 +0100104
/*
 * Finish or continue a transfer whose dmaengine descriptor completed.
 *
 * If the request is complete (prog_len was zeroed by the callback when
 * the total length was reached or a short packet arrived, or the channel
 * was freed), report completion to the musb core — transmitting a ZLP by
 * PIO first when a mode-1 TX ended on a packet-size boundary.  Otherwise
 * queue the next packet-sized chunk: AM335x Advisory 1.0.13 limits RX to
 * one packet per descriptor.  Called with musb->lock held.
 */
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;

		/*
		 * transmit ZLP using PIO mode for transfers which size is
		 * multiple of EP packet size.
		 */
		if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
					cppi41_channel->packet_sz) == 0) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
		}

		trace_musb_cppi41_done(cppi41_channel);
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u32 remain_bytes;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		/* at most one packet per descriptor (Advisory 1.0.13) */
		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback_result = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		trace_musb_cppi41_cont(cppi41_channel);
		dma_async_issue_pending(dc);

		/* host RX: ask the device for the next packet */
		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}
172
/*
 * hrtimer handler armed by cppi41_dma_callback() when a TX DMA completed
 * before the MUSB TX FIFO drained (full speed, or the high-speed spin
 * timed out): finish every queued channel whose FIFO is now empty and
 * re-arm the timer while any channel is still waiting.
 */
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	/* channels whose FIFO is still busy stay queued; poll again in 20us */
	if (!list_empty(&controller->early_tx_list) &&
	    !hrtimer_is_queued(&controller->early_tx)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
207
/*
 * dmaengine completion callback; @private_data is the struct dma_channel
 * programmed by cppi41_configure_channel()/cppi41_trans_done().
 *
 * Accounts the bytes moved by the finished descriptor and marks the
 * request done when the total length was reached or a short packet
 * arrived.  TX completion additionally has to wait for the MUSB TX FIFO
 * to drain, since the DMA interrupt can fire before the FIFO is empty:
 * spin briefly on high speed, otherwise hand the channel to the
 * early_tx hrtimer poll.
 */
static void cppi41_dma_callback(void *private_data,
				const struct dmaengine_result *result)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct cppi41_dma_controller *controller;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	int is_hs = 0;
	bool empty;

	controller = cppi41_channel->controller;
	/* let the glue layer's hook run first, if one is registered */
	if (controller->controller.dma_callback)
		controller->controller.dma_callback(&controller->controller);

	/* teardown in progress; cppi41_dma_channel_abort() cleans up */
	if (result->result == DMA_TRANS_ABORTED)
		return;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	trace_musb_cppi41_gb(cppi41_channel);
	update_rx_toggle(cppi41_channel);

	/* done when the full request, or a short packet, was transferred */
	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (cppi41_channel->is_tx)
		empty = musb_is_tx_fifo_empty(hw_ep);

	if (!cppi41_channel->is_tx || empty) {
		cppi41_trans_done(cppi41_channel);
		goto out;
	}

	/*
	 * On AM335x it has been observed that the TX interrupt fires
	 * too early that means the TXFIFO is not yet empty but the DMA
	 * engine says that it is done with the transfer. We don't
	 * receive a FIFO empty interrupt so the only thing we can do is
	 * to poll for the bit. On HS it usually takes 2us, on FS around
	 * 110us - 150us depending on the transfer size.
	 * We spin on HS (no longer than 25us) and setup a timer on
	 * FS to check for the bit and complete the transfer.
	 */
	if (is_host_active(musb)) {
		if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
			is_hs = 1;
	} else {
		if (musb->g.speed == USB_SPEED_HIGH)
			is_hs = 1;
	}
	if (is_hs) {
		unsigned wait = 25;

		do {
			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
			wait--;
			if (!wait)
				break;
			cpu_relax();
		} while (1);
	}
	/* FS, or the HS spin timed out: let the hrtimer poll for FIFO empty */
	list_add_tail(&cppi41_channel->tx_check,
			&controller->early_tx_list);
	if (!hrtimer_is_queued(&controller->early_tx)) {
		unsigned long usecs = cppi41_channel->total_len / 10;

		hrtimer_start_range_ns(&controller->early_tx,
				       usecs * NSEC_PER_USEC,
				       20 * NSEC_PER_USEC,
				       HRTIMER_MODE_REL);
	}

out:
	spin_unlock_irqrestore(&musb->lock, flags);
}
297
298static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
299{
300 unsigned shift;
301
302 shift = (ep - 1) * 2;
303 old &= ~(3 << shift);
304 old |= mode << shift;
305 return old;
306}
307
308static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
309 unsigned mode)
310{
311 struct cppi41_dma_controller *controller = cppi41_channel->controller;
312 u32 port;
313 u32 new_mode;
314 u32 old_mode;
315
316 if (cppi41_channel->is_tx)
317 old_mode = controller->tx_mode;
318 else
319 old_mode = controller->rx_mode;
320 port = cppi41_channel->port_num;
321 new_mode = update_ep_mode(port, mode, old_mode);
322
323 if (new_mode == old_mode)
324 return;
325 if (cppi41_channel->is_tx) {
326 controller->tx_mode = new_mode;
327 musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
328 new_mode);
329 } else {
330 controller->rx_mode = new_mode;
331 musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
332 new_mode);
333 }
334}
335
336static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
337 unsigned mode)
338{
339 struct cppi41_dma_controller *controller = cppi41_channel->controller;
340 u32 port;
341 u32 new_mode;
342 u32 old_mode;
343
344 old_mode = controller->auto_req;
345 port = cppi41_channel->port_num;
346 new_mode = update_ep_mode(port, mode, old_mode);
347
348 if (new_mode == old_mode)
349 return;
350 controller->auto_req = new_mode;
351 musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
352}
353
/*
 * Program a single dmaengine transfer for @channel.
 *
 * TX always uses generic RNDIS mode so a multi-packet transfer fits one
 * descriptor; RX falls back to transparent mode with @len clamped to one
 * packet, per AM335x Advisory 1.0.13 (see also cppi41_trans_done(),
 * which reloads the next RX packet).  Returns true when the descriptor
 * was queued, false if descriptor preparation failed.
 */
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;
	/* a trailing ZLP is only needed for mode-1 TX (cppi41_trans_done()) */
	cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

	/*
	 * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback_result = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	trace_musb_cppi41_config(cppi41_channel);

	/* snapshot the toggle before the hardware starts (Advisory 1.0.13) */
	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}
422
423static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
424 struct musb_hw_ep *hw_ep, u8 is_tx)
425{
426 struct cppi41_dma_controller *controller = container_of(c,
427 struct cppi41_dma_controller, controller);
428 struct cppi41_dma_channel *cppi41_channel = NULL;
429 u8 ch_num = hw_ep->epnum - 1;
430
431 if (ch_num >= MUSB_DMA_NUM_CHANNELS)
432 return NULL;
433
434 if (is_tx)
435 cppi41_channel = &controller->tx_channel[ch_num];
436 else
437 cppi41_channel = &controller->rx_channel[ch_num];
438
439 if (!cppi41_channel->dc)
440 return NULL;
441
442 if (cppi41_channel->is_allocated)
443 return NULL;
444
445 cppi41_channel->hw_ep = hw_ep;
446 cppi41_channel->is_allocated = 1;
447
Bin Liu8ccb49d2016-06-30 12:12:30 -0500448 trace_musb_cppi41_alloc(cppi41_channel);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200449 return &cppi41_channel->channel;
450}
451
452static void cppi41_dma_channel_release(struct dma_channel *channel)
453{
454 struct cppi41_dma_channel *cppi41_channel = channel->private_data;
455
Bin Liu8ccb49d2016-06-30 12:12:30 -0500456 trace_musb_cppi41_free(cppi41_channel);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200457 if (cppi41_channel->is_allocated) {
458 cppi41_channel->is_allocated = 0;
459 channel->status = MUSB_DMA_STATUS_FREE;
460 channel->actual_len = 0;
461 }
462}
463
464static int cppi41_dma_channel_program(struct dma_channel *channel,
465 u16 packet_sz, u8 mode,
466 dma_addr_t dma_addr, u32 len)
467{
468 int ret;
George Cherianf82503f2014-01-27 15:07:25 +0530469 struct cppi41_dma_channel *cppi41_channel = channel->private_data;
470 int hb_mult = 0;
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200471
472 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
473 channel->status == MUSB_DMA_STATUS_BUSY);
474
George Cherianf82503f2014-01-27 15:07:25 +0530475 if (is_host_active(cppi41_channel->controller->musb)) {
476 if (cppi41_channel->is_tx)
477 hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
478 else
479 hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
480 }
481
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200482 channel->status = MUSB_DMA_STATUS_BUSY;
483 channel->actual_len = 0;
George Cherianf82503f2014-01-27 15:07:25 +0530484
485 if (hb_mult)
486 packet_sz = hb_mult * (packet_sz & 0x7FF);
487
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200488 ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
489 if (!ret)
490 channel->status = MUSB_DMA_STATUS_FREE;
491
492 return ret;
493}
494
495static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
496 void *buf, u32 length)
497{
498 struct cppi41_dma_channel *cppi41_channel = channel->private_data;
499 struct cppi41_dma_controller *controller = cppi41_channel->controller;
500 struct musb *musb = controller->musb;
501
502 if (is_host_active(musb)) {
503 WARN_ON(1);
504 return 1;
505 }
Sebastian Andrzej Siewiora655f482013-11-12 16:37:47 +0100506 if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
507 return 0;
Sebastian Andrzej Siewior13266fe2013-08-13 19:38:24 +0200508 if (cppi41_channel->is_tx)
509 return 1;
510 /* AM335x Advisory 1.0.13. No workaround for device RX mode */
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200511 return 0;
512}
513
/*
 * Abort a programmed transfer on @channel.
 *
 * Disables DMA at the MUSB endpoint (plus autoreq for RX), flushes a
 * pending RX packet if one already sits in the FIFO, then tears down
 * the cppi41 queue: the USB_TDOWN teardown bit and
 * dmaengine_terminate_all() are retried until the latter stops
 * returning -EAGAIN.  Always returns 0.
 */
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	trace_musb_cppi41_abort(cppi41_channel);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	/* stop waiting for the early-TX hrtimer poll, if queued */
	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

		/* delay to drain to cppi dma pipeline for isoch */
		udelay(250);

		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* wait to drain cppi dma pipe line */
		udelay(50);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			/*
			 * NOTE(review): FLUSHFIFO is written twice —
			 * presumably for double-buffered FIFOs; confirm
			 * against the MUSB programming guide.
			 */
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* TX teardown bits occupy the upper half of USB_TDOWN */
	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		if (is_tx)
			musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	if (is_tx) {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}
580
581static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
582{
583 struct dma_chan *dc;
584 int i;
585
586 for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
587 dc = ctrl->tx_channel[i].dc;
588 if (dc)
589 dma_release_channel(dc);
590 dc = ctrl->rx_channel[i].dc;
591 if (dc)
592 dma_release_channel(dc);
593 }
594}
595
/* Undo cppi41_dma_controller_start(): drop all dmaengine channels. */
static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}
600
601static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
602{
603 struct musb *musb = controller->musb;
604 struct device *dev = musb->controller;
Felipe Balbib0a688d2015-08-06 10:51:29 -0500605 struct device_node *np = dev->parent->of_node;
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200606 struct cppi41_dma_channel *cppi41_channel;
607 int count;
608 int i;
609 int ret;
610
611 count = of_property_count_strings(np, "dma-names");
612 if (count < 0)
613 return count;
614
615 for (i = 0; i < count; i++) {
616 struct dma_chan *dc;
617 struct dma_channel *musb_dma;
618 const char *str;
619 unsigned is_tx;
620 unsigned int port;
621
622 ret = of_property_read_string_index(np, "dma-names", i, &str);
623 if (ret)
624 goto err;
Rasmus Villemoese87c3f82014-11-27 22:25:45 +0100625 if (strstarts(str, "tx"))
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200626 is_tx = 1;
Rasmus Villemoese87c3f82014-11-27 22:25:45 +0100627 else if (strstarts(str, "rx"))
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200628 is_tx = 0;
629 else {
630 dev_err(dev, "Wrong dmatype %s\n", str);
631 goto err;
632 }
633 ret = kstrtouint(str + 2, 0, &port);
634 if (ret)
635 goto err;
636
Sebastian Andrzej Siewior48054142013-10-16 12:50:08 +0200637 ret = -EINVAL;
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200638 if (port > MUSB_DMA_NUM_CHANNELS || !port)
639 goto err;
640 if (is_tx)
641 cppi41_channel = &controller->tx_channel[port - 1];
642 else
643 cppi41_channel = &controller->rx_channel[port - 1];
644
645 cppi41_channel->controller = controller;
646 cppi41_channel->port_num = port;
647 cppi41_channel->is_tx = is_tx;
Sebastian Andrzej Siewiora655f482013-11-12 16:37:47 +0100648 INIT_LIST_HEAD(&cppi41_channel->tx_check);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200649
650 musb_dma = &cppi41_channel->channel;
651 musb_dma->private_data = cppi41_channel;
652 musb_dma->status = MUSB_DMA_STATUS_FREE;
653 musb_dma->max_len = SZ_4M;
654
Felipe Balbib0a688d2015-08-06 10:51:29 -0500655 dc = dma_request_slave_channel(dev->parent, str);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200656 if (!dc) {
Rahul Bedarkar5ae477b2014-01-02 19:27:47 +0530657 dev_err(dev, "Failed to request %s.\n", str);
Sebastian Andrzej Siewior48054142013-10-16 12:50:08 +0200658 ret = -EPROBE_DEFER;
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200659 goto err;
660 }
661 cppi41_channel->dc = dc;
662 }
663 return 0;
664err:
665 cppi41_release_all_dma_chans(controller);
Sebastian Andrzej Siewior48054142013-10-16 12:50:08 +0200666 return ret;
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200667}
668
/*
 * Free a controller made by cppi41_dma_controller_create(): cancel the
 * early-TX hrtimer before the memory it lives in is freed, release all
 * dmaengine channels, then free the controller itself.
 */
void cppi41_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200679
/*
 * Allocate and initialize the cppi41 DMA controller for @musb.
 *
 * Returns the embedded struct dma_controller on success, NULL on plain
 * failure, or ERR_PTR(-EPROBE_DEFER) when the dmaengine channels are
 * not available yet so the caller can retry probing later.
 */
struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	/* channel lookup is DT-only (see cppi41_dma_controller_start()) */
	if (!musb->controller->parent->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;
	controller->controller.musb = musb;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);