/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	struct omap_system_dma_plat_info *plat;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

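	/*
	 * Clear the channel progress register (CPC on OMAP1510, CDAC
	 * elsewhere); omap_dma_get_{src,dst}_pos() treat a zero CDAC as
	 * "transfer not yet started".
	 */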
	if (__dma_omap15xx(od->plat->dma_attr))
		c->plat->dma_write(0, CPC, c->dma_ch);
	else
		c->plat->dma_write(0, CDAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val &= ~(1 << 14);

		val |= c->dma_ch | 1 << 15;

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	} else if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		c->plat->dma_write(c->dma_ch, CLNK_CTRL, c->dma_ch);

	/* Clear CSR */
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);

	/* Enable interrupts */
	c->plat->dma_write(d->cicr, CICR, c->dma_ch);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		val |= OMAP_DMA_CCR_BUFFERING_DISABLE;
	val |= OMAP_DMA_CCR_EN;
	mb();
	c->plat->dma_write(val, CCR, c->dma_ch);
}

static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	c->plat->dma_write(0, CICR, c->dma_ch);

	/* Clear CSR */
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_i541 &&
	    val & OMAP_DMA_CCR_SEL_SRC_DST_SYNC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = c->plat->dma_read(OCP_SYSCONFIG, c->dma_ch);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		c->plat->dma_write(val, OCP_SYSCONFIG, c->dma_ch);

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~OMAP_DMA_CCR_EN;
		c->plat->dma_write(val, CCR, c->dma_ch);

		/* Wait for sDMA FIFO to drain */
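		/* (bounded poll: at most ~100 iterations of udelay(5), i.e. ~500us) */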
		for (i = 0; ; i++) {
			val = c->plat->dma_read(CCR, c->dma_ch);
			if (!(val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		c->plat->dma_write(sysconfig, OCP_SYSCONFIG, c->dma_ch);
	} else {
		val &= ~OMAP_DMA_CCR_EN;
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~(1 << 15); /* Clear the ENABLE_LNK bit */

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	}
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM) {
		c->plat->dma_write(sg->addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(0, CDFI, c->dma_ch);
	} else {
		c->plat->dma_write(sg->addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(0, CSFI, c->dma_ch);
	}

	c->plat->dma_write(sg->en, CEN, c->dma_ch);
	c->plat->dma_write(sg->fn, CFN, c->dma_ch);

	omap_dma_start(c, d);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	uint32_t val;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

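	/*
	 * Set the addressing modes: the device end of the transfer uses a
	 * constant address (typically a peripheral FIFO register), while
	 * the memory end post-increments through the buffer.
	 */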
	if (d->dir == DMA_DEV_TO_MEM) {
		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(0x03 << 14 | 0x03 << 12);
		val |= OMAP_DMA_AMODE_POST_INC << 14;
		val |= OMAP_DMA_AMODE_CONSTANT << 12;
		c->plat->dma_write(val, CCR, c->dma_ch);

		c->plat->dma_write(d->dev_addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(d->fi, CSFI, c->dma_ch);
	} else {
		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(0x03 << 12 | 0x03 << 14);
		val |= OMAP_DMA_AMODE_CONSTANT << 14;
		val |= OMAP_DMA_AMODE_POST_INC << 12;
		c->plat->dma_write(val, CCR, c->dma_ch);

		c->plat->dma_write(d->dev_addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(d->fi, CDFI, c->dma_ch);
	}

	c->plat->dma_write(d->csdp, CSDP, c->dma_ch);

	if (dma_omap1()) {
		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(1 << 5);
		if (d->sync_mode == OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		c->plat->dma_write(val, CCR, c->dma_ch);

		val = c->plat->dma_read(CCR2, c->dma_ch);
		val &= ~(1 << 2);
		if (d->sync_mode == OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 2;
		c->plat->dma_write(val, CCR2, c->dma_ch);
	} else if (c->dma_sig) {
		val = c->plat->dma_read(CCR, c->dma_ch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~(1 << 24 | 1 << 23 | 3 << 19 | 1 << 18 | 1 << 5 | 0x1f);
		val |= (c->dma_sig & ~0x1f) << 14;
		val |= c->dma_sig & 0x1f;

		if (d->sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;

		if (d->sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;

		switch (d->sync_type) {
		case OMAP_DMA_DST_SYNC_PREFETCH:	/* dest synch */
			val |= 1 << 23;			/* Prefetch */
			break;
		case 0:
			break;
		default:
			val |= 1 << 24;			/* source synch */
			break;
		}
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels. We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'.
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (od->plat->errata & DMA_ERRATA_3_3 && addr == 0)
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr)) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (c->plat->dma_read(CDAC, c->dma_ch))
			addr = c->plat->dma_read(CSAC, c->dma_ch);
		else
			addr = c->plat->dma_read(CSSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CSSA, c->dma_ch) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CDAC, c->dma_ch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!__dma_omap15xx(od->plat->dma_attr) && addr == 0) {
		addr = c->plat->dma_read(CDAC, c->dma_ch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (addr == 0)
			addr = c->plat->dma_read(CDSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CDSA, c->dma_ch) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio, and in that case the DMA
		 * needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->cicr = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= OMAP1_DMA_TOUT_IRQ;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= OMAP_DMA_PORT_EMIFF << 9 |
				   OMAP_DMA_PORT_TIPB << 2;
		else
			d->csdp |= OMAP_DMA_PORT_TIPB << 9 |
				   OMAP_DMA_PORT_EMIFF << 2;
	} else {
		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;
	}

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN). Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: the DMA engine API defines burst as the number of dev-width
	 * transfers.
	 */
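	/*
	 * For example, with 32-bit elements (ES = 4 bytes) and a maxburst
	 * of 16, frame_bytes = 64; a 4096-byte segment is then programmed
	 * as EN = 16 elements per frame and FN = 64 frames.
	 */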
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	if (burst)
		d->sync_mode = OMAP_DMA_SYNC_PACKET;
	else
		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
	d->sync_type = sync_type;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;
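	/*
	 * Each period is one frame; for example, an 8 KiB ring of 2 KiB
	 * periods with 32-bit elements gives EN = 512 and FN = 4.
	 */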
	d->cicr = OMAP_DMA_DROP_IRQ;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= OMAP_DMA_FRAME_IRQ;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= OMAP1_DMA_TOUT_IRQ;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= OMAP_DMA_PORT_EMIFF << 9 |
				   OMAP_DMA_PORT_MPUI << 2;
		else
			d->csdp |= OMAP_DMA_PORT_MPUI << 9 |
				   OMAP_DMA_PORT_EMIFF << 2;
	} else {
		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;

		/* src and dst burst mode 16 */
		d->csdp |= 3 << 14 | 3 << 7;
	}

	if (!c->cyclic) {
		c->cyclic = true;

		if (__dma_omap15xx(od->plat->dma_attr)) {
			uint32_t val;

			val = c->plat->dma_read(CCR, c->dma_ch);
			val |= 3 << 8;
			c->plat->dma_write(val, CCR, c->dma_ch);
		}
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&od->lock);
	list_del_init(&c->node);
	spin_unlock(&od->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the DMA twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;

		if (__dma_omap15xx(od->plat->dma_attr)) {
			uint32_t val;

			val = c->plat->dma_read(CCR, c->dma_ch);
			val &= ~(3 << 8);
			c->plat->dma_write(val, CCR, c->dma_ch);
		}
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->plat = od->plat;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

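	/* Set up one virtual channel per possible DMA request signal (0..126) */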
	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

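/*
 * Example client usage (a sketch, not part of this driver): a peripheral
 * driver can bind a channel to a specific sDMA request line through this
 * filter. The request number below is hypothetical.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	unsigned sig = 17;		// hypothetical sDMA request number
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */
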
static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");