/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <mach/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/* Max of 16 segments per channel to conserve PaRAM slots */
#define MAX_NR_SG	16
#define EDMA_MAX_SLOTS	MAX_NR_SG
#define EDMA_DESCRIPTORS	16

struct edma_desc {
	struct virt_dma_desc vdesc;
	struct list_head node;
	int absync;
	int pset_nr;
	struct edmacc_param pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan vchan;
	struct list_head node;
	struct edma_desc *edesc;
	struct edma_cc *ecc;
	int ch_num;
	bool alloced;
	int slot[EDMA_MAX_SLOTS];
	dma_addr_t addr;
	int addr_width;
	int maxburst;
};

struct edma_cc {
	int ctlr;
	struct dma_device dma_slave;
	struct edma_chan slave_chans[EDMA_CHANS];
	int num_slave_chans;
	int dummy_slot;
};

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
	struct edma_desc *edesc;
	int i;

	if (!vdesc) {
		echan->edesc = NULL;
		return;
	}

	list_del(&vdesc->node);

	echan->edesc = edesc = to_edma_desc(&vdesc->tx);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < edesc->pset_nr; i++) {
		edma_write_slot(echan->slot[i], &edesc->pset[i]);
		dev_dbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			" chnum\t%d\n"
			" slot\t%d\n"
			" opt\t%08x\n"
			" src\t%08x\n"
			" dst\t%08x\n"
			" abcnt\t%08x\n"
			" ccnt\t%08x\n"
			" bidx\t%08x\n"
			" cidx\t%08x\n"
			" lkrld\t%08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].opt,
			edesc->pset[i].src,
			edesc->pset[i].dst,
			edesc->pset[i].a_b_cnt,
			edesc->pset[i].ccnt,
			edesc->pset[i].src_dst_bidx,
			edesc->pset[i].src_dst_cidx,
			edesc->pset[i].link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (edesc->pset_nr - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
		/* Final pset links to the dummy pset */
		else
			edma_link(echan->slot[i], echan->ecc->dummy_slot);
	}

	edma_start(echan->ch_num);
}

static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_stop() returns (even if it does, it will see
	 * echan->edesc is NULL and exit).
	 */
	if (echan->edesc) {
		echan->edesc = NULL;
		edma_stop(echan->ch_num);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}


static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *config)
{
	if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	if (config->direction == DMA_MEM_TO_DEV) {
		if (config->dst_addr)
			echan->addr = config->dst_addr;
		if (config->dst_addr_width)
			echan->addr_width = config->dst_addr_width;
		if (config->dst_maxburst)
			echan->maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_DEV_TO_MEM) {
		if (config->src_addr)
			echan->addr = config->src_addr;
		if (config->src_addr_width)
			echan->addr_width = config->src_addr_width;
		if (config->src_maxburst)
			echan->maxburst = config->src_maxburst;
	}

	return 0;
}
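
/*
 * Illustrative only (not part of the original file): a client reaches
 * edma_slave_config() through the generic dmaengine_slave_config() helper,
 * which issues DMA_SLAVE_CONFIG to edma_control() below. A minimal sketch,
 * assuming a hypothetical device FIFO address fifo_phys:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */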

static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	struct scatterlist *sg;
	int i;
	int acnt, bcnt, ccnt, src, dst, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	if (sg_len > MAX_NR_SG) {
		dev_err(dev, "Exceeded max SG segments %d > %d\n",
			sg_len, MAX_NR_SG);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = sg_len;

	for_each_sg(sgl, sg, sg_len, i) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				dev_err(dev, "Failed to allocate slot\n");
				/* Don't leak the descriptor on failure */
				kfree(edesc);
				return NULL;
			}
		}

		acnt = echan->addr_width;

		/*
		 * If the maxburst is equal to the fifo width, use
		 * A-synced transfers. This allows for large contiguous
		 * buffer transfers using only one PaRAM set.
		 */
		if (echan->maxburst == 1) {
			edesc->absync = false;
			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
			if (bcnt)
				ccnt++;
			else
				bcnt = SZ_64K - 1;
			cidx = acnt;
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		} else {
			edesc->absync = true;
			bcnt = echan->maxburst;
			ccnt = sg_dma_len(sg) / (acnt * bcnt);
			if (ccnt > (SZ_64K - 1)) {
				dev_err(dev, "Exceeded max SG segment size\n");
				/* Don't leak the descriptor on failure */
				kfree(edesc);
				return NULL;
			}
			cidx = acnt * bcnt;
		}

		if (direction == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = echan->addr;
			src_bidx = acnt;
			src_cidx = cidx;
			dst_bidx = 0;
			dst_cidx = 0;
		} else {
			src = echan->addr;
			dst = sg_dma_address(sg);
			src_bidx = 0;
			src_cidx = 0;
			dst_bidx = acnt;
			dst_cidx = cidx;
		}

		edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
		/* Configure A or AB synchronized transfers */
		if (edesc->absync)
			edesc->pset[i].opt |= SYNCDIM;
		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;

		edesc->pset[i].src = src;
		edesc->pset[i].dst = dst;

		edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
		edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;

		edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
		edesc->pset[i].ccnt = ccnt;
		edesc->pset[i].link_bcntrld = 0xffffffff;

	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
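
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how a client typically drives the prep routine above, assuming a
 * scatterlist sgl already mapped with dma_map_sg() and a hypothetical
 * completion handler xfer_done():
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EBUSY;
 *	desc->callback = xfer_done;
 *	desc->callback_param = priv;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */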

static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	unsigned long flags;

	/* Stop the channel */
	edma_stop(echan->ch_num);

	switch (ch_status) {
	case DMA_COMPLETE:
		dev_dbg(dev, "transfer complete on channel %d\n", ch_num);

		spin_lock_irqsave(&echan->vchan.lock, flags);

		edesc = echan->edesc;
		if (edesc) {
			edma_execute(echan);
			vchan_cookie_complete(&edesc->vdesc);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	case DMA_CC_ERROR:
		dev_dbg(dev, "transfer error on channel %d\n", ch_num);
		break;
	default:
		break;
	}
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_info(dev, "allocated channel for %u:%u\n",
		 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_info(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

static size_t edma_desc_size(struct edma_desc *edesc)
{
	int i;
	size_t size;

	if (edesc->absync)
		for (size = i = 0; i < edesc->pset_nr; i++)
			size += (edesc->pset[i].a_b_cnt & 0xffff) *
				(edesc->pset[i].a_b_cnt >> 16) *
				edesc->pset[i].ccnt;
	else
		size = (edesc->pset[0].a_b_cnt & 0xffff) *
			(edesc->pset[0].a_b_cnt >> 16) +
			(edesc->pset[0].a_b_cnt & 0xffff) *
			(SZ_64K - 1) * edesc->pset[0].ccnt;

	return size;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	vdesc = vchan_find_desc(&echan->vchan, cookie);
	if (vdesc) {
		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		struct edma_desc *edesc = echan->edesc;
		txstate->residue = edma_desc_size(edesc);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}

static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}

static struct platform_driver edma_driver = {
	.probe = edma_probe,
	.remove = edma_remove,
	.driver = {
		.name = "edma-dma-engine",
		.owner = THIS_MODULE,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
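
/*
 * Illustrative only (not part of the original file): clients pin a specific
 * EDMA channel by handing edma_filter_fn and a pointer to the wanted channel
 * number (typically taken from board/platform data) to dma_request_channel().
 * A minimal sketch, where pdata->dma_rx_chnum is hypothetical:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	unsigned ch_num = pdata->dma_rx_chnum;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 */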

static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
	}

	/* Only instantiate the second controller if the driver registered */
	if (ret == 0 && EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
		}
	}

out:
	return ret;
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <mporter@ti.com>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");