/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/sirfsoc_dma.h>

#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C
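
/*
 * The four channel register offsets above are per-channel: channel N's
 * copy of these registers is programmed at base + N * 0x10 + offset
 * (see sirfsoc_dma_execute() below).
 */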

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* The xlen and dma_width registers are programmed in units of 4-byte words */
#define SIRFSOC_DMA_WORD_LEN	4

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};
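
/*
 * Note on the 2D geometry above (derived from sirfsoc_dma_prep_interleaved()
 * below): xlen is the number of 4-byte words transferred per line, width is
 * the line pitch in words (data plus inter-line gap), and ylen is the number
 * of lines minus one, so one transfer covers width * (ylen + 1) words.
 */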

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
};

#define DRV_NAME	"sirfsoc_dma"

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * The lock is already held by the functions calling this, so we
	 * don't take it again.
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to the active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting the DMA transfer
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
			node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active, &schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else {
			schan->happened_cyclic++;
		}

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for a cyclic channel, the descriptor stays on the active list */
			sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
				node);

			if (!sdesc || !sdesc->cyclic) {
				/* no active cyclic DMA on this channel */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
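
/*
 * A minimal client-side sketch (not part of this driver) of a configuration
 * the function above accepts: both bus widths must be 4 bytes, and a source
 * maxburst of 4 selects mode 1. "chan" is assumed to have been requested
 * elsewhere, e.g. via dma_request_channel().
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 4,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */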

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
		~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
		& ~((1 << cid) | 1 << (cid + 16)),
		sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;

	spin_lock_irqsave(&schan->lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	/* Only slave transfers to or from a device are supported */
	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2, and
	 * ylen (the number of frames minus 1) must be at least 0.
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}
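
/*
 * A rough client-side sketch (hypothetical buffer names, not part of this
 * driver) of an interleaved template the checks above accept: exactly one
 * chunk per frame, at least one frame, and sizes that are multiples of
 * SIRFSOC_DMA_WORD_LEN. The sgl[] member is a flexible array, so the
 * template is allocated with room for one chunk.
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = buf_dma_addr;	(hypothetical dma_addr_t)
 *	xt->frame_size = 1;		(one chunk per frame)
 *	xt->numf = 16;			(16 frames, so ylen = 15)
 *	xt->sgl[0].size = 64;		(64 bytes = 16 words per line)
 *	xt->sgl[0].icg = 0;		(no gap, so width == xlen)
 *	desc = chan->device->device_prep_interleaved_dma(chan, xt, 0);
 */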

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with two periods.
	 * If the X-length is set to 0, the channel runs in loop mode: the
	 * DMA address keeps increasing until it reaches the end of a loop
	 * area whose size is defined by DMA_WIDTH x (Y_LENGTH + 1), and
	 * then wraps back to the beginning of that area. In loop mode the
	 * DMA data region is divided into two parts, BUFA and BUFB, and
	 * the controller raises an interrupt twice per loop: once when the
	 * DMA address reaches the end of BUFA and once when it reaches the
	 * end of BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}
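
/*
 * A minimal client-side sketch (hypothetical names, not part of this driver)
 * of the only cyclic layout accepted above: a buffer split into exactly two
 * periods (BUFA and BUFB), with an interrupt at the end of each half.
 *
 *	#define PERIOD_BYTES	4096
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_dma_addr,
 *			2 * PERIOD_BYTES, PERIOD_BYTES, DMA_DEV_TO_MEM);
 *	desc->callback = my_period_done;	(hypothetical callback)
 *	desc->callback_param = my_ctx;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */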

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
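
/*
 * A minimal client-side sketch (not part of this driver) of requesting a
 * specific channel with the filter above; the id encodes both the DMAC
 * instance and the channel, e.g. channel 12 of the first DMAC:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
 */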

static int __devinit sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		ret = -ENODEV;
		goto free_mem;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		ret = -EINVAL;
		goto free_mem;
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto free_mem;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
		sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto unmap_mem;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		schan->chan.cookie = 1;
		schan->chan.completed_cookie = schan->chan.cookie;

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	devm_free_irq(dev, sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
unmap_mem:
	iounmap(sdma->base);
free_mem:
	devm_kfree(dev, sdma);
	return ret;
}

static int __devexit sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	devm_free_irq(dev, sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	iounmap(sdma->base);
	devm_kfree(dev, sdma);
	return 0;
}

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= __devexit_p(sirfsoc_dma_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

module_platform_driver(sirfsoc_dma_driver);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");