/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_INT_EN_CLR		0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR	0x15C

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* the xlen and dma_width registers are counted in 4-byte words */
#define SIRFSOC_DMA_WORD_LEN		4
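
/*
 * Register layout as used below (a summary of the accesses in this file):
 * each channel has a 0x10-byte bank of ADDR/XLEN/YLEN/CTRL registers at
 * base + cid * 0x10, plus a per-channel width register at
 * base + SIRFSOC_DMA_WIDTH_0 + cid * 4.  VALID, INT, INT_EN and LOOP_CTRL
 * are shared registers carrying one control bit per channel (LOOP_CTRL
 * also uses a second bit per channel at cid + 16).
 */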

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};
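
/*
 * A worked example with illustrative numbers (not from a datasheet):
 * moving 8 frames of 64 data bytes each, with a 16-byte gap between
 * frames, corresponds to xlen = 16 words per line, width = 20 words
 * (data plus gap) and ylen = 7 (number of frames minus one).  Addresses
 * are programmed in words, hence the "addr >> 2" in sirfsoc_dma_execute().
 */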

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	bool				is_marco;
};

#define DRV_NAME	"sirfsoc_dma"

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * lock has been held by functions calling this, so we don't hold
	 * lock again
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
			node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active, &schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for a cyclic channel, the descriptor stays on the active list */
			sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
				node);

			if (!sdesc || !sdesc->cyclic) {
				/* no active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
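
/*
 * A minimal configuration sketch (illustrative client code): this
 * controller only accepts a 4-byte bus width, and src_maxburst == 4
 * selects the alternative channel mode bit programmed into CH_CTRL:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */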

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco) {
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	} else {
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
	}

	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			| ((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_PAUSE:
		return sirfsoc_dma_pause_chan(schan);
	case DMA_RESUME:
		return sirfsoc_dma_resume_chan(schan);
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}
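
/*
 * Clients normally reach these commands through the dmaengine inline
 * wrappers rather than calling device_control directly; an illustrative
 * sketch, assuming a channel obtained from this controller:
 *
 *	dmaengine_pause(chan);		maps to DMA_PAUSE
 *	dmaengine_resume(chan);		maps to DMA_RESUME
 *	dmaengine_terminate_all(chan);	maps to DMA_TERMINATE_ALL
 */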

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;

	spin_lock_irqsave(&schan->lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2,
	 * and ylen (the number of frames minus 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}
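
/*
 * An illustrative sketch (hypothetical client code; dma_buf and desc are
 * assumed names) describing the 2D transfer from the example near
 * struct sirfsoc_dma_desc: 8 frames of 64 bytes with a 16-byte gap.
 * The template ends in a flexible array, so it must be allocated with
 * room for one data chunk:
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
 *	xt->src_start = dma_buf;
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->numf = 8;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = 64;
 *	xt->sgl[0].icg = 16;
 *	desc = chan->device->device_prep_interleaved_dma(chan, xt, 0);
 */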

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with two periods.
	 * If the X-length is set to 0, the channel runs in loop mode:
	 * the DMA address keeps increasing until it reaches the end of a
	 * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
	 * then the DMA address goes back to the beginning of this area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB, and the DMA controller generates two interrupts per
	 * loop: one when the DMA address reaches the end of BUFA and one
	 * when it reaches the end of BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}
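
/*
 * A usage sketch (illustrative, with assumed buffer names): a client
 * running a ping-pong audio buffer passes a period of exactly half the
 * buffer, calling through the device hook this driver fills in:
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys,
 *			buf_len, buf_len / 2, DMA_MEM_TO_DEV, 0, NULL);
 */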

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
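
/*
 * An illustrative sketch (hypothetical client code): requesting channel
 * 12 of controller 0 by its global channel number:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *			(void *)(0 * SIRFSOC_DMA_CHANNELS + 12));
 */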

static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
		sdma->is_marco = true;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Failed to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	return 0;
}

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{ .compatible = "sirf,marco-dmac", },
	{},
};
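
/*
 * An illustrative device tree node this driver would bind to (the
 * address and interrupt number are assumptions, not taken from a real
 * dtsi):
 *
 *	dmac0: dma-controller@b00b0000 {
 *		compatible = "sirf,prima2-dmac";
 *		reg = <0xb00b0000 0x10000>;
 *		interrupts = <12>;
 *		cell-index = <0>;
 *	};
 */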

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

module_platform_driver(sirfsoc_dma_driver);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");