/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only memory to
 * memory transfers are supported (tested using the dmatest module).
 */

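/*
 * A minimal sketch of a memory-to-memory client driving this engine
 * through the generic dmaengine API (roughly what dmatest does). The
 * dst_phys/src_phys/len values are illustrative assumptions, not part of
 * this driver. Note that tx_submit() already starts the transfer, since
 * mpc_dma_issue_pending() below is a no-op:
 *
 *        dma_cap_mask_t mask;
 *        struct dma_chan *chan;
 *        struct dma_async_tx_descriptor *tx;
 *
 *        dma_cap_zero(mask);
 *        dma_cap_set(DMA_MEMCPY, mask);
 *        chan = dma_request_channel(mask, NULL, NULL);
 *        if (chan) {
 *                tx = chan->device->device_prep_dma_memcpy(chan, dst_phys,
 *                                src_phys, len, DMA_CTRL_ACK);
 *                if (tx)
 *                        tx->tx_submit(tx);
 *                dma_release_channel(chan);
 *        }
 */
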
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <linux/random.h>

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS        64

/* Macro definitions */
#define MPC_DMA_CHANNELS        64
#define MPC_DMA_TCD_OFFSET        0x1000

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG        (1 << 31)
#define MPC_DMA_DMACR_ERGA        (1 << 3)
#define MPC_DMA_DMACR_ERCA        (1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD        (1 << 31)
#define MPC_DMA_DMAES_GPE        (1 << 15)
#define MPC_DMA_DMAES_CPE        (1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
                                (((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE        (1 << 7)
#define MPC_DMA_DMAES_SOE        (1 << 6)
#define MPC_DMA_DMAES_DAE        (1 << 5)
#define MPC_DMA_DMAES_DOE        (1 << 4)
#define MPC_DMA_DMAES_NCE        (1 << 3)
#define MPC_DMA_DMAES_SGE        (1 << 2)
#define MPC_DMA_DMAES_SBE        (1 << 1)
#define MPC_DMA_DMAES_DBE        (1 << 0)

#define MPC_DMA_TSIZE_1                0x00
#define MPC_DMA_TSIZE_2                0x01
#define MPC_DMA_TSIZE_4                0x02
#define MPC_DMA_TSIZE_16        0x04
#define MPC_DMA_TSIZE_32        0x05
/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
        /* 0x00 */
        u32 dmacr;                /* DMA control register */
        u32 dmaes;                /* DMA error status */
        /* 0x08 */
        u32 dmaerqh;                /* DMA enable request high(channels 63~32) */
        u32 dmaerql;                /* DMA enable request low(channels 31~0) */
        u32 dmaeeih;                /* DMA enable error interrupt high(ch63~32) */
        u32 dmaeeil;                /* DMA enable error interrupt low(ch31~0) */
        /* 0x18 */
        u8 dmaserq;                /* DMA set enable request */
        u8 dmacerq;                /* DMA clear enable request */
        u8 dmaseei;                /* DMA set enable error interrupt */
        u8 dmaceei;                /* DMA clear enable error interrupt */
        /* 0x1c */
        u8 dmacint;                /* DMA clear interrupt request */
        u8 dmacerr;                /* DMA clear error */
        u8 dmassrt;                /* DMA set start bit */
        u8 dmacdne;                /* DMA clear DONE status bit */
        /* 0x20 */
        u32 dmainth;                /* DMA interrupt request high(ch63~32) */
        u32 dmaintl;                /* DMA interrupt request low(ch31~0) */
        u32 dmaerrh;                /* DMA error high(ch63~32) */
        u32 dmaerrl;                /* DMA error low(ch31~0) */
        /* 0x30 */
        u32 dmahrsh;                /* DMA hardware request status high(ch63~32) */
        u32 dmahrsl;                /* DMA hardware request status low(ch31~0) */
        u32 dmaihsa;                /* DMA interrupt high select AXE(ch63~32) */
        u32 dmailsa;                /* DMA interrupt low select AXE(ch31~0) */
        /* 0x40 ~ 0xff */
        u32 reserve0[48];        /* Reserved */
        /* 0x100 */
        u8 dchpri[MPC_DMA_CHANNELS];
                                /* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
        /* 0x00 */
        u32 saddr;                /* Source address */

        u32 smod:5;                /* Source address modulo */
        u32 ssize:3;                /* Source data transfer size */
        u32 dmod:5;                /* Destination address modulo */
        u32 dsize:3;                /* Destination data transfer size */
        u32 soff:16;                /* Signed source address offset */

        /* 0x08 */
        u32 nbytes;                /* Inner "minor" byte count */
        u32 slast;                /* Last source address adjustment */
        u32 daddr;                /* Destination address */

        /* 0x14 */
        u32 citer_elink:1;        /* Enable channel-to-channel linking on
                                 * minor loop complete
                                 */
        u32 citer_linkch:6;        /* Link channel for minor loop complete */
        u32 citer:9;                /* Current "major" iteration count */
        u32 doff:16;                /* Signed destination address offset */

        /* 0x18 */
        u32 dlast_sga;                /* Last Destination address adjustment/scatter
                                 * gather address
                                 */

        /* 0x1c */
        u32 biter_elink:1;        /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 biter_linkch:6;        /* Link channel for major loop complete */
        u32 biter:9;                /* Beginning "major" iteration count */
        u32 bwc:2;                /* Bandwidth control */
        u32 major_linkch:6;        /* Link channel number */
        u32 done:1;                /* Channel done */
        u32 active:1;                /* Channel active */
        u32 major_elink:1;        /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 e_sg:1;                /* Enable scatter/gather processing */
        u32 d_req:1;                /* Disable request */
        u32 int_half:1;                /* Enable an interrupt when major counter is
                                 * half complete
                                 */
        u32 int_maj:1;                /* Enable an interrupt when major iteration
                                 * count completes
                                 */
        u32 start:1;                /* Channel start */
};
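
/*
 * Each TCD mirrors the engine's 32-byte transfer control descriptor
 * layout. Software-prepared TCDs live in coherent DMA memory (see
 * mpc_dma_alloc_chan_resources()) and are copied into the per-channel
 * TCD register space with memcpy_toio() when a transaction starts.
 */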

struct mpc_dma_desc {
        struct dma_async_tx_descriptor        desc;
        struct mpc_dma_tcd                *tcd;
        dma_addr_t                        tcd_paddr;
        int                                error;
        struct list_head                node;
};

struct mpc_dma_chan {
        struct dma_chan                        chan;
        struct list_head                free;
        struct list_head                prepared;
        struct list_head                queued;
        struct list_head                active;
        struct list_head                completed;
        struct mpc_dma_tcd                *tcd;
        dma_addr_t                        tcd_paddr;
        dma_cookie_t                        completed_cookie;

        /* Lock for this structure */
        spinlock_t                        lock;
};

struct mpc_dma {
        struct dma_device                dma;
        struct tasklet_struct                tasklet;
        struct mpc_dma_chan                channels[MPC_DMA_CHANNELS];
        struct mpc_dma_regs __iomem        *regs;
        struct mpc_dma_tcd __iomem        *tcd;
        int                                irq;
        uint                                error_status;

        /* Lock for error_status field in this structure */
        spinlock_t                        error_status_lock;
};

#define DRV_NAME        "mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
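        /*
         * channels[] is indexed by chan_id, so container_of() on this
         * array element recovers the parent mpc_dma below.
         */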
        return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met while calling mpc_dma_execute():
 * a) mchan->lock is acquired,
 * b) mchan->active list is empty,
 * c) mchan->queued list contains at least one entry.
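 *
 * Descriptors are chained by pointing each TCD's dlast_sga at the next
 * descriptor's TCD physical address and setting e_sg, so the engine
 * reloads the next TCD via scatter/gather when the major loop completes.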
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
        struct mpc_dma_desc *first = NULL;
        struct mpc_dma_desc *prev = NULL;
        struct mpc_dma_desc *mdesc;
        int cid = mchan->chan.chan_id;

        /* Move all queued descriptors to active list */
        list_splice_tail_init(&mchan->queued, &mchan->active);

        /* Chain descriptors into one transaction */
        list_for_each_entry(mdesc, &mchan->active, node) {
                if (!first)
                        first = mdesc;

                if (!prev) {
                        prev = mdesc;
                        continue;
                }

                prev->tcd->dlast_sga = mdesc->tcd_paddr;
                prev->tcd->e_sg = 1;
                mdesc->tcd->start = 1;

                prev = mdesc;
        }

        prev->tcd->start = 0;
        prev->tcd->int_maj = 1;

        /* Send first descriptor in chain into hardware */
        memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
        out_8(&mdma->regs->dmassrt, cid);
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        u32 status = is | es;
        int ch;

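        /*
         * Walk pending channels from most to least significant bit:
         * fls() yields the highest set bit, which is then cleared from
         * the local status copy until no requests remain.
         */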
        while ((ch = fls(status) - 1) >= 0) {
                status &= ~(1 << ch);
                mchan = &mdma->channels[ch + off];

                spin_lock(&mchan->lock);

                /* Check error status */
                if (es & (1 << ch))
                        list_for_each_entry(mdesc, &mchan->active, node)
                                mdesc->error = -EIO;

                /* Retire active descriptors and execute queued ones */
                list_splice_tail_init(&mchan->active, &mchan->completed);
                if (!list_empty(&mchan->queued))
                        mpc_dma_execute(mchan);

                spin_unlock(&mchan->lock);
        }
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
        struct mpc_dma *mdma = data;
        uint es;

        /* Save error status register */
        es = in_be32(&mdma->regs->dmaes);
        spin_lock(&mdma->error_status_lock);
        if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
                mdma->error_status = es;
        spin_unlock(&mdma->error_status_lock);

        /* Handle interrupt on each channel */
        mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
                                        in_be32(&mdma->regs->dmaerrh), 32);
        mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
                                        in_be32(&mdma->regs->dmaerrl), 0);

        /* Ack interrupt on all channels */
        out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
        out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
        out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
        out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

        /* Schedule tasklet */
        tasklet_schedule(&mdma->tasklet);

        return IRQ_HANDLED;
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
        struct mpc_dma *mdma = (void *)data;
        dma_cookie_t last_cookie = 0;
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        struct dma_async_tx_descriptor *desc;
        unsigned long flags;
        LIST_HEAD(list);
        uint es;
        int i;

        spin_lock_irqsave(&mdma->error_status_lock, flags);
        es = mdma->error_status;
        mdma->error_status = 0;
        spin_unlock_irqrestore(&mdma->error_status_lock, flags);

        /* Print nice error report */
        if (es) {
                dev_err(mdma->dma.dev,
                        "Hardware reported the following error(s) on channel %u:\n",
                                                MPC_DMA_DMAES_ERRCHN(es));

                if (es & MPC_DMA_DMAES_GPE)
                        dev_err(mdma->dma.dev, "- Group Priority Error\n");
                if (es & MPC_DMA_DMAES_CPE)
                        dev_err(mdma->dma.dev, "- Channel Priority Error\n");
                if (es & MPC_DMA_DMAES_SAE)
                        dev_err(mdma->dma.dev, "- Source Address Error\n");
                if (es & MPC_DMA_DMAES_SOE)
                        dev_err(mdma->dma.dev, "- Source Offset"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_DAE)
                        dev_err(mdma->dma.dev, "- Destination Address"
                                                        " Error\n");
                if (es & MPC_DMA_DMAES_DOE)
                        dev_err(mdma->dma.dev, "- Destination Offset"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_NCE)
                        dev_err(mdma->dma.dev, "- NBytes/Citer"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_SGE)
                        dev_err(mdma->dma.dev, "- Scatter/Gather"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_SBE)
                        dev_err(mdma->dma.dev, "- Source Bus Error\n");
                if (es & MPC_DMA_DMAES_DBE)
                        dev_err(mdma->dma.dev, "- Destination Bus Error\n");
        }

        for (i = 0; i < mdma->dma.chancnt; i++) {
                mchan = &mdma->channels[i];

                /* Get all completed descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                if (!list_empty(&mchan->completed))
                        list_splice_tail_init(&mchan->completed, &list);
                spin_unlock_irqrestore(&mchan->lock, flags);

                if (list_empty(&list))
                        continue;

                /* Execute callbacks and run dependencies */
                list_for_each_entry(mdesc, &list, node) {
                        desc = &mdesc->desc;

                        if (desc->callback)
                                desc->callback(desc->callback_param);

                        last_cookie = desc->cookie;
                        dma_run_dependencies(desc);
                }

                /* Free descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                list_splice_tail_init(&list, &mchan->free);
                mchan->completed_cookie = last_cookie;
                spin_unlock_irqrestore(&mchan->lock, flags);
        }
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
        struct mpc_dma_desc *mdesc;
        unsigned long flags;
        dma_cookie_t cookie;

        mdesc = container_of(txd, struct mpc_dma_desc, desc);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Move descriptor to queue */
        list_move_tail(&mdesc->node, &mchan->queued);

        /* If channel is idle, execute all queued descriptors */
        if (list_empty(&mchan->active))
                mpc_dma_execute(mchan);

        /* Update cookie */
        cookie = mchan->chan.cookie + 1;
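        /*
         * dma_cookie_t is signed; on wrap-around skip zero and negative
         * values, which the dmaengine core reserves for error status.
         */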
        if (cookie <= 0)
                cookie = 1;

        mchan->chan.cookie = cookie;
        mdesc->desc.cookie = cookie;

        spin_unlock_irqrestore(&mchan->lock, flags);

        return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);
        int i;

        /* Alloc DMA memory for Transfer Control Descriptors */
        tcd = dma_alloc_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                &tcd_paddr, GFP_KERNEL);
        if (!tcd)
                return -ENOMEM;

        /* Alloc descriptors for this channel */
        for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
                mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
                if (!mdesc) {
                        dev_notice(mdma->dma.dev, "Memory allocation error. "
                                        "Allocated only %u descriptors\n", i);
                        break;
                }

                dma_async_tx_descriptor_init(&mdesc->desc, chan);
                mdesc->desc.flags = DMA_CTRL_ACK;
                mdesc->desc.tx_submit = mpc_dma_tx_submit;

                mdesc->tcd = &tcd[i];
                mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

                list_add_tail(&mdesc->node, &descs);
        }

        /* Return error only if no descriptors were allocated */
        if (i == 0) {
                dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                        tcd, tcd_paddr);
                return -ENOMEM;
        }

        spin_lock_irqsave(&mchan->lock, flags);
        mchan->tcd = tcd;
        mchan->tcd_paddr = tcd_paddr;
        list_splice_tail_init(&descs, &mchan->free);
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Enable Error Interrupt */
        out_8(&mdma->regs->dmaseei, chan->chan_id);

        return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc, *tmp;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Channel must be idle */
        BUG_ON(!list_empty(&mchan->prepared));
        BUG_ON(!list_empty(&mchan->queued));
        BUG_ON(!list_empty(&mchan->active));
        BUG_ON(!list_empty(&mchan->completed));

        /* Move data */
        list_splice_tail_init(&mchan->free, &descs);
        tcd = mchan->tcd;
        tcd_paddr = mchan->tcd_paddr;

        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Free DMA memory used by descriptors */
        dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                        tcd, tcd_paddr);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node)
                kfree(mdesc);

        /* Disable Error Interrupt */
        out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
        /*
         * We are posting descriptors to the hardware as soon as
         * they are ready, so this function does nothing.
         */
}

/* Check request completion status */
static enum dma_status
mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
                                        dma_cookie_t *done, dma_cookie_t *used)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        unsigned long flags;
        dma_cookie_t last_used;
        dma_cookie_t last_complete;

        spin_lock_irqsave(&mchan->lock, flags);
        last_used = mchan->chan.cookie;
        last_complete = mchan->completed_cookie;
        spin_unlock_irqrestore(&mchan->lock, flags);

        if (done)
                *done = last_complete;

        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                                        size_t len, unsigned long flags)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc = NULL;
        struct mpc_dma_tcd *tcd;
        unsigned long iflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, iflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
                                                                node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, iflags);

        if (!mdesc)
                return NULL;

        mdesc->error = 0;
        tcd = mdesc->tcd;

        /* Prepare Transfer Control Descriptor for this transaction */
        memset(tcd, 0, sizeof(struct mpc_dma_tcd));

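        /*
         * OR-ing src, dst and len lets a single IS_ALIGNED() test prove
         * that all three share the given alignment. The widest qualifying
         * access size is chosen; soff/doff then advance the addresses by
         * one access width after each engine read/write.
         */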
        if (IS_ALIGNED(src | dst | len, 32)) {
                tcd->ssize = MPC_DMA_TSIZE_32;
                tcd->dsize = MPC_DMA_TSIZE_32;
                tcd->soff = 32;
                tcd->doff = 32;
        } else if (IS_ALIGNED(src | dst | len, 16)) {
                tcd->ssize = MPC_DMA_TSIZE_16;
                tcd->dsize = MPC_DMA_TSIZE_16;
                tcd->soff = 16;
                tcd->doff = 16;
        } else if (IS_ALIGNED(src | dst | len, 4)) {
                tcd->ssize = MPC_DMA_TSIZE_4;
                tcd->dsize = MPC_DMA_TSIZE_4;
                tcd->soff = 4;
                tcd->doff = 4;
        } else if (IS_ALIGNED(src | dst | len, 2)) {
                tcd->ssize = MPC_DMA_TSIZE_2;
                tcd->dsize = MPC_DMA_TSIZE_2;
                tcd->soff = 2;
                tcd->doff = 2;
        } else {
                tcd->ssize = MPC_DMA_TSIZE_1;
                tcd->dsize = MPC_DMA_TSIZE_1;
                tcd->soff = 1;
                tcd->doff = 1;
        }

        tcd->saddr = src;
        tcd->daddr = dst;
        tcd->nbytes = len;
        tcd->biter = 1;
        tcd->citer = 1;

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, iflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, iflags);

        return &mdesc->desc;
}

static int __devinit mpc_dma_probe(struct of_device *op,
                                        const struct of_device_id *match)
{
        struct device_node *dn = op->node;
        struct device *dev = &op->dev;
        struct dma_device *dma;
        struct mpc_dma *mdma;
        struct mpc_dma_chan *mchan;
        struct resource res;
        ulong regs_start, regs_size;
        int retval, i;

        mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
        if (!mdma) {
                dev_err(dev, "Memory exhausted!\n");
                return -ENOMEM;
        }

        mdma->irq = irq_of_parse_and_map(dn, 0);
        if (mdma->irq == NO_IRQ) {
                dev_err(dev, "Error mapping IRQ!\n");
                return -EINVAL;
        }

        retval = of_address_to_resource(dn, 0, &res);
        if (retval) {
                dev_err(dev, "Error parsing memory region!\n");
                return retval;
        }

        regs_start = res.start;
        regs_size = res.end - res.start + 1;

        if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
                dev_err(dev, "Error requesting memory region!\n");
                return -EBUSY;
        }

        mdma->regs = devm_ioremap(dev, regs_start, regs_size);
        if (!mdma->regs) {
                dev_err(dev, "Error mapping memory region!\n");
                return -ENOMEM;
        }

        mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
                                                        + MPC_DMA_TCD_OFFSET);

        retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
                                                                        mdma);
        if (retval) {
                dev_err(dev, "Error requesting IRQ!\n");
                return -EINVAL;
        }

        spin_lock_init(&mdma->error_status_lock);

        dma = &mdma->dma;
        dma->dev = dev;
        dma->chancnt = MPC_DMA_CHANNELS;
        dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
        dma->device_free_chan_resources = mpc_dma_free_chan_resources;
        dma->device_issue_pending = mpc_dma_issue_pending;
        dma->device_is_tx_complete = mpc_dma_is_tx_complete;
        dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);

        for (i = 0; i < dma->chancnt; i++) {
                mchan = &mdma->channels[i];

                mchan->chan.device = dma;
                mchan->chan.chan_id = i;
                mchan->chan.cookie = 1;
                mchan->completed_cookie = mchan->chan.cookie;

                INIT_LIST_HEAD(&mchan->free);
                INIT_LIST_HEAD(&mchan->prepared);
                INIT_LIST_HEAD(&mchan->queued);
                INIT_LIST_HEAD(&mchan->active);
                INIT_LIST_HEAD(&mchan->completed);

                spin_lock_init(&mchan->lock);
                list_add_tail(&mchan->chan.device_node, &dma->channels);
        }

        tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

        /*
         * Configure DMA Engine:
         * - Dynamic clock,
         * - Round-robin group arbitration,
         * - Round-robin channel arbitration.
         */
        out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
                                MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

        /* Disable hardware DMA requests */
        out_be32(&mdma->regs->dmaerqh, 0);
        out_be32(&mdma->regs->dmaerql, 0);

        /* Disable error interrupts */
        out_be32(&mdma->regs->dmaeeih, 0);
        out_be32(&mdma->regs->dmaeeil, 0);

        /* Clear interrupts status */
        out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
        out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
        out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
        out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

        /* Route interrupts to IPIC */
        out_be32(&mdma->regs->dmaihsa, 0);
        out_be32(&mdma->regs->dmailsa, 0);

        /* Register DMA engine */
        dev_set_drvdata(dev, mdma);
        retval = dma_async_device_register(dma);
        if (retval) {
                devm_free_irq(dev, mdma->irq, mdma);
                irq_dispose_mapping(mdma->irq);
        }

        return retval;
}

static int __devexit mpc_dma_remove(struct of_device *op)
{
        struct device *dev = &op->dev;
        struct mpc_dma *mdma = dev_get_drvdata(dev);

        dma_async_device_unregister(&mdma->dma);
        devm_free_irq(dev, mdma->irq, mdma);
        irq_dispose_mapping(mdma->irq);

        return 0;
}

static struct of_device_id mpc_dma_match[] = {
        { .compatible = "fsl,mpc5121-dma", },
        {},
};
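
/*
 * An illustrative device tree node this driver binds to. The unit address,
 * register window size and interrupt specifier below are assumptions for a
 * typical MPC5121 system, not taken from this file:
 *
 *        dma@14000 {
 *                compatible = "fsl,mpc5121-dma";
 *                reg = <0x14000 0x1800>;
 *                interrupts = <65 0x8>;
 *        };
 */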

static struct of_platform_driver mpc_dma_driver = {
        .match_table        = mpc_dma_match,
        .probe                = mpc_dma_probe,
        .remove                = __devexit_p(mpc_dma_remove),
        .driver = {
                .name        = DRV_NAME,
                .owner        = THIS_MODULE,
        },
};

static int __init mpc_dma_init(void)
{
        return of_register_platform_driver(&mpc_dma_driver);
}
module_init(mpc_dma_init);

static void __exit mpc_dma_exit(void)
{
        of_unregister_platform_driver(&mpc_dma_driver);
}
module_exit(mpc_dma_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");