/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only
 * memory-to-memory transfers are supported (tested using the dmatest
 * module).
 */
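/*
 * A minimal dmaengine client sketch for reference (dst, src and len are
 * hypothetical names; dst/src must already be DMA-mapped bus addresses,
 * and error handling is omitted).  Clients go through the dmaengine core
 * rather than calling this driver directly:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *chan;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_CTRL_ACK);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */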

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05
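
/*
 * Transfer size encoding: the TSIZE values are the log2 of the access
 * width in bytes (0x03, i.e. 8-byte transfers, is not used by this
 * driver).
 */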

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};
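
/*
 * Transfer Control Descriptor (TCD) layout as defined by the eDMA
 * hardware; each TCD occupies 32 bytes.  In-memory TCDs prepared by
 * this driver are copied into the per-channel TCD registers, which
 * live at MPC_DMA_TCD_OFFSET from the register base (see
 * mpc_dma_probe()).
 */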
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
};
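
/*
 * Per-channel bookkeeping.  A descriptor cycles through the lists
 * below: free -> prepared (after prep_memcpy) -> queued (after
 * tx_submit) -> active (executing in hardware) -> completed (waiting
 * for the tasklet to run callbacks) -> free again.
 */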
struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met while calling mpc_dma_execute():
 * a) mchan->lock is acquired,
 * b) mchan->active list is empty,
 * c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	/* Move all queued descriptors to active list */
	list_splice_tail_init(&mchan->queued, &mchan->active);

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}
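		/*
		 * Software scatter/gather: point the previous TCD at the
		 * physical address of this descriptor's TCD so the
		 * hardware loads it when the previous major loop
		 * completes.
		 */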
		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

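	/* Interrupt on completion of the last descriptor in the chain */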
	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;
	out_8(&mdma->regs->dmassrt, cid);
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

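	/*
	 * Serve channels from the highest set bit down; fls() returns
	 * the 1-based index of the most significant set bit, or 0 when
	 * no bits remain.
	 */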
	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet to report errors and run completion callbacks */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported the following error(s) on channel %u:\n",
			MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
						&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n",
				i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory-to-memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* Try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

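	/*
	 * Pick the widest access size to which src, dst and len are all
	 * aligned; OR-ing the three values lets a single IS_ALIGNED()
	 * test cover all of them.
	 */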
	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
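
/*
 * A device tree sketch for illustration only; the actual reg and
 * interrupts values come from the SoC dtsi:
 *
 *	dma@14000 {
 *		compatible = "fsl,mpc5121-dma";
 *		reg = <0x14000 0x1800>;
 *		interrupts = <65 0x8>;
 *	};
 */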
static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	if (mdma->is_mpc8308)
		dma->chancnt = MPC8308_DMACHAN_MAX;
	else
		dma->chancnt = MPC512x_DMACHAN_MAX;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* Enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	return retval;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}

static struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};

static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");