/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};

/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));

/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)

/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))

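/*
 * Note: every page allocated by a channel stores either DMA descriptors or
 * transfer chunks, never both. The union in struct rcar_dmac_desc_page only
 * overlays the two array types so that a single page-based allocator
 * (rcar_dmac_desc_alloc() and rcar_dmac_xfer_chunk_alloc() below) can serve
 * both object pools.
 */
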
/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @src_xfer_size: size (in bytes) of hardware transfers on the source side
 * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
 * @src_slave_addr: slave source memory address
 * @dst_slave_addr: slave destination memory address
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;

	unsigned int src_xfer_size;
	unsigned int dst_xfer_size;
	dma_addr_t src_slave_addr;
	dma_addr_t dst_slave_addr;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)

/*
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;

	unsigned long modules[256 / BITS_PER_LONG];
};

#define to_rcar_dmac(d)	container_of(d, struct rcar_dmac, engine)

/* -----------------------------------------------------------------------------
 * Registers
 */

#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4

/* -----------------------------------------------------------------------------
 * Device access
 */

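/*
 * DMAOR (global) and DMARS (per-channel) are 16-bit registers, hence the
 * readw()/writew() special cases in the accessors below; all other registers
 * use 32-bit accesses.
 */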
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}

/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
}

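/*
 * rcar_dmac_chan_start_xfer() programs the channel in one of two modes: when
 * the descriptor carries hardware descriptors (hwdescs.use) the whole chunk
 * list is handed to the DMAC through DMADPBASE and processed autonomously,
 * otherwise a single chunk is programmed through SAR/DAR/TCR and the next
 * chunk is queued from the transfer end interrupt handler.
 */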
static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	if (desc->hwdescs.use) {
		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Program the descriptor stage interrupt to occur after the end
		 * of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic, enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback, enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
}

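/*
 * Reset the controller to a known state. The hardcoded 0x7fff mask written to
 * DMACHCLR sets the clear bit for channels 0-14; the R-Car Gen2 DMAC
 * instances this driver targets provide at most 15 channels per instance.
 */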
static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irq(&chan->lock);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irq(&chan->lock);

	return 0;
}

/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free lists. The descriptor's chunks list will be reinitialized to an empty
 * list as a result.
 *
 * The descriptor must have been removed from the channel's lists before
 * calling this function.
 *
 * Locking: Must be called in non-atomic context.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	spin_lock_irq(&chan->lock);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add_tail(&desc->node, &chan->desc.free);
	spin_unlock_irq(&chan->lock);
}

static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * We have to temporarily move all descriptors from the wait list to a
	 * local list as iterating over the wait list, even with
	 * list_for_each_entry_safe, isn't safe if we release the channel lock
	 * around the rcar_dmac_desc_put() call.
	 */
	spin_lock_irq(&chan->lock);
	list_splice_init(&chan->desc.wait, &list);
	spin_unlock_irq(&chan->lock);

	list_for_each_entry_safe(desc, _desc, &list, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}

	if (list_empty(&list))
		return;

	/* Put the remaining descriptors back in the wait list. */
	spin_lock_irq(&chan->lock);
	list_splice(&list, &chan->desc.wait);
	spin_unlock_irq(&chan->lock);
}

/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	int ret;

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_lock_irq(&chan->lock);

	do {
		if (list_empty(&chan->desc.free)) {
			/*
			 * No free descriptors, allocate a page worth of them
			 * and try again, as someone else could race us to get
			 * the newly allocated descriptors. If the allocation
			 * fails return an error.
			 */
			spin_unlock_irq(&chan->lock);
			ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
			if (ret < 0)
				return NULL;
			spin_lock_irq(&chan->lock);
			continue;
		}

		desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc,
					node);
		list_del(&desc->node);
	} while (!desc);

	spin_unlock_irq(&chan->lock);

	return desc;
}

/*
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

		list_add_tail(&chunk->node, &list);
	}

	spin_lock_irq(&chan->lock);
	list_splice_tail(&list, &chan->desc.chunks_free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irq(&chan->lock);

	return 0;
}

/*
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_xfer_chunk *chunk;
	int ret;

	spin_lock_irq(&chan->lock);

	do {
		if (list_empty(&chan->desc.chunks_free)) {
			/*
			 * No free chunks, allocate a page worth of them and
			 * try again, as someone else could race us to get the
			 * newly allocated chunks. If the allocation fails
			 * return an error.
			 */
			spin_unlock_irq(&chan->lock);
			ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
			if (ret < 0)
				return NULL;
			spin_lock_irq(&chan->lock);
			continue;
		}

		chunk = list_first_entry(&chan->desc.chunks_free,
					 struct rcar_dmac_xfer_chunk, node);
		list_del(&chunk->node);
	} while (!chunk);

	spin_unlock_irq(&chan->lock);

	return chunk;
}

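/*
 * Hardware descriptor memory is allocated coherently and rounded up to a page
 * multiple, so resubmitting a transfer whose page-aligned size is unchanged
 * reuses the existing allocation. Calling this function with a size of zero
 * simply frees the memory.
 */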
static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
				     struct rcar_dmac_desc *desc, size_t size)
{
	/*
	 * dma_alloc_coherent() allocates memory in page size increments. To
	 * avoid reallocating the hardware descriptors when the allocated size
	 * wouldn't change, align the requested size to a multiple of the page
	 * size.
	 */
	size = PAGE_ALIGN(size);

	if (desc->hwdescs.size == size)
		return;

	if (desc->hwdescs.mem) {
		dma_free_coherent(NULL, desc->hwdescs.size, desc->hwdescs.mem,
				  desc->hwdescs.dma);
		desc->hwdescs.mem = NULL;
		desc->hwdescs.size = 0;
	}

	if (!size)
		return;

	desc->hwdescs.mem = dma_alloc_coherent(NULL, size, &desc->hwdescs.dma,
					       GFP_NOWAIT);
	if (!desc->hwdescs.mem)
		return;

	desc->hwdescs.size = size;
}

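/*
 * Mirror the descriptor's chunk list into the in-memory format consumed by
 * the DMAC in descriptor mode: a packed array of 16-byte entries holding the
 * SAR, DAR and TCR values for each stage (see struct rcar_dmac_hw_desc).
 */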
static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
				 struct rcar_dmac_desc *desc)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_hw_desc *hwdesc;

	rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));

	hwdesc = desc->hwdescs.mem;
	if (!hwdesc)
		return -ENOMEM;

	list_for_each_entry(chunk, &desc->chunks, node) {
		hwdesc->sar = chunk->src_addr;
		hwdesc->dar = chunk->dst_addr;
		hwdesc->tcr = chunk->size >> desc->xfer_shift;
		hwdesc++;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Stop and reset
 */

static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
		  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
}

static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&chan->lock, flags);

	/* Move all non-free descriptors to a local list. */
	list_splice_init(&chan->desc.pending, &descs);
	list_splice_init(&chan->desc.active, &descs);
	list_splice_init(&chan->desc.done, &descs);
	list_splice_init(&chan->desc.wait, &descs);

	chan->desc.running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &descs, node) {
		list_del(&desc->node);
		rcar_dmac_desc_put(chan, desc);
	}
}

static void rcar_dmac_stop(struct rcar_dmac *dmac)
{
	rcar_dmac_write(dmac, RCAR_DMAOR, 0);
}

static void rcar_dmac_abort(struct rcar_dmac *dmac)
{
	unsigned int i;

	/* Stop all channels. */
	for (i = 0; i < dmac->n_channels; ++i) {
		struct rcar_dmac_chan *chan = &dmac->channels[i];

		/* Stop and reinitialize the channel. */
		spin_lock(&chan->lock);
		rcar_dmac_chan_halt(chan);
		spin_unlock(&chan->lock);

		rcar_dmac_chan_reinit(chan);
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */

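/*
 * The TS (transfer size) field of CHCR is split across two bit ranges,
 * [21:20] and [4:3], which is why the chcr_ts[] table below combines two
 * partial values per entry. The table is indexed by log2 of the transfer
 * size: for example, a 4-byte bus width gives xfer_shift = 2 and selects
 * RCAR_DMACHCR_TS_4B.
 */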
static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
					  struct rcar_dmac_desc *desc)
{
	static const u32 chcr_ts[] = {
		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
		RCAR_DMACHCR_TS_64B,
	};

	unsigned int xfer_size;
	u32 chcr;

	switch (desc->direction) {
	case DMA_DEV_TO_MEM:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->src_xfer_size;
		break;

	case DMA_MEM_TO_DEV:
		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->dst_xfer_size;
		break;

	case DMA_MEM_TO_MEM:
	default:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_AUTO;
		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
		break;
	}

	desc->xfer_shift = ilog2(xfer_size);
	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
}

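/*
 * The TCR register is 24 bits wide (RCAR_DMATCR_MASK), so a single chunk can
 * move at most (RCAR_DMATCR_MASK + 1) << xfer_shift bytes: 64 MiB with a
 * 4-byte transfer unit, for instance. Larger scatterlist entries are split
 * into multiple chunks in rcar_dmac_chan_prep_sg() below.
 */
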
/*
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element that points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *
rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, dma_addr_t dev_addr,
		       enum dma_transfer_direction dir, unsigned long dma_flags,
		       bool cyclic)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int nchunks = 0;
	unsigned int max_chunk_size;
	unsigned int full_size = 0;
	bool highmem = false;
	unsigned int i;

	desc = rcar_dmac_desc_get(chan);
	if (!desc)
		return NULL;

	desc->async_tx.flags = dma_flags;
	desc->async_tx.cookie = -EBUSY;

	desc->cyclic = cyclic;
	desc->direction = dir;

	rcar_dmac_chan_configure_desc(chan, desc);

	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;

	/*
	 * Allocate and fill the transfer chunk descriptors. We own the only
	 * reference to the DMA descriptor, there's no need for locking.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t mem_addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		full_size += len;

		while (len) {
			unsigned int size = min(len, max_chunk_size);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			/*
			 * Prevent individual transfers from crossing 4GB
			 * boundaries.
			 */
			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;

			/*
			 * Check if either the source or destination address
			 * can't be expressed in 32 bits. If so we can't use
			 * hardware descriptor lists.
			 */
			if (dev_addr >> 32 || mem_addr >> 32)
				highmem = true;
#endif

			chunk = rcar_dmac_xfer_chunk_get(chan);
			if (!chunk) {
				rcar_dmac_desc_put(chan, desc);
				return NULL;
			}

			if (dir == DMA_DEV_TO_MEM) {
				chunk->src_addr = dev_addr;
				chunk->dst_addr = mem_addr;
			} else {
				chunk->src_addr = mem_addr;
				chunk->dst_addr = dev_addr;
			}

			chunk->size = size;

			dev_dbg(chan->chan.device->dev,
				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
				chan->index, chunk, desc, i, sg, size, len,
				&chunk->src_addr, &chunk->dst_addr);

			mem_addr += size;
			if (dir == DMA_MEM_TO_MEM)
				dev_addr += size;

			len -= size;

			list_add_tail(&chunk->node, &desc->chunks);
			nchunks++;
		}
	}

	desc->nchunks = nchunks;
	desc->size = full_size;

	/*
	 * Use hardware descriptor lists if possible when more than one chunk
	 * needs to be transferred (otherwise they don't make much sense).
	 *
	 * The highmem check currently covers the whole transfer. As an
	 * optimization we could use descriptor lists for consecutive lowmem
	 * chunks and direct manual mode for highmem chunks. Whether the
	 * performance improvement would be significant enough compared to the
	 * additional complexity remains to be investigated.
	 */
	desc->hwdescs.use = !highmem && nchunks > 1;
	if (desc->hwdescs.use) {
		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
			desc->hwdescs.use = false;
	}

	return &desc->async_tx;
}

/* -----------------------------------------------------------------------------
 * DMA engine operations
 */

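/*
 * Illustrative slave-DMA client flow against the operations below (a sketch
 * only, using the generic dmaengine client API; the channel name and config
 * values are hypothetical):
 *
 *	chan = dma_request_slave_channel(dev, "rx");
 *	dmaengine_slave_config(chan, &cfg);	// rcar_dmac_device_config()
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);	// rcar_dmac_tx_submit()
 *	dma_async_issue_pending(chan);		// rcar_dmac_issue_pending()
 */
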
static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	int ret;

	INIT_LIST_HEAD(&rchan->desc.free);
	INIT_LIST_HEAD(&rchan->desc.pending);
	INIT_LIST_HEAD(&rchan->desc.active);
	INIT_LIST_HEAD(&rchan->desc.done);
	INIT_LIST_HEAD(&rchan->desc.wait);
	INIT_LIST_HEAD(&rchan->desc.chunks_free);
	INIT_LIST_HEAD(&rchan->desc.pages);

	/* Preallocate descriptors. */
	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	return pm_runtime_get_sync(chan->device->dev);
}

static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct rcar_dmac_desc_page *page, *_page;
	struct rcar_dmac_desc *desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&rchan->lock);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irq(&rchan->lock);

	/* Now no new interrupts will occur */

	if (rchan->mid_rid >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(rchan->mid_rid, dmac->modules);
		rchan->mid_rid = -EINVAL;
	}

	list_splice(&rchan->desc.free, &list);
	list_splice(&rchan->desc.pending, &list);
	list_splice(&rchan->desc.active, &list);
	list_splice(&rchan->desc.done, &list);
	list_splice(&rchan->desc.wait, &list);

	list_for_each_entry(desc, &list, node)
		rcar_dmac_realloc_hwdesc(rchan, desc, 0);

	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
		list_del(&page->node);
		free_page((unsigned long)page);
	}

	pm_runtime_put(chan->device->dev);
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct scatterlist sgl;

	if (!len)
		return NULL;

	sg_init_table(&sgl, 1);
	sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sgl) = dma_src;
	sg_dma_len(&sgl) = len;

	return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
				      DMA_MEM_TO_MEM, flags, false);
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	dma_addr_t dev_addr;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || !sg_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, rchan->mid_rid);
		return NULL;
	}

	dev_addr = dir == DMA_DEV_TO_MEM
		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
				      dir, flags, false);
}

#define RCAR_DMAC_MAX_SG_LEN	32

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			  size_t buf_len, size_t period_len,
			  enum dma_transfer_direction dir, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	dma_addr_t dev_addr;
	unsigned int sg_len;
	unsigned int i;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || buf_len < period_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, rchan->mid_rid);
		return NULL;
	}

	sg_len = buf_len / period_len;
	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
		dev_err(chan->device->dev,
			"chan%u: sg length %d exceeds limit %d\n",
			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
		return NULL;
	}

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; ++i) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	dev_addr = dir == DMA_DEV_TO_MEM
		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
				      dir, flags, true);

	kfree(sgl);
	return desc;
}

static int rcar_dmac_device_config(struct dma_chan *chan,
				   struct dma_slave_config *cfg)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/*
	 * We could lock this, but you shouldn't be configuring the channel
	 * while using it...
	 */
	rchan->src_slave_addr = cfg->src_addr;
	rchan->dst_slave_addr = cfg->dst_addr;
	rchan->src_xfer_size = cfg->src_addr_width;
	rchan->dst_xfer_size = cfg->dst_addr_width;

	return 0;
}

static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/*
	 * FIXME: No new interrupt can occur now, but the IRQ thread might still
	 * be running.
	 */

	rcar_dmac_chan_reinit(rchan);

	return 0;
}

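/*
 * The residue is the sum of the sizes of all chunks that have not been
 * transferred yet, plus the remaining byte count of the chunk in flight as
 * read back from TCR. This gives sub-chunk accuracy, matching the
 * DMA_RESIDUE_GRANULARITY_BURST granularity advertised at probe time.
 */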
static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
					       dma_cookie_t cookie)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *running = NULL;
	struct rcar_dmac_xfer_chunk *chunk;
	unsigned int residue = 0;
	unsigned int dptr = 0;

	if (!desc)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently running transfer
	 * then the descriptor hasn't been processed yet, and the residue is
	 * equal to the full descriptor size.
	 */
	if (cookie != desc->async_tx.cookie)
		return desc->size;

	/*
	 * In descriptor mode the running chunk pointer is not maintained by
	 * the interrupt handler, find the running chunk from the descriptor
	 * pointer field in the CHCRB register. In non-descriptor mode just
	 * use the running descriptor pointer.
	 */
	if (desc->hwdescs.use) {
		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
		WARN_ON(dptr >= desc->nchunks);
	} else {
		running = desc->running;
	}

	/* Compute the size of all chunks still to be transferred. */
	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
		if (chunk == running || ++dptr == desc->nchunks)
			break;

		residue += chunk->size;
	}

	/* Add the residue for the current chunk. */
	residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;

	return residue;
}

static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	enum dma_status status;
	unsigned long flags;
	unsigned int residue;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&rchan->lock, flags);
	residue = rcar_dmac_chan_get_residue(rchan, cookie);
	spin_unlock_irqrestore(&rchan->lock, flags);

	dma_set_residue(txstate, residue);

	return status;
}

static void rcar_dmac_issue_pending(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);

	if (list_empty(&rchan->desc.pending))
		goto done;

	/* Append the pending list to the active list. */
	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);

	/*
	 * If no transfer is running pick the first descriptor from the active
	 * list and start the transfer.
	 */
	if (!rchan->desc.running) {
		struct rcar_dmac_desc *desc;

		desc = list_first_entry(&rchan->desc.active,
					struct rcar_dmac_desc, node);
		rchan->desc.running = desc;

		rcar_dmac_chan_start_xfer(rchan);
	}

done:
	spin_unlock_irqrestore(&rchan->lock, flags);
}

/* -----------------------------------------------------------------------------
 * IRQ handling
 */

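/*
 * Interrupt handling is split between a hard IRQ handler and a threaded
 * handler: rcar_dmac_isr_channel() acknowledges the DSE (descriptor stage
 * end) and TE (transfer end) conditions and advances the hardware state,
 * while rcar_dmac_isr_channel_thread() runs the client callbacks and moves
 * completed descriptors to the ack wait list.
 */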
static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	unsigned int stage;

	if (WARN_ON(!desc || !desc->cyclic)) {
		/*
		 * This should never happen, there should always be a running
		 * cyclic descriptor when a descriptor stage end interrupt is
		 * triggered. Warn and return.
		 */
		return IRQ_NONE;
	}

	/* Program the interrupt pointer to the next stage. */
	stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
		 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
	rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (WARN_ON_ONCE(!desc)) {
		/*
		 * This should never happen, there should always be a running
		 * descriptor when a transfer end interrupt is triggered. Warn
		 * and return.
		 */
		return IRQ_NONE;
	}

	/*
	 * The transfer end interrupt isn't generated for each chunk when using
	 * descriptor mode. Only update the running chunk pointer in
	 * non-descriptor mode.
	 */
	if (!desc->hwdescs.use) {
		/*
		 * If we haven't completed the last transfer chunk simply move
		 * to the next one. Only wake the IRQ thread if the transfer is
		 * cyclic.
		 */
		if (!list_is_last(&desc->running->node, &desc->chunks)) {
			desc->running = list_next_entry(desc->running, node);
			if (!desc->cyclic)
				ret = IRQ_HANDLED;
			goto done;
		}

		/*
		 * We've completed the last transfer chunk. If the transfer is
		 * cyclic, move back to the first one.
		 */
		if (desc->cyclic) {
			desc->running =
				list_first_entry(&desc->chunks,
						 struct rcar_dmac_xfer_chunk,
						 node);
			goto done;
		}
	}

	/* The descriptor is complete, move it to the done list. */
	list_move_tail(&desc->node, &chan->desc.done);

	/* Queue the next descriptor, if any. */
	if (!list_empty(&chan->desc.active))
		chan->desc.running = list_first_entry(&chan->desc.active,
						      struct rcar_dmac_desc,
						      node);
	else
		chan->desc.running = NULL;

done:
	if (chan->desc.running)
		rcar_dmac_chan_start_xfer(chan);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
{
	u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
	struct rcar_dmac_chan *chan = dev;
	irqreturn_t ret = IRQ_NONE;
	u32 chcr;

	spin_lock(&chan->lock);

	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
	if (chcr & RCAR_DMACHCR_TE)
		mask |= RCAR_DMACHCR_DE;
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);

	if (chcr & RCAR_DMACHCR_DSE)
		ret |= rcar_dmac_isr_desc_stage_end(chan);

	if (chcr & RCAR_DMACHCR_TE)
		ret |= rcar_dmac_isr_transfer_end(chan);

	spin_unlock(&chan->lock);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
	struct rcar_dmac_chan *chan = dev;
	struct rcar_dmac_desc *desc;

	spin_lock_irq(&chan->lock);

	/* For cyclic transfers notify the user after every chunk. */
	if (chan->desc.running && chan->desc.running->cyclic) {
		dma_async_tx_callback callback;
		void *callback_param;

		desc = chan->desc.running;
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		if (callback) {
			spin_unlock_irq(&chan->lock);
			callback(callback_param);
			spin_lock_irq(&chan->lock);
		}
	}

	/*
	 * Call the callback function for all descriptors on the done list and
	 * move them to the ack wait list.
	 */
	while (!list_empty(&chan->desc.done)) {
		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
					node);
		dma_cookie_complete(&desc->async_tx);
		list_del(&desc->node);

		if (desc->async_tx.callback) {
			spin_unlock_irq(&chan->lock);
			/*
			 * We own the only reference to this descriptor, we can
			 * safely dereference it without holding the channel
			 * lock.
			 */
			desc->async_tx.callback(desc->async_tx.callback_param);
			spin_lock_irq(&chan->lock);
		}

		list_add_tail(&desc->node, &chan->desc.wait);
	}

	spin_unlock_irq(&chan->lock);

	/* Recycle all acked descriptors. */
	rcar_dmac_desc_recycle_acked(chan);

	return IRQ_HANDLED;
}

static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
{
	struct rcar_dmac *dmac = data;

	if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
		return IRQ_NONE;

	/*
	 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
	 * abort transfers on all channels, and reinitialize the DMAC.
	 */
	rcar_dmac_stop(dmac);
	rcar_dmac_abort(dmac);
	rcar_dmac_init(dmac);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

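/*
 * The modules bitmask tracks which MID/RID values (the single cell of the DT
 * DMA specifier) are already in use; test_and_set_bit() in the filter below
 * both rejects channels belonging to other controllers and guarantees that a
 * given client module is bound to at most one channel at a time.
 */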
static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;

	/*
	 * FIXME: Using a filter on OF platforms is nonsense. The OF xlate
	 * function knows from which device it wants to allocate a channel,
	 * and would be perfectly capable of selecting the channel it wants.
	 * Forcing it to call dma_request_channel() and iterate through all
	 * channels from all controllers is just pointless.
	 */
	if (chan->device->device_config != rcar_dmac_device_config ||
	    dma_spec->np != chan->device->dev->of_node)
		return false;

	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}

static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct rcar_dmac_chan *rchan;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
	if (!chan)
		return NULL;

	rchan = to_rcar_dmac_chan(chan);
	rchan->mid_rid = dma_spec->args[0];

	return chan;
}

/* -----------------------------------------------------------------------------
 * Power management
 */

#ifdef CONFIG_PM_SLEEP
static int rcar_dmac_sleep_suspend(struct device *dev)
{
	/*
	 * TODO: Wait for the current transfer to complete and stop the device.
	 */
	return 0;
}

static int rcar_dmac_sleep_resume(struct device *dev)
{
	/* TODO: Resume transfers, if any. */
	return 0;
}
#endif

#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
	return 0;
}

static int rcar_dmac_runtime_resume(struct device *dev)
{
	struct rcar_dmac *dmac = dev_get_drvdata(dev);

	return rcar_dmac_init(dmac);
}
#endif

static const struct dev_pm_ops rcar_dmac_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
			   NULL)
};

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

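/*
 * Illustrative device tree node matched by this driver (a sketch only; the
 * unit address and reg values are examples, and the authoritative format is
 * the renesas,rcar-dmac DT binding document):
 *
 *	dmac0: dma-controller@e6700000 {
 *		compatible = "renesas,rcar-dmac";
 *		reg = <0 0xe6700000 0 0x20000>;
 *		interrupt-names = "error", "ch0", "ch1", ...;
 *		#dma-cells = <1>;
 *		dma-channels = <15>;
 *	};
 *
 * The "error" and "ch%u" interrupt names, the single DMA specifier cell (the
 * MID/RID value) and the dma-channels property all correspond to what the
 * probe code below requests.
 */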
static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan,
				unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int irq;
	int ret;

	rchan->index = index;
	rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
	rchan->mid_rid = -EINVAL;

	spin_lock_init(&rchan->lock);

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (irq < 0) {
		dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
					rcar_dmac_isr_channel_thread, 0,
					irqname, rchan);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
		return ret;
	}

	/*
	 * Initialize the DMA engine channel and add it to the DMA engine
	 * channels list.
	 */
	chan->device = &dmac->engine;
	dma_cookie_init(chan);

	list_add_tail(&chan->device_node, &dmac->engine.channels);

	return 0;
}

static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
		dev_err(dev, "invalid number of channels %u\n",
			dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}

static int rcar_dmac_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
	struct dma_device *engine;
	struct rcar_dmac *dmac;
	struct resource *mem;
	unsigned int i;
	char *irqname;
	int irq;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources. */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dmac->iomem))
		return PTR_ERR(dmac->iomem);

	irq = platform_get_irq_byname(pdev, "error");
	if (irq < 0) {
		dev_err(&pdev->dev, "no error IRQ specified\n");
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
				 dev_name(dmac->dev));
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
			       irqname, dmac);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
		return ret;
	}

	ret = rcar_dmac_init(dmac);
	pm_runtime_put(&pdev->dev);

	if (ret) {
		dev_err(&pdev->dev, "failed to reset device\n");
		goto error;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	for (i = 0; i < dmac->n_channels; ++i) {
		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto error;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto error;

	/*
	 * Register the DMA engine device.
	 *
	 * The hardcoded MEMCPY transfer size of 4 bytes requires 4-byte
	 * aligned buffers, hence the copy_align setting below.
	 */
	engine = &dmac->engine;
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	dma_cap_set(DMA_SLAVE, engine->cap_mask);

	engine->dev = &pdev->dev;
	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);

	engine->src_addr_widths = widths;
	engine->dst_addr_widths = widths;
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
	engine->device_config = rcar_dmac_device_config;
	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
	engine->device_tx_status = rcar_dmac_tx_status;
	engine->device_issue_pending = rcar_dmac_issue_pending;

	ret = dma_async_device_register(engine);
	if (ret < 0)
		goto error;

	return 0;

error:
	of_dma_controller_free(pdev->dev.of_node);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int rcar_dmac_remove(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

static void rcar_dmac_shutdown(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	rcar_dmac_stop(dmac);
}

static const struct of_device_id rcar_dmac_of_ids[] = {
	{ .compatible = "renesas,rcar-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);

static struct platform_driver rcar_dmac_driver = {
	.driver		= {
		.pm	= &rcar_dmac_pm,
		.name	= "rcar-dmac",
		.of_match_table = rcar_dmac_of_ids,
	},
	.probe		= rcar_dmac_probe,
	.remove		= rcar_dmac_remove,
	.shutdown	= rcar_dmac_shutdown,
};

module_platform_driver(rcar_dmac_driver);

MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");