/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/dmaengine.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

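/*
 * Register access helpers: sh_chan->base and shdev->chan_reg are u32 __iomem
 * pointers (DMARS is accessed as u16), so the byte offsets taken from the
 * register definitions are scaled by the element size before being added.
 */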
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor = dmaor_read(shdev);

	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(shdev);
	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;

	dmaor_write(shdev, dmaor);
	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

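/*
 * Transfer sizes are encoded in the TS bits of CHCR. The platform data
 * supplies mask/shift pairs for the low and high TS fields plus a ts_shift[]
 * table that maps the combined TS value to log2(transfer size in bytes).
 * calc_xmit_shift() decodes CHCR into that log2 size; log2size_to_chcr()
 * performs the reverse mapping.
 */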
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

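/*
 * Load one transfer chunk into the channel registers. TCR is programmed in
 * units of the configured transfer size, hence the shift by xmit_shift.
 */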
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* CHCR cannot be changed while the channel is busy */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

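/*
 * tx_submit: assign the next positive cookie and move every chunk belonging
 * to this descriptor from ld_free to ld_queue. Only the last chunk keeps the
 * user's callback; the hardware is started later from issue_pending via
 * sh_chan_xfer_ld_queue().
 */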
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

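/*
 * Slave-channel selection: sh_dmae_alloc_chan_resources() reads the requested
 * struct sh_dmae_slave from chan->private and matches its slave_id against
 * the platform's slave configuration table. A minimal client-side sketch
 * (the filter function and the slave ID below are illustrative, not part of
 * this driver; only chan->private and slave_id matter here):
 *
 *	static bool example_filter(struct dma_chan *chan, void *arg)
 *	{
 *		chan->private = arg;		// struct sh_dmae_slave *
 *		return true;
 *	}
 *
 *	struct sh_dmae_slave param = { .slave_id = SHDMA_SLAVE_SCIF0_TX };
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, example_filter, &param);
 */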
static struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
{
	struct dma_device *dma_dev = sh_chan->common.device;
	struct sh_dmae_device *shdev = container_of(dma_dev,
					struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
		if (!cfg)
			return -EINVAL;

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
			return -EBUSY;

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return sh_chan->descs_allocated;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	dmae_halt(sh_chan);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the prepared descriptor or NULL on failure
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

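/*
 * One descriptor covers at most SH_DMA_TCR_MAX + 1 bytes - the 16MB limit
 * noted at the top of this file. sh_dmae_prep_sg() below splits larger
 * requests into a chain of such chunks.
 */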
/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element, which points
 * at the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

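/*
 * MEMCPY preparation, reached through the generic dmaengine API (e.g.
 * dma_async_memcpy_buf_to_buf()). Buffers should observe the copy_align
 * advertised in sh_dmae_probe() (LOG2_DEFAULT_XFER_SIZE, i.e. 4 bytes).
 */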
static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	chan->private = NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
			       direction, flags);
}

static void sh_dmae_terminate_all(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	if (!chan)
		return;

	sh_dmae_chan_ld_cleanup(sh_chan, true);
}

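/*
 * Descriptor life cycle (see enum sh_dmae_desc_status above):
 * IDLE -> PREPARED (prep_sg) -> SUBMITTED (tx_submit) -> COMPLETED (tasklet)
 * -> WAITING (callback run, not yet acked) -> IDLE (back on ld_free).
 * __ld_cleanup() walks ld_queue once and returns the callback it ran, if any,
 * so that the caller can loop until nothing is left to complete.
 */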
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

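/*
 * Program the hardware with the first SUBMITTED descriptor on ld_queue,
 * unless the channel is already busy with a transfer.
 */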
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first descriptor that has not been transferred yet */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and we
	 * have to report error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

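/*
 * Per-channel transfer-end interrupt: halt the channel and defer completion
 * handling to the tasklet.
 */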
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset them all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan) {
			struct sh_desc *desc;
			/* Stop the channel */
			dmae_halt(sh_chan);
			/* Complete all */
			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
				struct dma_async_tx_descriptor *tx = &desc->async_tx;
				desc->mark = DESC_IDLE;
				if (tx->callback)
					tx->callback(tx->callback_param);
			}
			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
		}
	}
	sh_dmae_rst(shdev);

	return IRQ_HANDLED;
}
#endif

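/*
 * Transfer-end tasklet. The controller has no hardware descriptor chaining,
 * so the completed chunk is identified by comparing the current SAR/DAR
 * against the programmed address plus length. The matching descriptor is
 * marked COMPLETED, the next SUBMITTED one is started and the cleanup pass
 * runs the callbacks.
 */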
static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* reference the shared struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
			(unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
			&shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* The DMARS area is optional: if absent, this controller cannot do slave DMA */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
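	/*
	 * A purely illustrative resource layout for the rules above - one
	 * error IRQ plus a range of per-channel IRQs (all addresses and IRQ
	 * numbers below are made up; real values come from the SoC setup
	 * code):
	 *
	 *	static struct resource example_dmae_resources[] = {
	 *		{ .start = 0xfe008020, .end = 0xfe00808f,
	 *		  .flags = IORESOURCE_MEM },	// channel registers
	 *		{ .start = 0xfe009000, .end = 0xfe00900b,
	 *		  .flags = IORESOURCE_MEM },	// DMARS (optional)
	 *		{ .start = 78, .end = 78,
	 *		  .flags = IORESOURCE_IRQ },	// error IRQ
	 *		{ .start = 48, .end = 53,
	 *		  .flags = IORESOURCE_IRQ },	// channels 0..5
	 *	};
	 */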
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	/* reset dma controller */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (dmars)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_terminate_all = sh_dmae_terminate_all;

	shdev->common.dev = &pdev->dev;
	/* The default transfer size (LOG2_DEFAULT_XFER_SIZE, 4 bytes) also sets the copy alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			chan_irq[irq_cnt] = chanirq_res->start;
			chan_flag[irq_cnt] = IRQF_SHARED;
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	if (irq_cnt < pdata->channel_num)
		goto eirqres;

	/* Create DMA Channel */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");