/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <asm/dmaengine.h>

#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
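
/*
 * Descriptor lifecycle, as implemented below: a descriptor sits DESC_IDLE on
 * the free list, is marked DESC_PREPARED by the prep routines, DESC_SUBMITTED
 * by tx_submit, DESC_COMPLETED by the completion tasklet, DESC_WAITING once
 * its callback has run, and DESC_IDLE again after the client acks it.
 */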

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2

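/*
 * With LOG2_DEFAULT_XFER_SIZE == 2, dmae_init() programs a 2^2 == 4 byte
 * transfer unit, and sh_dmae_probe() advertises the same constant as
 * common.copy_align, so MEMCPY buffers are expected to be 4-byte aligned.
 */
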
/* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}
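
/*
 * Note on the "reg / sizeof(u32)" arithmetic above: base and chan_reg are
 * u32 __iomem pointers, so pointer addition advances in 4-byte steps while
 * the register macros are byte offsets. E.g. a register at byte offset 0x0c
 * is reached as base + 0x0c / sizeof(u32) == base + 3.
 */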

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor = dmaor_read(shdev);

	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(shdev);
	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;

	dmaor_write(shdev, dmaor);
	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
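
/*
 * Illustrative example of the split TS field (the actual masks and shifts
 * come from platform data): with ts_low_mask covering CHCR bits 4-5 and
 * ts_high_mask bits 20-21, a ts_shift[] index of 6 (0b110) is stored as
 * 0b10 in the low field and 0b01 in the high field; calc_xmit_shift()
 * reassembles the index and returns the log2 transfer size it selects.
 */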

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

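/*
 * Worked example for the TCR shift above: hw->tcr is a byte count, but TCR
 * counts transfer units. With a 32-byte unit (xmit_shift == 5) a 64KB chunk
 * is programmed as 0x10000 >> 5 == 0x800 transfers.
 */
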
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* CHCR must not be written while DMA is in progress */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
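
/*
 * Example of the DMARS packing above (illustrative): two channels share one
 * 16-bit DMARS register, one MID/RID byte each. With dmars_bit == 0 the
 * mask 0xff00 >> 0 preserves the partner's high byte and val lands in bits
 * 0-7; with dmars_bit == 8 the mask becomes 0x00ff, preserving the low byte
 * while val << 8 fills bits 8-15.
 */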

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	/* Cookies are positive; wrap back to 1, skipping the error values */
	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
{
	struct dma_device *dma_dev = sh_chan->common.device;
	struct sh_dmae_device *shdev = container_of(dma_dev,
					struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
		if (!cfg)
			return -EINVAL;

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
			return -EBUSY;

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
		/* Re-initialise, if the channel is not set up for auto-request */
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated)
		pm_runtime_put(sh_chan->dev);

	return sh_chan->descs_allocated;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	dmae_halt(sh_chan);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan: DMA channel
 * @flags: DMA transfer flags
 * @dest: destination DMA address, incremented when direction equals
 *	DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src: source DMA address, incremented when direction equals
 *	DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len: DMA transfer length
 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction: needed for slave DMA to decide which address to keep constant,
 *	equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns: the prepared descriptor on success, NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	/* Each chunk transfers at most SH_DMA_TCR_MAX + 1 bytes */
	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element, pointing at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);
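
	/*
	 * Example of the chunk count above (SH_DMA_TCR_MAX + 1 is the 16MB
	 * per-chunk maximum): a 16MB SG element yields one chunk, 16MB plus
	 * one byte yields two -- the expression is a ceiling division.
	 */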

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * - the first descriptor is what the user is dealing with in all API
	 *   calls; its cookie is first set to -EBUSY and at tx-submit to a
	 *   positive number
	 * - if more than one chunk is needed, further chunks have cookie
	 *   = -EINVAL
	 * - the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * - all chunks are linked onto the tx_list head with their .node heads
	 *   only during this function, then they are immediately spliced back
	 *   onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	chan->private = NULL;

	sh_chan = to_sh_chan(chan);

	/* Wrap the source buffer in a single-entry scatterlist */
	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
			       direction, flags);
}
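
/*
 * Usage sketch (illustrative, not part of this driver): a client obtains a
 * slave channel by passing a struct sh_dmae_slave with a valid slave_id
 * through chan->private, typically from a dma_request_channel() filter:
 *
 *	static bool sh_dmae_filter(struct dma_chan *chan, void *arg)
 *	{
 *		chan->private = arg;	(arg is a struct sh_dmae_slave *)
 *		return true;
 *	}
 *
 * sh_dmae_alloc_chan_resources() then validates the ID and fills in
 * param->config, which sh_dmae_prep_slave_sg() above relies on.
 */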

static void sh_dmae_terminate_all(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan;

	if (!chan)
		return;

	sh_chan = to_sh_chan(chan);

	sh_dmae_chan_ld_cleanup(sh_chan, true);
}

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}
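
/*
 * Illustrative walk-through of the above: for a three-chunk chain submitted
 * with cookie N, the chunks carry chunks = 3, 2, 1. Only the chunks == 1
 * descriptor (the final one) advances completed_cookie and carries the
 * callback; the others are merely flag-updated and recycled once the
 * exposed head has been acked.
 */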

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first not yet transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset them all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan) {
			struct sh_desc *desc;
			/* Stop the channel */
			dmae_halt(sh_chan);
			/* Complete all */
			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
				struct dma_async_tx_descriptor *tx = &desc->async_tx;
				desc->mark = DESC_IDLE;
				if (tx->callback)
					tx->callback(tx->callback_param);
			}
			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
		}
	}
	sh_dmae_rst(shdev);

	return IRQ_HANDLED;
}
#endif

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	/*
	 * The finished chunk is recognised by comparing the current SAR/DAR
	 * with the expected end address of each submitted descriptor.
	 */
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* reference the shared struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional, if absent, this controller cannot do slave DMA */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
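	/*
	 * Hypothetical example of rule 2: a controller with six channels
	 * could pass its IRQ resources as { error }, { ch0 }, { ch1 },
	 * { ch2..ch5 }, the last entry being a range (start != end) that
	 * hands out four consecutive channel IRQs in the loop below.
	 */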
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* reset dma controller */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (dmars)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_terminate_all = sh_dmae_terminate_all;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			chan_irq[irq_cnt] = chanirq_res->start;
			chan_flag[irq_cnt] = IRQF_SHARED;
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	if (irq_cnt < pdata->channel_num)
		goto eirqres;

	/* Create DMA Channel */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	pm_runtime_put(&pdev->dev);

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	pm_runtime_put(&pdev->dev);
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");