/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>

#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
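
/*
 * Descriptor lifecycle, as implemented by sh_dmae_tx_submit() and
 * __ld_cleanup() below: IDLE (on ld_free) -> PREPARED (spliced back onto
 * ld_free until submission) -> SUBMITTED (moved to ld_queue) -> COMPLETED
 * (transfer done, callback pending) -> WAITING (callback run, awaiting
 * ack) -> back to IDLE on ld_free.
 */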

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor = dmaor_read(shdev);

	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(shdev);
	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;

	dmaor_write(shdev, dmaor);
	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}
97
Guennadi Liakhovetskifc461852010-01-19 07:24:55 +000098static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +000099{
100 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
Guennadi Liakhovetskifc461852010-01-19 07:24:55 +0000101
102 if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
103 return true; /* working */
104
105 return false; /* waiting */
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +0000106}
107
Guennadi Liakhovetski8b1935e2010-02-11 16:50:14 +0000108static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +0000109{
Guennadi Liakhovetski8b1935e2010-02-11 16:50:14 +0000110 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
111 struct sh_dmae_device, common);
112 struct sh_dmae_pdata *pdata = shdev->pdata;
113 int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
114 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
Guennadi Liakhovetski623b4ac2010-02-03 14:44:12 +0000115
Guennadi Liakhovetski8b1935e2010-02-11 16:50:14 +0000116 if (cnt >= pdata->ts_shift_num)
117 cnt = 0;
118
119 return pdata->ts_shift[cnt];
120}
121
122static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
123{
124 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
125 struct sh_dmae_device, common);
126 struct sh_dmae_pdata *pdata = shdev->pdata;
127 int i;
128
129 for (i = 0; i < pdata->ts_shift_num; i++)
130 if (pdata->ts_shift[i] == l2size)
131 break;
132
133 if (i == pdata->ts_shift_num)
134 i = 0;
135
136 return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
137 ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +0000138}
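
/*
 * A worked round-trip example with hypothetical platform data (the real
 * masks and shift table are SoC-specific and come from pdata): with
 * ts_shift[] = { 3, 4, 5 }, ts_low_mask = 0x18 and ts_low_shift = 3,
 * log2size_to_chcr(sh_chan, 5) picks index i = 2 and returns
 * (2 << 3) & 0x18 = 0x10, while calc_xmit_shift() on that CHCR value
 * recovers ts_shift[2] = 5, i.e. a 2^5 = 32-byte transfer unit.
 */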

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* CHCR must not be written while the DMA channel is busy */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

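	/*
	 * Read-modify-write of the 16-bit DMARS word: with dmars_bit == 8,
	 * (0xff00 >> 8) == 0x00ff preserves the low byte while the new
	 * MID/RID value is placed in the high byte; with dmars_bit == 0 the
	 * high byte is preserved instead.
	 */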
	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct dma_device *dma_dev = sh_chan->common.device;
	struct sh_dmae_device *shdev = container_of(dma_dev,
					struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

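/*
 * A minimal sketch of how a client hands a slave request to this driver
 * (the function name here is illustrative, not taken from a real client):
 * a dmaengine channel filter stores the sh_dmae_slave parameter in
 * chan->private, which sh_dmae_alloc_chan_resources() below then matches
 * against the platform data via sh_dmae_find_slave():
 *
 *	static bool example_filter(struct dma_chan *chan, void *arg)
 *	{
 *		struct sh_dmae_slave *param = arg;
 *
 *		chan->private = param;
 *		return true;
 *	}
 */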
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	pm_runtime_put(sh_chan->dev);
	return ret;
}

/*
 * sh_dma_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	dmae_halt(sh_chan);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}
392
Guennadi Liakhovetskicfefe992010-02-03 14:46:41 +0000393/**
Guennadi Liakhovetskifc461852010-01-19 07:24:55 +0000394 * sh_dmae_add_desc - get, set up and return one transfer descriptor
395 * @sh_chan: DMA channel
396 * @flags: DMA transfer flags
397 * @dest: destination DMA address, incremented when direction equals
398 * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
399 * @src: source DMA address, incremented when direction equals
400 * DMA_TO_DEVICE or DMA_BIDIRECTIONAL
401 * @len: DMA transfer length
402 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
403 * @direction: needed for slave DMA to decide which address to keep constant,
404 * equals DMA_BIDIRECTIONAL for MEMCPY
405 * Returns 0 or an error
406 * Locks: called with desc_lock held
407 */
408static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
409 unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
410 struct sh_desc **first, enum dma_data_direction direction)
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +0000411{
Guennadi Liakhovetskifc461852010-01-19 07:24:55 +0000412 struct sh_desc *new;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +0000413 size_t copy_size;
Guennadi Liakhovetskifc461852010-01-19 07:24:55 +0000414
415 if (!*len)
416 return NULL;
417
418 /* Allocate the link descriptor from the free list */
419 new = sh_dmae_get_desc(sh_chan);
420 if (!new) {
421 dev_err(sh_chan->dev, "No free link descriptor available\n");
422 return NULL;
423 }
424
425 copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
426
427 new->hw.sar = *src;
428 new->hw.dar = *dest;
429 new->hw.tcr = copy_size;
430
431 if (!*first) {
432 /* First desc */
433 new->async_tx.cookie = -EBUSY;
434 *first = new;
435 } else {
436 /* Other desc - invisible to the user */
437 new->async_tx.cookie = -EINVAL;
438 }
439
Guennadi Liakhovetskicfefe992010-02-03 14:46:41 +0000440 dev_dbg(sh_chan->dev,
441 "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
Guennadi Liakhovetskifc461852010-01-19 07:24:55 +0000442 copy_size, *len, *src, *dest, &new->async_tx,
Guennadi Liakhovetskicfefe992010-02-03 14:46:41 +0000443 new->async_tx.cookie, sh_chan->xmit_shift);
Guennadi Liakhovetskifc461852010-01-19 07:24:55 +0000444
445 new->mark = DESC_PREPARED;
446 new->async_tx.flags = flags;
Guennadi Liakhovetskicfefe992010-02-03 14:46:41 +0000447 new->direction = direction;
Guennadi Liakhovetskifc461852010-01-19 07:24:55 +0000448
449 *len -= copy_size;
450 if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
451 *src += copy_size;
452 if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
453 *dest += copy_size;
454
455 return new;
456}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element and points at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * The first descriptor is what the user is dealing with in all API
	 * calls; its cookie is at first set to -EBUSY, at tx-submit to a
	 * positive number.
	 * If more than one chunk is needed, further chunks have cookie = -EINVAL;
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC.
	 * All chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced back
	 * onto the free list in the form of a chain.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	chan->private = NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}
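
/*
 * A minimal client-side sketch of driving the MEMCPY capability exported
 * below (standard dmaengine calls, not part of this driver; dst and src
 * are assumed to be already DMA-mapped bus addresses):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */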

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}
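
/*
 * A hedged sketch of the matching client call (sgl must first be mapped
 * with dma_map_sg(); the channel is assumed to have been requested with a
 * filter like example_filter() above, so that chan->private is set):
 *
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *				DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 */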

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	dmae_halt(sh_chan);

	spin_lock_bh(&sh_chan->desc_lock);
	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}
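
/*
 * Clients reach sh_dmae_control() through the generic interface, e.g.
 *
 *	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 *
 * which aborts all queued descriptors; the partially transferred byte
 * count of the current descriptor is saved in desc->partial above, so a
 * client may resubmit the remainder.
 */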

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;

	if (all)
		/* Terminating - forgive uncompleted cookies */
		sh_chan->completed_cookie = sh_chan->common.cookie;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first not transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}
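
/*
 * Clients normally poll this through the dmaengine helper, e.g.
 *
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *
 * which lands here via the device_tx_status method installed in
 * sh_dmae_probe() below.
 */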

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so have to reset all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan) {
			struct sh_desc *desc;
			/* Stop the channel */
			dmae_halt(sh_chan);
			/* Complete all */
			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
				struct dma_async_tx_descriptor *tx = &desc->async_tx;
				desc->mark = DESC_IDLE;
				if (tx->callback)
					tx->callback(tx->callback_param);
			}
			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
		}
	}
	sh_dmae_rst(shdev);

	return IRQ_HANDLED;
}
#endif

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* reference struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional, if absent, this controller cannot do slave DMA */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
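	/*
	 * Two hypothetical layouts matching the rules above (the IRQ numbers
	 * are illustrative only): a fully multiplexed controller passes one
	 * IRQ resource, e.g. { .start = 34, .end = 34 }, used for the error
	 * and all channel interrupts; a controller with per-channel IRQs
	 * passes the error IRQ first, then e.g. a range { .start = 46,
	 * .end = 51 } covering six channels.
	 */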
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* reset dma controller */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (dmars)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 2^2 = 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			chan_irq[irq_cnt] = chanirq_res->start;
			chan_flag[irq_cnt] = IRQF_SHARED;
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	if (irq_cnt < pdata->channel_num)
		goto eirqres;

	/* Create DMA Channel */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	pm_runtime_put(&pdev->dev);

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	pm_runtime_put(&pdev->dev);
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");