/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

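/*
 * Channel register helpers: 'reg' is a byte offset into the channel's
 * register window; sh_dc->base is a u32 pointer, hence the division.
 */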
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

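/* DMAOR is the 16-bit, controller-global DMA operation register */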
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}

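/* CHCR sits at a per-SoC offset (shdev->chcr_offset) within the channel registers */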
static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	return 0;
}

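/* A channel counts as busy while DE is set and TE (transfer end) is not */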
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

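/* Decode the TS (transfer size) bits of CHCR into a log2 transfer-size shift */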
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

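/* Inverse of calc_xmit_shift(): encode a log2 transfer size into the CHCR TS bits */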
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

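/* Program source, destination and count; TCR counts in transfer units, hence the shift */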
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

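/* Enable the channel and its interrupt, clearing any stale transfer-end flag */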
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	chcr |= CHCR_DE | CHCR_IE;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

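/* Disable the channel and its interrupt */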
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	chcr_write(sh_chan, chcr);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

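/* Assign a cookie and move all chunks of this transfer from ld_free to ld_queue */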
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

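/* Look up the slave configuration matching param->slave_id in the platform data */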
static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	pm_runtime_put(sh_chan->dev);
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	/* Protect against ISR */
	spin_lock_irq(&sh_chan->desc_lock);
	dmae_halt(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the new descriptor or NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 * cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	spin_lock_bh(&sh_chan->desc_lock);
	dmae_halt(sh_chan);

	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;

	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		sh_chan->completed_cookie = sh_chan->common.cookie;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan))
		goto sh_chan_xfer_ld_queue_end;

	/* Find the first not transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

sh_chan_xfer_ld_queue_end:
	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

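/* Report transfer status; a cookie that is neither completed nor still queued was aborted */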
static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	/* First read completed cookie to avoid a skew */
	last_complete = sh_chan->completed_cookie;
	rmb();
	last_used = chan->cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we have
	 * to report error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

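/* Per-channel transfer-end interrupt: halt the channel and defer completion to the tasklet */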
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = data;
	u32 chcr;

	spin_lock(&sh_chan->desc_lock);

	chcr = chcr_read(sh_chan);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	spin_unlock(&sh_chan->desc_lock);

	return ret;
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so we have to reset them all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;
		LIST_HEAD(dl);

		if (!sh_chan)
			continue;

		spin_lock(&sh_chan->desc_lock);

		/* Stop the channel */
		dmae_halt(sh_chan);

		list_splice_init(&sh_chan->ld_queue, &dl);

		spin_unlock(&sh_chan->desc_lock);

		/* Complete all */
		list_for_each_entry(desc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&sh_chan->desc_lock);
		list_splice(&dl, &sh_chan->ld_free);
		spin_unlock(&sh_chan->desc_lock);

		handled++;
	}

	sh_dmae_rst(shdev);

	return !!handled;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(data);
	return IRQ_HANDLED;
}

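/* Completion tasklet: mark the finished descriptor, start the next queued one, run cleanup */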
static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

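/* Set up one channel: allocate it, add it to the dmaengine device and request its IRQ */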
static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
			(unsigned long)new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor manage list */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
			&shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

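/* Platform probe: map register windows, request IRQs, create channels and register the dmaengine device */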
static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

Magnus Damm927a7c92010-03-19 04:47:19 +00001195#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001196 chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1197
1198 if (!chanirq_res)
1199 chanirq_res = errirq_res;
1200 else
1201 irqres++;
1202
1203 if (chanirq_res == errirq_res ||
1204 (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001205 irqflags = IRQF_SHARED;
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001206
1207 errirq = errirq_res->start;
1208
1209 err = request_irq(errirq, sh_dmae_err, irqflags,
1210 "DMAC Address Error", shdev);
1211 if (err) {
1212 dev_err(&pdev->dev,
1213 "DMA failed requesting irq #%d, error %d\n",
1214 errirq, err);
1215 goto eirq_err;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001216 }
1217
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001218#else
1219 chanirq_res = errirq_res;
Magnus Damm927a7c92010-03-19 04:47:19 +00001220#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001221
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001222 if (chanirq_res->start == chanirq_res->end &&
1223 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
1224 /* Special case - all multiplexed */
1225 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
Magnus Damm300e5f92011-05-24 10:31:20 +00001226 if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
1227 chan_irq[irq_cnt] = chanirq_res->start;
1228 chan_flag[irq_cnt] = IRQF_SHARED;
1229 } else {
1230 irq_cap = 1;
1231 break;
1232 }
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001233 }
1234 } else {
1235 do {
1236 for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
1237 if ((errirq_res->flags & IORESOURCE_BITS) ==
1238 IORESOURCE_IRQ_SHAREABLE)
1239 chan_flag[irq_cnt] = IRQF_SHARED;
1240 else
1241 chan_flag[irq_cnt] = IRQF_DISABLED;
1242 dev_dbg(&pdev->dev,
1243 "Found IRQ %d for channel %d\n",
1244 i, irq_cnt);
1245 chan_irq[irq_cnt++] = i;
Magnus Damm300e5f92011-05-24 10:31:20 +00001246
1247 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1248 break;
1249 }
1250
1251 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
1252 irq_cap = 1;
1253 break;
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001254 }
1255 chanirq_res = platform_get_resource(pdev,
1256 IORESOURCE_IRQ, ++irqres);
1257 } while (irq_cnt < pdata->channel_num && chanirq_res);
1258 }
1259
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001260 /* Create DMA Channel */
Magnus Damm300e5f92011-05-24 10:31:20 +00001261 for (i = 0; i < irq_cnt; i++) {
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001262 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001263 if (err)
1264 goto chan_probe_err;
1265 }
1266
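	/*
	 * irq_cap was set above if the platform asked for more channels than
	 * SH_DMAC_MAX_CHANNELS; those extra channels were never given an IRQ
	 * and are simply not registered.
	 */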
Magnus Damm300e5f92011-05-24 10:31:20 +00001267 if (irq_cap)
1268 dev_notice(&pdev->dev, "Attempting to register %d DMA "
1269 "channels when a maximum of %d are supported.\n",
1270 pdata->channel_num, SH_DMAC_MAX_CHANNELS);
1271
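	/*
	 * Setup complete: drop the runtime PM reference held while probing
	 * and expose the channels to dmaengine clients.
	 */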
Guennadi Liakhovetski20f2a3b2010-02-11 16:50:18 +00001272 pm_runtime_put(&pdev->dev);
1273
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001274 dma_async_device_register(&shdev->common);
1275
1276 return err;
1277
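	/*
	 * The error path below unwinds the setup in reverse: remove any
	 * channels that were created, free the error IRQ (SH4 / SH-Mobile
	 * only), take the controller off the RCU-protected device list,
	 * release runtime PM, then undo whatever ioremaps, allocations and
	 * memory regions were already made.
	 */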
1278chan_probe_err:
1279 sh_dmae_chan_remove(shdev);
Magnus Damm300e5f92011-05-24 10:31:20 +00001280
Magnus Damm927a7c92010-03-19 04:47:19 +00001281#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001282 free_irq(errirq, shdev);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001283eirq_err:
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001284#endif
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001285rst_err:
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001286 spin_lock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001287 list_del_rcu(&shdev->node);
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001288 spin_unlock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001289
Guennadi Liakhovetski20f2a3b2010-02-11 16:50:18 +00001290 pm_runtime_put(&pdev->dev);
Guennadi Liakhovetski467017b2011-04-29 17:09:25 +00001291 pm_runtime_disable(&pdev->dev);
1292
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001293 if (dmars)
1294 iounmap(shdev->dmars);
Paul Mundt5c2de442011-05-31 15:53:03 +09001295
1296 platform_set_drvdata(pdev, NULL);
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001297emapdmars:
1298 iounmap(shdev->chan_reg);
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001299 synchronize_rcu();
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001300emapchan:
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001301 kfree(shdev);
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001302ealloc:
1303 if (dmars)
1304 release_mem_region(dmars->start, resource_size(dmars));
1305ermrdmars:
1306 release_mem_region(chan->start, resource_size(chan));
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001307
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001308 return err;
1309}
1310
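/*
 * Module removal: unregister from the dmaengine core first so no new
 * requests can come in, then free the error IRQ, drop the controller from
 * the RCU-protected global list, remove the channels and undo the mappings
 * and memory regions claimed in probe().
 */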
1311static int __exit sh_dmae_remove(struct platform_device *pdev)
1312{
1313 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001314 struct resource *res;
1315 int errirq = platform_get_irq(pdev, 0);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001316
1317 dma_async_device_unregister(&shdev->common);
1318
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001319 if (errirq > 0)
1320 free_irq(errirq, shdev);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001321
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001322 spin_lock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001323 list_del_rcu(&shdev->node);
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001324 spin_unlock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001325
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001326	/* Remove the per-channel data */
1327 sh_dmae_chan_remove(shdev);
1328
Guennadi Liakhovetski20f2a3b2010-02-11 16:50:18 +00001329 pm_runtime_disable(&pdev->dev);
1330
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001331 if (shdev->dmars)
1332 iounmap(shdev->dmars);
1333 iounmap(shdev->chan_reg);
1334
Paul Mundt5c2de442011-05-31 15:53:03 +09001335 platform_set_drvdata(pdev, NULL);
1336
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001337 synchronize_rcu();
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001338 kfree(shdev);
1339
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001340 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1341 if (res)
1342 release_mem_region(res->start, resource_size(res));
1343 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1344 if (res)
1345 release_mem_region(res->start, resource_size(res));
1346
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001347 return 0;
1348}
1349
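/* Quiesce the controller on shutdown so no DMA keeps running into reboot. */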
1350static void sh_dmae_shutdown(struct platform_device *pdev)
1351{
1352 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001353 sh_dmae_ctl_stop(shdev);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001354}
1355
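/*
 * Runtime PM: there is nothing to save on suspend, but the controller may
 * lose its register state while powered down, so resume re-initialises it
 * via sh_dmae_rst() before the channels are touched again.
 */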
Guennadi Liakhovetski467017b2011-04-29 17:09:25 +00001356static int sh_dmae_runtime_suspend(struct device *dev)
1357{
1358 return 0;
1359}
1360
1361static int sh_dmae_runtime_resume(struct device *dev)
1362{
1363 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1364
1365 return sh_dmae_rst(shdev);
1366}
1367
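/*
 * System sleep: only channels that still hold descriptors matter.  Suspend
 * drops their runtime PM reference (recording any error so that resume only
 * takes the reference back when the put succeeded), and resume then
 * reprograms each channel: DMARS and CHCR from the attached slave
 * configuration, or the defaults via dmae_init() for plain memcpy use.
 */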
1368#ifdef CONFIG_PM
1369static int sh_dmae_suspend(struct device *dev)
1370{
1371 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1372 int i;
1373
1374 for (i = 0; i < shdev->pdata->channel_num; i++) {
1375 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1376 if (sh_chan->descs_allocated)
1377 sh_chan->pm_error = pm_runtime_put_sync(dev);
1378 }
1379
1380 return 0;
1381}
1382
1383static int sh_dmae_resume(struct device *dev)
1384{
1385 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1386 int i;
1387
1388 for (i = 0; i < shdev->pdata->channel_num; i++) {
1389 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1390 struct sh_dmae_slave *param = sh_chan->common.private;
1391
1392 if (!sh_chan->descs_allocated)
1393 continue;
1394
1395 if (!sh_chan->pm_error)
1396 pm_runtime_get_sync(dev);
1397
1398 if (param) {
1399 const struct sh_dmae_slave_config *cfg = param->config;
1400 dmae_set_dmars(sh_chan, cfg->mid_rid);
1401 dmae_set_chcr(sh_chan, cfg->chcr);
1402 } else {
1403 dmae_init(sh_chan);
1404 }
1405 }
1406
1407 return 0;
1408}
1409#else
1410#define sh_dmae_suspend NULL
1411#define sh_dmae_resume NULL
1412#endif
1413
1414const struct dev_pm_ops sh_dmae_pm = {
1415 .suspend = sh_dmae_suspend,
1416 .resume = sh_dmae_resume,
1417 .runtime_suspend = sh_dmae_runtime_suspend,
1418 .runtime_resume = sh_dmae_runtime_resume,
1419};
1420
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001421static struct platform_driver sh_dmae_driver = {
1422 .remove = __exit_p(sh_dmae_remove),
1423 .shutdown = sh_dmae_shutdown,
1424 .driver = {
Guennadi Liakhovetski7a5c1062010-05-21 15:28:51 +00001425 .owner = THIS_MODULE,
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001426 .name = "sh-dma-engine",
Guennadi Liakhovetski467017b2011-04-29 17:09:25 +00001427 .pm = &sh_dmae_pm,
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001428 },
1429};
1430
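/*
 * The NMI die notifier is registered before the platform driver so it is in
 * place by the time the first controller probes, and it is only unregistered
 * after the driver is gone again on module exit.
 */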
1431static int __init sh_dmae_init(void)
1432{
Guennadi Liakhovetski661382f2011-01-06 17:04:50 +00001433 /* Wire up NMI handling */
1434 int err = register_die_notifier(&sh_dmae_nmi_notifier);
1435 if (err)
1436 return err;
1437
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001438 return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
1439}
1440module_init(sh_dmae_init);
1441
1442static void __exit sh_dmae_exit(void)
1443{
1444 platform_driver_unregister(&sh_dmae_driver);
Guennadi Liakhovetski661382f2011-01-06 17:04:50 +00001445
1446 unregister_die_notifier(&sh_dmae_nmi_notifier);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001447}
1448module_exit(sh_dmae_exit);
1449
1450MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1451MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1452MODULE_LICENSE("GPL");
Guennadi Liakhovetskie5843342010-11-24 09:48:10 +00001453MODULE_ALIAS("platform:sh-dma-engine");