/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum size of a single DMA transfer is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
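
/*
 * A descriptor normally moves through these states in order: IDLE on
 * ld_free -> PREPARED (still on ld_free) -> SUBMITTED on ld_queue ->
 * COMPLETED (transfer finished, callback pending) -> WAITING (callback
 * run, waiting for the client's ack) -> back to IDLE on ld_free.  See
 * __ld_cleanup() below for the COMPLETED / WAITING handling.
 */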

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with one bit per possible slave ID (SH_DMA_SLAVE_NUMBER) */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}
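
/*
 * Note on register access: sh_dc->base and shdev->chan_reg are word
 * (u32 __iomem *) pointers, so the byte offsets of SAR/DAR/TCR/CHCR and
 * DMAOR coming from the platform data are converted to word indices by
 * the "reg / sizeof(u32)" arithmetic above.
 */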

/*
 * Reset the DMA controller.
 *
 * SH7780 has two DMAOR registers.
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor = dmaor_read(shdev);

	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(shdev);
	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;

	dmaor_write(shdev, dmaor);
	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
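
/*
 * The two helpers above are inverses over the sizes listed in the
 * platform data: for any l2size present in pdata->ts_shift[],
 * calc_xmit_shift(sh_chan, log2size_to_chcr(sh_chan, l2size)) == l2size,
 * while unknown encodings fall back to entry 0 in both directions.  For
 * example, dmae_init() below uses LOG2_DEFAULT_XFER_SIZE == 2 to request
 * 4-byte transfer units.
 */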

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* CHCR must not be written while the channel is busy */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
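
/*
 * Each 16-bit DMARS word packs the MID/RID values of two channels, one
 * per byte; chan_pdata->dmars_bit selects the byte (typically 0 or 8).
 * The read-modify-write above masks with (0xff00 >> shift) so that the
 * neighbouring channel's field is preserved while this channel's MID/RID
 * is updated.
 */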

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct dma_device *dma_dev = sh_chan->common.device;
	struct sh_dmae_device *shdev = container_of(dma_dev,
					struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	pm_runtime_put(sh_chan->dev);
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	dmae_halt(sh_chan);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the new descriptor or %NULL on failure
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element pointing at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * - the first descriptor is what the user is dealing with in all API
	 *   calls; its cookie is at first set to -EBUSY, and at tx-submit to
	 *   a positive number
	 * - if more than one chunk is needed, further chunks have
	 *   cookie = -EINVAL
	 * - the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * - all chunks are linked onto the tx_list head with their .node
	 *   heads only during this function, then they are immediately
	 *   spliced back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}
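
/*
 * Chunk-count example for the loop above: assuming SH_DMA_TCR_MAX is
 * 16M - 1 (matching the 16MB limit noted at the top of this file), a
 * single 40MB SG entry yields
 *
 *	chunks = (40M + 16M - 1) / 16M = 3
 *
 * descriptors of 16MB, 16MB and 8MB, all submitted under one cookie by
 * sh_dmae_tx_submit().
 */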

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	chan->private = NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}
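
/*
 * Minimal client-side usage sketch for the MEMCPY path (illustrative
 * only, not part of this driver; error handling omitted; dma_dest,
 * dma_src and len stand for caller-provided DMA addresses and length):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
 *						  len, DMA_PREP_INTERRUPT);
 *	tx->tx_submit(tx);		   (ends up in sh_dmae_tx_submit())
 *	dma_async_issue_pending(chan);	   (kicks sh_chan_xfer_ld_queue())
 */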

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	dmae_halt(sh_chan);

	spin_lock_bh(&sh_chan->desc_lock);
	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}
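
/*
 * Note: DMA_TERMINATE_ALL halts the channel, records the progress of the
 * interrupted head descriptor in desc->partial (see the "Record partial
 * transfer" block above), and then returns every queued descriptor to
 * ld_free via sh_dmae_chan_ld_cleanup(sh_chan, true); the forced cleanup
 * also forgives uncompleted cookies, see sh_dmae_chan_ld_cleanup().
 */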

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;

	if (all)
		/* Terminating - forgive uncompleted cookies */
		sh_chan->completed_cookie = sh_chan->common.cookie;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first descriptor that has not been transferred yet */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset them all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;

		if (!sh_chan)
			continue;

		/* Stop the channel */
		dmae_halt(sh_chan);

		/* Complete all */
		list_for_each_entry(desc, &sh_chan->ld_queue, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
		handled++;
	}

	sh_dmae_rst(shdev);

	return !!handled;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	return IRQ_RETVAL(sh_dmae_reset(data));
}

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	unsigned int handled;

	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	handled = sh_dmae_reset(shdev);
	if (handled)
		return true;

	return false;
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call = sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority = 1,
};

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* reference struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	unsigned long flags;
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional: if absent, this controller cannot do slave DMA */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
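	/*
	 * Hypothetical resource layout for cases 1 + 2 above (numbers and
	 * names are purely illustrative): resource 0 is the error IRQ with
	 * start == end, and resource 1 is a channel IRQ range with
	 * start != end covering one IRQ per channel.
	 */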
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&sh_dmae_lock, flags);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	/* reset dma controller */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (dmars)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			chan_irq[irq_cnt] = chanirq_res->start;
			chan_flag[irq_cnt] = IRQF_SHARED;
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	if (irq_cnt < pdata->channel_num)
		goto eirqres;

	/* Create DMA Channel */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	pm_runtime_put(&pdev->dev);

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	spin_lock_irqsave(&sh_dmae_lock, flags);
	list_del_rcu(&shdev->node);
	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	pm_runtime_put(&pdev->dev);
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	unsigned long flags;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irqsave(&sh_dmae_lock, flags);
	list_del_rcu(&shdev->node);
	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-dma-engine");