/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC does not have a hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

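/*
 * The transfer size (TS) field is split between a low and a high part of
 * CHCR; the masks and shifts come from the platform data. calc_xmit_shift()
 * maps the current CHCR value to log2(transfer size), later used to convert
 * byte counts into TCR units, and log2size_to_chcr() is the reverse mapping.
 */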
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

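/*
 * Program the MID/RID value of this channel into its DMARS slot. As the
 * masking below implies, each 16-bit DMARS word appears to carry the 8-bit
 * settings of two channels, so only the byte selected by dmars_bit (0 or 8)
 * is updated and the neighbouring channel's byte is preserved.
 */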
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

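/*
 * Channel allocation, as seen from a client driver - a rough, illustrative
 * sketch only (the slave ID below is made up, real IDs come from the
 * platform's sh_dmae_pdata, and exact client conventions vary by kernel
 * version):
 *
 *	static bool filter(struct dma_chan *chan, void *arg)
 *	{
 *		chan->private = arg;	// struct sh_dmae_slave *
 *		return true;
 *	}
 *
 *	struct sh_dmae_slave param = { .slave_id = SHDMA_SLAVE_SCIF0_TX };
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, filter, &param);
 *
 * With chan->private pointing at a struct sh_dmae_slave,
 * sh_dmae_alloc_chan_resources() looks the slave_id up in the platform data
 * and programs DMARS and CHCR; without it the channel is set up for plain
 * memcpy.
 */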
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	pm_runtime_put(sh_chan->dev);
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	/* Protect against ISR */
	spin_lock_irq(&sh_chan->desc_lock);
	dmae_halt(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns a pointer to the prepared descriptor or NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries its usual meaning and,
 * logically, the SG list is RAM while the addr variable contains the slave
 * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element pointing at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * - the first descriptor is what the user is dealing with in all API
	 *   calls; its cookie is at first set to -EBUSY and at tx-submit to a
	 *   positive number
	 * - if more than one chunk is needed, further chunks have cookie = -EINVAL
	 * - the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * - all chunks are linked onto the tx_list head with their .node heads
	 *   only during this function, then they are immediately spliced back
	 *   onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

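/*
 * dmaengine device_control hook. Only DMA_TERMINATE_ALL is implemented: the
 * channel is halted, the number of bytes left in the aborted descriptor is
 * recorded in desc->partial (in bytes, hence the xmit_shift correction), and
 * all queued descriptors are cleaned up.
 */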
static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan;

	if (!chan)
		return -EINVAL;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	sh_chan = to_sh_chan(chan);

	spin_lock_bh(&sh_chan->desc_lock);
	dmae_halt(sh_chan);

	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}

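/*
 * Walk ld_queue and retire descriptors. The life cycle of a descriptor is
 * DESC_IDLE -> DESC_PREPARED (prep_sg) -> DESC_SUBMITTED (tx_submit) ->
 * DESC_COMPLETED (tasklet) -> DESC_WAITING (callback issued, waiting for the
 * client's ack) -> back to DESC_IDLE on the free list. Returns the callback
 * that was invoked, so the caller can loop until nothing is left to clean.
 */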
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		sh_chan->completed_cookie = sh_chan->common.cookie;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan))
		goto sh_chan_xfer_ld_queue_end;

	/* Find the first not transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

sh_chan_xfer_ld_queue_end:
	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	/* First read completed cookie to avoid a skew */
	last_complete = sh_chan->completed_cookie;
	rmb();
	last_used = chan->cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and we
	 * have to report an error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = data;
	u32 chcr;

	spin_lock(&sh_chan->desc_lock);

	chcr = chcr_read(sh_chan);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	spin_unlock(&sh_chan->desc_lock);

	return ret;
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset them all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;
		LIST_HEAD(dl);

		if (!sh_chan)
			continue;

		spin_lock(&sh_chan->desc_lock);

		/* Stop the channel */
		dmae_halt(sh_chan);

		list_splice_init(&sh_chan->ld_queue, &dl);

		spin_unlock(&sh_chan->desc_lock);

		/* Complete all */
		list_for_each_entry(desc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&sh_chan->desc_lock);
		list_splice(&dl, &sh_chan->ld_free);
		spin_unlock(&sh_chan->desc_lock);

		handled++;
	}

	sh_dmae_rst(shdev);

	return !!handled;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(data);
	return IRQ_HANDLED;
}

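/*
 * Per-channel tasklet, scheduled from the transfer-end interrupt. It
 * identifies the descriptor that just finished by comparing the current SAR
 * (or DAR for DMA_FROM_DEVICE) with the expected end address of each
 * submitted chunk, marks it DESC_COMPLETED, kicks off the next queued
 * descriptor and runs the cleanup pass.
 */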
static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call = sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority = 1,
};

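/* Set up one channel: register window, tasklet, IRQ and dmaengine node */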
static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

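/*
 * Probe: map the register windows described by the platform device, reset
 * the controller, fill in the struct dma_device callbacks, sort out the
 * (heavily platform-dependent) IRQ layout and create one sh_dmae_chan per
 * channel IRQ before registering with the dmaengine core.
 */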
1090static int __init sh_dmae_probe(struct platform_device *pdev)
1091{
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001092 struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
1093 unsigned long irqflags = IRQF_DISABLED,
Guennadi Liakhovetski8b1935e2010-02-11 16:50:14 +00001094 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
1095 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
Magnus Damm300e5f92011-05-24 10:31:20 +00001096 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001097 struct sh_dmae_device *shdev;
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001098 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001099
Dan Williams56adf7e2009-11-22 12:10:10 -07001100 /* get platform data */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001101 if (!pdata || !pdata->channel_num)
Dan Williams56adf7e2009-11-22 12:10:10 -07001102 return -ENODEV;
1103
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001104 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Magnus Damm26fc02a2011-05-24 10:31:12 +00001105 /* DMARS area is optional */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001106 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1107 /*
1108 * IRQ resources:
1109 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
1110 * the error IRQ, in which case it is the only IRQ in this resource:
1111 * start == end. If it is the only IRQ resource, all channels also
1112 * use the same IRQ.
1113 * 2. DMA channel IRQ resources can be specified one per resource or in
1114 * ranges (start != end)
1115 * 3. iff all events (channels and, optionally, error) on this
1116 * controller use the same IRQ, only one IRQ resource can be
1117 * specified, otherwise there must be one IRQ per channel, even if
1118 * some of them are equal
1119 * 4. if all IRQs on this controller are equal or if some specific IRQs
1120 * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
1121 * requested with the IRQF_SHARED flag
1122 */
1123 errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1124 if (!chan || !errirq_res)
1125 return -ENODEV;
1126
1127 if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
1128 dev_err(&pdev->dev, "DMAC register region already claimed\n");
1129 return -EBUSY;
1130 }
1131
1132 if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
1133 dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
1134 err = -EBUSY;
1135 goto ermrdmars;
1136 }
1137
1138 err = -ENOMEM;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001139 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
1140 if (!shdev) {
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001141 dev_err(&pdev->dev, "Not enough memory\n");
1142 goto ealloc;
1143 }
1144
1145 shdev->chan_reg = ioremap(chan->start, resource_size(chan));
1146 if (!shdev->chan_reg)
1147 goto emapchan;
1148 if (dmars) {
1149 shdev->dmars = ioremap(dmars->start, resource_size(dmars));
1150 if (!shdev->dmars)
1151 goto emapdmars;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001152 }
1153
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001154 /* platform data */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001155 shdev->pdata = pdata;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001156
Kuninori Morimoto5899a722011-06-17 08:20:40 +00001157 if (pdata->chcr_offset)
1158 shdev->chcr_offset = pdata->chcr_offset;
1159 else
1160 shdev->chcr_offset = CHCR;
1161
Kuninori Morimoto67c62692011-06-17 08:20:51 +00001162 if (pdata->chcr_ie_bit)
1163 shdev->chcr_ie_bit = pdata->chcr_ie_bit;
1164 else
1165 shdev->chcr_ie_bit = CHCR_IE;
1166
Paul Mundt5c2de442011-05-31 15:53:03 +09001167 platform_set_drvdata(pdev, shdev);
1168
Guennadi Liakhovetski20f2a3b2010-02-11 16:50:18 +00001169 pm_runtime_enable(&pdev->dev);
1170 pm_runtime_get_sync(&pdev->dev);
1171
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001172 spin_lock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001173 list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001174 spin_unlock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001175
Guennadi Liakhovetski2dc66662011-04-29 17:09:21 +00001176 /* reset dma controller - only needed as a test */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001177 err = sh_dmae_rst(shdev);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001178 if (err)
1179 goto rst_err;
1180
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001181 INIT_LIST_HEAD(&shdev->common.channels);
1182
1183 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
Magnus Damm26fc02a2011-05-24 10:31:12 +00001184 if (pdata->slave && pdata->slave_num)
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001185 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
Guennadi Liakhovetskicfefe992010-02-03 14:46:41 +00001186
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001187 shdev->common.device_alloc_chan_resources
1188 = sh_dmae_alloc_chan_resources;
1189 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
1190 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
Linus Walleij07934482010-03-26 16:50:49 -07001191 shdev->common.device_tx_status = sh_dmae_tx_status;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001192 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
Guennadi Liakhovetskicfefe992010-02-03 14:46:41 +00001193
1194 /* Compulsory for DMA_SLAVE fields */
1195 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
Linus Walleijc3635c72010-03-26 16:44:01 -07001196 shdev->common.device_control = sh_dmae_control;
Guennadi Liakhovetskicfefe992010-02-03 14:46:41 +00001197
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001198 shdev->common.dev = &pdev->dev;
Guennadi Liakhovetskiddb4f0f2009-12-04 19:44:41 +01001199 /* Default transfer size of 32 bytes requires 32-byte alignment */
Guennadi Liakhovetski8b1935e2010-02-11 16:50:14 +00001200 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001201
Magnus Damm927a7c92010-03-19 04:47:19 +00001202#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001203 chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1204
1205 if (!chanirq_res)
1206 chanirq_res = errirq_res;
1207 else
1208 irqres++;
1209
1210 if (chanirq_res == errirq_res ||
1211 (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001212 irqflags = IRQF_SHARED;
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001213
1214 errirq = errirq_res->start;
1215
1216 err = request_irq(errirq, sh_dmae_err, irqflags,
1217 "DMAC Address Error", shdev);
1218 if (err) {
1219 dev_err(&pdev->dev,
1220 "DMA failed requesting irq #%d, error %d\n",
1221 errirq, err);
1222 goto eirq_err;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001223 }
1224
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001225#else
1226 chanirq_res = errirq_res;
Magnus Damm927a7c92010-03-19 04:47:19 +00001227#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001228
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001229 if (chanirq_res->start == chanirq_res->end &&
1230 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
1231 /* Special case - all multiplexed */
1232 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
Magnus Damm300e5f92011-05-24 10:31:20 +00001233 if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
1234 chan_irq[irq_cnt] = chanirq_res->start;
1235 chan_flag[irq_cnt] = IRQF_SHARED;
1236 } else {
1237 irq_cap = 1;
1238 break;
1239 }
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001240 }
1241 } else {
1242 do {
1243 for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
1244 if ((errirq_res->flags & IORESOURCE_BITS) ==
1245 IORESOURCE_IRQ_SHAREABLE)
1246 chan_flag[irq_cnt] = IRQF_SHARED;
1247 else
1248 chan_flag[irq_cnt] = IRQF_DISABLED;
1249 dev_dbg(&pdev->dev,
1250 "Found IRQ %d for channel %d\n",
1251 i, irq_cnt);
1252 chan_irq[irq_cnt++] = i;
Magnus Damm300e5f92011-05-24 10:31:20 +00001253
1254 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1255 break;
1256 }
1257
1258 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
1259 irq_cap = 1;
1260 break;
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001261 }
1262 chanirq_res = platform_get_resource(pdev,
1263 IORESOURCE_IRQ, ++irqres);
1264 } while (irq_cnt < pdata->channel_num && chanirq_res);
1265 }
1266
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001267	/* Create the DMA channels */
Magnus Damm300e5f92011-05-24 10:31:20 +00001268 for (i = 0; i < irq_cnt; i++) {
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001269 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001270 if (err)
1271 goto chan_probe_err;
1272 }
1273
Magnus Damm300e5f92011-05-24 10:31:20 +00001274 if (irq_cap)
1275 dev_notice(&pdev->dev, "Attempting to register %d DMA "
1276 "channels when a maximum of %d are supported.\n",
1277 pdata->channel_num, SH_DMAC_MAX_CHANNELS);
1278
Guennadi Liakhovetski20f2a3b2010-02-11 16:50:18 +00001279 pm_runtime_put(&pdev->dev);
1280
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001281 dma_async_device_register(&shdev->common);
1282
1283 return err;
1284
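	/* Error unwinding: release resources in the reverse order of setup */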
1285chan_probe_err:
1286 sh_dmae_chan_remove(shdev);
Magnus Damm300e5f92011-05-24 10:31:20 +00001287
Magnus Damm927a7c92010-03-19 04:47:19 +00001288#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001289 free_irq(errirq, shdev);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001290eirq_err:
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001291#endif
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001292rst_err:
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001293 spin_lock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001294 list_del_rcu(&shdev->node);
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001295 spin_unlock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001296
Guennadi Liakhovetski20f2a3b2010-02-11 16:50:18 +00001297 pm_runtime_put(&pdev->dev);
Guennadi Liakhovetski467017b2011-04-29 17:09:25 +00001298 pm_runtime_disable(&pdev->dev);
1299
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001300 if (dmars)
1301 iounmap(shdev->dmars);
Paul Mundt5c2de442011-05-31 15:53:03 +09001302
1303 platform_set_drvdata(pdev, NULL);
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001304emapdmars:
1305 iounmap(shdev->chan_reg);
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001306 synchronize_rcu();
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001307emapchan:
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001308 kfree(shdev);
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001309ealloc:
1310 if (dmars)
1311 release_mem_region(dmars->start, resource_size(dmars));
1312ermrdmars:
1313 release_mem_region(chan->start, resource_size(chan));
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001314
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001315 return err;
1316}
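/*
 * Illustrative sketch only, not part of this driver: a minimal example of how
 * a generic dmaengine client could use the DMA_MEMCPY capability registered
 * in sh_dmae_probe() above.  The function and its name are hypothetical; the
 * dmaengine calls end up in sh_dmae_prep_memcpy(),
 * sh_dmae_memcpy_issue_pending() and sh_dmae_tx_status().
 */
static int example_memcpy_xfer(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Ask the dmaengine core for any channel capable of memcpy */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* Prepare a descriptor, submit it and kick the queue */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	cookie = tx->tx_submit(tx);
	chan->device->device_issue_pending(chan);

	/* Poll for completion; a real client would use a completion callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}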
1317
1318static int __exit sh_dmae_remove(struct platform_device *pdev)
1319{
1320 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001321 struct resource *res;
1322 int errirq = platform_get_irq(pdev, 0);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001323
1324 dma_async_device_unregister(&shdev->common);
1325
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001326 if (errirq > 0)
1327 free_irq(errirq, shdev);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001328
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001329 spin_lock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001330 list_del_rcu(&shdev->node);
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001331 spin_unlock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001332
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001333	/* Remove the per-channel data */
1334 sh_dmae_chan_remove(shdev);
1335
Guennadi Liakhovetski20f2a3b2010-02-11 16:50:18 +00001336 pm_runtime_disable(&pdev->dev);
1337
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001338 if (shdev->dmars)
1339 iounmap(shdev->dmars);
1340 iounmap(shdev->chan_reg);
1341
Paul Mundt5c2de442011-05-31 15:53:03 +09001342 platform_set_drvdata(pdev, NULL);
1343
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001344 synchronize_rcu();
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001345 kfree(shdev);
1346
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001347 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1348 if (res)
1349 release_mem_region(res->start, resource_size(res));
1350 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1351 if (res)
1352 release_mem_region(res->start, resource_size(res));
1353
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001354 return 0;
1355}
1356
1357static void sh_dmae_shutdown(struct platform_device *pdev)
1358{
1359 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001360 sh_dmae_ctl_stop(shdev);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001361}
1362
Guennadi Liakhovetski467017b2011-04-29 17:09:25 +00001363static int sh_dmae_runtime_suspend(struct device *dev)
1364{
1365 return 0;
1366}
1367
1368static int sh_dmae_runtime_resume(struct device *dev)
1369{
1370 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1371
1372 return sh_dmae_rst(shdev);
1373}
1374
1375#ifdef CONFIG_PM
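/*
 * System suspend drops the runtime PM reference of every channel that still
 * has descriptors allocated (recording any failure in pm_error); resume takes
 * the reference back and reprograms slave channels from their
 * sh_dmae_slave_config, or re-initialises plain memcpy channels.
 */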
1376static int sh_dmae_suspend(struct device *dev)
1377{
1378 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1379 int i;
1380
1381 for (i = 0; i < shdev->pdata->channel_num; i++) {
1382 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1383 if (sh_chan->descs_allocated)
1384 sh_chan->pm_error = pm_runtime_put_sync(dev);
1385 }
1386
1387 return 0;
1388}
1389
1390static int sh_dmae_resume(struct device *dev)
1391{
1392 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1393 int i;
1394
1395 for (i = 0; i < shdev->pdata->channel_num; i++) {
1396 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1397 struct sh_dmae_slave *param = sh_chan->common.private;
1398
1399 if (!sh_chan->descs_allocated)
1400 continue;
1401
1402 if (!sh_chan->pm_error)
1403 pm_runtime_get_sync(dev);
1404
1405 if (param) {
1406 const struct sh_dmae_slave_config *cfg = param->config;
1407 dmae_set_dmars(sh_chan, cfg->mid_rid);
1408 dmae_set_chcr(sh_chan, cfg->chcr);
1409 } else {
1410 dmae_init(sh_chan);
1411 }
1412 }
1413
1414 return 0;
1415}
1416#else
1417#define sh_dmae_suspend NULL
1418#define sh_dmae_resume NULL
1419#endif
1420
1421const struct dev_pm_ops sh_dmae_pm = {
1422 .suspend = sh_dmae_suspend,
1423 .resume = sh_dmae_resume,
1424 .runtime_suspend = sh_dmae_runtime_suspend,
1425 .runtime_resume = sh_dmae_runtime_resume,
1426};
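/*
 * Illustrative sketch only, not part of this driver: roughly how a slave
 * client could bind to one of these channels.  The filter stores the
 * sh_dmae_slave descriptor in chan->private, which is where channel
 * allocation and sh_dmae_resume() above look for the slave configuration.
 * Helper names are hypothetical, and a slave_id field in struct
 * sh_dmae_slave (as in <linux/sh_dma.h> of this era) is assumed.
 */
static bool example_shdma_filter(struct dma_chan *chan, void *arg)
{
	struct sh_dmae_slave *param = arg;

	chan->private = param;	/* consumed by the shdma driver */
	return true;
}

static struct dma_chan *example_request_slave_chan(unsigned int slave_id)
{
	static struct sh_dmae_slave param;	/* must outlive the channel */
	dma_cap_mask_t mask;

	param.slave_id = slave_id;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_shdma_filter, &param);
}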
1427
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001428static struct platform_driver sh_dmae_driver = {
1429 .remove = __exit_p(sh_dmae_remove),
1430 .shutdown = sh_dmae_shutdown,
1431 .driver = {
Guennadi Liakhovetski7a5c1062010-05-21 15:28:51 +00001432 .owner = THIS_MODULE,
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001433 .name = "sh-dma-engine",
Guennadi Liakhovetski467017b2011-04-29 17:09:25 +00001434 .pm = &sh_dmae_pm,
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001435 },
1436};
1437
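/*
 * platform_driver_probe() is used because the DMAC platform devices are
 * registered before this driver loads and are never hot-plugged, so a single
 * probe pass at init time is sufficient.
 */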
1438static int __init sh_dmae_init(void)
1439{
Guennadi Liakhovetski661382f2011-01-06 17:04:50 +00001440 /* Wire up NMI handling */
1441 int err = register_die_notifier(&sh_dmae_nmi_notifier);
1442 if (err)
1443 return err;
1444
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001445 return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
1446}
1447module_init(sh_dmae_init);
1448
1449static void __exit sh_dmae_exit(void)
1450{
1451 platform_driver_unregister(&sh_dmae_driver);
Guennadi Liakhovetski661382f2011-01-06 17:04:50 +00001452
1453 unregister_die_notifier(&sh_dmae_nmi_notifier);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001454}
1455module_exit(sh_dmae_exit);
1456
1457MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1458MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1459MODULE_LICENSE("GPL");
Guennadi Liakhovetskie5843342010-11-24 09:48:10 +00001460MODULE_ALIAS("platform:sh-dma-engine");