/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
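
/*
 * A descriptor normally moves IDLE -> PREPARED (in a prep call) ->
 * SUBMITTED (in tx_submit()) -> COMPLETED (in the completion tasklet) ->
 * WAITING (once its callback has run) and back to IDLE when the client
 * acks it and it returns to the free list.
 */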

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}
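
/*
 * Note on the pointer arithmetic above: sh_dc->base is a u32 __iomem
 * pointer, while register offsets such as SAR/DAR/TCR are byte offsets,
 * so they are divided by sizeof(u32) before being added to the base.
 */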

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
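
/*
 * The CHCR TS (transfer size) field can be split into a low and a high bit
 * group, so the two helpers above pack and unpack it through the
 * platform-data masks. A sketch with hypothetical platform values: with
 * ts_low_mask = 0x18 (ts_low_shift = 3) and ts_high_mask = 0x00300000
 * (ts_high_shift = 18), index i = 5 (binary 101) is encoded as
 * ((5 << 3) & 0x18) | ((5 << 18) & 0x00300000) = 0x08 | 0x00100000, and
 * calc_xmit_shift() recovers i = 1 | 4 = 5 to look up the log2 transfer
 * size in pdata->ts_shift[].
 */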

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
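
/*
 * One 16-bit DMARS register carries the MID/RID values for two channels,
 * one per byte, selected by dmars_bit (typically 0 or 8). In the
 * read-modify-write above, shift == 0 keeps the high byte (mask 0xff00)
 * and replaces the low one with val, while shift == 8 keeps the low byte
 * (mask 0x00ff) and replaces the high one with val << 8.
 */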

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	struct sh_dmae_slave *param = tx->chan->private;
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&sh_chan->desc_lock);

	if (list_empty(&sh_chan->ld_queue))
		power_up = true;
	else
		power_up = false;

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	if (power_up) {
		sh_chan->pm_state = DMAE_PM_BUSY;

		pm_runtime_get(sh_chan->dev);

		spin_unlock_irq(&sh_chan->desc_lock);

		pm_runtime_barrier(sh_chan->dev);

		spin_lock_irq(&sh_chan->desc_lock);

		/* Have we been reset, while waiting? */
		if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
			dev_dbg(sh_chan->dev, "Bring up channel %d\n",
				sh_chan->id);
			if (param) {
				const struct sh_dmae_slave_config *cfg =
					param->config;

				dmae_set_dmars(sh_chan, cfg->mid_rid);
				dmae_set_chcr(sh_chan, cfg->chcr);
			} else {
				dmae_init(sh_chan);
			}

			if (sh_chan->pm_state == DMAE_PM_PENDING)
				sh_chan_xfer_ld_queue(sh_chan);
			sh_chan->pm_state = DMAE_PM_ESTABLISHED;
		}
	}

	spin_unlock_irq(&sh_chan->desc_lock);

	return cookie;
}
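
/*
 * Runtime PM pairing: the pm_runtime_get() above runs only on the first
 * submission to an empty queue; the matching pm_runtime_put() is issued in
 * __ld_cleanup() (or in sh_dmae_reset()) once ld_queue drains again, so
 * the controller is powered exactly while descriptors are in flight.
 */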

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;
	}

	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc)
			break;
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	chan->private = NULL;
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&sh_chan->desc_lock);
	dmae_halt(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;
	}

	spin_lock_irq(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_irq(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the new descriptor or NULL on failure
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}
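
/*
 * Chunking sketch: a single hardware transfer is limited to
 * SH_DMA_TCR_MAX + 1 bytes (the "maximum DMA size is 16MB" note in the
 * header), which is what caps copy_size above. A 40MB MEMCPY request is
 * therefore cut into chunks of 16MB + 16MB + 8MB, all linked to the same
 * "first" descriptor and later submitted together by sh_dmae_tx_submit().
 */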

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries its usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element, pointing at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%zu], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}
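
/*
 * Usage sketch (hypothetical client code, not part of this driver): MEMCPY
 * users reach the function above through the generic dmaengine API:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
 *						  DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);	(lands in sh_dmae_tx_submit())
 *	dma_async_issue_pending(chan);	(kicks sh_dmae_memcpy_issue_pending())
 */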

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}
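
/*
 * Slave usage sketch (hypothetical client code; the slave_id constant is an
 * assumption, real IDs come from the platform's sh_dmae_pdata): a
 * peripheral driver passes a struct sh_dmae_slave via chan->private:
 *
 *	static bool filter(struct dma_chan *chan, void *arg)
 *	{
 *		chan->private = arg;	(validated in sh_dmae_alloc_chan_resources())
 *		return true;
 *	}
 *
 *	struct sh_dmae_slave param = { .slave_id = SHDMA_SLAVE_SCIF0_TX };
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, filter, &param);
 */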

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	sh_chan = to_sh_chan(chan);

	spin_lock_irqsave(&sh_chan->desc_lock, flags);
	dmae_halt(sh_chan);

	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}
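
/*
 * On DMA_TERMINATE_ALL the head descriptor records desc->partial, an
 * estimate in bytes of how much of it had already been transferred (TCR
 * holds the remaining count in transfer-size units, hence the
 * << xmit_shift conversion), so a slave user can later resume an aborted
 * transfer where it stopped.
 */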

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sh_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;

			list_move(&desc->node, &sh_chan->ld_free);

			if (list_empty(&sh_chan->ld_queue)) {
				dev_dbg(sh_chan->dev, "Bring down channel %d\n",
					sh_chan->id);
				pm_runtime_put(sh_chan->dev);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		sh_chan->completed_cookie = sh_chan->common.cookie;

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

/* Called under spin_lock_irq(&sh_chan->desc_lock) */
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	/* DMA work check */
	if (dmae_is_busy(sh_chan))
		return;

	/* Find the first descriptor that has not been transferred yet */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	spin_lock_irq(&sh_chan->desc_lock);
	if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
		sh_chan_xfer_ld_queue(sh_chan);
	else
		sh_chan->pm_state = DMAE_PM_PENDING;
	spin_unlock_irq(&sh_chan->desc_lock);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;
	unsigned long flags;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	/* First read completed cookie to avoid a skew */
	last_complete = sh_chan->completed_cookie;
	rmb();
	last_used = chan->cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_irqsave(&sh_chan->desc_lock, flags);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = data;
	u32 chcr;

	spin_lock(&sh_chan->desc_lock);

	chcr = chcr_read(sh_chan);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	spin_unlock(&sh_chan->desc_lock);

	return ret;
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so have to reset all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;
		LIST_HEAD(dl);

		if (!sh_chan)
			continue;

		spin_lock(&sh_chan->desc_lock);

		/* Stop the channel */
		dmae_halt(sh_chan);

		list_splice_init(&sh_chan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
			pm_runtime_put(sh_chan->dev);
		}
		sh_chan->pm_state = DMAE_PM_ESTABLISHED;

		spin_unlock(&sh_chan->desc_lock);

		/* Complete all */
		list_for_each_entry(desc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&sh_chan->desc_lock);
		list_splice(&dl, &sh_chan->ld_free);
		spin_unlock(&sh_chan->desc_lock);

		handled++;
	}

	sh_dmae_rst(shdev);

	return !!handled;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock_irq(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call = sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority = 1,
};

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;

	/* reference struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1; i >= 0; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
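
	/*
	 * A sketch of case 1 above (addresses and IRQ number are hypothetical,
	 * not taken from any real board file): a controller with one muxed
	 * IRQ would declare
	 *
	 *	static struct resource sh_dmae_resources[] = {
	 *		{ .start = 0xfe008020, .end = 0xfe00828f,
	 *		  .flags = IORESOURCE_MEM },	(channel registers)
	 *		{ .start = 0xfe009000, .end = 0xfe00900f,
	 *		  .flags = IORESOURCE_MEM },	(DMARS, optional)
	 *		{ .start = 34, .end = 34,
	 *		  .flags = IORESOURCE_IRQ },	(error + all channels)
	 *	};
	 *
	 * while per-channel IRQs add further IRQ resources (single IRQs or
	 * start != end ranges) after the error IRQ.
	 */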
1182 errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1183 if (!chan || !errirq_res)
1184 return -ENODEV;
1185
1186 if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
1187 dev_err(&pdev->dev, "DMAC register region already claimed\n");
1188 return -EBUSY;
1189 }
1190
1191 if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
1192 dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
1193 err = -EBUSY;
1194 goto ermrdmars;
1195 }
1196
1197 err = -ENOMEM;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001198 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
1199 if (!shdev) {
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001200 dev_err(&pdev->dev, "Not enough memory\n");
1201 goto ealloc;
1202 }
1203
1204 shdev->chan_reg = ioremap(chan->start, resource_size(chan));
1205 if (!shdev->chan_reg)
1206 goto emapchan;
1207 if (dmars) {
1208 shdev->dmars = ioremap(dmars->start, resource_size(dmars));
1209 if (!shdev->dmars)
1210 goto emapdmars;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001211 }
1212
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001213 /* platform data */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001214 shdev->pdata = pdata;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001215
Kuninori Morimoto5899a722011-06-17 08:20:40 +00001216 if (pdata->chcr_offset)
1217 shdev->chcr_offset = pdata->chcr_offset;
1218 else
1219 shdev->chcr_offset = CHCR;
1220
Kuninori Morimoto67c62692011-06-17 08:20:51 +00001221 if (pdata->chcr_ie_bit)
1222 shdev->chcr_ie_bit = pdata->chcr_ie_bit;
1223 else
1224 shdev->chcr_ie_bit = CHCR_IE;
1225
Paul Mundt5c2de442011-05-31 15:53:03 +09001226 platform_set_drvdata(pdev, shdev);
1227
Guennadi Liakhovetski20f2a3b2010-02-11 16:50:18 +00001228 pm_runtime_enable(&pdev->dev);
1229 pm_runtime_get_sync(&pdev->dev);
1230
	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

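	/* Generic dmaengine entry points */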
	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory fields for DMA_SLAVE */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed to request irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

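	/*
	 * Decide how the platform's IRQ resources map onto channels: either
	 * one IRQ multiplexed across all channels, or one IRQ per channel,
	 * possibly given as ranges.
	 */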
	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
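		/* Walk the IRQ resources, expanding ranges into per-channel IRQs */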
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA channels */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAC_MAX_CHANNELS);

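	/* Drop the probe-time PM reference; active channels hold their own */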
	pm_runtime_put(&pdev->dev);

	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	if (dmars)
		iounmap(shdev->dmars);

	platform_set_drvdata(pdev, NULL);
emapdmars:
	iounmap(shdev->chan_reg);
	synchronize_rcu();
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	/* Remove per-channel data */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	platform_set_drvdata(pdev, NULL);

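	/* Make sure the NMI notifier's RCU readers are done with this device */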
	synchronize_rcu();
	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

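/* Controller state may be lost while runtime-suspended, so reset on resume */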
static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i;

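	/* Drop the runtime PM references held by channels that are in use */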
	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		/* Channels beyond the probed IRQ count may be absent */
		if (sh_chan && sh_chan->descs_allocated)
			sh_chan->pm_error = pm_runtime_put_sync(dev);
	}

	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_dmae_slave *param;

		/* Skip channels that were never probed or are unused */
		if (!sh_chan || !sh_chan->descs_allocated)
			continue;

		param = sh_chan->common.private;

		if (!sh_chan->pm_error)
			pm_runtime_get_sync(dev);

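		/* Restore the slave or default channel configuration */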
		if (param) {
			const struct sh_dmae_slave_config *cfg = param->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

const struct dev_pm_ops sh_dmae_pm = {
	.suspend = sh_dmae_suspend,
	.resume = sh_dmae_resume,
	.runtime_suspend = sh_dmae_runtime_suspend,
	.runtime_resume = sh_dmae_runtime_resume,
};

static struct platform_driver sh_dmae_driver = {
	.remove = __exit_p(sh_dmae_remove),
	.shutdown = sh_dmae_shutdown,
	.driver = {
		.owner = THIS_MODULE,
		.name = "sh-dma-engine",
		.pm = &sh_dmae_pm,
	},
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-dma-engine");