/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "dmaengine.h"
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
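
/*
 * Descriptor life cycle, as implemented below: IDLE (on ld_free) ->
 * PREPARED (still on ld_free, owned by the submitter) -> SUBMITTED
 * (moved to ld_queue) -> COMPLETED (transfer done, callback pending) ->
 * WAITING (callback run, ack pending) -> IDLE again once the
 * transaction is acked.
 */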

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);

static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, shdev->chan_reg +
		     shdev->pdata->channel[sh_dc->id].chclr_offset);
}

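/*
 * Channel register access helpers: sh_dc->base is a u32 __iomem pointer,
 * so the byte offsets taken from the platform data are divided by
 * sizeof(u32) to index whole registers.
 */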
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				chclr_write(sh_chan, 0);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->common.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
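
/*
 * Illustrative round-trip of the two helpers above (hypothetical pdata
 * values, not taken from any real SoC): with ts_low_mask = 0x18,
 * ts_low_shift = 3, no high bits and ts_shift[] = { 2, 4 }, a CHCR value
 * whose TS bits are 0x8 selects index 1, so calc_xmit_shift() returns 4
 * (2^4 = 16-byte transfer units), and log2size_to_chcr(sh_chan, 4) maps
 * back to the same 0x8 TS bits.
 */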

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

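	/*
	 * Each 16-bit DMARS register packs the MID/RID values of two
	 * channels, so use a read-modify-write that preserves the
	 * neighbouring channel's byte: with shift == 8 the mask
	 * 0xff00 >> 8 == 0x00ff keeps the low byte intact while val
	 * lands in the high one.
	 */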
	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	struct sh_dmae_slave *param = tx->chan->private;
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&sh_chan->desc_lock);

	if (list_empty(&sh_chan->ld_queue))
		power_up = true;
	else
		power_up = false;

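	/*
	 * Cookies are per-channel, monotonically increasing positive
	 * numbers; wrap back to 1 on overflow, since negative values
	 * are reserved for dmaengine error codes.
	 */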
	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	if (power_up) {
		sh_chan->pm_state = DMAE_PM_BUSY;

		pm_runtime_get(sh_chan->dev);
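		/*
		 * pm_runtime_get() is asynchronous: drop the channel lock,
		 * let pm_runtime_barrier() wait for the resume to finish,
		 * then re-take the lock and re-check the PM state, which
		 * may have changed while the channel was unlocked.
		 */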

		spin_unlock_irq(&sh_chan->desc_lock);

		pm_runtime_barrier(sh_chan->dev);

		spin_lock_irq(&sh_chan->desc_lock);

		/* Have we been reset while waiting? */
		if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
			dev_dbg(sh_chan->dev, "Bring up channel %d\n",
				sh_chan->id);
			if (param) {
				const struct sh_dmae_slave_config *cfg =
					param->config;

				dmae_set_dmars(sh_chan, cfg->mid_rid);
				dmae_set_chcr(sh_chan, cfg->chcr);
			} else {
				dmae_init(sh_chan);
			}

			if (sh_chan->pm_state == DMAE_PM_PENDING)
				sh_chan_xfer_ld_queue(sh_chan);
			sh_chan->pm_state = DMAE_PM_ESTABLISHED;
		}
	} else {
		sh_chan->pm_state = DMAE_PM_PENDING;
	}

	spin_unlock_irq(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;
	}

	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc)
			break;
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	chan->private = NULL;
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&sh_chan->desc_lock);
	dmae_halt(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;
	}

	spin_lock_irq(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_irq(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan: DMA channel
 * @flags: DMA transfer flags
 * @dest: destination DMA address, incremented when direction equals
 *	DMA_DEV_TO_MEM
 * @src: source DMA address, incremented when direction equals
 *	DMA_MEM_TO_DEV
 * @len: DMA transfer length
 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction: needed for slave DMA to decide which address to keep constant,
 *	equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the new descriptor or %NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_transfer_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

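	/*
	 * Clamp each chunk to the hardware limit: one descriptor can move
	 * at most SH_DMA_TCR_MAX + 1 bytes (the 16MB ceiling noted in the
	 * file header).
	 */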
	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 * cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			       flags);
}
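
/*
 * Illustrative client-side use of the MEMCPY capability through the
 * generic dmaengine API (a sketch only: error handling omitted, and
 * "chan" is assumed to have been obtained with dma_request_channel()):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						  DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */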

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}
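
/*
 * Illustrative slave-channel setup from a client driver (a sketch; the
 * slave_id value is hypothetical and board-specific, and error handling
 * is omitted). The filter attaches the sh_dmae_slave to chan->private,
 * which alloc_chan_resources() and prep_slave_sg() above rely on:
 *
 *	static bool shdma_filter(struct dma_chan *chan, void *arg)
 *	{
 *		chan->private = arg;
 *		return true;
 *	}
 *
 *	struct sh_dmae_slave param = { .slave_id = SHDMA_SLAVE_SCIF0_TX };
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_filter, &param);
 */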

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	spin_lock_irqsave(&sh_chan->desc_lock, flags);
	dmae_halt(sh_chan);

	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sh_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->common.completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->common.completed_cookie + 1);
			sh_chan->common.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;

			list_move(&desc->node, &sh_chan->ld_free);

			if (list_empty(&sh_chan->ld_queue)) {
				dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
				pm_runtime_put(sh_chan->dev);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		sh_chan->common.completed_cookie = sh_chan->common.cookie;

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

/* Called under spin_lock_irq(&sh_chan->desc_lock) */
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	/* DMA work check */
	if (dmae_is_busy(sh_chan))
		return;

	/* Find the first descriptor that has not been transferred yet */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	spin_lock_irq(&sh_chan->desc_lock);
	if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
		sh_chan_xfer_ld_queue(sh_chan);
	else
		sh_chan->pm_state = DMAE_PM_PENDING;
	spin_unlock_irq(&sh_chan->desc_lock);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;
	unsigned long flags;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	/* First read the completed cookie to avoid a skew */
	last_complete = chan->completed_cookie;
	rmb();
	last_used = chan->cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_irqsave(&sh_chan->desc_lock, flags);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = data;
	u32 chcr;

	spin_lock(&sh_chan->desc_lock);

	chcr = chcr_read(sh_chan);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	spin_unlock(&sh_chan->desc_lock);

	return ret;
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;
		LIST_HEAD(dl);

		if (!sh_chan)
			continue;

		spin_lock(&sh_chan->desc_lock);

		/* Stop the channel */
		dmae_halt(sh_chan);

		list_splice_init(&sh_chan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
			pm_runtime_put(sh_chan->dev);
		}
		sh_chan->pm_state = DMAE_PM_ESTABLISHED;

		spin_unlock(&sh_chan->desc_lock);

		/* Complete all */
		list_for_each_entry(desc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&sh_chan->desc_lock);
		list_splice(&dl, &sh_chan->ld_free);
		spin_unlock(&sh_chan->desc_lock);

		handled++;
	}

	sh_dmae_rst(shdev);

	return !!handled;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(data);
	return IRQ_HANDLED;
}

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

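	/*
	 * Identify the just-completed chunk: the submitted descriptor
	 * whose start address plus length equals the current register
	 * value (DAR for DMA_DEV_TO_MEM transfers, SAR otherwise) is
	 * the one that finished.
	 */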
	spin_lock_irq(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_DEV_TO_MEM &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;

	/* reference struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}
1171
1172static int __init sh_dmae_probe(struct platform_device *pdev)
1173{
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001174 struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
1175 unsigned long irqflags = IRQF_DISABLED,
Guennadi Liakhovetski8b1935e2010-02-11 16:50:14 +00001176 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
1177 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
Magnus Damm300e5f92011-05-24 10:31:20 +00001178 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001179 struct sh_dmae_device *shdev;
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001180 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001181
Dan Williams56adf7e2009-11-22 12:10:10 -07001182 /* get platform data */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001183 if (!pdata || !pdata->channel_num)
Dan Williams56adf7e2009-11-22 12:10:10 -07001184 return -ENODEV;
1185
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001186 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Magnus Damm26fc02a2011-05-24 10:31:12 +00001187 /* DMARS area is optional */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001188 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1189 /*
1190 * IRQ resources:
1191 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
1192 * the error IRQ, in which case it is the only IRQ in this resource:
1193 * start == end. If it is the only IRQ resource, all channels also
1194 * use the same IRQ.
1195 * 2. DMA channel IRQ resources can be specified one per resource or in
1196 * ranges (start != end)
1197 * 3. iff all events (channels and, optionally, error) on this
1198 * controller use the same IRQ, only one IRQ resource can be
1199 * specified, otherwise there must be one IRQ per channel, even if
1200 * some of them are equal
1201 * 4. if all IRQs on this controller are equal or if some specific IRQs
1202 * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
1203 * requested with the IRQF_SHARED flag
1204 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	shdev->common.dev = &pdev->dev;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Fields compulsory for the DMA_SLAVE capability */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	/* The default transfer size of 2^2 = 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
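	/*
	 * Interpretation, not a guarantee made by this file: copy_align is
	 * a log2 shift, so with LOG2_DEFAULT_XFER_SIZE == 2 a dmaengine
	 * client is expected to keep memcpy buffers and lengths aligned to
	 * 1 << 2 == 4 bytes, e.g. with a check like
	 *
	 *	if (len & ((1 << device->copy_align) - 1))
	 *		(fall back to a CPU copy)
	 */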

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

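	/*
	 * At this point chan_irq[0..irq_cnt - 1] holds one IRQ per channel.
	 * With the hypothetical resource layout sketched earlier this would
	 * be { 48, 49, 50, 51, 52, 53 }; in the fully multiplexed special
	 * case every entry holds the same IRQ and is flagged IRQF_SHARED.
	 */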
	/* Create the DMA channels */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAC_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	if (dmars)
		iounmap(shdev->dmars);

	platform_set_drvdata(pdev, NULL);
emapdmars:
	iounmap(shdev->chan_reg);
	synchronize_rcu();
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	/* remove channel data */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();
	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

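/*
 * Runtime PM: nothing is saved at suspend time; the controller is
 * simply re-initialized with sh_dmae_rst() in the resume callback.
 */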
static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

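	/*
	 * Reprogram every channel that is still in use: slave channels get
	 * their DMARS/CHCR configuration back, memcpy channels are simply
	 * re-initialized.
	 */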
	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_dmae_slave *param;

		/* Skip channels that were never created or have no descriptors */
		if (!sh_chan || !sh_chan->descs_allocated)
			continue;

		param = sh_chan->common.private;

		if (param) {
			const struct sh_dmae_slave_config *cfg = param->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

const struct dev_pm_ops sh_dmae_pm = {
	.suspend = sh_dmae_suspend,
	.resume = sh_dmae_resume,
	.runtime_suspend = sh_dmae_runtime_suspend,
	.runtime_resume = sh_dmae_runtime_resume,
};

static struct platform_driver sh_dmae_driver = {
	.remove = __exit_p(sh_dmae_remove),
	.shutdown = sh_dmae_shutdown,
	.driver = {
		.owner = THIS_MODULE,
		.name = "sh-dma-engine",
		.pm = &sh_dmae_pm,
	},
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-dma-engine");
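
/*
 * Usage sketch, illustrative only and not part of this file: on
 * platforms of this era a client would typically request a slave
 * channel through the generic dmaengine API roughly like this (the
 * filter function and the slave_id value are hypothetical):
 *
 *	static bool example_filter(struct dma_chan *chan, void *arg)
 *	{
 *		chan->private = arg;	(make the sh_dmae_slave visible
 *					 to sh_dmae_alloc_chan_resources)
 *		return true;
 *	}
 *
 *	struct sh_dmae_slave param = { .slave_id = SHDMA_SLAVE_SCIF0_TX };
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, example_filter, &param);
 */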