/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

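/*
 * DMAOR is 16 bits wide on most SoCs but 32 bits on some, and the CHCR
 * register offset can also differ per controller; both are taken from
 * the platform data.
 */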
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

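/*
 * dmae_start()/dmae_halt() toggle the channel: DE enables it, the
 * controller-specific IE bit enables its interrupt, TE is the
 * transfer-end status flag.
 */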
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

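/*
 * Assign a cookie to the descriptor and move all of its chunks from the
 * free list to ld_queue; only the last chunk keeps the user's callback.
 */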
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

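/*
 * Configure the channel (slave setup if chan->private carries an
 * sh_dmae_slave, MEMCPY defaults otherwise) and preallocate its
 * descriptor pool.
 */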
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	pm_runtime_put(sh_chan->dev);
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	/* Protect against ISR */
	spin_lock_irq(&sh_chan->desc_lock);
	dmae_halt(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the prepared descriptor or %NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	spin_lock_bh(&sh_chan->desc_lock);
	dmae_halt(sh_chan);

	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}

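/*
 * Walk ld_queue once: advance completed_cookie, run at most one callback
 * per pass and move acked descriptors back to ld_free. Returns the
 * callback that was invoked, if any, so callers can loop until idle.
 */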
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		sh_chan->completed_cookie = sh_chan->common.cookie;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

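/* Start the first still-submitted descriptor, unless the channel is busy */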
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan))
		goto sh_chan_xfer_ld_queue_end;

	/* Find the first not transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

sh_chan_xfer_ld_queue_end:
	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

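/*
 * Report transfer status; a cookie that is neither completed nor found on
 * ld_queue has been aborted and is reported as DMA_ERROR.
 */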
static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	/* First read completed cookie to avoid a skew */
	last_complete = sh_chan->completed_cookie;
	rmb();
	last_used = chan->cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we have
	 * to report error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

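/*
 * Per-channel transfer-end interrupt: halt the channel and defer
 * completion handling to the tasklet.
 */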
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = data;
	u32 chcr;

	spin_lock(&sh_chan->desc_lock);

	chcr = chcr_read(sh_chan);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	spin_unlock(&sh_chan->desc_lock);

	return ret;
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect, which channel caused the error, have to reset all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;
		LIST_HEAD(dl);

		if (!sh_chan)
			continue;

		spin_lock(&sh_chan->desc_lock);

		/* Stop the channel */
		dmae_halt(sh_chan);

		list_splice_init(&sh_chan->ld_queue, &dl);

		spin_unlock(&sh_chan->desc_lock);

		/* Complete all */
		list_for_each_entry(desc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&sh_chan->desc_lock);
		list_splice(&dl, &sh_chan->ld_free);
		spin_unlock(&sh_chan->desc_lock);

		handled++;
	}

	sh_dmae_rst(shdev);

	return !!handled;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(data);
	return IRQ_HANDLED;
}

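/*
 * Completion tasklet: match the current SAR/DAR against queued descriptors
 * to find the chunk that has just finished, then kick off the next transfer
 * and run the cleanup.
 */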
static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

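/*
 * Allocate one channel, register it with the dmaengine core and request
 * its IRQ.
 */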
static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1; i >= 0; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

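/*
 * Probe: map the register windows, reset the controller, sort out the error
 * and per-channel IRQ resources and register all channels with dmaengine.
 */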
1100static int __init sh_dmae_probe(struct platform_device *pdev)
1101{
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001102 struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
1103 unsigned long irqflags = IRQF_DISABLED,
Guennadi Liakhovetski8b1935e2010-02-11 16:50:14 +00001104 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
1105 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
Magnus Damm300e5f92011-05-24 10:31:20 +00001106 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001107 struct sh_dmae_device *shdev;
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001108 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001109
Dan Williams56adf7e2009-11-22 12:10:10 -07001110 /* get platform data */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001111 if (!pdata || !pdata->channel_num)
Dan Williams56adf7e2009-11-22 12:10:10 -07001112 return -ENODEV;
1113
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001114 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Magnus Damm26fc02a2011-05-24 10:31:12 +00001115 /* DMARS area is optional */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001116 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1117 /*
1118 * IRQ resources:
1119 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
1120 * the error IRQ, in which case it is the only IRQ in this resource:
1121 * start == end. If it is the only IRQ resource, all channels also
1122 * use the same IRQ.
1123 * 2. DMA channel IRQ resources can be specified one per resource or in
1124 * ranges (start != end)
1125 * 3. iff all events (channels and, optionally, error) on this
1126 * controller use the same IRQ, only one IRQ resource can be
1127 * specified, otherwise there must be one IRQ per channel, even if
1128 * some of them are equal
1129 * 4. if all IRQs on this controller are equal or if some specific IRQs
1130 * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
1131 * requested with the IRQF_SHARED flag
1132 */
1133 errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1134 if (!chan || !errirq_res)
1135 return -ENODEV;
1136
1137 if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
1138 dev_err(&pdev->dev, "DMAC register region already claimed\n");
1139 return -EBUSY;
1140 }
1141
1142 if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
1143 dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
1144 err = -EBUSY;
1145 goto ermrdmars;
1146 }
1147
1148 err = -ENOMEM;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001149 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
1150 if (!shdev) {
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001151 dev_err(&pdev->dev, "Not enough memory\n");
1152 goto ealloc;
1153 }
1154
1155 shdev->chan_reg = ioremap(chan->start, resource_size(chan));
1156 if (!shdev->chan_reg)
1157 goto emapchan;
1158 if (dmars) {
1159 shdev->dmars = ioremap(dmars->start, resource_size(dmars));
1160 if (!shdev->dmars)
1161 goto emapdmars;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001162 }
1163
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001164 /* platform data */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001165 shdev->pdata = pdata;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001166
Kuninori Morimoto5899a722011-06-17 08:20:40 +00001167 if (pdata->chcr_offset)
1168 shdev->chcr_offset = pdata->chcr_offset;
1169 else
1170 shdev->chcr_offset = CHCR;
1171
Kuninori Morimoto67c62692011-06-17 08:20:51 +00001172 if (pdata->chcr_ie_bit)
1173 shdev->chcr_ie_bit = pdata->chcr_ie_bit;
1174 else
1175 shdev->chcr_ie_bit = CHCR_IE;
1176
Paul Mundt5c2de442011-05-31 15:53:03 +09001177 platform_set_drvdata(pdev, shdev);
1178
Guennadi Liakhovetski20f2a3b2010-02-11 16:50:18 +00001179 pm_runtime_enable(&pdev->dev);
1180 pm_runtime_get_sync(&pdev->dev);
1181
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001182 spin_lock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001183 list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
Guennadi Liakhovetski31705e22011-05-02 07:59:02 +00001184 spin_unlock_irq(&sh_dmae_lock);
Paul Mundt03aa18f2010-12-17 19:16:10 +09001185
Guennadi Liakhovetski2dc66662011-04-29 17:09:21 +00001186 /* reset dma controller - only needed as a test */
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001187 err = sh_dmae_rst(shdev);
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001188 if (err)
1189 goto rst_err;
1190
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001191 INIT_LIST_HEAD(&shdev->common.channels);
1192
1193 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
Magnus Damm26fc02a2011-05-24 10:31:12 +00001194 if (pdata->slave && pdata->slave_num)
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001195 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
Guennadi Liakhovetskicfefe992010-02-03 14:46:41 +00001196
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001197 shdev->common.device_alloc_chan_resources
1198 = sh_dmae_alloc_chan_resources;
1199 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
1200 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
Linus Walleij07934482010-03-26 16:50:49 -07001201 shdev->common.device_tx_status = sh_dmae_tx_status;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001202 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
Guennadi Liakhovetskicfefe992010-02-03 14:46:41 +00001203
1204 /* Compulsory for DMA_SLAVE fields */
1205 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
Linus Walleijc3635c72010-03-26 16:44:01 -07001206 shdev->common.device_control = sh_dmae_control;
Guennadi Liakhovetskicfefe992010-02-03 14:46:41 +00001207
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001208 shdev->common.dev = &pdev->dev;
Guennadi Liakhovetskiddb4f0f2009-12-04 19:44:41 +01001209 /* Default transfer size of 32 bytes requires 32-byte alignment */
Guennadi Liakhovetski8b1935e2010-02-11 16:50:14 +00001210 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001211
Magnus Damm927a7c92010-03-19 04:47:19 +00001212#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001213 chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1214
1215 if (!chanirq_res)
1216 chanirq_res = errirq_res;
1217 else
1218 irqres++;
1219
1220 if (chanirq_res == errirq_res ||
1221 (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001222 irqflags = IRQF_SHARED;
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001223
1224 errirq = errirq_res->start;
1225
1226 err = request_irq(errirq, sh_dmae_err, irqflags,
1227 "DMAC Address Error", shdev);
1228 if (err) {
1229 dev_err(&pdev->dev,
1230 "DMA failed requesting irq #%d, error %d\n",
1231 errirq, err);
1232 goto eirq_err;
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001233 }
1234
Guennadi Liakhovetski027811b2010-02-11 16:50:10 +00001235#else
1236 chanirq_res = errirq_res;
Magnus Damm927a7c92010-03-19 04:47:19 +00001237#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
Nobuhiro Iwamatsud8902ad2009-09-07 03:26:23 +00001238
	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case: a single IRQ multiplexed across all channels */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;

				if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
					break;
			}

			if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
				irq_cap = 1;
				break;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAC_MAX_CHANNELS);

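	/*
	 * Drop the runtime PM reference held during probe; channels keep the
	 * device active on their own while they have descriptors allocated
	 * (see sh_dmae_suspend()/sh_dmae_resume() below).
	 */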
	pm_runtime_put(&pdev->dev);

	dma_async_device_register(&shdev->common);

	return err;

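	/*
	 * Error unwind: each label below undoes the probe steps performed
	 * before the failure, in reverse order - remove any channels already
	 * created, free the error IRQ (SH4/SH-Mobile only), drop the
	 * controller from the global RCU-protected list, drop runtime PM,
	 * unmap the register windows, free shdev and release the memory
	 * regions.
	 */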
chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	if (dmars)
		iounmap(shdev->dmars);

	platform_set_drvdata(pdev, NULL);
emapdmars:
	iounmap(shdev->chan_reg);
	synchronize_rcu();
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

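/*
 * Tear down the controller: unregister from the dmaengine core, free the
 * error IRQ, drop the device from the global list, remove the channels and
 * release all remaining resources in the reverse order of probe.
 */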
static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	/* Remove the per-channel data */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();
	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

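/* Stop the DMA controller when the system shuts down */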
static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

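/*
 * Runtime PM: nothing needs to be saved on suspend; on resume the controller
 * may have lost its state while powered down, so reinitialise it through
 * sh_dmae_rst().
 */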
static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

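/*
 * System sleep: channels that currently have descriptors allocated drop
 * their runtime PM reference on suspend and re-acquire it on resume, then
 * reprogram the slave configuration (DMARS/CHCR) or reinitialise the
 * channel as needed.
 */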
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan->descs_allocated)
			sh_chan->pm_error = pm_runtime_put_sync(dev);
	}

	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_dmae_slave *param = sh_chan->common.private;

		if (!sh_chan->descs_allocated)
			continue;

		if (!sh_chan->pm_error)
			pm_runtime_get_sync(dev);

		if (param) {
			const struct sh_dmae_slave_config *cfg = param->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

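/* Combined system-sleep and runtime PM operations for this platform driver */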
const struct dev_pm_ops sh_dmae_pm = {
	.suspend = sh_dmae_suspend,
	.resume = sh_dmae_resume,
	.runtime_suspend = sh_dmae_runtime_suspend,
	.runtime_resume = sh_dmae_runtime_resume,
};

static struct platform_driver sh_dmae_driver = {
	.remove = __exit_p(sh_dmae_remove),
	.shutdown = sh_dmae_shutdown,
	.driver = {
		.owner = THIS_MODULE,
		.name = "sh-dma-engine",
		.pm = &sh_dmae_pm,
	},
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-dma-engine");