/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC does not have a hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
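
/*
 * The descriptor life cycle, as a sketch inferred from the state
 * transitions below (see sh_dmae_add_desc(), sh_dmae_tx_submit(),
 * dmae_do_tasklet() and __ld_cleanup()):
 *
 *   DESC_IDLE -> DESC_PREPARED -> DESC_SUBMITTED -> DESC_COMPLETED
 *       ^                                                |
 *       +----------------- DESC_WAITING <---------------+
 *
 * IDLE and PREPARED descriptors live on ld_free, the others on ld_queue.
 */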

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

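/*
 * A minimal sketch of the convention stated above, as used elsewhere in
 * this file: writers take sh_dmae_lock around list updates (probe/remove),
 * while the NMI path walks the list under RCU:
 *
 *	spin_lock_irq(&sh_dmae_lock);
 *	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
 *	spin_unlock_irq(&sh_dmae_lock);
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node)
 *		sh_dmae_nmi_notify(shdev);
 *	rcu_read_unlock();
 */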
/* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
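
/*
 * A worked example of the TS encoding, with hypothetical pdata values
 * ts_shift[] = { 3, 0, 1, 2 }, ts_low_mask = 0x18, ts_low_shift = 3 and no
 * high bits: log2size_to_chcr(sh_chan, 2) finds index 3 and returns
 * (3 << 3) & 0x18 = 0x18; calc_xmit_shift() on that CHCR value reads the
 * field back as 3 and returns ts_shift[3] = 2, i.e. a 4-byte transfer
 * unit. The two helpers are inverses over the platform's ts_shift table.
 */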

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars;
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
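
/*
 * A worked example of the read-modify-write above: each 16-bit DMARS
 * register holds the MID/RID settings of two channels, one per byte. With
 * dmars_bit == 0 the mask 0xff00 >> 0 preserves the high byte and val
 * lands in the low byte; with dmars_bit == 8 the mask becomes 0x00ff, the
 * low byte is preserved and val << 8 lands in the high byte.
 */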

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	pm_runtime_put(sh_chan->dev);
	return ret;
}

/*
 * sh_dma_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	/* Protect against ISR */
	spin_lock_irq(&sh_chan->desc_lock);
	dmae_halt(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns a pointer to the prepared descriptor or NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element and points at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 * cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}
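
/*
 * A minimal client-side sketch of driving the MEMCPY path above through
 * the generic dmaengine API (names like done_fn, dst_dma, src_dma are
 * placeholders; error handling omitted):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
 *						  len, DMA_PREP_INTERRUPT);
 *	tx->callback = done_fn;
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */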

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	spin_lock_bh(&sh_chan->desc_lock);
	dmae_halt(sh_chan);

	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}
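
/*
 * For reference, a client ends up in sh_dmae_control() above through the
 * generic dmaengine wrapper (a sketch, assuming a previously requested
 * channel):
 *
 *	dmaengine_terminate_all(chan);
 *	// expands to chan->device->device_control(chan, DMA_TERMINATE_ALL, 0)
 */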

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		sh_chan->completed_cookie = sh_chan->common.cookie;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first untransferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	/* First read completed cookie to avoid a skew */
	last_complete = sh_chan->completed_cookie;
	rmb();
	last_used = chan->cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = data;
	u32 chcr;

	spin_lock(&sh_chan->desc_lock);

	chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	spin_unlock(&sh_chan->desc_lock);

	return ret;
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset all of them */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;
		LIST_HEAD(dl);

		if (!sh_chan)
			continue;

		spin_lock(&sh_chan->desc_lock);

		/* Stop the channel */
		dmae_halt(sh_chan);

		list_splice_init(&sh_chan->ld_queue, &dl);

		spin_unlock(&sh_chan->desc_lock);

		/* Complete all */
		list_for_each_entry(desc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&sh_chan->desc_lock);
		list_splice(&dl, &sh_chan->ld_free);
		spin_unlock(&sh_chan->desc_lock);

		handled++;
	}

	sh_dmae_rst(shdev);

	return !!handled;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(data);
	return IRQ_HANDLED;
}

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
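	/*
	 * For illustration, a hypothetical board file matching conventions
	 * 1 and 2 above could declare a single error IRQ followed by one
	 * shareable channel-IRQ range (all numbers made up):
	 *
	 *	static struct resource sh_dmae_resources[] = {
	 *		{ .start = 0xfe008020, .end = 0xfe00808f,
	 *		  .flags = IORESOURCE_MEM },	// channel registers
	 *		{ .start = 0xfe009000, .end = 0xfe00900b,
	 *		  .flags = IORESOURCE_MEM },	// DMARS (optional)
	 *		{ .start = 78, .end = 78,
	 *		  .flags = IORESOURCE_IRQ },	// error IRQ
	 *		{ .start = 48, .end = 53,
	 *		  .flags = IORESOURCE_IRQ |
	 *			   IORESOURCE_IRQ_SHAREABLE },	// channels
	 *	};
	 */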
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 32 bytes requires 32-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;

				if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
					break;
			}

			if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
				irq_cap = 1;
				break;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAC_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	if (dmars)
		iounmap(shdev->dmars);

	platform_set_drvdata(pdev, NULL);
emapdmars:
	iounmap(shdev->chan_reg);
	synchronize_rcu();
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();
	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan->descs_allocated)
			sh_chan->pm_error = pm_runtime_put_sync(dev);
	}

	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_dmae_slave *param = sh_chan->common.private;

		if (!sh_chan->descs_allocated)
			continue;

		if (!sh_chan->pm_error)
			pm_runtime_get_sync(dev);

		if (param) {
			const struct sh_dmae_slave_config *cfg = param->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "sh-dma-engine",
		.pm	= &sh_dmae_pm,
	},
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-dma-engine");