/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA size is 16 MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <cpu/dma.h>
#include <asm/dma-sh.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
        DESC_IDLE,
        DESC_PREPARED,
        DESC_SUBMITTED,
        DESC_COMPLETED, /* completed, have to call callback */
        DESC_WAITING,   /* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 *
 * This driver uses 4-byte burst mode by default.
 * To change the mode, change the value of RS_DEFAULT
 * (e.g. 1-byte burst mode -> (RS_DUAL & ~TS_32)).
 */
#define RS_DEFAULT  (RS_DUAL)

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
        ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
        return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(int id)
{
        unsigned short dmaor = dmaor_read_reg(id);

        dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
        dmaor_write_reg(id, dmaor);
}

static int sh_dmae_rst(int id)
{
        unsigned short dmaor;

        sh_dmae_ctl_stop(id);
        dmaor = dmaor_read_reg(id) | DMAOR_INIT;

        dmaor_write_reg(id, dmaor);
        if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
                pr_warning("dma-sh: Can't initialize DMAOR.\n");
                return -EINVAL;
        }
        return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
                return true; /* working */

        return false; /* waiting */
}

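/*
 * Map the TS (transfer size) bits of CHCR through ts_shift[] to the
 * log2 of the transfer unit in bytes.
 */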
static unsigned int ts_shift[] = TS_SHIFT;
static inline unsigned int calc_xmit_shift(u32 chcr)
{
        int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
                ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);

        return ts_shift[cnt];
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
        sh_dmae_writel(sh_chan, hw->sar, SAR);
        sh_dmae_writel(sh_chan, hw->dar, DAR);
        sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        chcr |= CHCR_DE | CHCR_IE;
        sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
        sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = RS_DEFAULT; /* default is DUAL mode */
        sh_chan->xmit_shift = calc_xmit_shift(chcr);
        sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
        /* While a DMA transfer is in progress, CHCR must not be written */
        if (dmae_is_busy(sh_chan))
                return -EBUSY;

        sh_chan->xmit_shift = calc_xmit_shift(val);
        sh_dmae_writel(sh_chan, val, CHCR);

        return 0;
}

#define DMARS_SHIFT     8
#define DMARS_CHAN_MSK  0x01
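/*
 * Select the peripheral request (MID/RID) for a slave channel: each 16-bit
 * DMARS register holds the settings for two channels, odd-numbered channels
 * use the upper byte.
 */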
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
        u32 addr;
        int shift = 0;

        if (dmae_is_busy(sh_chan))
                return -EBUSY;

        if (sh_chan->id & DMARS_CHAN_MSK)
                shift = DMARS_SHIFT;

        if (sh_chan->id < 6)
                /* DMA0RS0 - DMA0RS2 */
                addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
#ifdef SH_DMARS_BASE1
        else if (sh_chan->id < 12)
                /* DMA1RS0 - DMA1RS2 */
                addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
#endif
        else
                return -EINVAL;

        ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);

        return 0;
}

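/*
 * Called through tx->tx_submit(): assign the next cookie to the descriptor
 * and move all of its chunks from the free list to ld_queue, marking them
 * DESC_SUBMITTED.
 */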
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
        struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
        dma_async_tx_callback callback = tx->callback;
        dma_cookie_t cookie;

        spin_lock_bh(&sh_chan->desc_lock);

        cookie = sh_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;

        sh_chan->common.cookie = cookie;
        tx->cookie = cookie;

        /* Mark all chunks of this descriptor as submitted, move to the queue */
        list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
                /*
                 * All chunks are on the global ld_free, so, we have to find
                 * the end of the chain ourselves
                 */
                if (chunk != desc && (chunk->mark == DESC_IDLE ||
                                      chunk->async_tx.cookie > 0 ||
                                      chunk->async_tx.cookie == -EBUSY ||
                                      &chunk->node == &sh_chan->ld_free))
                        break;
                chunk->mark = DESC_SUBMITTED;
                /* Callback goes to the last chunk */
                chunk->async_tx.callback = NULL;
                chunk->cookie = cookie;
                list_move_tail(&chunk->node, &sh_chan->ld_queue);
                last = chunk;
        }

        last->async_tx.callback = callback;
        last->async_tx.callback_param = tx->callback_param;

        dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
                tx->cookie, &last->async_tx, sh_chan->id,
                desc->hw.sar, desc->hw.tcr, desc->hw.dar);

        spin_unlock_bh(&sh_chan->desc_lock);

        return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
        struct sh_desc *desc;

        list_for_each_entry(desc, &sh_chan->ld_free, node)
                if (desc->mark != DESC_PREPARED) {
                        BUG_ON(desc->mark != DESC_IDLE);
                        list_del(&desc->node);
                        return desc;
                }

        return NULL;
}

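/* Look up the platform data entry describing a slave channel ID */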
static struct sh_dmae_slave_config *sh_dmae_find_slave(
        struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
{
        struct dma_device *dma_dev = sh_chan->common.device;
        struct sh_dmae_device *shdev = container_of(dma_dev,
                                        struct sh_dmae_device, common);
        struct sh_dmae_pdata *pdata = &shdev->pdata;
        int i;

        if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
                return NULL;

        for (i = 0; i < pdata->config_num; i++)
                if (pdata->config[i].slave_id == slave_id)
                        return pdata->config + i;

        return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        struct sh_desc *desc;
        struct sh_dmae_slave *param = chan->private;

        /*
         * This relies on the guarantee from dmaengine that alloc_chan_resources
         * never runs concurrently with itself or free_chan_resources.
         */
        if (param) {
                struct sh_dmae_slave_config *cfg;

                cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
                if (!cfg)
                        return -EINVAL;

                if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
                        return -EBUSY;

                param->config = cfg;

                dmae_set_dmars(sh_chan, cfg->mid_rid);
                dmae_set_chcr(sh_chan, cfg->chcr);
        } else {
                if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400)
                        dmae_set_chcr(sh_chan, RS_DEFAULT);
        }

        spin_lock_bh(&sh_chan->desc_lock);
        while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
                spin_unlock_bh(&sh_chan->desc_lock);
                desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
                if (!desc) {
                        spin_lock_bh(&sh_chan->desc_lock);
                        break;
                }
                dma_async_tx_descriptor_init(&desc->async_tx,
                                        &sh_chan->common);
                desc->async_tx.tx_submit = sh_dmae_tx_submit;
                desc->mark = DESC_IDLE;

                spin_lock_bh(&sh_chan->desc_lock);
                list_add(&desc->node, &sh_chan->ld_free);
                sh_chan->descs_allocated++;
        }
        spin_unlock_bh(&sh_chan->desc_lock);

        return sh_chan->descs_allocated;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        struct sh_desc *desc, *_desc;
        LIST_HEAD(list);

        dmae_halt(sh_chan);

        /* Prepared and not submitted descriptors can still be on the queue */
        if (!list_empty(&sh_chan->ld_queue))
                sh_dmae_chan_ld_cleanup(sh_chan, true);

        if (chan->private) {
                /* The caller is holding dma_list_mutex */
                struct sh_dmae_slave *param = chan->private;
                clear_bit(param->slave_id, sh_dmae_slave_used);
        }

        spin_lock_bh(&sh_chan->desc_lock);

        list_splice_init(&sh_chan->ld_free, &list);
        sh_chan->descs_allocated = 0;

        spin_unlock_bh(&sh_chan->desc_lock);

        list_for_each_entry_safe(desc, _desc, &list, node)
                kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:    DMA channel
 * @flags:      DMA transfer flags
 * @dest:       destination DMA address, incremented when direction equals
 *              DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:        source DMA address, incremented when direction equals
 *              DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:        DMA transfer length
 * @first:      if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:  needed for slave DMA to decide which address to keep constant,
 *              equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the prepared descriptor on success or NULL on failure
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
        unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
        struct sh_desc **first, enum dma_data_direction direction)
{
        struct sh_desc *new;
        size_t copy_size;

        if (!*len)
                return NULL;

        /* Allocate the link descriptor from the free list */
        new = sh_dmae_get_desc(sh_chan);
        if (!new) {
                dev_err(sh_chan->dev, "No free link descriptor available\n");
                return NULL;
        }

        copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

        new->hw.sar = *src;
        new->hw.dar = *dest;
        new->hw.tcr = copy_size;

        if (!*first) {
                /* First desc */
                new->async_tx.cookie = -EBUSY;
                *first = new;
        } else {
                /* Other desc - invisible to the user */
                new->async_tx.cookie = -EINVAL;
        }

        dev_dbg(sh_chan->dev,
                "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
                copy_size, *len, *src, *dest, &new->async_tx,
                new->async_tx.cookie, sh_chan->xmit_shift);

        new->mark = DESC_PREPARED;
        new->async_tx.flags = flags;
        new->direction = direction;

        *len -= copy_size;
        if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
                *src += copy_size;
        if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
                *dest += copy_size;

        return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
        struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
        enum dma_data_direction direction, unsigned long flags)
{
        struct scatterlist *sg;
        struct sh_desc *first = NULL, *new = NULL /* compiler... */;
        LIST_HEAD(tx_list);
        int chunks = 0;
        int i;

        if (!sg_len)
                return NULL;

        for_each_sg(sgl, sg, sg_len, i)
                chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
                        (SH_DMA_TCR_MAX + 1);

        /* Have to lock the whole loop to protect against concurrent release */
        spin_lock_bh(&sh_chan->desc_lock);

        /*
         * Chaining:
         * first descriptor is what user is dealing with in all API calls, its
         *      cookie is at first set to -EBUSY, at tx-submit to a positive
         *      number
         * if more than one chunk is needed further chunks have cookie = -EINVAL
         * the last chunk, if not equal to the first, has cookie = -ENOSPC
         * all chunks are linked onto the tx_list head with their .node heads
         *      only during this function, then they are immediately spliced
         *      back onto the free list in form of a chain
         */
        for_each_sg(sgl, sg, sg_len, i) {
                dma_addr_t sg_addr = sg_dma_address(sg);
                size_t len = sg_dma_len(sg);

                if (!len)
                        goto err_get_desc;

                do {
                        dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
                                i, sg, len, (unsigned long long)sg_addr);

                        if (direction == DMA_FROM_DEVICE)
                                new = sh_dmae_add_desc(sh_chan, flags,
                                                &sg_addr, addr, &len, &first,
                                                direction);
                        else
                                new = sh_dmae_add_desc(sh_chan, flags,
                                                addr, &sg_addr, &len, &first,
                                                direction);
                        if (!new)
                                goto err_get_desc;

                        new->chunks = chunks--;
                        list_add_tail(&new->node, &tx_list);
                } while (len);
        }

        if (new != first)
                new->async_tx.cookie = -ENOSPC;

        /* Put them back on the free list, so, they don't get lost */
        list_splice_tail(&tx_list, &sh_chan->ld_free);

        spin_unlock_bh(&sh_chan->desc_lock);

        return &first->async_tx;

err_get_desc:
        list_for_each_entry(new, &tx_list, node)
                new->mark = DESC_IDLE;
        list_splice(&tx_list, &sh_chan->ld_free);

        spin_unlock_bh(&sh_chan->desc_lock);

        return NULL;
}

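/*
 * Wrap a memcpy request into a one-element scatterlist and reuse the common
 * sh_dmae_prep_sg() path.
 */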
static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
        size_t len, unsigned long flags)
{
        struct sh_dmae_chan *sh_chan;
        struct scatterlist sg;

        if (!chan || !len)
                return NULL;

        chan->private = NULL;

        sh_chan = to_sh_chan(chan);

        sg_init_table(&sg, 1);
        sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
                    offset_in_page(dma_src));
        sg_dma_address(&sg) = dma_src;
        sg_dma_len(&sg) = len;

        return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
                               flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_data_direction direction, unsigned long flags)
{
        struct sh_dmae_slave *param;
        struct sh_dmae_chan *sh_chan;

        if (!chan)
                return NULL;

        sh_chan = to_sh_chan(chan);
        param = chan->private;

        /* Someone calling slave DMA on a public channel? */
        if (!param || !sg_len) {
                dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
                         __func__, param, sg_len, param ? param->slave_id : -1);
                return NULL;
        }

        /*
         * if (param != NULL), this is a successfully requested slave channel,
         * therefore param->config != NULL too.
         */
        return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
                               direction, flags);
}

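/* Release every descriptor queued on the channel back to the free list */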
static void sh_dmae_terminate_all(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

        if (!chan)
                return;

        sh_dmae_chan_ld_cleanup(sh_chan, true);
}

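/*
 * Scan ld_queue once: update completed_cookie, give completed and acked
 * descriptors back to ld_free, and run at most one completion callback.
 * The callback is also returned, so the caller knows to scan again.
 */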
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
        struct sh_desc *desc, *_desc;
        /* Is the "exposed" head of a chain acked? */
        bool head_acked = false;
        dma_cookie_t cookie = 0;
        dma_async_tx_callback callback = NULL;
        void *param = NULL;

        spin_lock_bh(&sh_chan->desc_lock);
        list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
                struct dma_async_tx_descriptor *tx = &desc->async_tx;

                BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
                BUG_ON(desc->mark != DESC_SUBMITTED &&
                       desc->mark != DESC_COMPLETED &&
                       desc->mark != DESC_WAITING);

                /*
                 * queue is ordered, and we use this loop to (1) clean up all
                 * completed descriptors, and to (2) update descriptor flags of
                 * any chunks in a (partially) completed chain
                 */
                if (!all && desc->mark == DESC_SUBMITTED &&
                    desc->cookie != cookie)
                        break;

                if (tx->cookie > 0)
                        cookie = tx->cookie;

                if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
                        if (sh_chan->completed_cookie != desc->cookie - 1)
                                dev_dbg(sh_chan->dev,
                                        "Completing cookie %d, expected %d\n",
                                        desc->cookie,
                                        sh_chan->completed_cookie + 1);
                        sh_chan->completed_cookie = desc->cookie;
                }

                /* Call callback on the last chunk */
                if (desc->mark == DESC_COMPLETED && tx->callback) {
                        desc->mark = DESC_WAITING;
                        callback = tx->callback;
                        param = tx->callback_param;
                        dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
                                tx->cookie, tx, sh_chan->id);
                        BUG_ON(desc->chunks != 1);
                        break;
                }

                if (tx->cookie > 0 || tx->cookie == -EBUSY) {
                        if (desc->mark == DESC_COMPLETED) {
                                BUG_ON(tx->cookie < 0);
                                desc->mark = DESC_WAITING;
                        }
                        head_acked = async_tx_test_ack(tx);
                } else {
                        switch (desc->mark) {
                        case DESC_COMPLETED:
                                desc->mark = DESC_WAITING;
                                /* Fall through */
                        case DESC_WAITING:
                                if (head_acked)
                                        async_tx_ack(&desc->async_tx);
                        }
                }

                dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
                        tx, tx->cookie);

                if (((desc->mark == DESC_COMPLETED ||
                      desc->mark == DESC_WAITING) &&
                     async_tx_test_ack(&desc->async_tx)) || all) {
                        /* Remove from ld_queue list */
                        desc->mark = DESC_IDLE;
                        list_move(&desc->node, &sh_chan->ld_free);
                }
        }
        spin_unlock_bh(&sh_chan->desc_lock);

        if (callback)
                callback(param);

        return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
        while (__ld_cleanup(sh_chan, all))
                ;
}

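/*
 * If the channel is idle, load and start the first DESC_SUBMITTED
 * descriptor on ld_queue.
 */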
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
        struct sh_desc *sd;

        spin_lock_bh(&sh_chan->desc_lock);
        /* DMA work check */
        if (dmae_is_busy(sh_chan)) {
                spin_unlock_bh(&sh_chan->desc_lock);
                return;
        }

        /* Find the first descriptor that has not been transferred yet */
        list_for_each_entry(sd, &sh_chan->ld_queue, node)
                if (sd->mark == DESC_SUBMITTED) {
                        /* Get the ld start address from ld_queue */
                        dmae_set_reg(sh_chan, &sd->hw);
                        dmae_start(sh_chan);
                        break;
                }

        spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        dma_cookie_t *done,
                                        dma_cookie_t *used)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;

        sh_dmae_chan_ld_cleanup(sh_chan, false);

        last_used = chan->cookie;
        last_complete = sh_chan->completed_cookie;
        BUG_ON(last_complete < 0);

        if (done)
                *done = last_complete;

        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}

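/*
 * Per-channel transfer-end interrupt: halt the channel and defer completion
 * handling to the tasklet.
 */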
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
        irqreturn_t ret = IRQ_NONE;
        struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        if (chcr & CHCR_TE) {
                /* DMA stop */
                dmae_halt(sh_chan);

                ret = IRQ_HANDLED;
                tasklet_schedule(&sh_chan->tasklet);
        }

        return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
        int err = 0;
        struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;

        /* IRQ Multi */
        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                int __maybe_unused cnt = 0;
                switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
                case DMTE6_IRQ:
                        cnt++;
#endif
                case DMTE0_IRQ:
                        if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
                                disable_irq(irq);
                                return IRQ_HANDLED;
                        }
                default:
                        return IRQ_NONE;
                }
        } else {
                /* reset dma controller */
                err = sh_dmae_rst(0);
                if (err)
                        return err;
#ifdef SH_DMAC_BASE1
                if (shdev->pdata.mode & SHDMA_DMAOR1) {
                        err = sh_dmae_rst(1);
                        if (err)
                                return err;
                }
#endif
                disable_irq(irq);
                return IRQ_HANDLED;
        }
}
#endif

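/*
 * Tasklet: mark the descriptor whose end address matches the current
 * SAR/DAR as completed, then start the next queued descriptor and clean up.
 */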
static void dmae_do_tasklet(unsigned long data)
{
        struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
        struct sh_desc *desc;
        u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
        u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

        spin_lock(&sh_chan->desc_lock);
        list_for_each_entry(desc, &sh_chan->ld_queue, node) {
                if (desc->mark == DESC_SUBMITTED &&
                    ((desc->direction == DMA_FROM_DEVICE &&
                      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
                     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
                        dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
                                desc->async_tx.cookie, &desc->async_tx,
                                desc->hw.dar);
                        desc->mark = DESC_COMPLETED;
                        break;
                }
        }
        spin_unlock(&sh_chan->desc_lock);

        /* Next desc */
        sh_chan_xfer_ld_queue(sh_chan);
        sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static unsigned int get_dmae_irq(unsigned int id)
{
        unsigned int irq = 0;
        if (id < ARRAY_SIZE(dmte_irq_map))
                irq = dmte_irq_map[id];
        return irq;
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
{
        int err;
        unsigned int irq = get_dmae_irq(id);
        unsigned long irqflags = IRQF_DISABLED;
        struct sh_dmae_chan *new_sh_chan;

        /* alloc channel */
        new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
        if (!new_sh_chan) {
                dev_err(shdev->common.dev,
                        "No free memory for allocating dma channels!\n");
                return -ENOMEM;
        }

        new_sh_chan->dev = shdev->common.dev;
        new_sh_chan->id = id;

        /* Init DMA tasklet */
        tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
                        (unsigned long)new_sh_chan);

        /* Init the channel */
        dmae_init(new_sh_chan);

        spin_lock_init(&new_sh_chan->desc_lock);

        /* Init descriptor management lists */
        INIT_LIST_HEAD(&new_sh_chan->ld_queue);
        INIT_LIST_HEAD(&new_sh_chan->ld_free);

        /* copy struct dma_device */
        new_sh_chan->common.device = &shdev->common;

        /* Add the channel to DMA device channel list */
        list_add_tail(&new_sh_chan->common.device_node,
                        &shdev->common.channels);
        shdev->common.chancnt++;

        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                irqflags = IRQF_SHARED;
#if defined(DMTE6_IRQ)
                if (irq >= DMTE6_IRQ)
                        irq = DMTE6_IRQ;
                else
#endif
                        irq = DMTE0_IRQ;
        }

        snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
                 "sh-dmae%d", new_sh_chan->id);

        /* set up channel irq */
        err = request_irq(irq, &sh_dmae_interrupt, irqflags,
                          new_sh_chan->dev_id, new_sh_chan);
        if (err) {
                dev_err(shdev->common.dev, "DMA channel %d request_irq error "
                        "with return %d\n", id, err);
                goto err_no_irq;
        }

        shdev->chan[id] = new_sh_chan;
        return 0;

err_no_irq:
        /* remove from dmaengine device node */
        list_del(&new_sh_chan->common.device_node);
        kfree(new_sh_chan);
        return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
        int i;

        for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
                if (shdev->chan[i]) {
                        struct sh_dmae_chan *shchan = shdev->chan[i];
                        if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
                                free_irq(dmte_irq_map[i], shchan);

                        list_del(&shchan->common.device_node);
                        kfree(shchan);
                        shdev->chan[i] = NULL;
                }
        }
        shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
        int err = 0, cnt, ecnt;
        unsigned long irqflags = IRQF_DISABLED;
#if defined(CONFIG_CPU_SH4)
        int eirq[] = { DMAE0_IRQ,
#if defined(DMAE1_IRQ)
                        DMAE1_IRQ
#endif
                };
#endif
        struct sh_dmae_device *shdev;

        /* get platform data */
        if (!pdev->dev.platform_data)
                return -ENODEV;

        shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
        if (!shdev) {
                dev_err(&pdev->dev, "Not enough memory\n");
                return -ENOMEM;
        }

        /* platform data */
        memcpy(&shdev->pdata, pdev->dev.platform_data,
               sizeof(struct sh_dmae_pdata));

        /* reset dma controller */
        err = sh_dmae_rst(0);
        if (err)
                goto rst_err;

        /* SH7780/85/23 has DMAOR1 */
        if (shdev->pdata.mode & SHDMA_DMAOR1) {
                err = sh_dmae_rst(1);
                if (err)
                        goto rst_err;
        }

        INIT_LIST_HEAD(&shdev->common.channels);

        dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
        dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

        shdev->common.device_alloc_chan_resources
                = sh_dmae_alloc_chan_resources;
        shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
        shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
        shdev->common.device_is_tx_complete = sh_dmae_is_complete;
        shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

        /* Compulsory for DMA_SLAVE fields */
        shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
        shdev->common.device_terminate_all = sh_dmae_terminate_all;

        shdev->common.dev = &pdev->dev;
        /* Default transfer size of 32 bytes requires 32-byte alignment */
        shdev->common.copy_align = 5;

#if defined(CONFIG_CPU_SH4)
        /* Non Mix IRQ mode SH7722/SH7730 etc... */
        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                irqflags = IRQF_SHARED;
                eirq[0] = DMTE0_IRQ;
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
                eirq[1] = DMTE6_IRQ;
#endif
        }

        for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
                err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
                                  "DMAC Address Error", shdev);
                if (err) {
                        dev_err(&pdev->dev, "DMA device request_irq "
                                "error (irq %d) with return %d\n",
                                eirq[ecnt], err);
                        goto eirq_err;
                }
        }
#endif /* CONFIG_CPU_SH4 */

        /* Create DMA Channel */
        for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
                err = sh_dmae_chan_probe(shdev, cnt);
                if (err)
                        goto chan_probe_err;
        }

        platform_set_drvdata(pdev, shdev);
        dma_async_device_register(&shdev->common);

        return err;

chan_probe_err:
        sh_dmae_chan_remove(shdev);

eirq_err:
        for (ecnt-- ; ecnt >= 0; ecnt--)
                free_irq(eirq[ecnt], shdev);

rst_err:
        kfree(shdev);

        return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

        dma_async_device_unregister(&shdev->common);

        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                free_irq(DMTE0_IRQ, shdev);
#if defined(DMTE6_IRQ)
                free_irq(DMTE6_IRQ, shdev);
#endif
        }

        /* channel data remove */
        sh_dmae_chan_remove(shdev);

        if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
                free_irq(DMAE0_IRQ, shdev);
#if defined(DMAE1_IRQ)
                free_irq(DMAE1_IRQ, shdev);
#endif
        }
        kfree(shdev);

        return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
        sh_dmae_ctl_stop(0);
        if (shdev->pdata.mode & SHDMA_DMAOR1)
                sh_dmae_ctl_stop(1);
}

static struct platform_driver sh_dmae_driver = {
        .remove         = __exit_p(sh_dmae_remove),
        .shutdown       = sh_dmae_shutdown,
        .driver = {
                .name   = "sh-dma-engine",
        },
};

static int __init sh_dmae_init(void)
{
        return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
        platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");