/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMA controller has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <cpu/dma.h>
#include <asm/dma-sh.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 *
 * This driver uses 4-byte burst mode.
 * To change the mode, change the value of RS_DEFAULT,
 * e.g. for 1-byte burst mode use (RS_DUAL & ~TS_32).
 */
#define RS_DEFAULT	(RS_DUAL)

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

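/*
 * Per-channel register access: 'reg' is an offset from the channel's base
 * address taken from dma_base_addr[].
 */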
#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(int id)
{
	unsigned short dmaor = dmaor_read_reg(id);

	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(id, dmaor);
}

static int sh_dmae_rst(int id)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(id);
	dmaor = dmaor_read_reg(id) | DMAOR_INIT;

	dmaor_write_reg(id, dmaor);
	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

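/* A channel is busy while DE is set and TE has not yet been raised */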
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

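/*
 * Derive the transfer-size shift from the TS bits in CHCR: TCR is programmed
 * in transfer units, so byte counts are shifted right by this value.
 */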
static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}

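/* Program source, destination and the unit-scaled transfer count */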
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* While the DMA is running, CHCR must not be written */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_dmae_writel(sh_chan, val, CHCR);
	return 0;
}

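/*
 * Each 16-bit DMARS register serves two channels: even-numbered channels use
 * the low byte, odd-numbered ones the high byte (DMARS_SHIFT).
 */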
#define DMARS1_ADDR	0x04
#define DMARS2_ADDR	0x08
#define DMARS_SHIFT	8
#define DMARS_CHAN_MSK	0x01
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	u32 addr;
	int shift = 0;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (sh_chan->id & DMARS_CHAN_MSK)
		shift = DMARS_SHIFT;

	switch (sh_chan->id) {
	/* DMARS0 */
	case 0:
	case 1:
		addr = SH_DMARS_BASE;
		break;
	/* DMARS1 */
	case 2:
	case 3:
		addr = (SH_DMARS_BASE + DMARS1_ADDR);
		break;
	/* DMARS2 */
	case 4:
	case 5:
		addr = (SH_DMARS_BASE + DMARS2_ADDR);
		break;
	default:
		return -EINVAL;
	}

	ctrl_outw((val << shift) |
		  (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
		  addr);

	return 0;
}

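/*
 * ->tx_submit() hook: assign a cookie and move every chunk of this transfer
 * from ld_free to ld_queue; the user callback is attached to the last chunk
 * only.
 */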
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

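/*
 * ->device_alloc_chan_resources() hook: pre-allocate up to
 * NR_DESCS_PER_CHANNEL descriptors on the channel's free list. The lock is
 * dropped around kzalloc(), which may sleep.
 */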
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return sh_chan->descs_allocated;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/*
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the prepared descriptor or NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev, "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
 * DMA_BIDIRECTIONAL and the SG list contains a single element that points at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

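/*
 * ->device_prep_dma_memcpy() hook: wrap the source buffer in a one-element
 * scatterlist and reuse the common SG preparation path.
 */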
static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

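/*
 * Walk ld_queue once: record completed cookies, run at most one user
 * callback and move acked descriptors back to ld_free. Returns the callback
 * that was run (if any), so the caller can loop until nothing is left to do.
 */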
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

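/* If the channel is idle, start the first submitted descriptor on the queue */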
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *sd;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first untransferred descriptor */
	list_for_each_entry(sd, &sh_chan->ld_queue, node)
		if (sd->mark == DESC_SUBMITTED) {
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &sd->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

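/*
 * Transfer-end interrupt: halt the channel and defer completion handling to
 * the tasklet.
 */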
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	int err = 0;
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;

	/* IRQ Multi */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		int __maybe_unused cnt = 0;
		switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		case DMTE6_IRQ:
			cnt++;
#endif
		case DMTE0_IRQ:
			if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
				disable_irq(irq);
				return IRQ_HANDLED;
			}
		default:
			return IRQ_NONE;
		}
	} else {
		/* reset dma controller */
		err = sh_dmae_rst(0);
		if (err)
			return IRQ_NONE;
#ifdef SH_DMAC_BASE1
		if (shdev->pdata.mode & SHDMA_DMAOR1) {
			err = sh_dmae_rst(1);
			if (err)
				return IRQ_NONE;
		}
#endif
		disable_irq(irq);
		return IRQ_HANDLED;
	}
}
#endif

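/*
 * Completion tasklet: the current SAR identifies the submitted chunk that has
 * just finished (its sar + tcr equals the new SAR); mark it completed, start
 * the next transfer and clean up the queue.
 */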
static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
		    desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

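/* Look up the DMTE interrupt for a channel id; returns 0 if out of range */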
static unsigned int get_dmae_irq(unsigned int id)
{
	unsigned int irq = 0;
	if (id < ARRAY_SIZE(dmte_irq_map))
		irq = dmte_irq_map[id];
	return irq;
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
{
	int err;
	unsigned int irq = get_dmae_irq(id);
	unsigned long irqflags = IRQF_DISABLED;
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
#if defined(DMTE6_IRQ)
		if (irq >= DMTE6_IRQ)
			irq = DMTE6_IRQ;
		else
#endif
			irq = DMTE0_IRQ;
	}

	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
		 "sh-dmae%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	/* CHCR register control function */
	new_sh_chan->set_chcr = dmae_set_chcr;
	/* DMARS register control function */
	new_sh_chan->set_dmars = dmae_set_dmars;

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *shchan = shdev->chan[i];
			if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
				free_irq(dmte_irq_map[i], shchan);

			list_del(&shchan->common.device_node);
			kfree(shchan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	int err = 0, cnt, ecnt;
	unsigned long irqflags = IRQF_DISABLED;
#if defined(CONFIG_CPU_SH4)
	int eirq[] = { DMAE0_IRQ,
#if defined(DMAE1_IRQ)
			DMAE1_IRQ
#endif
		};
#endif
	struct sh_dmae_device *shdev;

	/* get platform data */
	if (!pdev->dev.platform_data)
		return -ENODEV;

	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	/* platform data */
	memcpy(&shdev->pdata, pdev->dev.platform_data,
	       sizeof(struct sh_dmae_pdata));

	/* reset dma controller */
	err = sh_dmae_rst(0);
	if (err)
		goto rst_err;

	/* SH7780/85/23 has DMAOR1 */
	if (shdev->pdata.mode & SHDMA_DMAOR1) {
		err = sh_dmae_rst(1);
		if (err)
			goto rst_err;
	}

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 32 bytes requires 32-byte alignment */
	shdev->common.copy_align = 5;

#if defined(CONFIG_CPU_SH4)
	/* Mixed IRQ mode (SH7722/SH7730 etc.): error IRQ shares the DMTE lines */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
		eirq[0] = DMTE0_IRQ;
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		eirq[1] = DMTE6_IRQ;
#endif
	}

	for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
		err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
				  "DMAC Address Error", shdev);
		if (err) {
			dev_err(&pdev->dev, "DMA device request_irq "
				"error (irq %d) with return %d\n",
				eirq[ecnt], err);
			goto eirq_err;
		}
	}
#endif /* CONFIG_CPU_SH4 */

	/* Create DMA Channel */
	for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
		err = sh_dmae_chan_probe(shdev, cnt);
		if (err)
			goto chan_probe_err;
	}

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);

eirq_err:
	for (ecnt-- ; ecnt >= 0; ecnt--)
		free_irq(eirq[ecnt], shdev);

rst_err:
	kfree(shdev);

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&shdev->common);

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		free_irq(DMTE0_IRQ, shdev);
#if defined(DMTE6_IRQ)
		free_irq(DMTE6_IRQ, shdev);
#endif
	}

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
		free_irq(DMAE0_IRQ, shdev);
#if defined(DMAE1_IRQ)
		free_irq(DMAE1_IRQ, shdev);
#endif
	}
	kfree(shdev);

	return 0;
}

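/* Quiesce the controller(s) on shutdown: clear the DMAOR NMI/address-error flags */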
static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(0);
	if (shdev->pdata.mode & SHDMA_DMAOR1)
		sh_dmae_ctl_stop(1);
}

static struct platform_driver sh_dmae_driver = {
	.remove = __exit_p(sh_dmae_remove),
	.shutdown = sh_dmae_shutdown,
	.driver = {
		.name = "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");