/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac             : Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma      : Atmel DMA controller entity related
 * atc_ / atchan        : Atmel DMA Channel entity related
 */

#define ATC_DEFAULT_CFG         (ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLB       (ATC_SIF(AT_DMA_MEM_IF) \
                                |ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This can
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
        return list_first_entry(&atchan->active_list,
                                struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
        return list_first_entry(&atchan->queue,
                                struct at_desc, desc_node);
}
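
/*
 * Descriptor lifecycle, as implemented by the helpers below: descriptors
 * start on a per-channel free_list (see atc_desc_get()/atc_desc_put()),
 * move to active_list or queue when a transaction is submitted, and are
 * spliced back onto free_list by atc_chain_complete(), so the pool is
 * recycled rather than freed.
 */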

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
                                            gfp_t gfp_flags)
{
        struct at_desc  *desc = NULL;
        struct at_dma   *atdma = to_at_dma(chan->device);
        dma_addr_t phys;

        desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
        if (desc) {
                memset(desc, 0, sizeof(struct at_desc));
                INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                /* txd.flags will be overwritten in prep functions */
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.tx_submit = atc_tx_submit;
                desc->txd.phys = phys;
        }

        return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        struct at_desc *ret = NULL;
        unsigned long flags;
        unsigned int i = 0;
        LIST_HEAD(tmp_list);

        spin_lock_irqsave(&atchan->lock, flags);
        list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&atchan->chan_common),
                                "desc %p not ACKed\n", desc);
        }
        spin_unlock_irqrestore(&atchan->lock, flags);
        dev_vdbg(chan2dev(&atchan->chan_common),
                "scanned %u descriptors on freelist\n", i);

        /* no more descriptors available in initial pool: create one more */
        if (!ret) {
                ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
                if (ret) {
                        spin_lock_irqsave(&atchan->lock, flags);
                        atchan->descs_allocated++;
                        spin_unlock_irqrestore(&atchan->lock, flags);
                } else {
                        dev_err(chan2dev(&atchan->chan_common),
                                        "not enough descriptors available\n");
                }
        }

        return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
        if (desc) {
                struct at_desc *child;
                unsigned long flags;

                spin_lock_irqsave(&atchan->lock, flags);
                list_for_each_entry(child, &desc->tx_list, desc_node)
                        dev_vdbg(chan2dev(&atchan->chan_common),
                                        "moving child desc %p to freelist\n",
                                        child);
                list_splice_init(&desc->tx_list, &atchan->free_list);
                dev_vdbg(chan2dev(&atchan->chan_common),
                         "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &atchan->free_list);
                spin_unlock_irqrestore(&atchan->lock, flags);
        }
}

/**
 * atc_desc_chain - build a chain by adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
                           struct at_desc *desc)
{
        if (!(*first)) {
                *first = desc;
        } else {
                /* inform the HW lli about chaining */
                (*prev)->lli.dscr = desc->txd.phys;
                /* insert the link descriptor to the LD ring */
                list_add_tail(&desc->desc_node,
                                &(*first)->tx_list);
        }
        *prev = desc;
}
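
/*
 * Usage sketch, paraphrasing the prep_* loops below: callers walk their
 * source data one chunk at a time and let atc_desc_chain() link the
 * hardware LLIs together:
 *
 *      struct at_desc *first = NULL, *prev = NULL, *desc;
 *
 *      for (each chunk) {
 *              desc = atc_desc_get(atchan);
 *              if (!desc)
 *                      goto err_desc_get;
 *              // fill in desc->lli.{saddr,daddr,ctrla,ctrlb}
 *              atc_desc_chain(&first, &prev, desc);
 *      }
 *      set_desc_eol(desc);             // terminate the hardware list
 *      first->txd.cookie = -EBUSY;     // first desc carries the txd
 */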

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
        struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);

        /* ASSERT:  channel is idle */
        if (atc_chan_is_enabled(atchan)) {
                dev_err(chan2dev(&atchan->chan_common),
                        "BUG: Attempted to start non-idle channel\n");
                dev_err(chan2dev(&atchan->chan_common),
                        "  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
                        channel_readl(atchan, SADDR),
                        channel_readl(atchan, DADDR),
                        channel_readl(atchan, CTRLA),
                        channel_readl(atchan, CTRLB),
                        channel_readl(atchan, DSCR));

                /* The tasklet will hopefully advance the queue... */
                return;
        }

        vdbg_dump_regs(atchan);

        channel_writel(atchan, SADDR, 0);
        channel_writel(atchan, DADDR, 0);
        channel_writel(atchan, CTRLA, 0);
        channel_writel(atchan, CTRLB, 0);
        channel_writel(atchan, DSCR, first->txd.phys);
        dma_writel(atdma, CHER, atchan->mask);

        vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
        struct dma_async_tx_descriptor  *txd = &desc->txd;

        dev_vdbg(chan2dev(&atchan->chan_common),
                "descriptor %u complete\n", txd->cookie);

        /* mark the descriptor as complete for non cyclic cases only */
        if (!atc_chan_is_cyclic(atchan))
                dma_cookie_complete(txd);

        /* move children to free_list */
        list_splice_init(&desc->tx_list, &atchan->free_list);
        /* move myself to free_list */
        list_move(&desc->desc_node, &atchan->free_list);

        /* unmap dma addresses (not on slave channels) */
        if (!atchan->chan_common.private) {
                struct device *parent = chan2parent(&atchan->chan_common);
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                        if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
                                dma_unmap_single(parent,
                                                desc->lli.daddr,
                                                desc->len, DMA_FROM_DEVICE);
                        else
                                dma_unmap_page(parent,
                                                desc->lli.daddr,
                                                desc->len, DMA_FROM_DEVICE);
                }
                if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                        if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
                                dma_unmap_single(parent,
                                                desc->lli.saddr,
                                                desc->len, DMA_TO_DEVICE);
                        else
                                dma_unmap_page(parent,
                                                desc->lli.saddr,
                                                desc->len, DMA_TO_DEVICE);
                }
        }

        /*
         * For cyclic transfers, there is no need to run the callback
         * function while stopping.
         */
        if (!atc_chan_is_cyclic(atchan)) {
                dma_async_tx_callback   callback = txd->callback;
                void                    *param = txd->callback_param;

                /*
                 * The API requires that no submissions are done from a
                 * callback, so we don't need to drop the lock here
                 */
                if (callback)
                        callback(param);
        }

        dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Submit queued descriptors, if any, before handling the completed ones.
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        LIST_HEAD(list);

        dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

        BUG_ON(atc_chan_is_enabled(atchan));

        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
         */
        if (!list_empty(&atchan->queue))
                atc_dostart(atchan, atc_first_queued(atchan));
        /* empty active_list now it is completed */
        list_splice_init(&atchan->active_list, &list);
        /* empty queue list by moving descriptors (if any) to active_list */
        list_splice_init(&atchan->queue, &atchan->active_list);

        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
        struct at_desc  *desc, *_desc;
        struct at_desc  *child;

        dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

        list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
                if (!(desc->lli.ctrla & ATC_DONE))
                        /* This one is currently in progress */
                        return;

                list_for_each_entry(child, &desc->tx_list, desc_node)
                        if (!(child->lli.ctrla & ATC_DONE))
                                /* Currently in progress */
                                return;

                /*
                 * No descriptors so far seem to be in progress, i.e.
                 * this chain must be done.
                 */
                atc_chain_complete(atchan, desc);
        }
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
        dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

        if (list_empty(&atchan->active_list) ||
            list_is_singular(&atchan->active_list)) {
                atc_complete_all(atchan);
        } else {
                atc_chain_complete(atchan, atc_first_active(atchan));
                /* advance work */
                atc_dostart(atchan, atc_first_active(atchan));
        }
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
        struct at_desc *bad_desc;
        struct at_desc *child;

        /*
         * The descriptor currently at the head of the active list is
         * broken. Since we don't have any way to report errors, we'll
         * just have to scream loudly and try to carry on.
         */
        bad_desc = atc_first_active(atchan);
        list_del_init(&bad_desc->desc_node);

        /* As we are stopped, take advantage to push queued descriptors
         * in active_list */
        list_splice_init(&atchan->queue, atchan->active_list.prev);

        /* Try to restart the controller */
        if (!list_empty(&atchan->active_list))
                atc_dostart(atchan, atc_first_active(atchan));

        /*
         * KERN_CRIT may seem harsh, but since this only happens
         * when someone submits a bad physical address in a
         * descriptor, we should consider ourselves lucky that the
         * controller flagged an error instead of scribbling over
         * random memory locations.
         */
        dev_crit(chan2dev(&atchan->chan_common),
                        "Bad descriptor submitted for DMA!\n");
        dev_crit(chan2dev(&atchan->chan_common),
                        "  cookie: %d\n", bad_desc->txd.cookie);
        atc_dump_lli(atchan, &bad_desc->lli);
        list_for_each_entry(child, &bad_desc->tx_list, desc_node)
                atc_dump_lli(atchan, &child->lli);

        /* Pretend the descriptor completed successfully */
        atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
        struct at_desc                  *first = atc_first_active(atchan);
        struct dma_async_tx_descriptor  *txd = &first->txd;
        dma_async_tx_callback           callback = txd->callback;
        void                            *param = txd->callback_param;

        dev_vdbg(chan2dev(&atchan->chan_common),
                        "new cyclic period llp 0x%08x\n",
                        channel_readl(atchan, DSCR));

        if (callback)
                callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
        struct at_dma_chan *atchan = (struct at_dma_chan *)data;
        unsigned long flags;

        spin_lock_irqsave(&atchan->lock, flags);
        if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
                atc_handle_error(atchan);
        else if (atc_chan_is_cyclic(atchan))
                atc_handle_cyclic(atchan);
        else
                atc_advance_work(atchan);

        spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
        struct at_dma           *atdma = (struct at_dma *)dev_id;
        struct at_dma_chan      *atchan;
        int                     i;
        u32                     status, pending, imr;
        int                     ret = IRQ_NONE;

        do {
                imr = dma_readl(atdma, EBCIMR);
                status = dma_readl(atdma, EBCISR);
                pending = status & imr;

                if (!pending)
                        break;

                dev_vdbg(atdma->dma_common.dev,
                        "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
                        status, imr, pending);

                for (i = 0; i < atdma->dma_common.chancnt; i++) {
                        atchan = &atdma->chan[i];
                        if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
                                if (pending & AT_DMA_ERR(i)) {
                                        /* Disable channel on AHB error */
                                        dma_writel(atdma, CHDR,
                                                AT_DMA_RES(i) | atchan->mask);
                                        /* Give information to tasklet */
                                        set_bit(ATC_IS_ERROR, &atchan->status);
                                }
                                tasklet_schedule(&atchan->tasklet);
                                ret = IRQ_HANDLED;
                        }
                }

        } while (pending);

        return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @desc: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct at_desc          *desc = txd_to_at_desc(tx);
        struct at_dma_chan      *atchan = to_at_dma_chan(tx->chan);
        dma_cookie_t            cookie;
        unsigned long           flags;

        spin_lock_irqsave(&atchan->lock, flags);
        cookie = dma_cookie_assign(tx);

        if (list_empty(&atchan->active_list)) {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
                                desc->txd.cookie);
                atc_dostart(atchan, desc);
                list_add_tail(&desc->desc_node, &atchan->active_list);
        } else {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
                                desc->txd.cookie);
                list_add_tail(&desc->desc_node, &atchan->queue);
        }

        spin_unlock_irqrestore(&atchan->lock, flags);

        return cookie;
}
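
/*
 * A minimal client-side sketch (not part of this driver) of how a
 * prepared transaction reaches atc_tx_submit(): clients go through the
 * generic dmaengine wrappers rather than calling tx_submit directly.
 * "my_done" and "my_data" are hypothetical.
 *
 *      struct dma_async_tx_descriptor *txd;
 *      dma_cookie_t cookie;
 *
 *      txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *                                    DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *      txd->callback = my_done;
 *      txd->callback_param = my_data;
 *      cookie = dmaengine_submit(txd);         // lands in atc_tx_submit()
 *      dma_async_issue_pending(chan);          // kicks atc_issue_pending()
 */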

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_desc          *desc = NULL;
        struct at_desc          *first = NULL;
        struct at_desc          *prev = NULL;
        size_t                  xfer_count;
        size_t                  offset;
        unsigned int            src_width;
        unsigned int            dst_width;
        u32                     ctrla;
        u32                     ctrlb;

        dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
                        dest, src, len, flags);

        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
                return NULL;
        }

        ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
                | ATC_SRC_ADDR_MODE_INCR
                | ATC_DST_ADDR_MODE_INCR
                | ATC_FC_MEM2MEM;

        /*
         * We can be a lot more clever here, but this should take care
         * of the most common optimization.
         */
        if (!((src | dest | len) & 3)) {
                ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
                src_width = dst_width = 2;
        } else if (!((src | dest | len) & 1)) {
                ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
                src_width = dst_width = 1;
        } else {
                ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
                src_width = dst_width = 0;
        }

        for (offset = 0; offset < len; offset += xfer_count << src_width) {
                xfer_count = min_t(size_t, (len - offset) >> src_width,
                                ATC_BTSIZE_MAX);

                desc = atc_desc_get(atchan);
                if (!desc)
                        goto err_desc_get;

                desc->lli.saddr = src + offset;
                desc->lli.daddr = dest + offset;
                desc->lli.ctrla = ctrla | xfer_count;
                desc->lli.ctrlb = ctrlb;

                desc->txd.cookie = 0;

                atc_desc_chain(&first, &prev, desc);
        }

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->len = len;

        /* set end-of-link to the last link descriptor of list */
        set_desc_eol(desc);

        first->txd.flags = flags; /* client is in control of this ack */

        return &first->txd;

err_desc_get:
        atc_desc_put(atchan, first);
        return NULL;
}
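
/*
 * Worked example for the chunking loop above (illustrative numbers,
 * assuming ATC_BTSIZE_MAX is 0xffff as defined in at_hdmac_regs.h):
 * with src, dest and len all word aligned, src_width is 2, so a 1 MiB
 * copy is 0x40000 32-bit transfers. The loop then builds five linked
 * descriptors, four of 0xffff transfers each plus a final one of 4,
 * since 4 * 0xffff + 4 = 0x40000.
 */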


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma_slave     *atslave = chan->private;
        struct dma_slave_config *sconfig = &atchan->dma_sconfig;
        struct at_desc          *first = NULL;
        struct at_desc          *prev = NULL;
        u32                     ctrla;
        u32                     ctrlb;
        dma_addr_t              reg;
        unsigned int            reg_width;
        unsigned int            mem_width;
        unsigned int            i;
        struct scatterlist      *sg;
        size_t                  total_len = 0;

        dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
                        sg_len,
                        direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
                        flags);

        if (unlikely(!atslave || !sg_len)) {
                dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
                return NULL;
        }

        ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
                | ATC_DCSIZE(sconfig->dst_maxburst);
        ctrlb = ATC_IEN;

        switch (direction) {
        case DMA_MEM_TO_DEV:
                reg_width = convert_buswidth(sconfig->dst_addr_width);
                ctrla |=  ATC_DST_WIDTH(reg_width);
                ctrlb |=  ATC_DST_ADDR_MODE_FIXED
                        | ATC_SRC_ADDR_MODE_INCR
                        | ATC_FC_MEM2PER
                        | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
                reg = sconfig->dst_addr;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct at_desc  *desc;
                        u32             len;
                        u32             mem;

                        desc = atc_desc_get(atchan);
                        if (!desc)
                                goto err_desc_get;

                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
                        mem_width = 2;
                        if (unlikely(mem & 3 || len & 3))
                                mem_width = 0;

                        desc->lli.saddr = mem;
                        desc->lli.daddr = reg;
                        desc->lli.ctrla = ctrla
                                        | ATC_SRC_WIDTH(mem_width)
                                        | len >> mem_width;
                        desc->lli.ctrlb = ctrlb;

                        atc_desc_chain(&first, &prev, desc);
                        total_len += len;
                }
                break;
        case DMA_DEV_TO_MEM:
                reg_width = convert_buswidth(sconfig->src_addr_width);
                ctrla |=  ATC_SRC_WIDTH(reg_width);
                ctrlb |=  ATC_DST_ADDR_MODE_INCR
                        | ATC_SRC_ADDR_MODE_FIXED
                        | ATC_FC_PER2MEM
                        | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

                reg = sconfig->src_addr;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct at_desc  *desc;
                        u32             len;
                        u32             mem;

                        desc = atc_desc_get(atchan);
                        if (!desc)
                                goto err_desc_get;

                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
                        mem_width = 2;
                        if (unlikely(mem & 3 || len & 3))
                                mem_width = 0;

                        desc->lli.saddr = reg;
                        desc->lli.daddr = mem;
                        desc->lli.ctrla = ctrla
                                        | ATC_DST_WIDTH(mem_width)
                                        | len >> reg_width;
                        desc->lli.ctrlb = ctrlb;

                        atc_desc_chain(&first, &prev, desc);
                        total_len += len;
                }
                break;
        default:
                return NULL;
        }

        /* set end-of-link to the last link descriptor of list */
        set_desc_eol(prev);

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->len = total_len;

        /* first link descriptor of list is responsible for flags */
        first->txd.flags = flags; /* client is in control of this ack */

        return &first->txd;

err_desc_get:
        dev_err(chan2dev(chan), "not enough descriptors available\n");
        atc_desc_put(atchan, first);
        return NULL;
}
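
/*
 * A minimal client-side sketch (not part of this driver) of the slave
 * path: configure the channel first, then prepare the scatterlist
 * transfer. "DEV_TX_FIFO" stands in for a real peripheral register
 * address and is purely illustrative.
 *
 *      struct dma_slave_config cfg = {
 *              .direction      = DMA_MEM_TO_DEV,
 *              .dst_addr       = DEV_TX_FIFO,
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_maxburst   = 4,
 *      };
 *
 *      dmaengine_slave_config(chan, &cfg);   // reaches set_runtime_config()
 *      txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *                                    DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 */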

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
                size_t period_len, enum dma_transfer_direction direction)
{
        if (period_len > (ATC_BTSIZE_MAX << reg_width))
                goto err_out;
        if (unlikely(period_len & ((1 << reg_width) - 1)))
                goto err_out;
        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
                goto err_out;
        if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
                goto err_out;

        return 0;

err_out:
        return -EINVAL;
}
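
/*
 * Example of the limits enforced above (illustrative numbers): for a
 * 16-bit peripheral, reg_width is 1, so each period may span at most
 * ATC_BTSIZE_MAX << 1 bytes (128 KiB - 2 for a 0xffff maximum), and
 * both buf_addr and period_len must be even, i.e. have bit 0 clear.
 */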

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
                unsigned int period_index, dma_addr_t buf_addr,
                unsigned int reg_width, size_t period_len,
                enum dma_transfer_direction direction)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct dma_slave_config *sconfig = &atchan->dma_sconfig;
        u32                     ctrla;

        /* prepare common CTRLA value */
        ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
                | ATC_DCSIZE(sconfig->dst_maxburst)
                | ATC_DST_WIDTH(reg_width)
                | ATC_SRC_WIDTH(reg_width)
                | period_len >> reg_width;

        switch (direction) {
        case DMA_MEM_TO_DEV:
                desc->lli.saddr = buf_addr + (period_len * period_index);
                desc->lli.daddr = sconfig->dst_addr;
                desc->lli.ctrla = ctrla;
                desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
                                | ATC_SRC_ADDR_MODE_INCR
                                | ATC_FC_MEM2PER
                                | ATC_SIF(AT_DMA_MEM_IF)
                                | ATC_DIF(AT_DMA_PER_IF);
                break;

        case DMA_DEV_TO_MEM:
                desc->lli.saddr = sconfig->src_addr;
                desc->lli.daddr = buf_addr + (period_len * period_index);
                desc->lli.ctrla = ctrla;
                desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
                                | ATC_SRC_ADDR_MODE_FIXED
                                | ATC_FC_PER2MEM
                                | ATC_SIF(AT_DMA_PER_IF)
                                | ATC_DIF(AT_DMA_MEM_IF);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 * @context: transfer context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma_slave     *atslave = chan->private;
        struct dma_slave_config *sconfig = &atchan->dma_sconfig;
        struct at_desc          *first = NULL;
        struct at_desc          *prev = NULL;
        unsigned long           was_cyclic;
        unsigned int            reg_width;
        unsigned int            periods = buf_len / period_len;
        unsigned int            i;

        dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
                        direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
                        buf_addr,
                        periods, buf_len, period_len);

        if (unlikely(!atslave || !buf_len || !period_len)) {
                dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
                return NULL;
        }

        was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
        if (was_cyclic) {
                dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
                return NULL;
        }

        if (sconfig->direction == DMA_MEM_TO_DEV)
                reg_width = convert_buswidth(sconfig->dst_addr_width);
        else
                reg_width = convert_buswidth(sconfig->src_addr_width);

        /* Check for too big/unaligned periods and unaligned DMA buffer */
        if (atc_dma_cyclic_check_values(reg_width, buf_addr,
                        period_len, direction))
                goto err_out;

        /* build cyclic linked list */
        for (i = 0; i < periods; i++) {
                struct at_desc  *desc;

                desc = atc_desc_get(atchan);
                if (!desc)
                        goto err_desc_get;

                if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
                                             reg_width, period_len, direction))
                        goto err_desc_get;

                atc_desc_chain(&first, &prev, desc);
        }

        /* let's make a cyclic list */
        prev->lli.dscr = first->txd.phys;

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->len = buf_len;

        return &first->txd;

err_desc_get:
        dev_err(chan2dev(chan), "not enough descriptors available\n");
        atc_desc_put(atchan, first);
err_out:
        clear_bit(ATC_IS_CYCLIC, &atchan->status);
        return NULL;
}
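
/*
 * A minimal client-side sketch of a cyclic transfer (not part of this
 * driver; the wrapper signature is assumed from the dmaengine API of
 * this kernel generation, and "my_period_done" is hypothetical). The
 * callback then fires once per period from atc_handle_cyclic():
 *
 *      txd = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * PAGE_SIZE,
 *                                      PAGE_SIZE, DMA_DEV_TO_MEM,
 *                                      DMA_PREP_INTERRUPT);
 *      txd->callback = my_period_done;
 *      txd->callback_param = my_data;
 *      dmaengine_submit(txd);
 *      dma_async_issue_pending(chan);
 *      ...
 *      dmaengine_terminate_all(chan);  // the only way to stop a cyclic job
 */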

static int set_runtime_config(struct dma_chan *chan,
                              struct dma_slave_config *sconfig)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);

        /* Check that the channel is configured for slave transfers */
        if (!chan->private)
                return -EINVAL;

        memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

        convert_burst(&atchan->dma_sconfig.src_maxburst);
        convert_burst(&atchan->dma_sconfig.dst_maxburst);

        return 0;
}
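
/*
 * Illustration of the burst conversion (an assumption about the
 * convert_burst() helper in at_hdmac_regs.h, which maps the dmaengine
 * beat count onto the controller's chunk-size encoding): client
 * maxburst values of 1, 4, 8 or 16 beats would become 0, 1, 2 or 3 in
 * the ATC_SCSIZE/ATC_DCSIZE fields built by the prep functions above.
 */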


static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                       unsigned long arg)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        int                     chan_id = atchan->chan_common.chan_id;
        unsigned long           flags;

        LIST_HEAD(list);

        dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

        if (cmd == DMA_PAUSE) {
                spin_lock_irqsave(&atchan->lock, flags);

                dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
                set_bit(ATC_IS_PAUSED, &atchan->status);

                spin_unlock_irqrestore(&atchan->lock, flags);
        } else if (cmd == DMA_RESUME) {
                if (!atc_chan_is_paused(atchan))
                        return 0;

                spin_lock_irqsave(&atchan->lock, flags);

                dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
                clear_bit(ATC_IS_PAUSED, &atchan->status);

                spin_unlock_irqrestore(&atchan->lock, flags);
        } else if (cmd == DMA_TERMINATE_ALL) {
                struct at_desc  *desc, *_desc;
                /*
                 * This is only called when something went wrong elsewhere, so
                 * we don't really care about the data. Just disable the
                 * channel. We still have to poll the channel enable bit due
                 * to AHB/HSB limitations.
                 */
                spin_lock_irqsave(&atchan->lock, flags);

                /* disabling channel: must also remove suspend state */
                dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

                /* confirm that this channel is disabled */
                while (dma_readl(atdma, CHSR) & atchan->mask)
                        cpu_relax();

                /* active_list entries will end up before queued entries */
                list_splice_init(&atchan->queue, &list);
                list_splice_init(&atchan->active_list, &list);

                /* Flush all pending and queued descriptors */
                list_for_each_entry_safe(desc, _desc, &list, desc_node)
                        atc_chain_complete(atchan, desc);

                clear_bit(ATC_IS_PAUSED, &atchan->status);
                /* if channel dedicated to cyclic operations, free it */
                clear_bit(ATC_IS_CYCLIC, &atchan->status);

                spin_unlock_irqrestore(&atchan->lock, flags);
        } else if (cmd == DMA_SLAVE_CONFIG) {
                return set_runtime_config(chan, (struct dma_slave_config *)arg);
        } else {
                return -ENXIO;
        }

        return 0;
}
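
/*
 * Clients reach atc_control() through the generic dmaengine wrappers;
 * a minimal sketch (not part of this driver):
 *
 *      dmaengine_pause(chan);          // DMA_PAUSE: suspend the channel
 *      dmaengine_resume(chan);         // DMA_RESUME: resume it
 *      dmaengine_terminate_all(chan);  // DMA_TERMINATE_ALL: drop all work
 */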

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        dma_cookie_t            last_used;
        dma_cookie_t            last_complete;
        unsigned long           flags;
        enum dma_status         ret;

        spin_lock_irqsave(&atchan->lock, flags);

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret != DMA_SUCCESS) {
                atc_cleanup_descriptors(atchan);

                ret = dma_cookie_status(chan, cookie, txstate);
        }

        last_complete = chan->completed_cookie;
        last_used = chan->cookie;

        spin_unlock_irqrestore(&atchan->lock, flags);

        if (ret != DMA_SUCCESS)
                dma_set_residue(txstate, atc_first_active(atchan)->len);

        if (atc_chan_is_paused(atchan))
                ret = DMA_PAUSED;

        dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
                 ret, cookie, last_complete ? last_complete : 0,
                 last_used ? last_used : 0);

        return ret;
}
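
/*
 * Client-side polling sketch (not part of this driver), using the
 * generic helper that ends up in atc_tx_status():
 *
 *      struct dma_tx_state state;
 *
 *      if (dmaengine_tx_status(chan, cookie, &state) == DMA_IN_PROGRESS)
 *              pr_debug("still running, %u bytes left\n", state.residue);
 */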

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        unsigned long           flags;

        dev_vdbg(chan2dev(chan), "issue_pending\n");

        /* Not needed for cyclic transfers */
        if (atc_chan_is_cyclic(atchan))
                return;

        spin_lock_irqsave(&atchan->lock, flags);
        if (!atc_chan_is_enabled(atchan)) {
                atc_advance_work(atchan);
        }
        spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        struct at_desc          *desc;
        struct at_dma_slave     *atslave;
        unsigned long           flags;
        int                     i;
        u32                     cfg;
        LIST_HEAD(tmp_list);

        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

        /* ASSERT:  channel is idle */
        if (atc_chan_is_enabled(atchan)) {
                dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
                return -EIO;
        }

        cfg = ATC_DEFAULT_CFG;

        atslave = chan->private;
        if (atslave) {
                /*
                 * We need controller-specific data to set up slave
                 * transfers.
                 */
                BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

                /* if cfg configuration specified take it instead of default */
                if (atslave->cfg)
                        cfg = atslave->cfg;
        }

        /* have we already been set up?
         * reconfigure channel but no need to reallocate descriptors */
        if (!list_empty(&atchan->free_list))
                return atchan->descs_allocated;

        /* Allocate initial pool of descriptors */
        for (i = 0; i < init_nr_desc_per_channel; i++) {
                desc = atc_alloc_descriptor(chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(atdma->dma_common.dev,
                                "Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->desc_node, &tmp_list);
        }

        spin_lock_irqsave(&atchan->lock, flags);
        atchan->descs_allocated = i;
        list_splice(&tmp_list, &atchan->free_list);
        dma_cookie_init(chan);
        spin_unlock_irqrestore(&atchan->lock, flags);

        /* channel parameters */
        channel_writel(atchan, CFG, cfg);

        dev_dbg(chan2dev(chan),
                "alloc_chan_resources: allocated %d descriptors\n",
                atchan->descs_allocated);

        return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        struct at_desc          *desc, *_desc;
        LIST_HEAD(list);

        dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
                atchan->descs_allocated);

        /* ASSERT:  channel is idle */
        BUG_ON(!list_empty(&atchan->active_list));
        BUG_ON(!list_empty(&atchan->queue));
        BUG_ON(atc_chan_is_enabled(atchan));

        list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
                list_del(&desc->desc_node);
                /* free link descriptor */
                dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
        }
        list_splice_init(&atchan->free_list, &list);
        atchan->descs_allocated = 0;
        atchan->status = 0;

        dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
        .nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
        .nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
        {
                .compatible = "atmel,at91sam9rl-dma",
                .data = &at91sam9rl_config,
        }, {
                .compatible = "atmel,at91sam9g45-dma",
                .data = &at91sam9g45_config,
        }, {
                /* sentinel */
        }
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif
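
/*
 * A device-tree node matching the table above might look like the
 * following sketch (register base and interrupt numbers are
 * illustrative; the binding documentation is authoritative):
 *
 *      dma-controller@ffffec00 {
 *              compatible = "atmel,at91sam9g45-dma";
 *              reg = <0xffffec00 0x200>;
 *              interrupts = <21 4 0>;
 *      };
 */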

static const struct platform_device_id atdma_devtypes[] = {
        {
                .name = "at91sam9rl_dma",
                .driver_data = (unsigned long) &at91sam9rl_config,
        }, {
                .name = "at91sam9g45_dma",
                .driver_data = (unsigned long) &at91sam9g45_config,
        }, {
                /* sentinel */
        }
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
                                                struct platform_device *pdev)
{
        if (pdev->dev.of_node) {
                const struct of_device_id *match;
                match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
                if (match == NULL)
                        return NULL;
                return match->data;
        }
        return (struct at_dma_platform_data *)
                        platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
        dma_writel(atdma, EN, 0);

        /* disable all interrupts */
        dma_writel(atdma, EBCIDR, -1L);

        /* confirm that all channels are disabled */
        while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
                cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
        struct resource         *io;
        struct at_dma           *atdma;
        size_t                  size;
        int                     irq;
        int                     err;
        int                     i;
        const struct at_dma_platform_data *plat_dat;

        /* setup platform data for each SoC */
        dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
        dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
        dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

        /* get DMA parameters from controller type */
        plat_dat = at_dma_get_driver_data(pdev);
        if (!plat_dat)
                return -ENODEV;

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!io)
                return -EINVAL;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        size = sizeof(struct at_dma);
        size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
        atdma = kzalloc(size, GFP_KERNEL);
        if (!atdma)
                return -ENOMEM;

        /* discover transaction capabilities */
        atdma->dma_common.cap_mask = plat_dat->cap_mask;
        atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

        size = resource_size(io);
        if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
                err = -EBUSY;
                goto err_kfree;
        }

        atdma->regs = ioremap(io->start, size);
        if (!atdma->regs) {
                err = -ENOMEM;
                goto err_release_r;
        }

        atdma->clk = clk_get(&pdev->dev, "dma_clk");
        if (IS_ERR(atdma->clk)) {
                err = PTR_ERR(atdma->clk);
                goto err_clk;
        }
        clk_enable(atdma->clk);

        /* force dma off, just in case */
        at_dma_off(atdma);

        err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
        if (err)
                goto err_irq;

        platform_set_drvdata(pdev, atdma);

        /* create a pool of consistent memory blocks for hardware descriptors */
        atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
                        &pdev->dev, sizeof(struct at_desc),
                        4 /* word alignment */, 0);
        if (!atdma->dma_desc_pool) {
                dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
                err = -ENOMEM;
                goto err_pool_create;
        }

        /* clear any pending interrupt */
        while (dma_readl(atdma, EBCISR))
                cpu_relax();

        /* initialize channels related values */
        INIT_LIST_HEAD(&atdma->dma_common.channels);
        for (i = 0; i < plat_dat->nr_channels; i++) {
                struct at_dma_chan      *atchan = &atdma->chan[i];

                atchan->chan_common.device = &atdma->dma_common;
                dma_cookie_init(&atchan->chan_common);
                list_add_tail(&atchan->chan_common.device_node,
                                &atdma->dma_common.channels);

                atchan->ch_regs = atdma->regs + ch_regs(i);
                spin_lock_init(&atchan->lock);
                atchan->mask = 1 << i;

                INIT_LIST_HEAD(&atchan->active_list);
                INIT_LIST_HEAD(&atchan->queue);
                INIT_LIST_HEAD(&atchan->free_list);

                tasklet_init(&atchan->tasklet, atc_tasklet,
                                (unsigned long)atchan);
                atc_enable_chan_irq(atdma, i);
        }

        /* set base routines */
        atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
        atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
        atdma->dma_common.device_tx_status = atc_tx_status;
        atdma->dma_common.device_issue_pending = atc_issue_pending;
        atdma->dma_common.dev = &pdev->dev;

        /* set prep routines based on capability */
        if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
                atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

        if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
                atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
                /* controller can do slave DMA: can trigger cyclic transfers */
                dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
                atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
                atdma->dma_common.device_control = atc_control;
        }

        dma_writel(atdma, EN, AT_DMA_ENABLE);

        dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
          dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
          dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
          plat_dat->nr_channels);

        dma_async_device_register(&atdma->dma_common);

        return 0;

err_pool_create:
        platform_set_drvdata(pdev, NULL);
        free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
        clk_disable(atdma->clk);
        clk_put(atdma->clk);
err_clk:
        iounmap(atdma->regs);
        atdma->regs = NULL;
err_release_r:
        release_mem_region(io->start, size);
err_kfree:
        kfree(atdma);
        return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
        struct at_dma           *atdma = platform_get_drvdata(pdev);
        struct dma_chan         *chan, *_chan;
        struct resource         *io;

        at_dma_off(atdma);
        dma_async_device_unregister(&atdma->dma_common);

        dma_pool_destroy(atdma->dma_desc_pool);
        platform_set_drvdata(pdev, NULL);
        free_irq(platform_get_irq(pdev, 0), atdma);

        list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
                        device_node) {
                struct at_dma_chan      *atchan = to_at_dma_chan(chan);

                /* Disable interrupts */
                atc_disable_chan_irq(atdma, chan->chan_id);
                tasklet_disable(&atchan->tasklet);

                tasklet_kill(&atchan->tasklet);
                list_del(&chan->device_node);
        }

        clk_disable(atdma->clk);
        clk_put(atdma->clk);

        iounmap(atdma->regs);
        atdma->regs = NULL;

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(io->start, resource_size(io));

        kfree(atdma);

        return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
        struct at_dma   *atdma = platform_get_drvdata(pdev);

        at_dma_off(platform_get_drvdata(pdev));
        clk_disable(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct at_dma *atdma = platform_get_drvdata(pdev);
        struct dma_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
                        device_node) {
                struct at_dma_chan *atchan = to_at_dma_chan(chan);
                /* wait for transaction completion (except in cyclic case) */
                if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
                        return -EAGAIN;
        }
        return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
        struct dma_chan *chan = &atchan->chan_common;

        /* The channel should already be paused by the channel user;
         * pause it here anyway if that has not been done */
        if (!atc_chan_is_paused(atchan)) {
                dev_warn(chan2dev(chan),
                "cyclic channel not paused, should be done by channel user\n");
                atc_control(chan, DMA_PAUSE, 0);
        }

        /* now preserve additional data for cyclic operations */
        /* next descriptor address in the cyclic list */
        atchan->save_dscr = channel_readl(atchan, DSCR);

        vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct at_dma *atdma = platform_get_drvdata(pdev);
        struct dma_chan *chan, *_chan;

        /* preserve data */
        list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
                        device_node) {
                struct at_dma_chan *atchan = to_at_dma_chan(chan);

                if (atc_chan_is_cyclic(atchan))
                        atc_suspend_cyclic(atchan);
                atchan->save_cfg = channel_readl(atchan, CFG);
        }
        atdma->save_imr = dma_readl(atdma, EBCIMR);

        /* disable DMA controller */
        at_dma_off(atdma);
        clk_disable(atdma->clk);
        return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
        struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);

        /* restore channel status for cyclic descriptors list:
         * next descriptor in the cyclic list at the time of suspend */
        channel_writel(atchan, SADDR, 0);
        channel_writel(atchan, DADDR, 0);
        channel_writel(atchan, CTRLA, 0);
        channel_writel(atchan, CTRLB, 0);
        channel_writel(atchan, DSCR, atchan->save_dscr);
        dma_writel(atdma, CHER, atchan->mask);

        /* channel pause status should be removed by channel user
         * We cannot take the initiative to do it here */

        vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct at_dma *atdma = platform_get_drvdata(pdev);
        struct dma_chan *chan, *_chan;

        /* bring back DMA controller */
        clk_enable(atdma->clk);
        dma_writel(atdma, EN, AT_DMA_ENABLE);

        /* clear any pending interrupt */
        while (dma_readl(atdma, EBCISR))
                cpu_relax();

        /* restore saved data */
        dma_writel(atdma, EBCIER, atdma->save_imr);
        list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
                        device_node) {
                struct at_dma_chan *atchan = to_at_dma_chan(chan);

                channel_writel(atchan, CFG, atchan->save_cfg);
                if (atc_chan_is_cyclic(atchan))
                        atc_resume_cyclic(atchan);
        }
        return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
        .prepare = at_dma_prepare,
        .suspend_noirq = at_dma_suspend_noirq,
        .resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
        .remove         = __exit_p(at_dma_remove),
        .shutdown       = at_dma_shutdown,
        .id_table       = atdma_devtypes,
        .driver = {
                .name   = "at_hdmac",
                .pm     = &at_dma_dev_pm_ops,
                .of_match_table = of_match_ptr(atmel_dma_dt_ids),
        },
};

static int __init at_dma_init(void)
{
        return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
        platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");