/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

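	/*
	 * Note: SADDR/DADDR/CTRLA/CTRLB are zeroed here because the first
	 * buffer transfer is described by the hardware linked-list item that
	 * DSCR points at; once the channel is enabled through CHER, the
	 * controller fetches those parameters from the LLI in memory.
	 */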
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * For cyclic transfers, no need to replay the callback function
	 * while stopping.
	 */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Also submits queued descriptors, if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/*
	 * As we are stopped, take the opportunity to push queued
	 * descriptors onto the active_list.
	 */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

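	/*
	 * Note: the handler re-reads EBCIMR/EBCISR until no enabled source
	 * remains pending (reading EBCISR acks the latched status on this
	 * controller), so events raised while earlier ones are being
	 * processed are not lost.
	 */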
	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue the chain if the DMA engine is already working
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
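	/*
	 * Example: if src, dest and len are all multiples of 4, the copy is
	 * performed as 32-bit words (src_width = dst_width = 2, i.e. log2 of
	 * the access size) and each LLI moves up to ATC_BTSIZE_MAX words;
	 * otherwise the common alignment degrades the access size to
	 * halfwords or bytes.
	 */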
	if (!((src | dest  | len) & 3)) {
		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
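/*
 * Example: with reg_width = 2 (32-bit register accesses), a period may span
 * at most ATC_BTSIZE_MAX << 2 bytes, since BTSIZE counts transfer-width
 * units, and both buf_addr and period_len must then be multiples of 4.
 */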
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 * @context: transfer context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;
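	/*
	 * Note: with the last LLI pointing back to the first and no
	 * end-of-link flag set, the controller keeps walking the period
	 * descriptors forever; the transfer only stops on an explicit
	 * terminate, and the per-buffer interrupt fires once per period.
	 */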

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

static int set_runtime_config(struct dma_chan *chan,
			      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

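	/*
	 * Note: the client supplies maxburst in beats; convert_burst()
	 * rewrites the copied values in place into the encoding expected by
	 * the CTRLA SCSIZE/DCSIZE fields.
	 */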
	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}


static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

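	/*
	 * Note: dma_cookie_status() is queried twice on purpose; if the
	 * first pass does not report completion, finished descriptors are
	 * reaped with atc_cleanup_descriptors() and the cookie is checked
	 * again.
	 */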
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, atc_first_active(atchan)->len);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}

static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
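	/*
	 * Note on the (assumed) two-cell binding: args[0] packs the AHB
	 * interface numbers (bits 7:0 peripheral interface, bits 23:16
	 * memory interface, as decoded below) and args[1] is the peripheral
	 * handshake ID programmed into both ATC_SRC_PER and ATC_DST_PER.
	 */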
	per_id = dma_spec->args[1];
	atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW
		      | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id)
		      | ATC_SRC_PER(per_id);
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan)
		return NULL;

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif

/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;
1470
Ludovic Desrochesbbe89c82013-04-19 09:11:18 +00001471err_of_dma_controller_register:
1472 dma_async_device_unregister(&atdma->dma_common);
1473 dma_pool_destroy(atdma->dma_desc_pool);
Nicolas Ferredc78baa2009-07-03 19:24:33 +02001474err_pool_create:
1475 platform_set_drvdata(pdev, NULL);
1476 free_irq(platform_get_irq(pdev, 0), atdma);
1477err_irq:
1478 clk_disable(atdma->clk);
1479 clk_put(atdma->clk);
1480err_clk:
1481 iounmap(atdma->regs);
1482 atdma->regs = NULL;
1483err_release_r:
1484 release_mem_region(io->start, size);
1485err_kfree:
1486 kfree(atdma);
1487 return err;
1488}
1489
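/*
 * Illustrative sketch, not part of the original driver: once probe has
 * registered the controller with the dmaengine core, a peripheral driver
 * consumes a channel through the generic slave API (either the DT path
 * via at_dma_xlate or the legacy dma_request_channel() path mentioned
 * above). The function name, the "tx" request line, the FIFO address and
 * the burst size below are hypothetical placeholders.
 */
#if 0	/* example only, never built */
static int example_start_tx(struct device *dev, dma_addr_t buf, size_t len,
		dma_addr_t per_fifo)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = per_fifo,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 16,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	/* resolved through at_dma_xlate() when the client node has "dmas" */
	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* ends up in atc_issue_pending() */
	return 0;
}
#endif
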
static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;
	struct resource *io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		/* disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(atdma);
	clk_disable(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan *chan = &atchan->chan_common;

	/*
	 * The channel should have been paused by its user already; do it
	 * here anyway if that has not happened.
	 */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
			"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/*
	 * Now preserve additional data for cyclic operations: the address of
	 * the next descriptor in the cyclic list.
	 */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

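/*
 * Illustrative sketch, not part of the original driver: the "channel
 * user" the warning above refers to would normally bracket system
 * suspend with the generic dmaengine pause/resume wrappers, which reach
 * atc_control(). "chan" is whatever channel the client driver holds;
 * the helper name is a hypothetical placeholder.
 */
#if 0	/* example only, never built */
static void example_cyclic_pm(struct dma_chan *chan, bool suspending)
{
	if (suspending)
		dmaengine_pause(chan);	/* before at_dma_suspend_noirq() runs */
	else
		dmaengine_resume(chan);	/* after atc_resume_cyclic() has run */
}
#endif
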
static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/*
	 * Restore channel status for the cyclic descriptor list: resume from
	 * the descriptor that was next in the list at the time of suspend.
	 */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/*
	 * The channel pause status should be removed by the channel user;
	 * we cannot take the initiative to do it here.
	 */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove = at_dma_remove,
	.shutdown = at_dma_shutdown,
	.id_table = atdma_devtypes,
	.driver = {
		.name = "at_hdmac",
		.pm = &at_dma_dev_pm_ops,
		.of_match_table = of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");