/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "at_hdmac_regs.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLA	(0)
#define ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

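/*
 * Added note (descriptive, not from the original sources): the default
 * CTRLB value routes both source and destination through the memory AHB
 * interface; the slave and cyclic prep functions below override SIF/DIF
 * per direction so that the peripheral side uses AT_DMA_PER_IF instead.
 */
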
/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc *desc = NULL;
	struct at_dma *atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_bh(&atchan->lock);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_bh(&atchan->lock);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptors available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&atchan->lock);
			atchan->descs_allocated++;
			spin_unlock_bh(&atchan->lock);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;

		spin_lock_bh(&atchan->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_bh(&atchan->lock);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

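/*
 * Added note (illustrative, not from the original sources): after a prep
 * function has chained N descriptors through atc_desc_chain() or the
 * equivalent open-coded logic, memory holds a hardware linked list that
 * the controller walks on its own:
 *
 *	first->lli.dscr --> desc1->lli.dscr --> ... --> 0 (see set_desc_eol())
 *
 * Only "first" is handed back to the client as the transaction descriptor;
 * the children are kept on first->tx_list for software bookkeeping.
 */
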
/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_cookie_t cookie = atchan->chan_common.cookie;

	if (++cookie < 0)
		cookie = 1;

	atchan->chan_common.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

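/*
 * Added note (descriptive): cookies are kept strictly positive; on signed
 * wrap the counter restarts at 1, because non-positive values are reserved
 * for other uses (e.g. the prep functions park -EBUSY in first->txd.cookie
 * until the chain is actually submitted).
 */
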
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

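/*
 * Added note (descriptive): only DSCR is programmed with a real value
 * above; zeroing SADDR/DADDR/CTRLA/CTRLB makes the controller fetch even
 * the first transfer's parameters from the descriptor in memory, so the
 * whole chain, first element included, is consumed through the LLI fetch
 * mechanism.
 */
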
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	atchan->completed_cookie = txd->cookie;

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
		dma_async_tx_callback callback = txd->callback;
		void *param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now that it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc *first = atc_first_active(atchan);
	struct dma_async_tx_descriptor *txd = &first->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*-- IRQ & Tasklet ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;

	spin_lock(&atchan->lock);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock(&atchan->lock);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma *atdma = (struct at_dma *)dev_id;
	struct at_dma_chan *atchan;
	int i;
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR, atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}

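/*
 * Added note (descriptive): the hard interrupt handler above only latches
 * what happened: on an AHB error it disables the channel, records
 * ATC_IS_ERROR and schedules the tasklet. All list walking and client
 * callbacks then run in atc_tasklet() under atchan->lock, in softirq
 * context.
 */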

/*-- DMA Engine API --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc *desc = txd_to_at_desc(tx);
	struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&atchan->lock);
	cookie = atc_assign_cookie(atchan, desc);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_bh(&atchan->lock);

	return cookie;
}

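/*
 * Hedged usage sketch (assumption: a generic dmaengine client, not part of
 * this file) showing where atc_tx_submit() sits in the API flow:
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						DMA_PREP_INTERRUPT);
 *	cookie = txd->tx_submit(txd);	// lands in atc_tx_submit()
 *	dma_async_issue_pending(chan);	// atc_issue_pending()
 *	// ...later poll device_tx_status()	// atc_tx_status()
 *
 * If the channel was idle, the chain starts immediately at submit time;
 * otherwise it waits on ->queue until atc_advance_work() reaches it.
 */
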
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctrla;
	u32 ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla = ATC_DEFAULT_CTRLA;
	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		if (!first) {
			first = desc;
		} else {
			/* inform the HW lli about chaining */
			prev->lli.dscr = desc->txd.phys;
			/* insert the link descriptor to the LD ring */
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

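/*
 * Added worked example (illustrative): for a 4096-byte copy whose source,
 * destination and length are all word aligned, src_width == dst_width == 2,
 * so a single descriptor covers min(4096 >> 2, ATC_BTSIZE_MAX) = 1024 word
 * transfers and "offset" then advances by 1024 << 2 = 4096 bytes; an
 * unaligned buffer falls back to byte-wide transfers, i.e. four times as
 * many bus cycles for the same payload.
 */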

/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	u32 ctrla;
	u32 ctrlb;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	reg_width = atslave->reg_width;

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctrla |= ATC_DST_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = atslave->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			if (!first) {
				first = desc;
			} else {
				/* inform the HW lli about chaining */
				prev->lli.dscr = desc->txd.phys;
				/* insert the link descriptor to the LD ring */
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctrla |= ATC_SRC_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = atslave->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			if (!first) {
				first = desc;
			} else {
				/* inform the HW lli about chaining */
				prev->lli.dscr = desc->txd.phys;
				/* insert the link descriptor to the LD ring */
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_data_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

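/*
 * Added example of the checks above (illustrative): with reg_width == 2
 * (a 32-bit peripheral register) a single period may span at most
 * ATC_BTSIZE_MAX << 2 bytes, and both buf_addr and period_len must be
 * multiples of 4; any violation makes the cyclic prep fail early.
 */
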
/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		size_t period_len, enum dma_data_direction direction)
{
	u32 ctrla;
	unsigned int reg_width = atslave->reg_width;

	/* prepare common CTRLA value */
	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_TO_DEVICE:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = atslave->tx_reg;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_FROM_DEVICE:
		desc->lli.saddr = atslave->rx_reg;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	unsigned long was_cyclic;
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
			period_len, direction))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc *desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
						period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

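/*
 * Hedged usage note (assumption: a typical audio-style client, not shown
 * in this file): a cyclic transfer is usually set up once over a ring
 * buffer,
 *
 *	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
 *						period_len, DMA_TO_DEVICE);
 *	txd->tx_submit(txd);
 *
 * after which atc_handle_cyclic() fires txd->callback once per elapsed
 * period until the client sends DMA_TERMINATE_ALL through atc_control()
 * below, which also clears the channel's ATC_IS_CYCLIC flag.
 */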

static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&atchan->lock);

	dma_writel(atdma, CHDR, atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_bh(&atchan->lock);

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	spin_lock_bh(&atchan->lock);

	last_complete = atchan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		last_complete = atchan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	spin_unlock_bh(&atchan->lock);

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
			atc_first_active(atchan)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
		 cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
		return;

	spin_lock_bh(&atchan->lock);
	if (!atc_chan_is_enabled(atchan)) {
		atc_advance_work(atchan);
	}
	spin_unlock_bh(&atchan->lock);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	struct at_dma_slave *atslave;
	int i;
	u32 cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&atchan->lock);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	atchan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&atchan->lock);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*-- Module Management -----------------------------------------------*/

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct at_dma_platform_data *pdata;
	struct resource *io;
	struct at_dma *atdma;
	size_t size;
	int irq;
	int err;
	int i;

	/* get DMA Controller parameters from platform */
	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += pdata->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities from the platform data */
	atdma->dma_common.cap_mask = pdata->cap_mask;
	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;

	size = io->end - io->start + 1;
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channel-related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
		struct at_dma_chan *atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		atchan->chan_common.cookie = atchan->completed_cookie = 1;
		atchan->chan_common.chan_id = i;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_irq(atchan);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;

	if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
	    dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
		atdma->dma_common.device_control = atc_control;

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
	  atdma->dma_common.chancnt);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;
	struct resource *io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_irq(atchan);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, io->end - io->start + 1);

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
	return 0;
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");