/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems).
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller"
 * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

/* NOTE: DMS+SMS is system-specific. We should get this information
 * from the platform code somehow.
 */
#define DWC_DEFAULT_CTLLO	(DWC_CTLL_DST_MSIZE(0)		\
		| DWC_CTLL_SRC_MSIZE(0)				\
		| DWC_CTLL_DMS(0)				\
		| DWC_CTLL_SMS(1)				\
		| DWC_CTLL_LLP_D_EN				\
		| DWC_CTLL_LLP_S_EN)

/*
 * This is configuration-dependent and usually a funny size like 4095.
 * Let's round it down to the nearest power of two.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 8192 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	2048U
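
/*
 * Worked example (illustrative only, since the limit above is
 * system-specific): with DWC_MAX_COUNT = 2048 and a source width of 2
 * (32-bit words), one descriptor moves at most 2048 << 2 = 8192 bytes;
 * with 16-bit words (width 1) it is 2048 << 1 = 4096 bytes.
 */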

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

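/*
 * A minimal sketch of that pattern (it mirrors what
 * dwc_sync_desc_for_cpu() and the prep routines below actually do;
 * "dev", "desc" and "next_phys" are placeholder names):
 *
 *	// CPU fills in the LLI, then flushes it out to memory
 *	// before the controller may fetch it ...
 *	desc->lli.llp = next_phys;
 *	dma_sync_single_for_device(dev, desc->txd.phys,
 *			sizeof(desc->lli), DMA_TO_DEVICE);
 *	// ... and syncs it back before the CPU reads it again:
 *	dma_sync_single_for_cpu(dev, desc->txd.phys,
 *			sizeof(desc->lli), DMA_TO_DEVICE);
 */
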
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->queue.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dwc->lock);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dwc->lock);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->txd.tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_bh(&dwc->lock);
		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->txd.tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_bh(&dwc->lock);
	}
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	dwc->completed = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	dwc_sync_desc_for_cpu(dwc, desc);
	list_splice_init(&txd->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	/*
	 * We use dma_unmap_page() regardless of how the buffers were
	 * mapped before they were submitted...
	 */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
		dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
				desc->len, DMA_FROM_DEVICE);
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
				desc->len, DMA_TO_DEVICE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&dwc->queue))
		dwc_dostart(dwc, dwc_first_queued(dwc));
	list_splice_init(&dwc->active_list, &list);
	list_splice_init(&dwc->queue, &dwc->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;

	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		dwc_complete_all(dw, dwc);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		if (desc->lli.llp == llp)
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			if (child->lli.llp == llp)
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		dwc_descriptor_complete(dwc, desc);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		dwc_dostart(dwc, dwc_first_queued(dwc));
		list_splice_init(&dwc->queue, &dwc->active_list);
	}
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;

	dwc_scan_descriptors(dw, dwc);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_splice_init(&dwc->queue, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;
		if (callback) {
			spin_unlock(&dwc->lock);
			callback(callback_param);
			spin_lock(&dwc->lock);
		}
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		spin_lock(&dwc->lock);
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
		spin_unlock(&dwc->lock);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&dwc->lock);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		dwc_dostart(dwc, desc);
		list_add_tail(&desc->desc_node, &dwc->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_bh(&dwc->lock);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;
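
	/*
	 * Worked example (illustrative values, not from real hardware):
	 * src = 0x1000, dest = 0x2000, len = 64 gives
	 * (src | dest | len) & 3 == 0, so both widths become 2 (32-bit
	 * transfers). An unaligned src of 0x1001 would fall through to
	 * width 0, i.e. byte-at-a-time transfers.
	 */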

	ctllo = DWC_DEFAULT_CTLLO
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->txd.tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
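
/*
 * Hedged client-side usage sketch for the memcpy path above (not part
 * of this driver; "dest", "src" and "len" are placeholders and error
 * handling is elided):
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
 *			DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 */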

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);

	switch (direction) {
	case DMA_TO_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC_M2P);
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len;
			u32		mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			desc->lli.ctlhi = len >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->txd.tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC_P2M);

		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len;
			u32		mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			desc->lli.ctlhi = len >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->txd.tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

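/*
 * For reference, a hedged sketch of the platform-side slave data that
 * the slave paths above consume through chan->private (every value
 * below is made up; real ones come from the board/chip code, and the
 * cfg_hi/cfg_lo encoding is controller-specific):
 *
 *	static struct dw_dma_slave my_dws = {
 *		.dma_dev	= &my_dmac_device.dev,	// the DMAC's device
 *		.tx_reg		= 0xfff02400 + 0x30,	// peripheral TX FIFO
 *		.rx_reg		= 0xfff02400 + 0x30,	// peripheral RX FIFO
 *		.reg_width	= 2,			// 32-bit FIFO accesses
 *		.cfg_hi		= 0,
 *		.cfg_lo		= 0,
 *	};
 *	chan->private = &my_dws;
 */
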
static void dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);

	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_bh(&dwc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}

static enum dma_status
dwc_is_tx_complete(struct dma_chan *chan,
		dma_cookie_t cookie,
		dma_cookie_t *done, dma_cookie_t *used)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int			ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
	spin_unlock_bh(&dwc->lock);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_dma_slave	*dws;
	int			i;
	u32			cfghi;
	u32			cfglo;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	cfghi = DWC_CFGH_FIFO_MODE;
	cfglo = 0;

	dws = chan->private;
	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo = dws->cfg_lo;
	}
	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_bh(&dwc->lock);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&dwc->lock);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dwc->lock);
			break;
		}

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_bh(&dwc->lock);
		i = ++dwc->descs_allocated;
	}

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_bh(&dwc->lock);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock(&dwc->lock);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock(&dwc->lock);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock(&dwc->lock);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);

	spin_lock(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock(&dwc->lock);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_data_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	struct dw_dma_slave		*dws = chan->private;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_bh(&dwc->lock);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_bh(&dwc->lock);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);
	reg_width = dws->reg_width;
	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_TO_DEVICE:
			desc->lli.dar = dws->tx_reg;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_FC_M2P
					| DWC_CTLL_INT_EN);
			break;
		case DMA_FROM_DEVICE:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = dws->rx_reg;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_FC_P2M
					| DWC_CTLL_INT_EN);
			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
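
/*
 * Hedged usage sketch for the cyclic extensions above (client code,
 * not part of this driver; "chan", "buf", "BUF_LEN", "PERIOD_LEN" and
 * the callback names are placeholders, error handling is elided):
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, BUF_LEN, PERIOD_LEN,
 *			DMA_TO_DEVICE);
 *	if (!IS_ERR(cdesc)) {
 *		cdesc->period_callback = my_period_callback;
 *		cdesc->period_callback_param = my_param;
 *		dw_dma_cyclic_start(chan);	// softirqs must be disabled
 *		...
 *		dw_dma_cyclic_stop(chan);
 *		dw_dma_cyclic_free(chan);
 *	}
 */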

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}

static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	memset(dw, 0, sizeof *dw);

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->completed = 1;
		dwc->chan.chan_id = i;
		list_add_tail(&dwc->chan.device_node, &dw->dma.channels);

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_is_tx_complete = dwc_is_tx_complete;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), dw->dma.chancnt);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;
	struct resource		*io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}

static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
	return 0;
}

static int dw_resume_early(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.suspend_late	= dw_suspend_late,
	.resume_early	= dw_resume_early,
	.driver = {
		.name	= "dw_dmac",
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
module_init(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");