1/*
2 * Driver for the TXx9 SoC DMA Controller
3 *
4 * Copyright (C) 2009 Atsushi Nemoto
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/dma-mapping.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/slab.h>
17#include <linux/scatterlist.h>
18#include "txx9dmac.h"
19
20static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
21{
22 return container_of(chan, struct txx9dmac_chan, chan);
23}
24
25static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
26{
27 return dc->ch_regs;
28}
29
30static struct txx9dmac_cregs32 __iomem *__dma_regs32(
31 const struct txx9dmac_chan *dc)
32{
33 return dc->ch_regs;
34}
35
36#define channel64_readq(dc, name) \
37 __raw_readq(&(__dma_regs(dc)->name))
38#define channel64_writeq(dc, name, val) \
39 __raw_writeq((val), &(__dma_regs(dc)->name))
40#define channel64_readl(dc, name) \
41 __raw_readl(&(__dma_regs(dc)->name))
42#define channel64_writel(dc, name, val) \
43 __raw_writel((val), &(__dma_regs(dc)->name))
44
45#define channel32_readl(dc, name) \
46 __raw_readl(&(__dma_regs32(dc)->name))
47#define channel32_writel(dc, name, val) \
48 __raw_writel((val), &(__dma_regs32(dc)->name))
49
50#define channel_readq(dc, name) channel64_readq(dc, name)
51#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
52#define channel_readl(dc, name) \
53 (is_dmac64(dc) ? \
54 channel64_readl(dc, name) : channel32_readl(dc, name))
55#define channel_writel(dc, name, val) \
56 (is_dmac64(dc) ? \
57 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))
58
59static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
60{
61 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
62 return channel64_readq(dc, CHAR);
63 else
64 return channel64_readl(dc, CHAR);
65}
66
67static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
68{
69 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
70 channel64_writeq(dc, CHAR, val);
71 else
72 channel64_writel(dc, CHAR, val);
73}
74
75static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
76{
77#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
78 channel64_writel(dc, CHAR, 0);
79 channel64_writel(dc, __pad_CHAR, 0);
80#else
81 channel64_writeq(dc, CHAR, 0);
82#endif
83}
84
85static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
86{
87 if (is_dmac64(dc))
88 return channel64_read_CHAR(dc);
89 else
90 return channel32_readl(dc, CHAR);
91}
92
93static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
94{
95 if (is_dmac64(dc))
96 channel64_write_CHAR(dc, val);
97 else
98 channel32_writel(dc, CHAR, val);
99}
100
101static struct txx9dmac_regs __iomem *__txx9dmac_regs(
102 const struct txx9dmac_dev *ddev)
103{
104 return ddev->regs;
105}
106
107static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
108 const struct txx9dmac_dev *ddev)
109{
110 return ddev->regs;
111}
112
113#define dma64_readl(ddev, name) \
114 __raw_readl(&(__txx9dmac_regs(ddev)->name))
115#define dma64_writel(ddev, name, val) \
116 __raw_writel((val), &(__txx9dmac_regs(ddev)->name))
117
118#define dma32_readl(ddev, name) \
119 __raw_readl(&(__txx9dmac_regs32(ddev)->name))
120#define dma32_writel(ddev, name, val) \
121 __raw_writel((val), &(__txx9dmac_regs32(ddev)->name))
122
123#define dma_readl(ddev, name) \
124 (__is_dmac64(ddev) ? \
125 dma64_readl(ddev, name) : dma32_readl(ddev, name))
126#define dma_writel(ddev, name, val) \
127 (__is_dmac64(ddev) ? \
128 dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))
129
130static struct device *chan2dev(struct dma_chan *chan)
131{
132 return &chan->dev->device;
133}
134static struct device *chan2parent(struct dma_chan *chan)
135{
136 return chan->dev->device.parent;
137}
138
139static struct txx9dmac_desc *
140txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
141{
142 return container_of(txd, struct txx9dmac_desc, txd);
143}
144
145static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
146 const struct txx9dmac_desc *desc)
147{
148 return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
149}
150
151static void desc_write_CHAR(const struct txx9dmac_chan *dc,
152 struct txx9dmac_desc *desc, dma_addr_t val)
153{
154 if (is_dmac64(dc))
155 desc->hwdesc.CHAR = val;
156 else
157 desc->hwdesc32.CHAR = val;
158}
159
160#define TXX9_DMA_MAX_COUNT 0x04000000
161
162#define TXX9_DMA_INITIAL_DESC_COUNT 64
163
164static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
165{
166 return list_entry(dc->active_list.next,
167 struct txx9dmac_desc, desc_node);
168}
169
170static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
171{
172 return list_entry(dc->active_list.prev,
173 struct txx9dmac_desc, desc_node);
174}
175
176static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
177{
178 return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
179}
180
181static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
182{
183 if (!list_empty(&desc->tx_list))
184 desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
185 return desc;
186}
187
188static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);
189
190static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
191 gfp_t flags)
192{
193 struct txx9dmac_dev *ddev = dc->ddev;
194 struct txx9dmac_desc *desc;
195
196 desc = kzalloc(sizeof(*desc), flags);
197 if (!desc)
198 return NULL;
199 INIT_LIST_HEAD(&desc->tx_list);
200 dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
201 desc->txd.tx_submit = txx9dmac_tx_submit;
202 /* txd.flags will be overwritten in prep funcs */
203 desc->txd.flags = DMA_CTRL_ACK;
204 desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
205 ddev->descsize, DMA_TO_DEVICE);
206 return desc;
207}
208
209static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
210{
211 struct txx9dmac_desc *desc, *_desc;
212 struct txx9dmac_desc *ret = NULL;
213 unsigned int i = 0;
214
215 spin_lock_bh(&dc->lock);
216 list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
217 if (async_tx_test_ack(&desc->txd)) {
218 list_del(&desc->desc_node);
219 ret = desc;
220 break;
221 }
222 dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
223 i++;
224 }
225 spin_unlock_bh(&dc->lock);
226
227 dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
228 i);
229 if (!ret) {
230 ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
231 if (ret) {
232 spin_lock_bh(&dc->lock);
233 dc->descs_allocated++;
234 spin_unlock_bh(&dc->lock);
235 } else
236 dev_err(chan2dev(&dc->chan),
237 "not enough descriptors available\n");
238 }
239 return ret;
240}
241
242static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
243 struct txx9dmac_desc *desc)
244{
245 struct txx9dmac_dev *ddev = dc->ddev;
246 struct txx9dmac_desc *child;
247
248 list_for_each_entry(child, &desc->tx_list, desc_node)
249 dma_sync_single_for_cpu(chan2parent(&dc->chan),
250 child->txd.phys, ddev->descsize,
251 DMA_TO_DEVICE);
252 dma_sync_single_for_cpu(chan2parent(&dc->chan),
253 desc->txd.phys, ddev->descsize,
254 DMA_TO_DEVICE);
255}
256
257/*
258 * Move a descriptor, including any children, to the free list.
259 * `desc' must not be on any lists.
260 */
261static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
262 struct txx9dmac_desc *desc)
263{
264 if (desc) {
265 struct txx9dmac_desc *child;
266
267 txx9dmac_sync_desc_for_cpu(dc, desc);
268
269 spin_lock_bh(&dc->lock);
270 list_for_each_entry(child, &desc->tx_list, desc_node)
271 dev_vdbg(chan2dev(&dc->chan),
272 "moving child desc %p to freelist\n",
273 child);
274 list_splice_init(&desc->tx_list, &dc->free_list);
275 dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
276 desc);
277 list_add(&desc->desc_node, &dc->free_list);
278 spin_unlock_bh(&dc->lock);
279 }
280}
281
282/* Called with dc->lock held and bh disabled */
283static dma_cookie_t
284txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
285{
286 dma_cookie_t cookie = dc->chan.cookie;
287
288 if (++cookie < 0)
289 cookie = 1;
290
291 dc->chan.cookie = cookie;
292 desc->txd.cookie = cookie;
293
294 return cookie;
295}
296
297/*----------------------------------------------------------------------*/
298
299static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
300{
301 if (is_dmac64(dc))
302 dev_err(chan2dev(&dc->chan),
303 " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
304 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
305 (u64)channel64_read_CHAR(dc),
306 channel64_readq(dc, SAR),
307 channel64_readq(dc, DAR),
308 channel64_readl(dc, CNTR),
309 channel64_readl(dc, SAIR),
310 channel64_readl(dc, DAIR),
311 channel64_readl(dc, CCR),
312 channel64_readl(dc, CSR));
313 else
314 dev_err(chan2dev(&dc->chan),
315 " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
316 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
317 channel32_readl(dc, CHAR),
318 channel32_readl(dc, SAR),
319 channel32_readl(dc, DAR),
320 channel32_readl(dc, CNTR),
321 channel32_readl(dc, SAIR),
322 channel32_readl(dc, DAIR),
323 channel32_readl(dc, CCR),
324 channel32_readl(dc, CSR));
325}
326
327static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
328{
329 channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
330 if (is_dmac64(dc)) {
331 channel64_clear_CHAR(dc);
332 channel_writeq(dc, SAR, 0);
333 channel_writeq(dc, DAR, 0);
334 } else {
335 channel_writel(dc, CHAR, 0);
336 channel_writel(dc, SAR, 0);
337 channel_writel(dc, DAR, 0);
338 }
339 channel_writel(dc, CNTR, 0);
340 channel_writel(dc, SAIR, 0);
341 channel_writel(dc, DAIR, 0);
342 channel_writel(dc, CCR, 0);
343 mmiowb();
344}
345
346/* Called with dc->lock held and bh disabled */
347static void txx9dmac_dostart(struct txx9dmac_chan *dc,
348 struct txx9dmac_desc *first)
349{
350 struct txx9dmac_slave *ds = dc->chan.private;
351 u32 sai, dai;
352
353 dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
354 first->txd.cookie, first);
355 /* ASSERT: channel is idle */
356 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
357 dev_err(chan2dev(&dc->chan),
358 "BUG: Attempted to start non-idle channel\n");
359 txx9dmac_dump_regs(dc);
360 /* The tasklet will hopefully advance the queue... */
361 return;
362 }
363
364 if (is_dmac64(dc)) {
365 channel64_writel(dc, CNTR, 0);
366 channel64_writel(dc, CSR, 0xffffffff);
367 if (ds) {
368 if (ds->tx_reg) {
369 sai = ds->reg_width;
370 dai = 0;
371 } else {
372 sai = 0;
373 dai = ds->reg_width;
374 }
375 } else {
376 sai = 8;
377 dai = 8;
378 }
379 channel64_writel(dc, SAIR, sai);
380 channel64_writel(dc, DAIR, dai);
381 /* All 64-bit DMACs support SMPCHN */
382 channel64_writel(dc, CCR, dc->ccr);
383 /* Writing a non-zero value to CHAR will assert XFACT */
384 channel64_write_CHAR(dc, first->txd.phys);
385 } else {
386 channel32_writel(dc, CNTR, 0);
387 channel32_writel(dc, CSR, 0xffffffff);
388 if (ds) {
389 if (ds->tx_reg) {
390 sai = ds->reg_width;
391 dai = 0;
392 } else {
393 sai = 0;
394 dai = ds->reg_width;
395 }
396 } else {
397 sai = 4;
398 dai = 4;
399 }
400 channel32_writel(dc, SAIR, sai);
401 channel32_writel(dc, DAIR, dai);
402 if (txx9_dma_have_SMPCHN()) {
403 channel32_writel(dc, CCR, dc->ccr);
404 /* Writing a non-zero value to CHAR will assert XFACT */
405 channel32_writel(dc, CHAR, first->txd.phys);
406 } else {
407 channel32_writel(dc, CHAR, first->txd.phys);
408 channel32_writel(dc, CCR, dc->ccr);
409 }
410 }
411}
412
413/*----------------------------------------------------------------------*/
414
415static void
416txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
417 struct txx9dmac_desc *desc)
418{
419 dma_async_tx_callback callback;
420 void *param;
421 struct dma_async_tx_descriptor *txd = &desc->txd;
422 struct txx9dmac_slave *ds = dc->chan.private;
423
424 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
425 txd->cookie, desc);
426
427 dc->completed = txd->cookie;
428 callback = txd->callback;
429 param = txd->callback_param;
430
431 txx9dmac_sync_desc_for_cpu(dc, desc);
432 list_splice_init(&desc->tx_list, &dc->free_list);
433 list_move(&desc->desc_node, &dc->free_list);
434
435 if (!ds) {
436 dma_addr_t dmaaddr;
437 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
438 dmaaddr = is_dmac64(dc) ?
439 desc->hwdesc.DAR : desc->hwdesc32.DAR;
440 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
441 dma_unmap_single(chan2parent(&dc->chan),
442 dmaaddr, desc->len, DMA_FROM_DEVICE);
443 else
444 dma_unmap_page(chan2parent(&dc->chan),
445 dmaaddr, desc->len, DMA_FROM_DEVICE);
446 }
447 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
448 dmaaddr = is_dmac64(dc) ?
449 desc->hwdesc.SAR : desc->hwdesc32.SAR;
450 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
451 dma_unmap_single(chan2parent(&dc->chan),
452 dmaaddr, desc->len, DMA_TO_DEVICE);
453 else
454 dma_unmap_page(chan2parent(&dc->chan),
455 dmaaddr, desc->len, DMA_TO_DEVICE);
456 }
457 }
458
459 /*
460 * The API requires that no submissions are done from a
461 * callback, so we don't need to drop the lock here
462 */
463 if (callback)
464 callback(param);
465 dma_run_dependencies(txd);
466}
467
468static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
469{
470 struct txx9dmac_dev *ddev = dc->ddev;
471 struct txx9dmac_desc *desc;
472 struct txx9dmac_desc *prev = NULL;
473
474 BUG_ON(!list_empty(list));
475 do {
476 desc = txx9dmac_first_queued(dc);
477 if (prev) {
478 desc_write_CHAR(dc, prev, desc->txd.phys);
479 dma_sync_single_for_device(chan2parent(&dc->chan),
480 prev->txd.phys, ddev->descsize,
481 DMA_TO_DEVICE);
482 }
483 prev = txx9dmac_last_child(desc);
484 list_move_tail(&desc->desc_node, list);
485 /* Make chain-completion interrupt happen */
486 if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
487 !txx9dmac_chan_INTENT(dc))
488 break;
489 } while (!list_empty(&dc->queue));
490}
491
492static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
493{
494 struct txx9dmac_desc *desc, *_desc;
495 LIST_HEAD(list);
496
497 /*
498 * Submit queued descriptors ASAP, i.e. before we go through
499 * the completed ones.
500 */
501 list_splice_init(&dc->active_list, &list);
502 if (!list_empty(&dc->queue)) {
503 txx9dmac_dequeue(dc, &dc->active_list);
504 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
505 }
506
507 list_for_each_entry_safe(desc, _desc, &list, desc_node)
508 txx9dmac_descriptor_complete(dc, desc);
509}
510
511static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
512 struct txx9dmac_hwdesc *desc)
513{
514 if (is_dmac64(dc)) {
515#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
516 dev_crit(chan2dev(&dc->chan),
517 " desc: ch%#llx s%#llx d%#llx c%#x\n",
518 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
519#else
520 dev_crit(chan2dev(&dc->chan),
521 " desc: ch%#llx s%#llx d%#llx c%#x"
522 " si%#x di%#x cc%#x cs%#x\n",
523 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
524 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
525#endif
526 } else {
527 struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
528#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
529 dev_crit(chan2dev(&dc->chan),
530 " desc: ch%#x s%#x d%#x c%#x\n",
531 d->CHAR, d->SAR, d->DAR, d->CNTR);
532#else
533 dev_crit(chan2dev(&dc->chan),
534 " desc: ch%#x s%#x d%#x c%#x"
535 " si%#x di%#x cc%#x cs%#x\n",
536 d->CHAR, d->SAR, d->DAR, d->CNTR,
537 d->SAIR, d->DAIR, d->CCR, d->CSR);
538#endif
539 }
540}
541
542static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
543{
544 struct txx9dmac_desc *bad_desc;
545 struct txx9dmac_desc *child;
546 u32 errors;
547
548 /*
549 * The descriptor currently at the head of the active list is
550 * borked. Since we don't have any way to report errors, we'll
551 * just have to scream loudly and try to carry on.
552 */
553 dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
554 txx9dmac_dump_regs(dc);
555
556 bad_desc = txx9dmac_first_active(dc);
557 list_del_init(&bad_desc->desc_node);
558
559 /* Clear all error flags and try to restart the controller */
560 errors = csr & (TXX9_DMA_CSR_ABCHC |
561 TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
562 TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
563 channel_writel(dc, CSR, errors);
564
565 if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
566 txx9dmac_dequeue(dc, &dc->active_list);
567 if (!list_empty(&dc->active_list))
568 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
569
570 dev_crit(chan2dev(&dc->chan),
571 "Bad descriptor submitted for DMA! (cookie: %d)\n",
572 bad_desc->txd.cookie);
573 txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
574 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
575 txx9dmac_dump_desc(dc, &child->hwdesc);
576 /* Pretend the descriptor completed successfully */
577 txx9dmac_descriptor_complete(dc, bad_desc);
578}
579
580static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
581{
582 dma_addr_t chain;
583 struct txx9dmac_desc *desc, *_desc;
584 struct txx9dmac_desc *child;
585 u32 csr;
586
587 if (is_dmac64(dc)) {
588 chain = channel64_read_CHAR(dc);
589 csr = channel64_readl(dc, CSR);
590 channel64_writel(dc, CSR, csr);
591 } else {
592 chain = channel32_readl(dc, CHAR);
593 csr = channel32_readl(dc, CSR);
594 channel32_writel(dc, CSR, csr);
595 }
596 /* For dynamic chain, we should look at XFACT instead of NCHNC */
597 if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
598 /* Everything we've submitted is done */
599 txx9dmac_complete_all(dc);
600 return;
601 }
602 if (!(csr & TXX9_DMA_CSR_CHNEN))
603 chain = 0; /* last descriptor of this chain */
604
605 dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
606 (u64)chain);
607
608 list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
609 if (desc_read_CHAR(dc, desc) == chain) {
610 /* This one is currently in progress */
611 if (csr & TXX9_DMA_CSR_ABCHC)
612 goto scan_done;
613 return;
614 }
615
616 list_for_each_entry(child, &desc->tx_list, desc_node)
617 if (desc_read_CHAR(dc, child) == chain) {
618 /* Currently in progress */
619 if (csr & TXX9_DMA_CSR_ABCHC)
620 goto scan_done;
621 return;
622 }
623
624 /*
625 * No descriptors so far seem to be in progress, i.e.
626 * this one must be done.
627 */
628 txx9dmac_descriptor_complete(dc, desc);
629 }
630scan_done:
631 if (csr & TXX9_DMA_CSR_ABCHC) {
632 txx9dmac_handle_error(dc, csr);
633 return;
634 }
635
636 dev_err(chan2dev(&dc->chan),
637 "BUG: All descriptors done, but channel not idle!\n");
638
639 /* Try to continue after resetting the channel... */
640 txx9dmac_reset_chan(dc);
641
642 if (!list_empty(&dc->queue)) {
643 txx9dmac_dequeue(dc, &dc->active_list);
644 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
645 }
646}
647
648static void txx9dmac_chan_tasklet(unsigned long data)
649{
650 int irq;
651 u32 csr;
652 struct txx9dmac_chan *dc;
653
654 dc = (struct txx9dmac_chan *)data;
655 csr = channel_readl(dc, CSR);
656 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);
657
658 spin_lock(&dc->lock);
659 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
660 TXX9_DMA_CSR_NTRNFC))
661 txx9dmac_scan_descriptors(dc);
662 spin_unlock(&dc->lock);
663 irq = dc->irq;
664
665 enable_irq(irq);
666}
667
668static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
669{
670 struct txx9dmac_chan *dc = dev_id;
671
672 dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
673 channel_readl(dc, CSR));
674
675 tasklet_schedule(&dc->tasklet);
676 /*
677 * Just disable the interrupts. We'll turn them back on in the
678 * softirq handler.
679 */
680 disable_irq_nosync(irq);
681
682 return IRQ_HANDLED;
683}
684
685static void txx9dmac_tasklet(unsigned long data)
686{
687 int irq;
688 u32 csr;
689 struct txx9dmac_chan *dc;
690
691 struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
692 u32 mcr;
693 int i;
694
695 mcr = dma_readl(ddev, MCR);
696 dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
697 for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
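 /*
  * Reading of the code suggests MCR carries per-channel status in its
  * upper bits; the 0x11 mask shifted by (24 + i) presumably tests the
  * abort and normal-completion flags for channel i before its CSR is
  * examined.
  */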
698 if ((mcr >> (24 + i)) & 0x11) {
699 dc = ddev->chan[i];
700 csr = channel_readl(dc, CSR);
701 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
702 csr);
703 spin_lock(&dc->lock);
704 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
705 TXX9_DMA_CSR_NTRNFC))
706 txx9dmac_scan_descriptors(dc);
707 spin_unlock(&dc->lock);
708 }
709 }
710 irq = ddev->irq;
711
712 enable_irq(irq);
713}
714
715static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
716{
717 struct txx9dmac_dev *ddev = dev_id;
718
719 dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
720 dma_readl(ddev, MCR));
721
722 tasklet_schedule(&ddev->tasklet);
723 /*
724 * Just disable the interrupts. We'll turn them back on in the
725 * softirq handler.
726 */
727 disable_irq_nosync(irq);
728
729 return IRQ_HANDLED;
730}
731
732/*----------------------------------------------------------------------*/
733
734static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
735{
736 struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
737 struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
738 dma_cookie_t cookie;
739
740 spin_lock_bh(&dc->lock);
741 cookie = txx9dmac_assign_cookie(dc, desc);
742
743 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
744 desc->txd.cookie, desc);
745
746 list_add_tail(&desc->desc_node, &dc->queue);
747 spin_unlock_bh(&dc->lock);
748
749 return cookie;
750}
751
752static struct dma_async_tx_descriptor *
753txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
754 size_t len, unsigned long flags)
755{
756 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
757 struct txx9dmac_dev *ddev = dc->ddev;
758 struct txx9dmac_desc *desc;
759 struct txx9dmac_desc *first;
760 struct txx9dmac_desc *prev;
761 size_t xfer_count;
762 size_t offset;
763
764 dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
765 (u64)dest, (u64)src, len, flags);
766
767 if (unlikely(!len)) {
768 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
769 return NULL;
770 }
771
772 prev = first = NULL;
773
774 for (offset = 0; offset < len; offset += xfer_count) {
775 xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
776 /*
777 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
778 * ERT-TX49H4-016 (slightly conservative)
779 */
780 if (__is_dmac64(ddev)) {
781 if (xfer_count > 0x100 &&
782 (xfer_count & 0xff) >= 0xfa &&
783 (xfer_count & 0xff) <= 0xff)
784 xfer_count -= 0x20;
785 } else {
786 if (xfer_count > 0x80 &&
787 (xfer_count & 0x7f) >= 0x7e &&
788 (xfer_count & 0x7f) <= 0x7f)
789 xfer_count -= 0x20;
790 }
791
792 desc = txx9dmac_desc_get(dc);
793 if (!desc) {
794 txx9dmac_desc_put(dc, first);
795 return NULL;
796 }
797
798 if (__is_dmac64(ddev)) {
799 desc->hwdesc.SAR = src + offset;
800 desc->hwdesc.DAR = dest + offset;
801 desc->hwdesc.CNTR = xfer_count;
802 txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
803 dc->ccr | TXX9_DMA_CCR_XFACT);
804 } else {
805 desc->hwdesc32.SAR = src + offset;
806 desc->hwdesc32.DAR = dest + offset;
807 desc->hwdesc32.CNTR = xfer_count;
808 txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
809 dc->ccr | TXX9_DMA_CCR_XFACT);
810 }
811
812 /*
813 * The descriptors on tx_list are not reachable from
814 * the dc->queue list or dc->active_list after a
815 * submit. If we put all descriptors on active_list,
816 * calling of callback on the completion will be more
817 * complex.
818 */
819 if (!first) {
820 first = desc;
821 } else {
822 desc_write_CHAR(dc, prev, desc->txd.phys);
823 dma_sync_single_for_device(chan2parent(&dc->chan),
824 prev->txd.phys, ddev->descsize,
825 DMA_TO_DEVICE);
826 list_add_tail(&desc->desc_node, &first->tx_list);
827 }
828 prev = desc;
829 }
830
831 /* Trigger interrupt after last block */
832 if (flags & DMA_PREP_INTERRUPT)
833 txx9dmac_desc_set_INTENT(ddev, prev);
834
835 desc_write_CHAR(dc, prev, 0);
836 dma_sync_single_for_device(chan2parent(&dc->chan),
837 prev->txd.phys, ddev->descsize,
838 DMA_TO_DEVICE);
839
840 first->txd.flags = flags;
841 first->len = len;
842
843 return &first->txd;
844}
845
846static struct dma_async_tx_descriptor *
847txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
848 unsigned int sg_len, enum dma_transfer_direction direction,
849 unsigned long flags)
850{
851 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
852 struct txx9dmac_dev *ddev = dc->ddev;
853 struct txx9dmac_slave *ds = chan->private;
854 struct txx9dmac_desc *prev;
855 struct txx9dmac_desc *first;
856 unsigned int i;
857 struct scatterlist *sg;
858
859 dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
860
861 BUG_ON(!ds || !ds->reg_width);
862 if (ds->tx_reg)
863 BUG_ON(direction != DMA_MEM_TO_DEV);
864 else
865 BUG_ON(direction != DMA_DEV_TO_MEM);
866 if (unlikely(!sg_len))
867 return NULL;
868
869 prev = first = NULL;
870
871 for_each_sg(sgl, sg, sg_len, i) {
872 struct txx9dmac_desc *desc;
873 dma_addr_t mem;
874 u32 sai, dai;
875
876 desc = txx9dmac_desc_get(dc);
877 if (!desc) {
878 txx9dmac_desc_put(dc, first);
879 return NULL;
880 }
881
882 mem = sg_dma_address(sg);
883
884 if (__is_dmac64(ddev)) {
885 if (direction == DMA_MEM_TO_DEV) {
886 desc->hwdesc.SAR = mem;
887 desc->hwdesc.DAR = ds->tx_reg;
888 } else {
889 desc->hwdesc.SAR = ds->rx_reg;
890 desc->hwdesc.DAR = mem;
891 }
892 desc->hwdesc.CNTR = sg_dma_len(sg);
893 } else {
894 if (direction == DMA_MEM_TO_DEV) {
895 desc->hwdesc32.SAR = mem;
896 desc->hwdesc32.DAR = ds->tx_reg;
897 } else {
898 desc->hwdesc32.SAR = ds->rx_reg;
899 desc->hwdesc32.DAR = mem;
900 }
901 desc->hwdesc32.CNTR = sg_dma_len(sg);
902 }
903 if (direction == DMA_MEM_TO_DEV) {
904 sai = ds->reg_width;
905 dai = 0;
906 } else {
907 sai = 0;
908 dai = ds->reg_width;
909 }
910 txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
911 dc->ccr | TXX9_DMA_CCR_XFACT);
912
913 if (!first) {
914 first = desc;
915 } else {
916 desc_write_CHAR(dc, prev, desc->txd.phys);
917 dma_sync_single_for_device(chan2parent(&dc->chan),
918 prev->txd.phys,
919 ddev->descsize,
920 DMA_TO_DEVICE);
921 list_add_tail(&desc->desc_node, &first->tx_list);
922 }
923 prev = desc;
924 }
925
926 /* Trigger interrupt after last block */
927 if (flags & DMA_PREP_INTERRUPT)
928 txx9dmac_desc_set_INTENT(ddev, prev);
929
930 desc_write_CHAR(dc, prev, 0);
931 dma_sync_single_for_device(chan2parent(&dc->chan),
932 prev->txd.phys, ddev->descsize,
933 DMA_TO_DEVICE);
934
935 first->txd.flags = flags;
936 first->len = 0;
937
938 return &first->txd;
939}
940
941static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
942 unsigned long arg)
943{
944 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
945 struct txx9dmac_desc *desc, *_desc;
946 LIST_HEAD(list);
947
948 /* Only supports DMA_TERMINATE_ALL */
949 if (cmd != DMA_TERMINATE_ALL)
950 return -EINVAL;
951
952 dev_vdbg(chan2dev(chan), "terminate_all\n");
953 spin_lock_bh(&dc->lock);
954
955 txx9dmac_reset_chan(dc);
956
957 /* active_list entries will end up before queued entries */
958 list_splice_init(&dc->queue, &list);
959 list_splice_init(&dc->active_list, &list);
960
961 spin_unlock_bh(&dc->lock);
962
963 /* Flush all pending and queued descriptors */
964 list_for_each_entry_safe(desc, _desc, &list, desc_node)
965 txx9dmac_descriptor_complete(dc, desc);
966
967 return 0;
968}
969
970static enum dma_status
971txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
972 struct dma_tx_state *txstate)
973{
974 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
975 dma_cookie_t last_used;
976 dma_cookie_t last_complete;
977 int ret;
978
979 last_complete = dc->completed;
980 last_used = chan->cookie;
981
982 ret = dma_async_is_complete(cookie, last_complete, last_used);
983 if (ret != DMA_SUCCESS) {
984 spin_lock_bh(&dc->lock);
985 txx9dmac_scan_descriptors(dc);
986 spin_unlock_bh(&dc->lock);
987
988 last_complete = dc->completed;
989 last_used = chan->cookie;
990
991 ret = dma_async_is_complete(cookie, last_complete, last_used);
992 }
993
994 dma_set_tx_state(txstate, last_complete, last_used, 0);
995
996 return ret;
997}
998
999static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
1000 struct txx9dmac_desc *prev)
1001{
1002 struct txx9dmac_dev *ddev = dc->ddev;
1003 struct txx9dmac_desc *desc;
1004 LIST_HEAD(list);
1005
1006 prev = txx9dmac_last_child(prev);
1007 txx9dmac_dequeue(dc, &list);
1008 desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
1009 desc_write_CHAR(dc, prev, desc->txd.phys);
1010 dma_sync_single_for_device(chan2parent(&dc->chan),
1011 prev->txd.phys, ddev->descsize,
1012 DMA_TO_DEVICE);
1013 mmiowb();
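 /*
  * If the controller has already cleared CHNEN and is still parked on
  * the old tail descriptor, it stopped before seeing the CHAR link
  * written above; in that case the new chain is kicked off by writing
  * its head address to CHAR directly.
  */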
1014 if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
1015 channel_read_CHAR(dc) == prev->txd.phys)
1016 /* Restart chain DMA */
1017 channel_write_CHAR(dc, desc->txd.phys);
1018 list_splice_tail(&list, &dc->active_list);
1019}
1020
1021static void txx9dmac_issue_pending(struct dma_chan *chan)
1022{
1023 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1024
1025 spin_lock_bh(&dc->lock);
1026
1027 if (!list_empty(&dc->active_list))
1028 txx9dmac_scan_descriptors(dc);
1029 if (!list_empty(&dc->queue)) {
1030 if (list_empty(&dc->active_list)) {
1031 txx9dmac_dequeue(dc, &dc->active_list);
1032 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
1033 } else if (txx9_dma_have_SMPCHN()) {
1034 struct txx9dmac_desc *prev = txx9dmac_last_active(dc);
1035
1036 if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
1037 txx9dmac_chan_INTENT(dc))
1038 txx9dmac_chain_dynamic(dc, prev);
1039 }
1040 }
1041
1042 spin_unlock_bh(&dc->lock);
1043}
1044
1045static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
1046{
1047 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1048 struct txx9dmac_slave *ds = chan->private;
1049 struct txx9dmac_desc *desc;
1050 int i;
1051
1052 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1053
1054 /* ASSERT: channel is idle */
1055 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
1056 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1057 return -EIO;
1058 }
1059
1060 dc->completed = chan->cookie = 1;
1061
1062 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
1063 txx9dmac_chan_set_SMPCHN(dc);
1064 if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
1065 dc->ccr |= TXX9_DMA_CCR_INTENC;
1066 if (chan->device->device_prep_dma_memcpy) {
1067 if (ds)
1068 return -EINVAL;
1069 dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
1070 } else {
1071 if (!ds ||
1072 (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
1073 return -EINVAL;
1074 dc->ccr |= TXX9_DMA_CCR_EXTRQ |
1075 TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
1076 txx9dmac_chan_set_INTENT(dc);
1077 }
1078
1079 spin_lock_bh(&dc->lock);
1080 i = dc->descs_allocated;
1081 while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
1082 spin_unlock_bh(&dc->lock);
1083
1084 desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
1085 if (!desc) {
1086 dev_info(chan2dev(chan),
1087 "only allocated %d descriptors\n", i);
1088 spin_lock_bh(&dc->lock);
1089 break;
1090 }
1091 txx9dmac_desc_put(dc, desc);
1092
1093 spin_lock_bh(&dc->lock);
1094 i = ++dc->descs_allocated;
1095 }
1096 spin_unlock_bh(&dc->lock);
1097
1098 dev_dbg(chan2dev(chan),
1099 "alloc_chan_resources allocated %d descriptors\n", i);
1100
1101 return i;
1102}
1103
1104static void txx9dmac_free_chan_resources(struct dma_chan *chan)
1105{
1106 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1107 struct txx9dmac_dev *ddev = dc->ddev;
1108 struct txx9dmac_desc *desc, *_desc;
1109 LIST_HEAD(list);
1110
1111 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
1112 dc->descs_allocated);
1113
1114 /* ASSERT: channel is idle */
1115 BUG_ON(!list_empty(&dc->active_list));
1116 BUG_ON(!list_empty(&dc->queue));
1117 BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);
1118
1119 spin_lock_bh(&dc->lock);
1120 list_splice_init(&dc->free_list, &list);
1121 dc->descs_allocated = 0;
1122 spin_unlock_bh(&dc->lock);
1123
1124 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1125 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1126 dma_unmap_single(chan2parent(chan), desc->txd.phys,
1127 ddev->descsize, DMA_TO_DEVICE);
1128 kfree(desc);
1129 }
1130
1131 dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
1132}
1133
1134/*----------------------------------------------------------------------*/
1135
1136static void txx9dmac_off(struct txx9dmac_dev *ddev)
1137{
1138 dma_writel(ddev, MCR, 0);
1139 mmiowb();
1140}
1141
1142static int __init txx9dmac_chan_probe(struct platform_device *pdev)
1143{
1144 struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
1145 struct platform_device *dmac_dev = cpdata->dmac_dev;
1146 struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
1147 struct txx9dmac_chan *dc;
1148 int err;
1149 int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
1150 int irq;
1151
1152 dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
1153 if (!dc)
1154 return -ENOMEM;
1155
1156 dc->dma.dev = &pdev->dev;
1157 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
1158 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
1159 dc->dma.device_control = txx9dmac_control;
1160 dc->dma.device_tx_status = txx9dmac_tx_status;
1161 dc->dma.device_issue_pending = txx9dmac_issue_pending;
1162 if (pdata && pdata->memcpy_chan == ch) {
1163 dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
1164 dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
1165 } else {
1166 dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
1167 dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
1168 dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
1169 }
1170
1171 INIT_LIST_HEAD(&dc->dma.channels);
1172 dc->ddev = platform_get_drvdata(dmac_dev);
1173 if (dc->ddev->irq < 0) {
1174 irq = platform_get_irq(pdev, 0);
1175 if (irq < 0)
1176 return irq;
1177 tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
1178 (unsigned long)dc);
1179 dc->irq = irq;
1180 err = devm_request_irq(&pdev->dev, dc->irq,
1181 txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
1182 if (err)
1183 return err;
1184 } else
1185 dc->irq = -1;
1186 dc->ddev->chan[ch] = dc;
1187 dc->chan.device = &dc->dma;
1188 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
1189 dc->chan.cookie = dc->completed = 1;
1190
1191 if (is_dmac64(dc))
1192 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
1193 else
1194 dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
1195 spin_lock_init(&dc->lock);
1196
1197 INIT_LIST_HEAD(&dc->active_list);
1198 INIT_LIST_HEAD(&dc->queue);
1199 INIT_LIST_HEAD(&dc->free_list);
1200
1201 txx9dmac_reset_chan(dc);
1202
1203 platform_set_drvdata(pdev, dc);
1204
1205 err = dma_async_device_register(&dc->dma);
1206 if (err)
1207 return err;
1208 dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
1209 dc->dma.dev_id,
1210 dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
1211 dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");
1212
1213 return 0;
1214}
1215
1216static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
1217{
1218 struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
1219
1220 dma_async_device_unregister(&dc->dma);
1221 if (dc->irq >= 0)
1222 tasklet_kill(&dc->tasklet);
1223 dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
1224 return 0;
1225}
1226
1227static int __init txx9dmac_probe(struct platform_device *pdev)
1228{
1229 struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
1230 struct resource *io;
1231 struct txx9dmac_dev *ddev;
1232 u32 mcr;
1233 int err;
1234
1235 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1236 if (!io)
1237 return -EINVAL;
1238
1239 ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
1240 if (!ddev)
1241 return -ENOMEM;
1242
1243 if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
1244 dev_name(&pdev->dev)))
1245 return -EBUSY;
1246
1247 ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
1248 if (!ddev->regs)
1249 return -ENOMEM;
1250 ddev->have_64bit_regs = pdata->have_64bit_regs;
1251 if (__is_dmac64(ddev))
1252 ddev->descsize = sizeof(struct txx9dmac_hwdesc);
1253 else
1254 ddev->descsize = sizeof(struct txx9dmac_hwdesc32);
1255
1256 /* force dma off, just in case */
1257 txx9dmac_off(ddev);
1258
1259 ddev->irq = platform_get_irq(pdev, 0);
1260 if (ddev->irq >= 0) {
1261 tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
1262 (unsigned long)ddev);
1263 err = devm_request_irq(&pdev->dev, ddev->irq,
1264 txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
1265 if (err)
1266 return err;
1267 }
1268
1269 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
1270 if (pdata && pdata->memcpy_chan >= 0)
1271 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
1272 dma_writel(ddev, MCR, mcr);
1273
1274 platform_set_drvdata(pdev, ddev);
1275 return 0;
1276}
1277
1278static int __exit txx9dmac_remove(struct platform_device *pdev)
1279{
1280 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1281
1282 txx9dmac_off(ddev);
1283 if (ddev->irq >= 0)
1284 tasklet_kill(&ddev->tasklet);
1285 return 0;
1286}
1287
1288static void txx9dmac_shutdown(struct platform_device *pdev)
1289{
1290 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1291
1292 txx9dmac_off(ddev);
1293}
1294
1295static int txx9dmac_suspend_noirq(struct device *dev)
1296{
1297 struct platform_device *pdev = to_platform_device(dev);
1298 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1299
1300 txx9dmac_off(ddev);
1301 return 0;
1302}
1303
1304static int txx9dmac_resume_noirq(struct device *dev)
1305{
1306 struct platform_device *pdev = to_platform_device(dev);
1307 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1308 struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
1309 u32 mcr;
1310
1311 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
1312 if (pdata && pdata->memcpy_chan >= 0)
1313 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
1314 dma_writel(ddev, MCR, mcr);
1315 return 0;
1316
1317}
1318
1319static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
1320 .suspend_noirq = txx9dmac_suspend_noirq,
1321 .resume_noirq = txx9dmac_resume_noirq,
1322};
1323
1324static struct platform_driver txx9dmac_chan_driver = {
1325 .remove = __exit_p(txx9dmac_chan_remove),
1326 .driver = {
1327 .name = "txx9dmac-chan",
1328 },
1329};
1330
1331static struct platform_driver txx9dmac_driver = {
1332 .remove = __exit_p(txx9dmac_remove),
1333 .shutdown = txx9dmac_shutdown,
1334 .driver = {
1335 .name = "txx9dmac",
1336 .pm = &txx9dmac_dev_pm_ops,
1337 },
1338};
1339
1340static int __init txx9dmac_init(void)
1341{
1342 int rc;
1343
1344 rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
1345 if (!rc) {
1346 rc = platform_driver_probe(&txx9dmac_chan_driver,
1347 txx9dmac_chan_probe);
1348 if (rc)
1349 platform_driver_unregister(&txx9dmac_driver);
1350 }
1351 return rc;
1352}
1353module_init(txx9dmac_init);
1354
1355static void __exit txx9dmac_exit(void)
1356{
1357 platform_driver_unregister(&txx9dmac_chan_driver);
1358 platform_driver_unregister(&txx9dmac_driver);
1359}
1360module_exit(txx9dmac_exit);
1361
1362MODULE_LICENSE("GPL");
1363MODULE_DESCRIPTION("TXx9 DMA Controller driver");
1364MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
1365MODULE_ALIAS("platform:txx9dmac");
1366MODULE_ALIAS("platform:txx9dmac-chan");