/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR		0x0204
#define DTADR		0x0208
#define DCMD		0x020c

#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	(1 << 10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	(1 << 9)	/* The end of Receive */

#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	0x1000

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
};

#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)

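/*
 * Each physical channel has a 16-byte block of descriptor registers
 * (DDADR/DSADR/DTADR/DCMD), hence the (idx << 4) offsets used below.
 */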
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy->vchan)
		return;

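	/*
	 * Map the peripheral's DRCMR request line onto this physical
	 * channel: request lines below 64 live in the register bank at
	 * offset 0x0100, the others in the bank at 0x1100, 4 bytes apart.
	 */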
	reg = phy->vchan->drcmr;
	reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN,
				phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (phy) {
		reg = (phy->idx << 2) + DCSR;
		writel(readl(phy->base + reg) & ~DCSR_RUN,
				phy->base + reg);
	}
}

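/*
 * Acknowledge a channel interrupt: if DINT flags this channel, write the
 * DCSR status bits back to clear them and return 0; otherwise return
 * -EAGAIN so the shared-interrupt demux can skip the channel.
 */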
static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		/* clear irq */
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
		return 0;
	}
	return -EAGAIN;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

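/* Demux the shared interrupt: walk each bit set in DINT and run the per-channel handler. */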
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy;

	/*
	 * dma channel priorities
	 * ch 0 - 3, 16 - 19 <--> (0)
	 * ch 4 - 7, 20 - 23 <--> (1)
	 * ch 8 - 11, 24 - 27 <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				return phy;
			}
		}
	}

	return NULL;
}

/* desc->tx_list ==> pending list */
static void append_pending_queue(struct mmp_pdma_chan *chan,
				struct mmp_pdma_desc_sw *desc)
{
	struct mmp_pdma_desc_sw *tail =
			to_mmp_pdma_desc(chan->chain_pending.prev);

	if (list_empty(&chan->chain_pending))
		goto out_splice;

	/* one irq per queue, even appended */
	tail->desc.ddadr = desc->async_tx.phys;
	tail->desc.dcmd &= ~DCMD_ENDIRQEN;

	/* softly link to pending list */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still in running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		if (chan->phy) {
			chan->phy->vchan = NULL;
			chan->phy = NULL;
		}
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}


/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	append_pending_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

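/*
 * Allocate one hardware descriptor from the channel's dma_pool and set up
 * its async_tx so that tx_submit() queues the chain onto the pending list.
 */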
static struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each descriptor carries its own tx_submit callback */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */

static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool =
		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
				sizeof(struct mmp_pdma_desc_sw),
				__alignof__(struct mmp_pdma_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}
	if (chan->phy) {
		chan->phy->vchan = NULL;
		chan->phy = NULL;
	}
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	if (chan->phy) {
		chan->phy->vchan = NULL;
		chan->phy = NULL;
	}
	return;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

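	/* Split the copy into a chain of hw descriptors of at most PDMA_MAX_DESC_BYTES each. */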
	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

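	/*
	 * Build one or more hardware descriptors per scatterlist entry;
	 * entries longer than PDMA_MAX_DESC_BYTES are split across
	 * several descriptors.
	 */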
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

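/*
 * Channel control: DMA_TERMINATE_ALL stops the physical channel and frees
 * every queued descriptor; DMA_SLAVE_CONFIG translates the slave config
 * (direction, bus width, maxburst, device address) into DCMD bits.
 */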
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	int ret = 0;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		if (chan->phy) {
			chan->phy->vchan = NULL;
			chan->phy = NULL;
		}
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		if (cfg) {
			chan->dir = cfg->direction;
			chan->drcmr = cfg->slave_id;
		}
		chan->dev_addr = addr;
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	ret = dma_cookie_status(dchan, cookie, txstate);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return ret;
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Complete finished descriptors, run their callbacks and free them,
 * then start the pending list.
 */
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	/* submit pending list; callback for each desc; free desc */

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->chain_running)) {
		dma_cookie_t cookie;

		desc = to_mmp_pdma_desc(chan->chain_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->chain_running, &chain_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}

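/*
 * Set up one physical/virtual channel pair: point the phy at the shared
 * register base, optionally request a per-channel IRQ, and add the
 * virtual channel to the dmaengine device's channel list.
 */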
static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
					int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev,
			sizeof(struct mmp_pdma_chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq,
			mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node,
			&pdev->device.channels);

	return 0;
}

static struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;
	pdev->dev = &op->dev;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

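	/* Channel count comes from DT, else platform data, else the default of 32. */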
	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node,
				"#dma-channels", &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kzalloc(pdev->dev,
		dma_channels * sizeof(struct mmp_pdma_phy), GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq,
			mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	dev_info(pdev->device.dev, "initialized\n");
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner	= THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");