/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR		0x0204
#define DTADR		0x0208
#define DCMD		0x020c

#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	(1 << 10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	(1 << 9)	/* The end of Receive */

#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	0x1000
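
/*
 * PDMA_ALIGNMENT is presumably the alignment shift advertised to dmaengine
 * through dma_device.copy_align below (2^3 = 8 byte alignment for memcpy),
 * while PDMA_MAX_DESC_BYTES caps the length programmed into one hardware
 * descriptor and is kept below the DCMD_LENGTH limit of 8K - 1.
 */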

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int			dma_channels;
	void __iomem		*base;
	struct device		*dev;
	struct dma_device	device;
	struct mmp_pdma_phy	*phy;
	spinlock_t phy_lock; /* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

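/*
 * Map the peripheral's DRCMR request line to this physical channel and set
 * the RUN bit. The register math mirrors what the code below does: request
 * lines below 64 live in a DRCMR block starting at offset 0x0100, higher
 * lines in a second block at 0x1100, one 32-bit register per request line
 * (hence the "(reg & 0x3f) << 2" byte offset).
 */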
static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy->vchan)
		return;

	reg = phy->vchan->drcmr;
	reg = ((reg < 64) ? 0x0100 : 0x1100) + ((reg & 0x3f) << 2);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (phy) {
		reg = (phy->idx << 2) + DCSR;
		writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
	}
}

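/*
 * Acknowledge a channel interrupt. DINT carries one status bit per physical
 * channel; the DCSR status bits are write-one-to-clear on this controller,
 * so writing back the value just read clears whatever was pending. Returns
 * 0 if this channel had an interrupt to clear, -EAGAIN otherwise.
 */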
static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		/* clear irq */
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
		return 0;
	}
	return -EAGAIN;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

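/*
 * Shared-interrupt handler used when all channels are routed to a single
 * IRQ line: walk the set bits in DINT (clearing the lowest set bit on each
 * iteration with "dint &= dint - 1") and dispatch to the per-channel
 * handler for every physical channel that signalled.
 */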
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				spin_unlock_irqrestore(&pdev->phy_lock, flags);
				return phy;
			}
		}
	}

	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return NULL;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = pchan->phy->vchan->drcmr;
	reg = ((reg < 64) ? 0x0100 : 0x1100) + ((reg & 0x3f) << 2);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/* desc->tx_list ==> pending list */
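/*
 * Splice a freshly prepared descriptor chain onto the tail of the pending
 * list. If the pending list is not empty, the current tail descriptor is
 * hard-linked to the new chain (its ddadr points at the first new
 * descriptor) and its end-of-chain interrupt is suppressed, so the merged
 * queue still raises a single IRQ at its final descriptor.
 */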
static void append_pending_queue(struct mmp_pdma_chan *chan,
				 struct mmp_pdma_desc_sw *desc)
{
	struct mmp_pdma_desc_sw *tail =
		to_mmp_pdma_desc(chan->chain_pending.prev);

	if (list_empty(&chan->chain_pending))
		goto out_splice;

	/* keep a single irq per queue, even when descriptors are appended */
	tail->desc.ddadr = desc->async_tx.phys;
	tail->desc.dcmd &= ~DCMD_ENDIRQEN;

	/* softly link to pending list */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still running, the irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	append_pending_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each descriptor carries its own tx_submit callback */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function creates a dma pool for descriptor allocation.
 * The irq is requested only when the channel is requested.
 *
 * Return: The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

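/*
 * Prepare a memory-to-memory transfer. The copy is split into chunks of at
 * most PDMA_MAX_DESC_BYTES; each chunk gets its own hardware descriptor,
 * linked to the previous one through ddadr. Only the last descriptor has
 * DDADR_STOP and DCMD_ENDIRQEN set, so the whole chain completes with a
 * single interrupt.
 */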
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

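/*
 * Prepare a slave (device) transfer from a scatterlist. Each scatterlist
 * entry is split into PDMA_MAX_DESC_BYTES chunks; the memory-side address
 * advances through the entry while the device side stays fixed at
 * chan->dev_addr, with the transfer direction deciding which is source and
 * which is target. As in prep_memcpy, only the final descriptor carries
 * DDADR_STOP and DCMD_ENDIRQEN.
 */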
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

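/*
 * dmaengine device_control hook. DMA_TERMINATE_ALL stops the physical
 * channel, releases it and frees every queued descriptor;
 * DMA_SLAVE_CONFIG translates the generic dma_slave_config (bus width,
 * maximum burst, device address, direction) into the DCMD bits and the
 * DRCMR request line used when descriptors are prepared later.
 */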
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	int ret = 0;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		mmp_pdma_free_phy(chan);
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->drcmr = cfg->slave_id;
		chan->dev_addr = addr;
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Run the completion callbacks for finished descriptors and
 * start the next pending chain.
 */
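/*
 * Because only the last descriptor of each submitted chain has
 * DCMD_ENDIRQEN set, one tasklet run completes the whole running chain:
 * completing the cookie of the final descriptor implicitly marks every
 * earlier cookie as complete too. The descriptors are moved to a temporary
 * list so the callbacks can run without holding desc_lock.
 */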
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	/* submit pending list; callback for each desc; free desc */

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->chain_running)) {
		dma_cookie_t cookie;

		desc = to_mmp_pdma_desc(chan->chain_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->chain_running, &chain_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}

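/*
 * Initialize one physical channel slot and its virtual channel. When each
 * channel has a dedicated IRQ line it is requested here with the
 * per-channel handler; otherwise irq is 0 and interrupts are demuxed in
 * mmp_pdma_int_handler via the shared device IRQ requested at probe time.
 */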
static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
			      int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev,
			    sizeof(struct mmp_pdma_chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_chan_handler, IRQF_DISABLED,
				       "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

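/*
 * Probe: map the controller registers, work out the channel count (from
 * the device tree, platform data, or a default of 32), and register the
 * dmaengine device. If the platform provides fewer IRQ lines than
 * channels, a single shared IRQ is requested and demuxed through DINT;
 * otherwise each channel gets its own IRQ in mmp_pdma_chan_init().
 */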
static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;
	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node,
				     "#dma-channels", &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default: 32 channels */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kzalloc(pdev->dev,
				 dma_channels * sizeof(struct mmp_pdma_chan),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_int_handler, IRQF_DISABLED,
				       "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	dev_info(pdev->device.dev, "initialized\n");
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner	= THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");