/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR		0x0204
#define DTADR		0x0208
#define DCMD		0x020c

#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	(1 << 10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	(1 << 9)	/* The end of Receive */

#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock;		/* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

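/*
 * Bind the virtual channel's peripheral request line to this physical
 * channel (DRCMR), program the channel's byte-alignment bit in DALGN,
 * and set DCSR_RUN to start the engine.
 */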
static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (phy) {
		reg = (phy->idx << 2) + DCSR;
		writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
	}
}

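/*
 * Acknowledge a channel interrupt: if this channel's bit is set in DINT,
 * write DCSR back to itself to clear the latched status (warning on a bus
 * error) and return 0; otherwise return -EAGAIN.
 */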
static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		/* clear irq */
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
		return 0;
	}
	return -EAGAIN;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

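/*
 * Handler for the shared-interrupt case: DINT carries one pending bit per
 * physical channel, so demultiplex the set bits and hand each one to the
 * per-channel handler.
 */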
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->phy->vchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/* desc->tx_list ==> pending list */
static void append_pending_queue(struct mmp_pdma_chan *chan,
				 struct mmp_pdma_desc_sw *desc)
{
	struct mmp_pdma_desc_sw *tail =
				to_mmp_pdma_desc(chan->chain_pending.prev);

	if (list_empty(&chan->chain_pending))
		goto out_splice;

	/* one irq per queue, even appended */
	tail->desc.ddadr = desc->async_tx.phys;
	tail->desc.dcmd &= ~DCMD_ENDIRQEN;

	/* softly link to pending list */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still running, the irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	append_pending_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each desc has submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested.
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool =
		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
				sizeof(struct mmp_pdma_desc_sw),
				__alignof__(struct mmp_pdma_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}
	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

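	/*
	 * Split the copy into hardware descriptors of at most
	 * PDMA_MAX_DESC_BYTES each, chained through ddadr; remember whether
	 * any address is not 8-byte aligned so enable_chan() can set the
	 * DALGN bit for this channel.
	 */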
	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		/* use the length of the current entry, not of the first one */
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	int ret = 0;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		mmp_pdma_free_phy(chan);
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->dev_addr = addr;
		/* FIXME: drivers should be ported over to use the filter
		 * function. Once that's done, the following two lines can
		 * be removed.
		 */
		if (cfg->slave_id)
			chan->drcmr = cfg->slave_id;
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Run the completion callback for each finished descriptor,
 * then start the pending list.
 */
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	/* submit pending list; callback for each desc; free desc */

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->chain_running)) {
		dma_cookie_t cookie;

		desc = to_mmp_pdma_desc(chan->chain_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->chain_running, &chain_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
			    GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
				       IRQF_DISABLED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

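/*
 * Device-tree translation: pick a channel that currently has no client and
 * store the first cell of the DMA specifier as its DRCMR request line.
 */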
static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan, *candidate;

retry:
	candidate = NULL;

	/* walk the list of channels registered with the current instance and
	 * find one that is currently unused */
	list_for_each_entry(chan, &d->device.channels, device_node)
		if (chan->client_count == 0) {
			candidate = chan;
			break;
		}

	if (!candidate)
		return NULL;

	/* dma_get_slave_channel will return NULL if we lost a race between
	 * the lookup and the reservation */
	chan = dma_get_slave_channel(candidate);

	if (chan) {
		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

		c->drcmr = dma_spec->args[0];
		return chan;
	}

	goto retry;
}

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;
	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kzalloc(pdev->dev,
				 dma_channels * sizeof(struct mmp_pdma_chan),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       IRQF_DISABLED, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			return ret;
		}
	}

	/* store driver data so mmp_pdma_remove() can retrieve it */
	platform_set_drvdata(op, pdev);

	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner	= THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};

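/*
 * Filter function for dma_request_channel(): accept only channels owned by
 * this driver and record the requested DRCMR line passed through 'param'.
 */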
bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");