/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD		0x020c

#define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
#define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */
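
/*
 * Worked example (for illustration only): requestor line 10 maps to
 * 0x0100 + (10 << 2) = 0x0128, while line 100 maps to
 * 0x1100 + ((100 & 0x3f) << 2) = 0x1190. Writing DRCMR_MAPVLD together
 * with a physical channel index to that register routes the request
 * line to the channel (see enable_chan() below).
 */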

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)		/* Stop (read / write) */

#define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
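
/*
 * For illustration: mmp_pdma_config() and the prep routines below combine
 * these fields into a single DCMD word, so a typical memory-to-device
 * setup ends up with something like
 *
 *	DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_WIDTH4 | DCMD_BURST32 |
 *	(DCMD_LENGTH & len)
 *
 * i.e. increment the source address, let the target flow-control the
 * transfer, move 4 bytes per beat in 32-byte bursts, and clamp the
 * per-descriptor length to the 13-bit LENGTH field.
 */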

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};
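
/*
 * Sketch of how the prep routines below chain descriptors: each hardware
 * descriptor's ddadr holds the dma_pool bus address of the next one, and
 * the final descriptor either sets DDADR_STOP (one-shot transfers) or
 * points back at the first descriptor (cyclic transfers):
 *
 *	prev->desc.ddadr = new->async_tx.phys;
 *	last->desc.ddadr = DDADR_STOP;	(or first->async_tx.phys)
 */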

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock;	/* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx)					\
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh)					\
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan)					\
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev)					\
	container_of(dmadev, struct mmp_pdma_device, device)

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (!(dint & BIT(phy->idx)))
		return -EAGAIN;

	/* clear irq */
	dcsr = readl(phy->base + reg);
	writel(dcsr, phy->base + reg);
	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

	return 0;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) != 0)
		return IRQ_NONE;

	tasklet_schedule(&phy->vchan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		/* only handle interrupts belonging to the pdma driver */
		if (i >= pdev->dma_channels)
			break;
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/* look up a free phy channel in descending priority order */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */
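	/*
	 * Example: channel 21 has priority (21 & 0xf) >> 2 = 1, so on a
	 * 32-channel device the loop below tries every free prio-0 channel
	 * (0-3, 16-19) before it considers 21's prio-1 group (4-7, 20-23).
	 */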

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still running, the irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each descriptor carries its own tx_submit callback */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function creates a dma pool for descriptor allocation. The irq is
 * requested only when the channel itself is requested.
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
				  (DCMD_LENGTH & period_len));
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static int mmp_pdma_config(struct dma_chan *dchan,
			   struct dma_slave_config *cfg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	if (cfg->direction == DMA_DEV_TO_MEM) {
		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
		addr = cfg->src_addr;
	} else if (cfg->direction == DMA_MEM_TO_DEV) {
		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
		addr = cfg->dst_addr;
	}

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		chan->dcmd |= DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		chan->dcmd |= DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		chan->dcmd |= DCMD_WIDTH4;

	if (maxburst == 8)
		chan->dcmd |= DCMD_BURST8;
	else if (maxburst == 16)
		chan->dcmd |= DCMD_BURST16;
	else if (maxburst == 32)
		chan->dcmd |= DCMD_BURST32;

	chan->dir = cfg->direction;
	chan->dev_addr = addr;

	/*
	 * FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (cfg->slave_id)
		chan->drcmr = cfg->slave_id;

	return 0;
}
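
/*
 * A client driver typically configures the channel before preparing
 * transfers, e.g. (illustrative sketch only; "fifo_phys" stands in for a
 * peripheral's real FIFO bus address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 32,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */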

static int mmp_pdma_terminate_all(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	if (!dchan)
		return -EINVAL;

	disable_chan(chan->phy);
	mmp_pdma_free_phy(chan);
	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	chan->idle = true;

	return 0;
}

static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
				     dma_cookie_t cookie)
{
	struct mmp_pdma_desc_sw *sw;
	u32 curr, residue = 0;
	bool passed = false;
	bool cyclic = chan->cyclic_first != NULL;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	if (chan->dir == DMA_DEV_TO_MEM)
		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
	else
		curr = readl(chan->phy->base + DSADR(chan->phy->idx));

	list_for_each_entry(sw, &chan->chain_running, node) {
		u32 start, end, len;

		if (chan->dir == DMA_DEV_TO_MEM)
			start = sw->desc.dtadr;
		else
			start = sw->desc.dsadr;

		len = sw->desc.dcmd & DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we find the descriptor that
		 * lies inside the boundaries of the curr pointer. All
		 * descriptors that occur in the list _after_ that partially
		 * handled descriptor are still to be processed and are hence
		 * added to the residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}

		/*
		 * Descriptors that have the ENDIRQEN bit set mark the end of a
		 * transaction chain, and the cookie assigned with it has been
		 * returned previously from mmp_pdma_tx_submit().
		 *
		 * In case we have multiple transactions in the running chain,
		 * and the cookie does not match the one the user asked us
		 * about, reset the state variables and start over.
		 *
		 * This logic does not apply to cyclic transactions, where all
		 * descriptors have the ENDIRQEN bit set, and for which we
		 * can't have multiple transactions on one channel anyway.
		 */
		if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
			continue;

		if (sw->async_tx.cookie == cookie) {
			return residue;
		} else {
			residue = 0;
			passed = false;
		}
	}

	/* We should only get here in case of cyclic transactions */
	return residue;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(ret != DMA_ERROR))
		dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));

	return ret;
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Run the callbacks for completed descriptors,
 * then start the pending list
 */
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	if (chan->cyclic_first) {
		dma_async_tx_callback cb = NULL;
		void *cb_data = NULL;

		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		cb = desc->async_tx.callback;
		cb_data = desc->async_tx.callback_param;
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		if (cb)
			cb(cb_data);

		return;
	}

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_move(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;

			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
				       IRQF_SHARED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static const struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->device);
	if (!chan)
		return NULL;

	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];

	return chan;
}

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default to 32 channels */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all channels share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       IRQF_SHARED, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_config = mmp_pdma_config;
	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
	pdev->device.copy_align = PDMA_ALIGNMENT;
	pdev->device.src_addr_widths = widths;
	pdev->device.dst_addr_widths = widths;
	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver = {
		.name = "mmp-pdma",
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table = mmp_pdma_id_table,
	.probe = mmp_pdma_probe,
	.remove = mmp_pdma_remove,
};

bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
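
/*
 * Typical use from a client driver (illustrative sketch only; the
 * requestor line number is board specific):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int requestor = 47;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &requestor);
 */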

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");