#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "dmaengine.h"

#define DESC_TYPE	27
#define DESC_TYPE_HOST	0x10
#define DESC_TYPE_TEARD	0x13

#define TD_DESC_IS_RX	(1 << 16)
#define TD_DESC_DMA_NUM	10

#define DESC_LENGTH_BITS_NUM	21

#define DESC_TYPE_USB	(5 << 26)
#define DESC_PD_COMPLETE	(1 << 31)

/* DMA engine */
#define DMA_TDFDQ	4
#define DMA_TXGCR(x)	(0x800 + (x) * 0x20)
#define DMA_RXGCR(x)	(0x808 + (x) * 0x20)
#define RXHPCRA0	4

#define GCR_CHAN_ENABLE	(1 << 31)
#define GCR_TEARDOWN	(1 << 30)
#define GCR_STARV_RETRY	(1 << 24)
#define GCR_DESC_TYPE_HOST	(1 << 14)

/* DMA scheduler */
#define DMA_SCHED_CTRL	0
#define DMA_SCHED_CTRL_EN	(1 << 31)
#define DMA_SCHED_WORD(x)	((x) * 4 + 0x800)

#define SCHED_ENTRY0_CHAN(x)	((x) << 0)
#define SCHED_ENTRY0_IS_RX	(1 << 7)

#define SCHED_ENTRY1_CHAN(x)	((x) << 8)
#define SCHED_ENTRY1_IS_RX	(1 << 15)

#define SCHED_ENTRY2_CHAN(x)	((x) << 16)
#define SCHED_ENTRY2_IS_RX	(1 << 23)

#define SCHED_ENTRY3_CHAN(x)	((x) << 24)
#define SCHED_ENTRY3_IS_RX	(1 << 31)
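
/*
 * Each 32-bit scheduler word holds four table entries, one per byte: the
 * channel number sits in the low bits of the byte and the IS_RX flag in its
 * MSB. Worked example: TX channel 2 in slot 0 plus RX channel 2 in slot 1 is
 * SCHED_ENTRY0_CHAN(2) | SCHED_ENTRY1_CHAN(2) | SCHED_ENTRY1_IS_RX
 * = 0x00008202. init_sched() below fills the table exactly this way.
 */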

/* Queue manager */
/* 4 KiB of memory for descriptors, 2 for each endpoint */
#define ALLOC_DECS_NUM	128
#define DESCS_AREAS	1
#define TOTAL_DESCS_NUM	(ALLOC_DECS_NUM * DESCS_AREAS)
#define QMGR_SCRATCH_SIZE	(TOTAL_DESCS_NUM * 4)
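
/*
 * The scratch area becomes the queue manager's linking RAM (it is written
 * to QMGR_LRAM0_BASE in init_cppi41()); the "* 4" presumably reserves one
 * 4-byte linking-RAM entry per allocated descriptor.
 */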

#define QMGR_LRAM0_BASE	0x80
#define QMGR_LRAM_SIZE	0x84
#define QMGR_LRAM1_BASE	0x88
#define QMGR_MEMBASE(x)	(0x1000 + (x) * 0x10)
#define QMGR_MEMCTRL(x)	(0x1004 + (x) * 0x10)
#define QMGR_MEMCTRL_IDX_SH	16
#define QMGR_MEMCTRL_DESC_SH	8

#define QMGR_NUM_PEND	5
#define QMGR_PEND(x)	(0x90 + (x) * 4)

#define QMGR_PENDING_SLOT_Q(x)	((x) / 32)
#define QMGR_PENDING_BIT_Q(x)	((x) % 32)
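
/*
 * A pending register covers 32 queues. Worked example for queue 93:
 * QMGR_PENDING_SLOT_Q(93) = 93 / 32 = 2 (third pending register) and
 * QMGR_PENDING_BIT_Q(93) = 93 % 32 = 29 (bit 29 within it).
 */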

#define QMGR_QUEUE_A(n)	(0x2000 + (n) * 0x10)
#define QMGR_QUEUE_B(n)	(0x2004 + (n) * 0x10)
#define QMGR_QUEUE_C(n)	(0x2008 + (n) * 0x10)
#define QMGR_QUEUE_D(n)	(0x200c + (n) * 0x10)

/* Glue layer specific */
/* USBSS / USB AM335x */
#define USBSS_IRQ_STATUS	0x28
#define USBSS_IRQ_ENABLER	0x2c
#define USBSS_IRQ_CLEARR	0x30

#define USBSS_IRQ_PD_COMP	(1 << 2)

/* Packet Descriptor */
#define PD2_ZERO_LENGTH	(1 << 19)

struct cppi41_channel {
	struct dma_chan chan;
	struct dma_async_tx_descriptor txd;
	struct cppi41_dd *cdd;
	struct cppi41_desc *desc;
	dma_addr_t desc_phys;
	void __iomem *gcr_reg;
	int is_tx;
	u32 residue;

	unsigned int q_num;
	unsigned int q_comp_num;
	unsigned int port_num;

	unsigned td_retry;
	unsigned td_queued:1;
	unsigned td_seen:1;
	unsigned td_desc_seen:1;
};

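/*
 * Host packet descriptor: eight 32-bit words. As used by the get_host_pd*()
 * helpers below, pd0 holds the descriptor type and transfer length, pd2 the
 * completion queue, pd4 and pd7 the buffer pointer, and pd6 the original
 * buffer length plus the DESC_PD_COMPLETE bit.
 */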
struct cppi41_desc {
	u32 pd0;
	u32 pd1;
	u32 pd2;
	u32 pd3;
	u32 pd4;
	u32 pd5;
	u32 pd6;
	u32 pd7;
} __aligned(32);

struct chan_queues {
	u16 submit;
	u16 complete;
};

struct cppi41_dd {
	struct dma_device ddev;

	void *qmgr_scratch;
	dma_addr_t scratch_phys;

	struct cppi41_desc *cd;
	dma_addr_t descs_phys;
	u32 first_td_desc;
	struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];

	void __iomem *usbss_mem;
	void __iomem *ctrl_mem;
	void __iomem *sched_mem;
	void __iomem *qmgr_mem;
	unsigned int irq;
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;

	/* context for suspend/resume */
	unsigned int dma_tdfdq;
};

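/*
 * Q 93 is the lowest completion queue in use (usb_queues_tx[0] below);
 * cppi41_irq() masks off pending bits for any queue below it.
 */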
#define FIST_COMPLETION_QUEUE	93
static const struct chan_queues usb_queues_tx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit = 32, .complete = 93},
	[ 1] = { .submit = 34, .complete = 94},
	[ 2] = { .submit = 36, .complete = 95},
	[ 3] = { .submit = 38, .complete = 96},
	[ 4] = { .submit = 40, .complete = 97},
	[ 5] = { .submit = 42, .complete = 98},
	[ 6] = { .submit = 44, .complete = 99},
	[ 7] = { .submit = 46, .complete = 100},
	[ 8] = { .submit = 48, .complete = 101},
	[ 9] = { .submit = 50, .complete = 102},
	[10] = { .submit = 52, .complete = 103},
	[11] = { .submit = 54, .complete = 104},
	[12] = { .submit = 56, .complete = 105},
	[13] = { .submit = 58, .complete = 106},
	[14] = { .submit = 60, .complete = 107},

	/* USB1 ENDP 1 */
	[15] = { .submit = 62, .complete = 125},
	[16] = { .submit = 64, .complete = 126},
	[17] = { .submit = 66, .complete = 127},
	[18] = { .submit = 68, .complete = 128},
	[19] = { .submit = 70, .complete = 129},
	[20] = { .submit = 72, .complete = 130},
	[21] = { .submit = 74, .complete = 131},
	[22] = { .submit = 76, .complete = 132},
	[23] = { .submit = 78, .complete = 133},
	[24] = { .submit = 80, .complete = 134},
	[25] = { .submit = 82, .complete = 135},
	[26] = { .submit = 84, .complete = 136},
	[27] = { .submit = 86, .complete = 137},
	[28] = { .submit = 88, .complete = 138},
	[29] = { .submit = 90, .complete = 139},
};

static const struct chan_queues usb_queues_rx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit = 1, .complete = 109},
	[ 1] = { .submit = 2, .complete = 110},
	[ 2] = { .submit = 3, .complete = 111},
	[ 3] = { .submit = 4, .complete = 112},
	[ 4] = { .submit = 5, .complete = 113},
	[ 5] = { .submit = 6, .complete = 114},
	[ 6] = { .submit = 7, .complete = 115},
	[ 7] = { .submit = 8, .complete = 116},
	[ 8] = { .submit = 9, .complete = 117},
	[ 9] = { .submit = 10, .complete = 118},
	[10] = { .submit = 11, .complete = 119},
	[11] = { .submit = 12, .complete = 120},
	[12] = { .submit = 13, .complete = 121},
	[13] = { .submit = 14, .complete = 122},
	[14] = { .submit = 15, .complete = 123},

	/* USB1 ENDP 1 */
	[15] = { .submit = 16, .complete = 141},
	[16] = { .submit = 17, .complete = 142},
	[17] = { .submit = 18, .complete = 143},
	[18] = { .submit = 19, .complete = 144},
	[19] = { .submit = 20, .complete = 145},
	[20] = { .submit = 21, .complete = 146},
	[21] = { .submit = 22, .complete = 147},
	[22] = { .submit = 23, .complete = 148},
	[23] = { .submit = 24, .complete = 149},
	[24] = { .submit = 25, .complete = 150},
	[25] = { .submit = 26, .complete = 151},
	[26] = { .submit = 27, .complete = 152},
	[27] = { .submit = 28, .complete = 153},
	[28] = { .submit = 29, .complete = 154},
	[29] = { .submit = 30, .complete = 155},
};

struct cppi_glue_infos {
	irqreturn_t (*isr)(int irq, void *data);
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;
};

static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
{
	return container_of(c, struct cppi41_channel, chan);
}

static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
{
	struct cppi41_channel *c;
	u32 descs_size;
	u32 desc_num;

	descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;

	if (!((desc >= cdd->descs_phys) &&
			(desc < (cdd->descs_phys + descs_size)))) {
		return NULL;
	}

	desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
	BUG_ON(desc_num >= ALLOC_DECS_NUM);
	c = cdd->chan_busy[desc_num];
	cdd->chan_busy[desc_num] = NULL;
	return c;
}

static void cppi_writel(u32 val, void __iomem *mem)
{
	__raw_writel(val, mem);
}

static u32 cppi_readl(void __iomem *mem)
{
	return __raw_readl(mem);
}

static u32 pd_trans_len(u32 val)
{
	return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
}

static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
{
	u32 desc;

	desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
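	/*
	 * The low five bits of the QUEUE_D register carry the descriptor-size
	 * hint that push_desc_queue() ORs into the address; mask them off to
	 * recover the plain descriptor address.
	 */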
	desc &= ~0x1f;
	return desc;
}

static irqreturn_t cppi41_irq(int irq, void *data)
{
	struct cppi41_dd *cdd = data;
	struct cppi41_channel *c;
	u32 status;
	int i;

	status = cppi_readl(cdd->usbss_mem + USBSS_IRQ_STATUS);
	if (!(status & USBSS_IRQ_PD_COMP))
		return IRQ_NONE;
	cppi_writel(status, cdd->usbss_mem + USBSS_IRQ_STATUS);

	for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
			i++) {
		u32 val;
		u32 q_num;

		val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
		if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) {
			u32 mask;
			/* set the bit corresponding to completion Q 93 */
			mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE);
			/* now set all bits for queues below Q 93 */
			mask--;
			/* invert so that only Q 93 and above remain set */
			val &= ~mask;
		}

		if (val)
			__iormb();

		while (val) {
			u32 desc, len;

			q_num = __fls(val);
			val &= ~(1 << q_num);
			q_num += 32 * i;
			desc = cppi41_pop_desc(cdd, q_num);
			c = desc_to_chan(cdd, desc);
			if (WARN_ON(!c)) {
				pr_err("%s() q %d desc %08x\n", __func__,
						q_num, desc);
				continue;
			}

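			/*
			 * pd6 was primed with the original buffer length in
			 * get_host_pd6(); pd0 carries the transfer length as
			 * reported on completion, so the difference is the
			 * residue.
			 */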
			if (c->desc->pd2 & PD2_ZERO_LENGTH)
				len = 0;
			else
				len = pd_trans_len(c->desc->pd0);

			c->residue = pd_trans_len(c->desc->pd6) - len;
			dma_cookie_complete(&c->txd);
			c->txd.callback(c->txd.callback_param);
		}
	}
	return IRQ_HANDLED;
}

static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);

	return cookie;
}

static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);

	dma_cookie_init(chan);
	dma_async_tx_descriptor_init(&c->txd, chan);
	c->txd.tx_submit = cppi41_tx_submit;

	if (!c->is_tx)
		cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	return 0;
}

static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
}

static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	enum dma_status ret;

	/* lock */
	ret = dma_cookie_status(chan, cookie, txstate);
	if (txstate && ret == DMA_COMPLETE)
		txstate->residue = c->residue;
	/* unlock */

	return ret;
}

static void push_desc_queue(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	u32 reg;

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	WARN_ON(cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = c;

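	/*
	 * Queue D wants the descriptor size encoded in the low five bits as
	 * (size - 24) / 4; for the 32-byte cppi41_desc that is
	 * (32 - 24) / 4 = 2. The 32-byte-aligned address leaves those bits
	 * free.
	 */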
	reg = (sizeof(struct cppi41_desc) - 24) / 4;
	reg |= desc_phys;
	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}

static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	u32 reg;

	c->residue = 0;

	reg = GCR_CHAN_ENABLE;
	if (!c->is_tx) {
		reg |= GCR_STARV_RETRY;
		reg |= GCR_DESC_TYPE_HOST;
		reg |= c->q_comp_num;
	}

	cppi_writel(reg, c->gcr_reg);

	/*
	 * We don't use writel() but __raw_writel() so we have to make sure
	 * that the DMA descriptor in coherent memory made it to the main
	 * memory before starting the DMA engine.
	 */
	__iowmb();
	push_desc_queue(c);
}

static u32 get_host_pd0(u32 length)
{
	u32 reg;

	reg = DESC_TYPE_HOST << DESC_TYPE;
	reg |= length;

	return reg;
}

static u32 get_host_pd1(struct cppi41_channel *c)
{
	u32 reg;

	reg = 0;

	return reg;
}

static u32 get_host_pd2(struct cppi41_channel *c)
{
	u32 reg;

	reg = DESC_TYPE_USB;
	reg |= c->q_comp_num;

	return reg;
}

static u32 get_host_pd3(u32 length)
{
	u32 reg;

	/* PD3 = packet size */
	reg = length;

	return reg;
}

static u32 get_host_pd6(u32 length)
{
	u32 reg;

	/* PD6 buffer size */
	reg = DESC_PD_COMPLETE;
	reg |= length;

	return reg;
}

static u32 get_host_pd4_or_7(u32 addr)
{
	u32 reg;

	reg = addr;

	return reg;
}

static u32 get_host_pd5(void)
{
	u32 reg;

	reg = 0;

	return reg;
}

static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_desc *d;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int num;

	num = 0;
	d = c->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		u32 addr;
		u32 len;

		/* We need to use more than one desc once musb supports sg */
		BUG_ON(num > 0);
		addr = lower_32_bits(sg_dma_address(sg));
		len = sg_dma_len(sg);

		d->pd0 = get_host_pd0(len);
		d->pd1 = get_host_pd1(c);
		d->pd2 = get_host_pd2(c);
		d->pd3 = get_host_pd3(len);
		d->pd4 = get_host_pd4_or_7(addr);
		d->pd5 = get_host_pd5();
		d->pd6 = get_host_pd6(len);
		d->pd7 = get_host_pd4_or_7(addr);

		d++;
	}

	return &c->txd;
}
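
/*
 * A rough sketch of how a client (e.g. the MUSB glue) is expected to drive
 * this engine through the generic dmaengine API; the variable names are
 * illustrative only:
 *
 *	chan = dma_request_slave_channel(dev, "tx1");
 *	d = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV, 0);
 *	d->callback = done_fn;
 *	d->callback_param = ctx;
 *	dmaengine_submit(d);
 *	dma_async_issue_pending(chan);
 *
 * The callback fires from cppi41_irq() above; note that prep_slave_sg()
 * currently handles a single scatterlist entry only (see the BUG_ON).
 */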

static int cpp41_cfg_chan(struct cppi41_channel *c,
	struct dma_slave_config *cfg)
{
	return 0;
}

static void cppi41_compute_td_desc(struct cppi41_desc *d)
{
	d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
}

static int cppi41_tear_down_chan(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	struct cppi41_desc *td;
	u32 reg;
	u32 desc_phys;
	u32 td_desc_phys;

	td = cdd->cd;
	td += cdd->first_td_desc;

	td_desc_phys = cdd->descs_phys;
	td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);

	if (!c->td_queued) {
		cppi41_compute_td_desc(td);
		__iowmb();

		reg = (sizeof(struct cppi41_desc) - 24) / 4;
		reg |= td_desc_phys;
		cppi_writel(reg, cdd->qmgr_mem +
				QMGR_QUEUE_D(cdd->td_queue.submit));

		reg = GCR_CHAN_ENABLE;
		if (!c->is_tx) {
			reg |= GCR_STARV_RETRY;
			reg |= GCR_DESC_TYPE_HOST;
			reg |= c->q_comp_num;
		}
		reg |= GCR_TEARDOWN;
		cppi_writel(reg, c->gcr_reg);
		c->td_queued = 1;
		c->td_retry = 500;
	}

	if (!c->td_seen || !c->td_desc_seen) {

		desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
		if (!desc_phys)
			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);

		if (desc_phys == c->desc_phys) {
			c->td_desc_seen = 1;

		} else if (desc_phys == td_desc_phys) {
			u32 pd0;

			__iormb();
			pd0 = td->pd0;
			WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
			WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
			WARN_ON((pd0 & 0x1f) != c->port_num);
			c->td_seen = 1;
		} else if (desc_phys) {
			WARN_ON_ONCE(1);
		}
	}
	c->td_retry--;
	/*
	 * If the TX descriptor / channel is in use, the caller needs to poke
	 * its TD bit multiple times. After that the hardware releases the
	 * transfer descriptor followed by the TD descriptor. Waiting seems
	 * not to make any difference.
	 * RX seems to be thrown out right away. However once the TearDown
	 * descriptor gets through we are done. If we have seen the transfer
	 * descriptor before the TD we fetch it from enqueue, it has to be
	 * there waiting for us.
	 */
	if (!c->td_seen && c->td_retry) {
		udelay(1);
		return -EAGAIN;
	}
	WARN_ON(!c->td_retry);

	if (!c->td_desc_seen) {
		desc_phys = cppi41_pop_desc(cdd, c->q_num);
		if (!desc_phys)
			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
		WARN_ON(!desc_phys);
	}

	c->td_queued = 0;
	c->td_seen = 0;
	c->td_desc_seen = 0;
	cppi_writel(0, c->gcr_reg);
	return 0;
}

static int cppi41_stop_chan(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	int ret;

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	if (!cdd->chan_busy[desc_num])
		return 0;

	ret = cppi41_tear_down_chan(c);
	if (ret)
		return ret;

	WARN_ON(!cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = NULL;

	return 0;
}

static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = cppi41_stop_chan(chan);
		break;

	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void cleanup_chans(struct cppi41_dd *cdd)
{
	while (!list_empty(&cdd->ddev.channels)) {
		struct cppi41_channel *cchan;

		cchan = list_first_entry(&cdd->ddev.channels,
			struct cppi41_channel, chan.device_node);
		list_del(&cchan->chan.device_node);
		kfree(cchan);
	}
}

static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
	struct cppi41_channel *cchan;
	int i;
	int ret;
	u32 n_chans;

	ret = of_property_read_u32(dev->of_node, "#dma-channels",
			&n_chans);
	if (ret)
		return ret;
	/*
	 * Each channel can be used either for TX or for RX, and USB needs
	 * one channel per direction, so register twice the number of
	 * channels given in the DT property.
	 */
	n_chans *= 2;

	for (i = 0; i < n_chans; i++) {
		cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
		if (!cchan)
			goto err;

		cchan->cdd = cdd;
		if (i & 1) {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
			cchan->is_tx = 1;
		} else {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
			cchan->is_tx = 0;
		}
		cchan->port_num = i >> 1;
		cchan->desc = &cdd->cd[i];
		cchan->desc_phys = cdd->descs_phys;
		cchan->desc_phys += i * sizeof(struct cppi41_desc);
		cchan->chan.device = &cdd->ddev;
		list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
	}
	cdd->first_td_desc = n_chans;

	return 0;
err:
	cleanup_chans(cdd);
	return -ENOMEM;
}

static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int mem_decs;
	int i;

	mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);

	for (i = 0; i < DESCS_AREAS; i++) {

		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		dma_free_coherent(dev, mem_decs, cdd->cd,
				cdd->descs_phys);
	}
}

static void disable_sched(struct cppi41_dd *cdd)
{
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}

static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	disable_sched(cdd);

	purge_descs(dev, cdd);

	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
	dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
			cdd->scratch_phys);
}

static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int desc_size;
	unsigned int mem_decs;
	int i;
	u32 reg;
	u32 idx;

	BUILD_BUG_ON(sizeof(struct cppi41_desc) &
			(sizeof(struct cppi41_desc) - 1));
	BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
	BUILD_BUG_ON(ALLOC_DECS_NUM < 32);

	desc_size = sizeof(struct cppi41_desc);
	mem_decs = ALLOC_DECS_NUM * desc_size;

	idx = 0;
	for (i = 0; i < DESCS_AREAS; i++) {

		reg = idx << QMGR_MEMCTRL_IDX_SH;
		reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
		reg |= ilog2(ALLOC_DECS_NUM) - 5;

		BUILD_BUG_ON(DESCS_AREAS != 1);
		cdd->cd = dma_alloc_coherent(dev, mem_decs,
				&cdd->descs_phys, GFP_KERNEL);
		if (!cdd->cd)
			return -ENOMEM;

		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		idx += ALLOC_DECS_NUM;
	}
	return 0;
}

static void init_sched(struct cppi41_dd *cdd)
{
	unsigned ch;
	unsigned word;
	u32 reg;

	word = 0;
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
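	/*
	 * Each scheduler word carries four entries: TX and RX for channel
	 * ch, then TX and RX for channel ch + 1, so the 15 channel pairs
	 * need 15 words. The final write below programs the entry count
	 * minus one (15 * 2 * 2 - 1 = 59) and sets the enable bit.
	 */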
	for (ch = 0; ch < 15 * 2; ch += 2) {

		reg = SCHED_ENTRY0_CHAN(ch);
		reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;

		reg |= SCHED_ENTRY2_CHAN(ch + 1);
		reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
		cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
		word++;
	}
	reg = 15 * 2 * 2 - 1;
	reg |= DMA_SCHED_CTRL_EN;
	cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}

static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	int ret;

	BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
	cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
			&cdd->scratch_phys, GFP_KERNEL);
	if (!cdd->qmgr_scratch)
		return -ENOMEM;

	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	ret = init_descs(dev, cdd);
	if (ret)
		goto err_td;

	cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
	init_sched(cdd);
	return 0;
err_td:
	deinit_cppi41(dev, cdd);
	return ret;
}

static struct platform_driver cpp41_dma_driver;
/*
 * The param format is:
 * X Y
 * X: Port
 * Y: 0 = RX else TX
 */
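/*
 * For example, a consumer's dma_spec of "<&cppi41dma 0 1>" (phandle name
 * illustrative) selects the TX channel of port 0: the two cells land in
 * num[INFO_PORT] and num[INFO_IS_TX] of the filter below.
 */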
#define INFO_PORT	0
#define INFO_IS_TX	1

static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct cppi41_channel *cchan;
	struct cppi41_dd *cdd;
	const struct chan_queues *queues;
	u32 *num = param;

	if (chan->device->dev->driver != &cpp41_dma_driver.driver)
		return false;

	cchan = to_cpp41_chan(chan);

	if (cchan->port_num != num[INFO_PORT])
		return false;

	if (cchan->is_tx && !num[INFO_IS_TX])
		return false;
	cdd = cchan->cdd;
	if (cchan->is_tx)
		queues = cdd->queues_tx;
	else
		queues = cdd->queues_rx;

	BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
	if (WARN_ON(cchan->port_num >= ARRAY_SIZE(usb_queues_rx)))
		return false;

	cchan->q_num = queues[cchan->port_num].submit;
	cchan->q_comp_num = queues[cchan->port_num].complete;
	return true;
}

static struct of_dma_filter_info cpp41_dma_info = {
	.filter_fn = cpp41_dma_filter_fn,
};

static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct of_dma_filter_info *info = ofdma->of_dma_data;

	if (!info || !info->filter_fn)
		return NULL;

	if (count != 2)
		return NULL;

	return dma_request_channel(info->dma_cap, info->filter_fn,
			&dma_spec->args[0]);
}

static const struct cppi_glue_infos usb_infos = {
	.isr = cppi41_irq,
	.queues_rx = usb_queues_rx,
	.queues_tx = usb_queues_tx,
	.td_queue = { .submit = 31, .complete = 0 },
};

static const struct of_device_id cppi41_dma_ids[] = {
	{ .compatible = "ti,am3359-cppi41", .data = &usb_infos},
	{},
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
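
/*
 * A minimal sketch of a matching controller node (addresses elided, not
 * taken from a real DTS); cppi41_dma_probe() below maps the four reg
 * regions in this order and cppi41_add_chans() reads "#dma-channels":
 *
 *	cppi41dma: dma-controller {
 *		compatible = "ti,am3359-cppi41";
 *		reg = <...>,	// usbss
 *		      <...>,	// controller
 *		      <...>,	// scheduler
 *		      <...>;	// queuemgr
 *		interrupts = <...>;
 *		#dma-cells = <2>;
 *		#dma-channels = <30>;
 *	};
 */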

static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
	const struct of_device_id *of_id;

	of_id = of_match_node(cppi41_dma_ids, dev->of_node);
	if (!of_id)
		return NULL;
	return of_id->data;
}

static int cppi41_dma_probe(struct platform_device *pdev)
{
	struct cppi41_dd *cdd;
	struct device *dev = &pdev->dev;
	const struct cppi_glue_infos *glue_info;
	int irq;
	int ret;

	glue_info = get_glue_info(dev);
	if (!glue_info)
		return -EINVAL;

	cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
	if (!cdd)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
	cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
	cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
	cdd->ddev.device_tx_status = cppi41_dma_tx_status;
	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
	cdd->ddev.device_control = cppi41_dma_control;
	cdd->ddev.dev = dev;
	INIT_LIST_HEAD(&cdd->ddev.channels);
	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;

	cdd->usbss_mem = of_iomap(dev->of_node, 0);
	cdd->ctrl_mem = of_iomap(dev->of_node, 1);
	cdd->sched_mem = of_iomap(dev->of_node, 2);
	cdd->qmgr_mem = of_iomap(dev->of_node, 3);

	if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
			!cdd->qmgr_mem)
		return -ENXIO;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_get_sync;

	cdd->queues_rx = glue_info->queues_rx;
	cdd->queues_tx = glue_info->queues_tx;
	cdd->td_queue = glue_info->td_queue;

	ret = init_cppi41(dev, cdd);
	if (ret)
		goto err_init_cppi;

	ret = cppi41_add_chans(dev, cdd);
	if (ret)
		goto err_chans;

	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq) {
		ret = -EINVAL;
		goto err_irq;
	}

	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);

	ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED,
			dev_name(dev), cdd);
	if (ret)
		goto err_irq;
	cdd->irq = irq;

	ret = dma_async_device_register(&cdd->ddev);
	if (ret)
		goto err_dma_reg;

	ret = of_dma_controller_register(dev->of_node,
			cppi41_dma_xlate, &cpp41_dma_info);
	if (ret)
		goto err_of;

	platform_set_drvdata(pdev, cdd);
	return 0;
err_of:
	dma_async_device_unregister(&cdd->ddev);
err_dma_reg:
err_irq:
	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	cleanup_chans(cdd);
err_chans:
	deinit_cppi41(dev, cdd);
err_init_cppi:
	pm_runtime_put(dev);
err_get_sync:
	pm_runtime_disable(dev);
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
	iounmap(cdd->qmgr_mem);
	return ret;
}

static int cppi41_dma_remove(struct platform_device *pdev)
{
	struct cppi41_dd *cdd = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&cdd->ddev);

	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	devm_free_irq(&pdev->dev, cdd->irq, cdd);
	cleanup_chans(cdd);
	deinit_cppi41(&pdev->dev, cdd);
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
	iounmap(cdd->qmgr_mem);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int cppi41_suspend(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);

	cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	disable_sched(cdd);

	return 0;
}

static int cppi41_resume(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);
	struct cppi41_channel *c;
	int i;

	for (i = 0; i < DESCS_AREAS; i++)
		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));

	list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
		if (!c->is_tx)
			cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	init_sched(cdd);

	cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);

static struct platform_driver cpp41_dma_driver = {
	.probe = cppi41_dma_probe,
	.remove = cppi41_dma_remove,
	.driver = {
		.name = "cppi41-dma-engine",
		.pm = &cppi41_pm_ops,
		.of_match_table = of_match_ptr(cppi41_dma_ids),
	},
};

module_platform_driver(cpp41_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");