/*
 * MOXA ART SoCs DMA Engine support.
 *
 * Copyright (C) 2013 Jonas Jensen
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/bitops.h>

#include <asm/cacheflush.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define APB_DMA_MAX_CHANNEL		4

#define REG_OFF_ADDRESS_SOURCE		0
#define REG_OFF_ADDRESS_DEST		4
#define REG_OFF_CYCLES			8
#define REG_OFF_CTRL			12
#define REG_OFF_CHAN_SIZE		16

#define APB_DMA_ENABLE			BIT(0)
#define APB_DMA_FIN_INT_STS		BIT(1)
#define APB_DMA_FIN_INT_EN		BIT(2)
#define APB_DMA_BURST_MODE		BIT(3)
#define APB_DMA_ERR_INT_STS		BIT(4)
#define APB_DMA_ERR_INT_EN		BIT(5)

/*
 * Unset: APB
 * Set: AHB
 */
#define APB_DMA_SOURCE_SELECT		0x40
#define APB_DMA_DEST_SELECT		0x80

#define APB_DMA_SOURCE			0x100
#define APB_DMA_DEST			0x1000

#define APB_DMA_SOURCE_MASK		0x700
#define APB_DMA_DEST_MASK		0x7000

/*
 * 000: No increment
 * 001: +1 (Burst=0), +4 (Burst=1)
 * 010: +2 (Burst=0), +8 (Burst=1)
 * 011: +4 (Burst=0), +16 (Burst=1)
 * 101: -1 (Burst=0), -4 (Burst=1)
 * 110: -2 (Burst=0), -8 (Burst=1)
 * 111: -4 (Burst=0), -16 (Burst=1)
 */
#define APB_DMA_SOURCE_INC_0		0
#define APB_DMA_SOURCE_INC_1_4		0x100
#define APB_DMA_SOURCE_INC_2_8		0x200
#define APB_DMA_SOURCE_INC_4_16		0x300
#define APB_DMA_SOURCE_DEC_1_4		0x500
#define APB_DMA_SOURCE_DEC_2_8		0x600
#define APB_DMA_SOURCE_DEC_4_16		0x700
#define APB_DMA_DEST_INC_0		0
#define APB_DMA_DEST_INC_1_4		0x1000
#define APB_DMA_DEST_INC_2_8		0x2000
#define APB_DMA_DEST_INC_4_16		0x3000
#define APB_DMA_DEST_DEC_1_4		0x5000
#define APB_DMA_DEST_DEC_2_8		0x6000
#define APB_DMA_DEST_DEC_4_16		0x7000

/*
 * Request signal select source/destination address for DMA hardware handshake.
 *
 * The request line number is a property of the DMA controller itself,
 * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
 *
 * 0: No request / Grant signal
 * 1-15: Request / Grant signal
 */
#define APB_DMA_SOURCE_REQ_NO		0x1000000
#define APB_DMA_SOURCE_REQ_NO_MASK	0xf000000
#define APB_DMA_DEST_REQ_NO		0x10000
#define APB_DMA_DEST_REQ_NO_MASK	0xf0000

#define APB_DMA_DATA_WIDTH		0x100000
#define APB_DMA_DATA_WIDTH_MASK		0x300000
/*
 * Data width of transfer:
 *
 * 00: Word
 * 01: Half
 * 10: Byte
 */
#define APB_DMA_DATA_WIDTH_4		0
#define APB_DMA_DATA_WIDTH_2		0x100000
#define APB_DMA_DATA_WIDTH_1		0x200000

#define APB_DMA_CYCLES_MASK		0x00ffffff

#define MOXART_DMA_DATA_TYPE_S8		0x00
#define MOXART_DMA_DATA_TYPE_S16	0x01
#define MOXART_DMA_DATA_TYPE_S32	0x02

struct moxart_sg {
	dma_addr_t addr;
	uint32_t len;
};

struct moxart_desc {
	enum dma_transfer_direction dma_dir;
	dma_addr_t dev_addr;
	unsigned int sglen;
	unsigned int dma_cycles;
	struct virt_dma_desc vd;
	uint8_t es;
	struct moxart_sg sg[0];
};

struct moxart_chan {
	struct virt_dma_chan vc;

	void __iomem *base;
	struct moxart_desc *desc;

	struct dma_slave_config cfg;

	bool allocated;
	bool error;
	int ch_num;
	unsigned int line_reqno;
	unsigned int sgidx;
};

struct moxart_dmadev {
	struct dma_device dma_slave;
	struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL];
};

struct moxart_filter_data {
	struct moxart_dmadev *mdc;
	struct of_phandle_args *dma_spec;
};

static const unsigned int es_bytes[] = {
	[MOXART_DMA_DATA_TYPE_S8] = 1,
	[MOXART_DMA_DATA_TYPE_S16] = 2,
	[MOXART_DMA_DATA_TYPE_S32] = 4,
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct moxart_chan, vc.chan);
}

static inline struct moxart_desc *to_moxart_dma_desc(
	struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct moxart_desc, vd.tx);
}

static void moxart_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct moxart_desc, vd));
}

static int moxart_terminate_all(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);
	u32 ctrl;

	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);

	spin_lock_irqsave(&ch->vc.lock, flags);

	if (ch->desc)
		ch->desc = NULL;

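	/* Disable the channel and mask its completion and error interrupts. */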
	ctrl = readl(ch->base + REG_OFF_CTRL);
	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
	writel(ctrl, ch->base + REG_OFF_CTRL);

	vchan_get_all_descriptors(&ch->vc, &head);
	spin_unlock_irqrestore(&ch->vc.lock, flags);
	vchan_dma_desc_free_list(&ch->vc, &head);

	return 0;
}

static int moxart_slave_config(struct dma_chan *chan,
			       struct dma_slave_config *cfg)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	u32 ctrl;

	ch->cfg = *cfg;

	ctrl = readl(ch->base + REG_OFF_CTRL);
	ctrl |= APB_DMA_BURST_MODE;
	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);

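	/*
	 * Program the element width and step the memory-side address by one
	 * element per transfer; the peripheral side keeps a fixed address.
	 * Note that only src_addr_width is consulted for both directions.
	 */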
	switch (ch->cfg.src_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl |= APB_DMA_DATA_WIDTH_1;
		if (ch->cfg.direction != DMA_MEM_TO_DEV)
			ctrl |= APB_DMA_DEST_INC_1_4;
		else
			ctrl |= APB_DMA_SOURCE_INC_1_4;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl |= APB_DMA_DATA_WIDTH_2;
		if (ch->cfg.direction != DMA_MEM_TO_DEV)
			ctrl |= APB_DMA_DEST_INC_2_8;
		else
			ctrl |= APB_DMA_SOURCE_INC_2_8;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl &= ~APB_DMA_DATA_WIDTH;
		if (ch->cfg.direction != DMA_MEM_TO_DEV)
			ctrl |= APB_DMA_DEST_INC_4_16;
		else
			ctrl |= APB_DMA_SOURCE_INC_4_16;
		break;
	default:
		return -EINVAL;
	}

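	/*
	 * Route the hardware handshake request line to the peripheral side:
	 * for MEM_TO_DEV the destination is the APB device, otherwise the
	 * source is.
	 */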
	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
		ctrl &= ~APB_DMA_DEST_SELECT;
		ctrl |= APB_DMA_SOURCE_SELECT;
		ctrl |= (ch->line_reqno << 16 &
			 APB_DMA_DEST_REQ_NO_MASK);
	} else {
		ctrl |= APB_DMA_DEST_SELECT;
		ctrl &= ~APB_DMA_SOURCE_SELECT;
		ctrl |= (ch->line_reqno << 24 &
			 APB_DMA_SOURCE_REQ_NO_MASK);
	}

	writel(ctrl, ch->base + REG_OFF_CTRL);

	return 0;
}

static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long tx_flags, void *context)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	struct moxart_desc *d;
	enum dma_slave_buswidth dev_width;
	dma_addr_t dev_addr;
	struct scatterlist *sgent;
	unsigned int es;
	unsigned int i;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
			__func__);
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = ch->cfg.src_addr;
		dev_width = ch->cfg.src_addr_width;
	} else {
		dev_addr = ch->cfg.dst_addr;
		dev_width = ch->cfg.dst_addr_width;
	}

	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = MOXART_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = MOXART_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = MOXART_DMA_DATA_TYPE_S32;
		break;
	default:
		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
			__func__, dev_width);
		return NULL;
	}

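	/*
	 * Allocate the descriptor plus one moxart_sg per scatterlist entry;
	 * GFP_ATOMIC because prep callbacks may be called from atomic context.
	 */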
	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dma_dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	for_each_sg(sgl, sgent, sg_len, i) {
		d->sg[i].addr = sg_dma_address(sgent);
		d->sg[i].len = sg_dma_len(sgent);
	}

	d->sglen = sg_len;

	ch->error = 0;

	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
}

static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct moxart_dmadev *mdc = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct moxart_chan *ch;

	chan = dma_get_any_slave_channel(&mdc->dma_slave);
	if (!chan)
		return NULL;

	ch = to_moxart_dma_chan(chan);
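	/* The single cell in the devicetree DMA specifier is the request line. */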
	ch->line_reqno = dma_spec->args[0];

	return chan;
}

static int moxart_alloc_chan_resources(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);

	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
		__func__, ch->ch_num);
	ch->allocated = 1;

	return 0;
}

static void moxart_free_chan_resources(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);

	vchan_free_chan_resources(&ch->vc);

	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
		__func__, ch->ch_num);
	ch->allocated = 0;
}

static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
				  dma_addr_t dst_addr)
{
	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
}

static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
{
	struct moxart_desc *d = ch->desc;
	unsigned int sglen_div = es_bytes[d->es];

	d->dma_cycles = len >> sglen_div;

	/*
	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
	 * bytes (when width is APB_DMA_DATA_WIDTH_4).
	 */
	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);

	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
		__func__, d->dma_cycles, len);
}

static void moxart_start_dma(struct moxart_chan *ch)
{
	u32 ctrl;

	ctrl = readl(ch->base + REG_OFF_CTRL);
	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
	writel(ctrl, ch->base + REG_OFF_CTRL);
}

static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
{
	struct moxart_desc *d = ch->desc;
	struct moxart_sg *sg = ch->desc->sg + idx;

	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
		moxart_dma_set_params(ch, d->dev_addr, sg->addr);

	moxart_set_transfer_params(ch, sg->len);

	moxart_start_dma(ch);
}

static void moxart_dma_start_desc(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&ch->vc);

	if (!vd) {
		ch->desc = NULL;
		return;
	}

	list_del(&vd->node);

	ch->desc = to_moxart_dma_desc(&vd->tx);
	ch->sgidx = 0;

	moxart_dma_start_sg(ch, 0);
}

static void moxart_issue_pending(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&ch->vc.lock, flags);
	if (vchan_issue_pending(&ch->vc) && !ch->desc)
		moxart_dma_start_desc(chan);
	spin_unlock_irqrestore(&ch->vc.lock, flags);
}

static size_t moxart_dma_desc_size(struct moxart_desc *d,
				   unsigned int completed_sgs)
{
	unsigned int i;
	size_t size;

	for (size = i = completed_sgs; i < d->sglen; i++)
		size += d->sg[i].len;

	return size;
}

static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
{
	size_t size;
	unsigned int completed_cycles, cycles;

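	/*
	 * Residue is the bytes described by the remaining sg entries minus
	 * what the hardware has already moved in the current one, computed
	 * from the remaining count in the CYCLES register.
	 */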
	size = moxart_dma_desc_size(ch->desc, ch->sgidx);
	cycles = readl(ch->base + REG_OFF_CYCLES);
	completed_cycles = (ch->desc->dma_cycles - cycles);
	size -= completed_cycles << es_bytes[ch->desc->es];

	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);

	return size;
}

static enum dma_status moxart_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	struct virt_dma_desc *vd;
	struct moxart_desc *d;
	enum dma_status ret;
	unsigned long flags;

	/*
	 * dma_cookie_status() assigns initial residue value.
	 */
	ret = dma_cookie_status(chan, cookie, txstate);

	spin_lock_irqsave(&ch->vc.lock, flags);
	vd = vchan_find_desc(&ch->vc, cookie);
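	/*
	 * A cookie still on the pending list reports its full size; the
	 * descriptor currently on the hardware reports the in-flight residue.
	 */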
	if (vd) {
		d = to_moxart_dma_desc(&vd->tx);
		txstate->residue = moxart_dma_desc_size(d, 0);
	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
		txstate->residue = moxart_dma_desc_size_in_flight(ch);
	}
	spin_unlock_irqrestore(&ch->vc.lock, flags);

	if (ch->error)
		return DMA_ERROR;

	return ret;
}

static void moxart_dma_init(struct dma_device *dma, struct device *dev)
{
	dma->device_prep_slave_sg = moxart_prep_slave_sg;
	dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
	dma->device_free_chan_resources = moxart_free_chan_resources;
	dma->device_issue_pending = moxart_issue_pending;
	dma->device_tx_status = moxart_tx_status;
	dma->device_config = moxart_slave_config;
	dma->device_terminate_all = moxart_terminate_all;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}

static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
{
	struct moxart_dmadev *mc = devid;
	struct moxart_chan *ch = &mc->slave_chans[0];
	unsigned int i;
	unsigned long flags;
	u32 ctrl;

	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);

	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
		if (!ch->allocated)
			continue;

		ctrl = readl(ch->base + REG_OFF_CTRL);

		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
			__func__, ch, ch->base, ctrl);

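		/*
		 * On completion, either start the next scatterlist segment of
		 * this descriptor or complete it and start the next queued
		 * descriptor.
		 */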
		if (ctrl & APB_DMA_FIN_INT_STS) {
			ctrl &= ~APB_DMA_FIN_INT_STS;
			if (ch->desc) {
				spin_lock_irqsave(&ch->vc.lock, flags);
				if (++ch->sgidx < ch->desc->sglen) {
					moxart_dma_start_sg(ch, ch->sgidx);
				} else {
					vchan_cookie_complete(&ch->desc->vd);
					moxart_dma_start_desc(&ch->vc.chan);
				}
				spin_unlock_irqrestore(&ch->vc.lock, flags);
			}
		}

		if (ctrl & APB_DMA_ERR_INT_STS) {
			ctrl &= ~APB_DMA_ERR_INT_STS;
			ch->error = 1;
		}

		writel(ctrl, ch->base + REG_OFF_CTRL);
	}

	return IRQ_HANDLED;
}

static int moxart_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource *res;
	static void __iomem *dma_base_addr;
	int ret, i;
	unsigned int irq;
	struct moxart_chan *ch;
	struct moxart_dmadev *mdc;

	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
	if (!mdc) {
		dev_err(dev, "can't allocate DMA container\n");
		return -ENOMEM;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq == NO_IRQ) {
		dev_err(dev, "no IRQ resource\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dma_base_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(dma_base_addr))
		return PTR_ERR(dma_base_addr);

	dma_cap_zero(mdc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);

	moxart_dma_init(&mdc->dma_slave, dev);

	ch = &mdc->slave_chans[0];
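	/* Each channel owns a REG_OFF_CHAN_SIZE-byte register window. */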
	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
		ch->ch_num = i;
		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
		ch->allocated = 0;

		ch->vc.desc_free = moxart_dma_desc_free;
		vchan_init(&ch->vc, &mdc->dma_slave);

		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
			__func__, i, ch->ch_num, ch->base);
	}

	platform_set_drvdata(pdev, mdc);

	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
			       "moxart-dma-engine", mdc);
	if (ret) {
		dev_err(dev, "devm_request_irq failed\n");
		return ret;
	}

	ret = dma_async_device_register(&mdc->dma_slave);
	if (ret) {
		dev_err(dev, "dma_async_device_register failed\n");
		return ret;
	}

	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
	if (ret) {
		dev_err(dev, "of_dma_controller_register failed\n");
		dma_async_device_unregister(&mdc->dma_slave);
		return ret;
	}

	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);

	return 0;
}

static int moxart_remove(struct platform_device *pdev)
{
	struct moxart_dmadev *m = platform_get_drvdata(pdev);

	dma_async_device_unregister(&m->dma_slave);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static const struct of_device_id moxart_dma_match[] = {
	{ .compatible = "moxa,moxart-dma" },
	{ }
};

static struct platform_driver moxart_driver = {
	.probe = moxart_probe,
	.remove = moxart_remove,
	.driver = {
		.name = "moxart-dma-engine",
		.of_match_table = moxart_dma_match,
	},
};

static int moxart_init(void)
{
	return platform_driver_register(&moxart_driver);
}
subsys_initcall(moxart_init);

static void __exit moxart_exit(void)
{
	platform_driver_unregister(&moxart_driver);
}
module_exit(moxart_exit);

MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
MODULE_DESCRIPTION("MOXART DMA engine driver");
MODULE_LICENSE("GPL v2");