/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

#include <plat/cpu.h>
#include <plat/dma.h>

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

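/*
 * Program one scatterlist entry: the memory side of the transfer gets
 * post-incrementing addressing (destination for DMA_DEV_TO_MEM, source
 * otherwise), then the element type, element/frame counts and sync
 * parameters are set and the channel is started.
 */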
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
	else
		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
		d->sync_mode, c->dma_sig, d->sync_type);

	omap_start_dma(c->dma_ch);
}

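/*
 * Take the next issued descriptor (if any), program the device side of
 * the transfer with constant addressing and the frame index, and kick
 * off its first scatterlist entry.
 */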
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_src_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
	else
		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

	omap_dma_start_sg(c, d, 0);
}

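/*
 * Interrupt callback registered with omap_request_dma().  For a normal
 * slave transfer this advances to the next scatterlist entry, or, once
 * the descriptor is finished, starts the next descriptor and completes
 * the finished one.  For a cyclic transfer it signals one period.
 */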
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels. We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

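/*
 * Transfer sizes are accounted in elements: an sg entry covers en * fn
 * elements, and a descriptor's byte size is the sum over its entries
 * multiplied by the element size.  omap_dma_desc_size_pos() returns the
 * number of bytes left from a given hardware address to the end of the
 * descriptor, which is what residue reporting needs.
 */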
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

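/*
 * Report cookie status.  If the descriptor is still queued the residue
 * is its full size; if it is the one currently in flight the residue is
 * computed from the hardware source/destination position.
 */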
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

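/*
 * Issuing pending descriptors does not touch the hardware directly: the
 * channel is put on the device's pending list and the tasklet is
 * scheduled; omap_dma_sched() then starts the transfer.
 */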
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		struct omap_dmadev *d = to_omap_dma_dev(chan->device);
		spin_lock(&d->lock);
		if (list_empty(&c->node))
			list_add_tail(&c->node, &d->pending);
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

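/*
 * Prepare a slave scatter-gather transfer: the device address, element
 * size and burst come from the channel's dma_slave_config, and each
 * scatterlist entry becomes one frame-synchronised OMAP sg entry with
 * 'burst' elements per frame.
 */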
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

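/*
 * Prepare a cyclic (e.g. audio) transfer as a single sg entry with
 * EN = elements per period and FN = periods per buffer, using packet
 * sync so the hardware raises a frame interrupt once per period.  The
 * channel is linked to itself so the transfer repeats until stopped.
 */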
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_PACKET;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	if (!c->cyclic) {
		c->cyclic = true;
		omap_dma_link_lch(c->dma_ch, c->dma_ch);
		omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
	}

	if (!cpu_class_is_omap1()) {
		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
	}

	return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

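/*
 * Terminate everything on a channel: take it off the pending list, stop
 * the hardware if a transfer is in flight, unlink a cyclic channel from
 * itself, and free all outstanding descriptors.
 */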
static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_stop_dma() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		omap_stop_dma(c->dma_ch);
	}

	if (c->cyclic) {
		c->cyclic = false;
		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* FIXME: not supported by platform private API */
	return -EINVAL;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* FIXME: not supported by platform private API */
	return -EINVAL;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
	kfree(od);
}

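/*
 * Probe registers a slave/cyclic capable dmaengine device and creates
 * 127 virtual channels keyed by DMA request signal; the underlying
 * hardware channel is only requested when a client allocates a
 * channel.
 */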
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = kzalloc(sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
	} else {
		platform_set_drvdata(pdev, od);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name	= "omap-dma-engine",
		.owner	= THIS_MODULE,
	},
};

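/*
 * Filter function for dma_request_channel(): matches a channel against
 * the OMAP DMA request signal number passed by the client.  A minimal
 * sketch of client usage, assuming 'sig' holds the peripheral's DMA
 * request line (the value here is illustrative):
 *
 *	dma_cap_mask_t mask;
 *	unsigned sig = 12;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */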
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static struct platform_device *pdev;

static const struct platform_device_info omap_dma_dev_info = {
	.name = "omap-dma-engine",
	.id = -1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int omap_dma_init(void)
{
	int rc = platform_driver_register(&omap_dma_driver);

	if (rc == 0) {
		pdev = platform_device_register_full(&omap_dma_dev_info);
		if (IS_ERR(pdev)) {
			platform_driver_unregister(&omap_dma_driver);
			rc = PTR_ERR(pdev);
		}
	}
	return rc;
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");