/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx.
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but the transfer is
	 * already done by the PL330 core.
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of descriptors yet to be transferred */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC,
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

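/*
 * Run the client callbacks of all descriptors on @list and then
 * return the descriptors to the parent DMAC's pool. All descriptors
 * on the list must belong to the same channel.
 */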
static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

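/*
 * Push the next prepared descriptor from the channel's work_list
 * to the PL330 core. At most one new request is submitted per call;
 * unacceptable descriptors are marked DONE and left to the tasklet.
 */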
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
					&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

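/*
 * Per-channel bottom half: reap DONE descriptors off the work_list,
 * queue more work to the PL330 core, make sure the channel thread is
 * running, then run the client callbacks with the lock dropped.
 */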
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	free_desc_list(&list);
}

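/*
 * Callback invoked by the PL330 core when a request finishes (or is
 * aborted). Runs in IRQ context, so only mark the descriptor DONE
 * and defer the rest of the work to the tasklet.
 */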
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

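/*
 * DMA Engine hook: acquire a hardware channel thread from the PL330
 * core for this channel. Returns 1 on success, 0 if no thread is
 * available.
 */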
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

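/*
 * DMA Engine control hook: DMA_TERMINATE_ALL flushes the channel
 * thread and retires every queued descriptor; DMA_SLAVE_CONFIG
 * records the peripheral FIFO address, burst size and burst length
 * for the given direction.
 */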
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_TO_DEVICE) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_FROM_DEVICE) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Unsupported command\n");
		return -ENXIO;
	}

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last descriptor of the circular list from prep_xxx,
 * so the argument to submit corresponds to the last descriptor of
 * the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

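/*
 * Initialize the fields of a descriptor that don't depend on any
 * particular transfer.
 */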
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

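/* Take one descriptor off the DMAC's free pool, or return NULL if empty */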
static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

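/*
 * Get a descriptor for @pch: pluck one from the DMAC's pool, growing
 * the pool by one (GFP_ATOMIC) if it is empty, then bind it to the
 * channel and its peripheral (MEMTOMEM if there is none).
 */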
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	if (peri) {
		desc->req.rqtype = peri->rqtype;
		desc->req.peri = pch->chan.chan_id;
	} else {
		desc->req.rqtype = MEMTOMEM;
		desc->req.peri = 0;
	}

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

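/* Describe a single contiguous transfer for the PL330 core */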
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase the MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

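/*
 * Pick the largest burst length, capped at 16 and at what fits in the
 * DMAC's data buffer, for which len is a whole number of bursts.
 */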
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

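/* DMA Engine hook: prepare a descriptor for a mem-to-mem copy */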
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	if (peri && peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

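/*
 * DMA Engine hook: build a chain of descriptors, one per sg entry,
 * each moving data between memory and the peripheral FIFO. On any
 * allocation failure the partial chain is returned to the pool.
 * The last descriptor of the chain is handed back to the caller.
 */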
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_TO_DEVICE &&
				peri->rqtype != MEMTODEV) ||
			(direction == DMA_FROM_DEVICE &&
				peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

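/* pl330_update() reports whether this DMAC raised the interrupt */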
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err1;
	}

	amba_set_drvdata(adev, pdmac);

#ifdef CONFIG_PM_RUNTIME
	/* to use the runtime PM helper functions */
	pm_runtime_enable(&adev->dev);

	/* enable the power domain */
	if (pm_runtime_get_sync(&adev->dev)) {
		dev_err(&adev->dev, "failed to get runtime pm\n");
		ret = -ENODEV;
		goto probe_err1;
	}
#else
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		ret = -ENOMEM;
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (pdat) {
			struct dma_pl330_peri *peri = &pdat->peri[i];

			switch (peri->rqtype) {
			case MEMTOMEM:
				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
				break;
			case MEMTODEV:
			case DEVTOMEM:
				dma_cap_set(DMA_SLAVE, pd->cap_mask);
				break;
			default:
				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
				continue;
			}
			pch->chan.private = peri;
		} else {
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			pch->chan.private = NULL;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->chan.chan_id = i;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		pd->chancnt++;
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put(&adev->dev);
	pm_runtime_disable(&adev->dev);
#else
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");