/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

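/*
 * Client usage sketch (not part of this driver; the helpers named here
 * are the generic dmaengine API and are assumed available): a slave
 * driver requests a channel, configures it, then prepares, submits and
 * kicks a transfer.
 *
 *	chan = dma_request_channel(mask, filter_fn, filter_param);
 *	dmaengine_slave_config(chan, &slave_cfg);
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						DMA_TO_DEVICE, flags);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
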
#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. No more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by the PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of descriptors waiting to be transferred */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC,
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to the same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

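/*
 * Feed the PL330 core from the channel's work_list: submit every PREP
 * descriptor until the core's per-thread request queue fills up. The
 * core accepts no more than two outstanding requests per channel
 * thread, so the walk stops at the first BUSY descriptor or -EAGAIN.
 */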
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid, &desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

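/*
 * Channel tasklet: move DONE descriptors off the work_list, top the
 * core's queue back up via fill_queue(), make sure the channel thread
 * is running, and only then run the clients' callbacks (from
 * free_desc_list) with the channel lock dropped.
 */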
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct dma_slave_config *slave_config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry(desc, &pch->work_list, node)
			desc->status = DONE;

		spin_unlock_irqrestore(&pch->lock, flags);

		pl330_tasklet((unsigned long) pch);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_TO_DEVICE) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				/* width is in bytes; brst_size is its log2 */
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_FROM_DEVICE) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Unsupported command\n");
		return -ENXIO;
	}

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last descriptor of the circular list from prep_xxx,
 * so the argument to submit corresponds to the last descriptor of the
 * list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		/* Cookies are positive; wrap back to 1 on overflow */
		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

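/*
 * Take a descriptor from the DMAC's pool and bind it to @pch. If the
 * pool has run dry, grow it by one with GFP_ATOMIC, since the prep_*
 * callbacks may be invoked from atomic context.
 */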
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	if (peri) {
		desc->req.rqtype = peri->rqtype;
		desc->req.peri = pch->chan.chan_id;
	} else {
		desc->req.rqtype = MEMTOMEM;
		desc->req.peri = 0;
	}

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than those that
	 * can be programmed with 256 bytes of MC buffer, but since a
	 * req is seldom going to be word-unaligned and bigger than
	 * 200MB, we take it easy.
	 * Also, should the limit be reached we'd rather have the
	 * platform increase the MC buffer size than complicate this
	 * API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

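/*
 * Worked example for get_burst_len() below (numbers are illustrative,
 * not from any particular SoC): with a 64-bit data bus (8 bytes/beat)
 * and a 16-deep data buffer, the buffer holds 8 * 16 = 128 bytes. At
 * brst_size = 2 (4-byte beats) that allows 128 >> 2 = 32 beats, which
 * is clamped to the PL330 maximum of 16 and then reduced until it
 * evenly divides the transfer length.
 */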
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

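/*
 * For memcpy, both source and destination addresses increment. The
 * burst size is chosen as the largest power of two, up to the bus
 * width, that divides the length; the burst count then follows from
 * get_burst_len().
 */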
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	if (peri && peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	/* brst_size is the log2 of the burst size in bytes */
	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_TO_DEVICE && peri->rqtype != MEMTODEV) ||
	    (direction == DMA_FROM_DEVICE && peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
			__func__, __LINE__);
		return NULL;
	}

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			/* Return the descs gathered so far to the pool */
			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

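/*
 * Probe order: map the AMBA resource, grab the "dma" clock, power the
 * block up (runtime PM if configured, bare clk_enable otherwise), hook
 * the IRQ, hand the region to the PL330 core via pl330_add(), then
 * expose one dma_chan per channel thread (or per platform-described
 * peripheral) through the dmaengine core.
 */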
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err1;
	}

	amba_set_drvdata(adev, pdmac);

#ifdef CONFIG_PM_RUNTIME
	/* to use the runtime PM helper functions */
	pm_runtime_enable(&adev->dev);

	/* enable the power domain */
	if (pm_runtime_get_sync(&adev->dev)) {
		dev_err(&adev->dev, "failed to get runtime pm\n");
		ret = -ENODEV;
		goto probe_err1;
	}
#else
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (pdat) {
			struct dma_pl330_peri *peri = &pdat->peri[i];

			switch (peri->rqtype) {
			case MEMTOMEM:
				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
				break;
			case MEMTODEV:
			case DEVTOMEM:
				dma_cap_set(DMA_SLAVE, pd->cap_mask);
				break;
			default:
				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
				continue;
			}
			pch->chan.private = peri;
		} else {
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			pch->chan.private = NULL;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->chan.chan_id = i;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		pd->chancnt++;
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put(&adev->dev);
	pm_runtime_disable(&adev->dev);
#else
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");