/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
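
/*
 * Summary of the life cycle described by the states above: a desc
 * starts FREE in pdmac->desc_pool, prep_xxx makes it PREP and queues
 * it on the channel's work_list, fill_queue() hands it to the PL330
 * core and marks it BUSY, the xfer-done callback marks it DONE, and
 * pl330_tasklet() finally returns it to the pool (or back to PREP,
 * for cyclic transfers).
 */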

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req immediately next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_TO_DEVICE) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_FROM_DEVICE) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Unsupported command\n");
		return -ENXIO;
	}

	return 0;
}
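
/*
 * A minimal sketch of the client-side counterpart of DMA_SLAVE_CONFIG
 * above (hypothetical peripheral values, not part of this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_TO_DEVICE,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 4,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *					(unsigned long)&cfg);
 *
 * With a 4-byte width, __ffs(4) == 2, so pch->burst_sz becomes 2.
 */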

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * prep_xxx returns the last descriptor of the circular list, so the
 * argument to submit corresponds to the last descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
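
/*
 * Note on the cookie arithmetic above: dma_cookie_t is a signed int,
 * so "if (++cookie < 0) cookie = 1" restarts the sequence at 1 on
 * wrap-around past INT_MAX; 0 means "not yet submitted" and negative
 * values are reserved for errors.
 */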

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	if (peri) {
		desc->req.rqtype = peri->rqtype;
		desc->req.peri = pch->chan.chan_id;
	} else {
		desc->req.rqtype = MEMTOMEM;
		desc->req.peri = 0;
	}

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
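
/*
 * A worked example of the computation above, with assumed hardware
 * parameters: a 64-bit data bus (8 bytes) and a 16-deep data buffer
 * give burst_len = 8 * 16 = 128; for brst_size = 2 (4-byte beats)
 * that shifts down to 32, which the cap then limits to 16. The loop
 * finally shrinks burst_len until len is a whole multiple of one
 * burst, i.e. of (burst_len << brst_size) bytes.
 */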

static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_data_direction direction)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_TO_DEVICE:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_FROM_DEVICE:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}
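
/*
 * Sketch of how a client might drive the cyclic API above, e.g. for
 * an audio ring buffer (hypothetical names; buffer already mapped):
 *
 *	tx = chan->device->device_prep_dma_cyclic(chan, buf_dma,
 *			buf_len, period_len, DMA_TO_DEVICE);
 *	if (tx) {
 *		tx->callback = period_elapsed;
 *		tx->callback_param = ctx;
 *		tx->tx_submit(tx);
 *		chan->device->device_issue_pending(chan);
 *	}
 *
 * Each completed period re-enters the work_list via
 * handle_cyclic_desc_list() until the channel is terminated.
 */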

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	if (peri && peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
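
/*
 * For example (illustrative numbers): on a 32-bit bus, burst starts
 * at 4; a 4096-byte copy keeps burst = 4 and ends with brst_size = 2
 * (1 << 2 == 4), while a 6-byte copy drops to burst = 2 and
 * brst_size = 1, trading burst width for alignment.
 */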

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_TO_DEVICE &&
				peri->rqtype != MEMTODEV) ||
			(direction == DMA_FROM_DEVICE &&
				peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
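
/*
 * Typical client-side sequence for the slave path above, sketched
 * with hypothetical names (scatterlist already DMA-mapped):
 *
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *			DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = xfer_done;
 *		tx->callback_param = ctx;
 *		cookie = tx->tx_submit(tx);
 *		chan->device->device_issue_pending(chan);
 *	}
 */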

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err1;
	}

	amba_set_drvdata(adev, pdmac);

#ifdef CONFIG_PM_RUNTIME
	/* to use the runtime PM helper functions */
	pm_runtime_enable(&adev->dev);

	/* enable the power domain */
	if (pm_runtime_get_sync(&adev->dev)) {
		dev_err(&adev->dev, "failed to get runtime pm\n");
		ret = -ENODEV;
		goto probe_err1;
	}
#else
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (pdat) {
			struct dma_pl330_peri *peri = &pdat->peri[i];

			switch (peri->rqtype) {
			case MEMTOMEM:
				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
				break;
			case MEMTODEV:
			case DEVTOMEM:
				dma_cap_set(DMA_SLAVE, pd->cap_mask);
				dma_cap_set(DMA_CYCLIC, pd->cap_mask);
				break;
			default:
				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
				continue;
			}
			pch->chan.private = peri;
		} else {
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			pch->chan.private = NULL;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put(&adev->dev);
	pm_runtime_disable(&adev->dev);
#else
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");