/* linux/arch/arm/plat-samsung/s3c-pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <asm/hardware/pl330.h>

#include <plat/s3c-pl330-pdata.h>

/**
 * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
 * @busy_chan: Number of channels currently busy.
 * @peri: List of IDs of peripherals this DMAC can work with.
 * @node: To attach to the global list of DMACs.
 * @pi: PL330 configuration info for the DMAC.
 * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
 */
struct s3c_pl330_dmac {
	unsigned busy_chan;
	enum dma_ch *peri;
	struct list_head node;
	struct pl330_info *pi;
	struct kmem_cache *kmcache;
};

/**
 * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
 * @token: Xfer ID provided by the client.
 * @node: To attach to the list of xfers on a channel.
 * @px: Xfer for PL330 core.
 * @chan: Owner channel of this xfer.
 */
struct s3c_pl330_xfer {
	void *token;
	struct list_head node;
	struct pl330_xfer px;
	struct s3c_pl330_chan *chan;
};

/**
 * struct s3c_pl330_chan - Logical channel to communicate with
 *	a physical peripheral.
 * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
 *	NULL if the channel is available to be acquired.
 * @id: ID of the peripheral that this channel can communicate with.
 * @options: Options specified by the client.
 * @sdaddr: Address provided via s3c2410_dma_devconfig.
 * @node: To attach to the global list of channels.
 * @lrq: Pointer to the last submitted pl330_req to PL330 core.
 * @xfer_list: To manage list of xfers enqueued.
 * @req: Two requests to communicate with the PL330 engine.
 * @callback_fn: Callback function to the client.
 * @rqcfg: Channel configuration for the xfers.
 * @xfer_head: Pointer to the xfer to be executed next.
 * @dmac: Pointer to the DMAC that manages this channel, NULL if the
 *	channel is available to be acquired.
 * @client: Client of this channel. NULL if the
 *	channel is available to be acquired.
 */
struct s3c_pl330_chan {
	void *pl330_chan_id;
	enum dma_ch id;
	unsigned int options;
	unsigned long sdaddr;
	struct list_head node;
	struct pl330_req *lrq;
	struct list_head xfer_list;
	struct pl330_req req[2];
	s3c2410_dma_cbfn_t callback_fn;
	struct pl330_reqcfg rqcfg;
	struct s3c_pl330_xfer *xfer_head;
	struct s3c_pl330_dmac *dmac;
	struct s3c2410_dma_client *client;
};

/* All DMACs in the platform */
static LIST_HEAD(dmac_list);

/* All channels to peripherals in the platform */
static LIST_HEAD(chan_list);

/*
 * Since we add resources (DMACs and channels) to the global pool,
 * we need to guard access to the resources using a global lock.
 */
static DEFINE_SPINLOCK(res_lock);

/* Returns the channel with ID 'id' in the chan_list */
static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
{
	struct s3c_pl330_chan *ch;

	list_for_each_entry(ch, &chan_list, node)
		if (ch->id == id)
			return ch;

	return NULL;
}

/* Allocate a new channel with ID 'id' and add to chan_list */
static void chan_add(const enum dma_ch id)
{
	struct s3c_pl330_chan *ch = id_to_chan(id);

	/* Return if the channel already exists */
	if (ch)
		return;

	ch = kmalloc(sizeof(*ch), GFP_KERNEL);
	/* Return silently to work with other channels */
	if (!ch)
		return;

	ch->id = id;
	ch->dmac = NULL;

	list_add_tail(&ch->node, &chan_list);
}

/* If the channel is not yet acquired by any client */
static bool chan_free(struct s3c_pl330_chan *ch)
{
	if (!ch)
		return false;

	/* Channel points to some DMAC only when it's acquired */
	return ch->dmac ? false : true;
}

/*
 * Returns 0 if the peripheral i/f is invalid or not present on the dmac.
 * Index + 1, otherwise.
 */
static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
{
	enum dma_ch *id = dmac->peri;
	int i;

	/* Discount invalid markers */
	if (ch_id == DMACH_MAX)
		return 0;

	for (i = 0; i < PL330_MAX_PERI; i++)
		if (id[i] == ch_id)
			return i + 1;

	return 0;
}
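
/*
 * For example (hypothetical wiring, for illustration only): if
 * dmac->peri[5] == DMACH_UART1_RX, then
 * iface_of_dmac(dmac, DMACH_UART1_RX) returns 6, and the original
 * interface index is recovered with iface_of_dmac() - 1, as done
 * in s3c2410_dma_request().
 */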

/* If all channel threads of the DMAC are busy */
static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
{
	struct pl330_info *pi = dmac->pi;

	return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
}

/*
 * Returns the number of free channels that
 * can be handled by this dmac only.
 */
static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
{
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	struct s3c_pl330_chan *ch;
	unsigned found, count = 0;
	enum dma_ch p;
	int i;

	for (i = 0; i < PL330_MAX_PERI; i++) {
		p = id[i];
		ch = id_to_chan(p);

		if (p == DMACH_MAX || !chan_free(ch))
			continue;

		found = 0;
		list_for_each_entry(d, &dmac_list, node) {
			if (d != dmac && iface_of_dmac(d, ch->id)) {
				found = 1;
				break;
			}
		}
		if (!found)
			count++;
	}

	return count;
}

/*
 * Measure of suitability of 'dmac' handling 'ch'
 *
 * 0 indicates 'dmac' can not handle 'ch' either
 * because it is not supported by the hardware or
 * because all dmac channels are currently busy.
 *
 * >0 value indicates 'dmac' has the capability.
 * The bigger the value the more suitable the dmac.
 */
#define MAX_SUIT	UINT_MAX
#define MIN_SUIT	0

static unsigned suitability(struct s3c_pl330_dmac *dmac,
		struct s3c_pl330_chan *ch)
{
	struct pl330_info *pi = dmac->pi;
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	unsigned s;
	int i;

	s = MIN_SUIT;
	/* If all the DMAC channel threads are busy */
	if (dmac_busy(dmac))
		return s;

	for (i = 0; i < PL330_MAX_PERI; i++)
		if (id[i] == ch->id)
			break;

	/* If the 'dmac' can't talk to 'ch' */
	if (i == PL330_MAX_PERI)
		return s;

	s = MAX_SUIT;
	list_for_each_entry(d, &dmac_list, node) {
		/*
		 * If some other dmac can talk to this
		 * peri and has some channel free.
		 */
		if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
			s = 0;
			break;
		}
	}
	if (s)
		return s;

	s = 100;

	/* Good if free chans are more, bad otherwise */
	s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);

	return s;
}
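
/*
 * Worked example (hypothetical numbers, for illustration only): if no
 * other non-busy DMAC can reach the peripheral, this DMAC scores
 * MAX_SUIT outright. Otherwise, a DMAC with 8 channel threads, 2 of
 * them busy, which is the only route to 3 still-free peripherals,
 * scores 100 + (8 - 2) - 3 = 103: the more spare threads it has and
 * the fewer peripherals that depend on it exclusively, the higher
 * the score.
 */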

/* More than one DMAC may have the capability to transfer data with the
 * peripheral. This function assigns the most suitable DMAC to manage the
 * channel and hence communicate with the peripheral.
 */
static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
{
	struct s3c_pl330_dmac *d, *dmac = NULL;
	unsigned sn, sl = MIN_SUIT;

	list_for_each_entry(d, &dmac_list, node) {
		sn = suitability(d, ch);

		if (sn == MAX_SUIT)
			return d;

		/* Track the best score seen so far */
		if (sn > sl) {
			sl = sn;
			dmac = d;
		}
	}

	return dmac;
}

/* Acquire the channel for peripheral 'id' */
static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
{
	struct s3c_pl330_chan *ch = id_to_chan(id);
	struct s3c_pl330_dmac *dmac;

	/* If the channel doesn't exist or is already acquired */
	if (!ch || !chan_free(ch)) {
		ch = NULL;
		goto acq_exit;
	}

	dmac = map_chan_to_dmac(ch);
	/* If couldn't map */
	if (!dmac) {
		ch = NULL;
		goto acq_exit;
	}

	dmac->busy_chan++;
	ch->dmac = dmac;

acq_exit:
	return ch;
}

/* Delete xfer from the queue */
static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
{
	struct s3c_pl330_xfer *t;
	struct s3c_pl330_chan *ch;
	int found;

	if (!xfer)
		return;

	ch = xfer->chan;

	/* Make sure xfer is in the queue */
	found = 0;
	list_for_each_entry(t, &ch->xfer_list, node)
		if (t == xfer) {
			found = 1;
			break;
		}

	if (!found)
		return;

	/* If xfer is last entry in the queue */
	if (xfer->node.next == &ch->xfer_list)
		t = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		t = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	/* If there was only one node left */
	if (t == xfer)
		ch->xfer_head = NULL;
	else if (ch->xfer_head == xfer)
		ch->xfer_head = t;

	list_del(&xfer->node);
}

/* Provides pointer to the next xfer in the queue.
 * If CIRCULAR option is set, the list is left intact,
 * otherwise the xfer is removed from the list.
 * Forced delete 'pluck' can be set to override the CIRCULAR option.
 */
static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
		int pluck)
{
	struct s3c_pl330_xfer *xfer = ch->xfer_head;

	if (!xfer)
		return NULL;

	/* If xfer is last entry in the queue */
	if (xfer->node.next == &ch->xfer_list)
		ch->xfer_head = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		ch->xfer_head = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
		del_from_queue(xfer);

	return xfer;
}

static inline void add_to_queue(struct s3c_pl330_chan *ch,
		struct s3c_pl330_xfer *xfer, int front)
{
	struct pl330_xfer *xt;

	/* If queue empty */
	if (ch->xfer_head == NULL)
		ch->xfer_head = xfer;

	xt = &ch->xfer_head->px;
	/* If the head already submitted (CIRCULAR head) */
	if (ch->options & S3C2410_DMAF_CIRCULAR &&
		(xt == ch->req[0].x || xt == ch->req[1].x))
		ch->xfer_head = xfer;

	/* If this is a resubmission, it should go at the head */
	if (front) {
		ch->xfer_head = xfer;
		list_add(&xfer->node, &ch->xfer_list);
	} else {
		list_add_tail(&xfer->node, &ch->xfer_list);
	}
}

static inline void _finish_off(struct s3c_pl330_xfer *xfer,
		enum s3c2410_dma_buffresult res, int ffree)
{
	struct s3c_pl330_chan *ch;

	if (!xfer)
		return;

	ch = xfer->chan;

	/* Do callback */
	if (ch->callback_fn)
		ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);

	/* Force Free or if buffer is not needed anymore */
	if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
		kmem_cache_free(ch->dmac->kmcache, xfer);
}

static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
		struct pl330_req *r)
{
	struct s3c_pl330_xfer *xfer;
	int ret = 0;

	/* If already submitted */
	if (r->x)
		return 0;

	xfer = get_from_queue(ch, 0);
	if (xfer) {
		r->x = &xfer->px;

		/* Use max bandwidth for M<->M xfers */
		if (r->rqtype == MEMTOMEM) {
			struct pl330_info *pi = xfer->chan->dmac->pi;
			int burst = 1 << ch->rqcfg.brst_size;
			u32 bytes = r->x->bytes;
			int bl;
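
			/*
			 * Worked example (hypothetical config, for
			 * illustration only): with a 64-bit data bus,
			 * a 16-line data buffer and 4-byte bursts
			 * (brst_size = 2), bl starts at 8 * 16 / 4 = 32,
			 * is capped at 16 below, and is then reduced
			 * until the xfer length is a whole number of
			 * bursts.
			 */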
			bl = pi->pcfg.data_bus_width / 8;
			bl *= pi->pcfg.data_buf_dep;
			bl /= burst;

			/* src/dst_burst_len can't be more than 16 */
			if (bl > 16)
				bl = 16;

			while (bl > 1) {
				if (!(bytes % (bl * burst)))
					break;
				bl--;
			}

			ch->rqcfg.brst_len = bl;
		} else {
			ch->rqcfg.brst_len = 1;
		}

		ret = pl330_submit_req(ch->pl330_chan_id, r);

		/* If submission was successful */
		if (!ret) {
			ch->lrq = r; /* latest submitted req */
			return 0;
		}

		r->x = NULL;

		/* If both of the PL330 ping-pong buffers filled */
		if (ret == -EAGAIN) {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			/* Queue back again */
			add_to_queue(ch, xfer, 1);
			ret = 0;
		} else {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			_finish_off(xfer, S3C2410_RES_ERR, 0);
		}
	}

	return ret;
}

static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
		struct pl330_req *r, enum pl330_op_err err)
{
	unsigned long flags;
	struct s3c_pl330_xfer *xfer;
	struct pl330_xfer *xl = r->x;
	enum s3c2410_dma_buffresult res;

	spin_lock_irqsave(&res_lock, flags);

	r->x = NULL;

	s3c_pl330_submit(ch, r);

	spin_unlock_irqrestore(&res_lock, flags);

	/* Map result to S3C DMA API */
	if (err == PL330_ERR_NONE)
		res = S3C2410_RES_OK;
	else if (err == PL330_ERR_ABORT)
		res = S3C2410_RES_ABORT;
	else
		res = S3C2410_RES_ERR;

	/* If last request had some xfer */
	if (xl) {
		xfer = container_of(xl, struct s3c_pl330_xfer, px);
		_finish_off(xfer, res, 0);
	} else {
		dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
			__func__, __LINE__);
	}
}

static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
{
	struct pl330_req *r = token;
	struct s3c_pl330_chan *ch = container_of(r,
			struct s3c_pl330_chan, req[0]);
	s3c_pl330_rq(ch, r, err);
}

static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
{
	struct pl330_req *r = token;
	struct s3c_pl330_chan *ch = container_of(r,
			struct s3c_pl330_chan, req[1]);
	s3c_pl330_rq(ch, r, err);
}

/* Release an acquired channel */
static void chan_release(struct s3c_pl330_chan *ch)
{
	struct s3c_pl330_dmac *dmac;

	if (chan_free(ch))
		return;

	dmac = ch->dmac;
	ch->dmac = NULL;
	dmac->busy_chan--;
}

int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
{
	struct s3c_pl330_xfer *xfer;
	enum pl330_chan_op pl330op;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int idx, ret;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto ctrl_exit;
	}

	switch (op) {
	case S3C2410_DMAOP_START:
		/* Make sure both reqs are enqueued */
		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
		s3c_pl330_submit(ch, &ch->req[idx]);
		s3c_pl330_submit(ch, &ch->req[1 - idx]);
		pl330op = PL330_OP_START;
		break;

	case S3C2410_DMAOP_STOP:
		pl330op = PL330_OP_ABORT;
		break;

	case S3C2410_DMAOP_FLUSH:
		pl330op = PL330_OP_FLUSH;
		break;

	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_TIMEOUT:
	case S3C2410_DMAOP_STARTED:
		spin_unlock_irqrestore(&res_lock, flags);
		return 0;

	default:
		spin_unlock_irqrestore(&res_lock, flags);
		return -EINVAL;
	}

	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);

	if (pl330op == PL330_OP_START) {
		spin_unlock_irqrestore(&res_lock, flags);
		return ret;
	}

	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	/* Abort the current xfer */
	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		/* Drop xfer during FLUSH */
		if (pl330op == PL330_OP_FLUSH)
			del_from_queue(xfer);

		ch->req[idx].x = NULL;

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT,
			pl330op == PL330_OP_FLUSH ? 1 : 0);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Flush the whole queue */
	if (pl330op == PL330_OP_FLUSH) {

		if (ch->req[1 - idx].x) {
			xfer = container_of(ch->req[1 - idx].x,
					struct s3c_pl330_xfer, px);

			del_from_queue(xfer);

			ch->req[1 - idx].x = NULL;

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);
		}

		/* Finish off the remaining in the queue */
		xfer = ch->xfer_head;
		while (xfer) {

			del_from_queue(xfer);

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);

			xfer = ch->xfer_head;
		}
	}

ctrl_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);

int s3c2410_dma_enqueue(enum dma_ch id, void *token,
		dma_addr_t addr, int size)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int idx, ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	/* Error if invalid or free channel */
	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	/* Error if size is unaligned */
	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
	if (!xfer) {
		ret = -ENOMEM;
		goto enq_exit;
	}

	xfer->token = token;
	xfer->chan = ch;
	xfer->px.bytes = size;
	xfer->px.next = NULL; /* Single request */

	/* For S3C DMA API, direction is always fixed for all xfers */
	if (ch->req[0].rqtype == MEMTODEV) {
		xfer->px.src_addr = addr;
		xfer->px.dst_addr = ch->sdaddr;
	} else {
		xfer->px.src_addr = ch->sdaddr;
		xfer->px.dst_addr = addr;
	}

	add_to_queue(ch, xfer, 0);

	/* Try submitting on either request */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (!ch->req[idx].x)
		s3c_pl330_submit(ch, &ch->req[idx]);
	else
		s3c_pl330_submit(ch, &ch->req[1 - idx]);

	spin_unlock_irqrestore(&res_lock, flags);

	if (ch->options & S3C2410_DMAF_AUTOSTART)
		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);

	return 0;

enq_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);

int s3c2410_dma_request(enum dma_ch id,
		struct s3c2410_dma_client *client,
		void *dev)
{
	struct s3c_pl330_dmac *dmac;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = chan_acquire(id);
	if (!ch) {
		ret = -EBUSY;
		goto req_exit;
	}

	dmac = ch->dmac;

	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
	if (!ch->pl330_chan_id) {
		chan_release(ch);
		ret = -EBUSY;
		goto req_exit;
	}

	ch->client = client;
	ch->options = 0; /* Clear any option */
	ch->callback_fn = NULL; /* Clear any callback */
	ch->lrq = NULL;

	ch->rqcfg.brst_size = 2; /* Default word size */
	ch->rqcfg.swap = SWAP_NO;
	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.privileged = 0;
	ch->rqcfg.insnaccess = 0;

	/* Set invalid direction */
	ch->req[0].rqtype = DEVTODEV;
	ch->req[1].rqtype = ch->req[0].rqtype;

	ch->req[0].cfg = &ch->rqcfg;
	ch->req[1].cfg = ch->req[0].cfg;

	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
	ch->req[1].peri = ch->req[0].peri;

	ch->req[0].token = &ch->req[0];
	ch->req[0].xfer_cb = s3c_pl330_rq0;
	ch->req[1].token = &ch->req[1];
	ch->req[1].xfer_cb = s3c_pl330_rq1;

	ch->req[0].x = NULL;
	ch->req[1].x = NULL;

	/* Reset xfer list */
	INIT_LIST_HEAD(&ch->xfer_list);
	ch->xfer_head = NULL;

req_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_request);
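
/*
 * A minimal client sketch (hypothetical peripheral, callback and
 * addresses, for illustration only; error handling omitted). A driver
 * would typically acquire a channel, fix its direction and peripheral
 * address, set the xfer unit and callback, then enqueue buffers:
 *
 *	static struct s3c2410_dma_client uart_dma_client = {
 *		.name = "uart-tx",
 *	};
 *
 *	s3c2410_dma_request(DMACH_UART0_TX, &uart_dma_client, NULL);
 *	s3c2410_dma_devconfig(DMACH_UART0_TX, S3C2410_DMASRC_MEM,
 *				UART0_TX_FIFO_PHYS);
 *	s3c2410_dma_config(DMACH_UART0_TX, 1);		(1-byte xfer unit)
 *	s3c2410_dma_set_buffdone_fn(DMACH_UART0_TX, uart_tx_done);
 *	s3c2410_dma_setflags(DMACH_UART0_TX, S3C2410_DMAF_AUTOSTART);
 *	s3c2410_dma_enqueue(DMACH_UART0_TX, buf_token, buf_dma, len);
 */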

int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int ret = 0;
	unsigned idx;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch))
		goto free_exit;

	/* Refuse if someone else wanted to free the channel */
	if (ch->client != client) {
		ret = -EBUSY;
		goto free_exit;
	}

	/* Stop any active xfer, flush the queue and do callbacks */
	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);

	/* Abort the submitted requests */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[idx].x = NULL;
		del_from_queue(xfer);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	if (ch->req[1 - idx].x) {
		xfer = container_of(ch->req[1 - idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[1 - idx].x = NULL;
		del_from_queue(xfer);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Pluck and abort the queued requests in order */
	do {
		xfer = get_from_queue(ch, 1);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	} while (xfer);

	ch->client = NULL;

	pl330_release_channel(ch->pl330_chan_id);

	ch->pl330_chan_id = NULL;

	chan_release(ch);

free_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_free);

int s3c2410_dma_config(enum dma_ch id, int xferunit)
{
	struct s3c_pl330_chan *ch;
	struct pl330_info *pi;
	unsigned long flags;
	int i, dbwidth, ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto cfg_exit;
	}

	pi = ch->dmac->pi;
	dbwidth = pi->pcfg.data_bus_width / 8;

	/* Max size of an xfer unit is the data bus width in bytes */
	if (xferunit > dbwidth) {
		ret = -EINVAL;
		goto cfg_exit;
	}

	/* Reject values that aren't a power of two, else the
	 * search below would never terminate */
	if (xferunit <= 0 || (xferunit & (xferunit - 1))) {
		ret = -EINVAL;
		goto cfg_exit;
	}

	i = 0;
	while (xferunit != (1 << i))
		i++;

	ch->rqcfg.brst_size = i;

cfg_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_config);

/* Options that are supported by this driver */
#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)

int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
{
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
		ret = -EINVAL;
	else
		ch->options = options;

	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_setflags);

int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
{
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch))
		ret = -EINVAL;
	else
		ch->callback_fn = rtn;

	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);

int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
		unsigned long address)
{
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto devcfg_exit;
	}

	switch (source) {
	case S3C2410_DMASRC_HW: /* P->M */
		ch->req[0].rqtype = DEVTOMEM;
		ch->req[1].rqtype = DEVTOMEM;
		ch->rqcfg.src_inc = 0;
		ch->rqcfg.dst_inc = 1;
		break;
	case S3C2410_DMASRC_MEM: /* M->P */
		ch->req[0].rqtype = MEMTODEV;
		ch->req[1].rqtype = MEMTODEV;
		ch->rqcfg.src_inc = 1;
		ch->rqcfg.dst_inc = 0;
		break;
	default:
		ret = -EINVAL;
		goto devcfg_exit;
	}

	ch->sdaddr = address;

devcfg_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);

int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
{
	struct s3c_pl330_chan *ch = id_to_chan(id);
	struct pl330_chanstatus status;
	int ret;

	if (!ch || chan_free(ch))
		return -EINVAL;

	ret = pl330_chan_status(ch->pl330_chan_id, &status);
	if (ret < 0)
		return ret;

	*src = status.src_addr;
	*dst = status.dst_addr;

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_getposition);

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int pl330_probe(struct platform_device *pdev)
{
	struct s3c_pl330_dmac *s3c_pl330_dmac;
	struct s3c_pl330_platdata *pl330pd;
	struct pl330_info *pl330_info;
	struct resource *res;
	int i, ret, irq;

	pl330pd = pdev->dev.platform_data;

	/* Can't do without the list of _32_ peripherals */
	if (!pl330pd || !pl330pd->peri) {
		dev_err(&pdev->dev, "platform data missing!\n");
		return -ENODEV;
	}

	pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
	if (!pl330_info)
		return -ENOMEM;

	pl330_info->pl330_data = NULL;
	pl330_info->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto probe_err1;
	}

	/* Don't proceed if we can't claim the MMIO region */
	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
		ret = -EBUSY;
		goto probe_err1;
	}

	pl330_info->base = ioremap(res->start, resource_size(res));
	if (!pl330_info->base) {
		ret = -ENXIO;
		goto probe_err2;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto probe_err3;
	}

	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&pdev->dev), pl330_info);
	if (ret)
		goto probe_err4;

	ret = pl330_add(pl330_info);
	if (ret)
		goto probe_err5;

	/* Allocate a new DMAC */
	s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
	if (!s3c_pl330_dmac) {
		ret = -ENOMEM;
		goto probe_err6;
	}

	/* Hook the info */
	s3c_pl330_dmac->pi = pl330_info;

	/* No busy channels */
	s3c_pl330_dmac->busy_chan = 0;

	s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
				sizeof(struct s3c_pl330_xfer), 0, 0, NULL);

	if (!s3c_pl330_dmac->kmcache) {
		ret = -ENOMEM;
		goto probe_err7;
	}

	/* Get the list of peripherals */
	s3c_pl330_dmac->peri = pl330pd->peri;

	/* Attach to the list of DMACs */
	list_add_tail(&s3c_pl330_dmac->node, &dmac_list);

	/* Create a channel for each peripheral in the DMAC
	 * that is, if it doesn't already exist
	 */
	for (i = 0; i < PL330_MAX_PERI; i++)
		if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
			chan_add(s3c_pl330_dmac->peri[i]);

	printk(KERN_INFO
		"Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
	printk(KERN_INFO
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pl330_info->pcfg.data_buf_dep,
		pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
		pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);

	return 0;

probe_err7:
	kfree(s3c_pl330_dmac);
probe_err6:
	pl330_del(pl330_info);
probe_err5:
	free_irq(irq, pl330_info);
probe_err4:
probe_err3:
	iounmap(pl330_info->base);
probe_err2:
	release_mem_region(res->start, resource_size(res));
probe_err1:
	kfree(pl330_info);

	return ret;
}
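
/*
 * A machine port is expected to register one platform device named
 * "s3c-pl330" per DMAC, with platform data listing which peripheral
 * each request interface is wired to (DMACH_MAX marking unused ones).
 * A minimal sketch (hypothetical wiring, for illustration only):
 *
 *	static struct s3c_pl330_platdata dmac0_pdata = {
 *		.peri = {
 *			[0] = DMACH_UART0_RX,
 *			[1] = DMACH_UART0_TX,
 *			[2] = DMACH_MAX,
 *			(and so on, up to PL330_MAX_PERI entries)
 *		},
 *	};
 */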

static int pl330_remove(struct platform_device *pdev)
{
	struct s3c_pl330_dmac *dmac, *d;
	struct s3c_pl330_chan *ch, *cht;
	unsigned long flags;
	int del, found;

	if (!pdev->dev.platform_data)
		return -EINVAL;

	spin_lock_irqsave(&res_lock, flags);

	found = 0;
	list_for_each_entry(d, &dmac_list, node)
		if (d->pi->dev == &pdev->dev) {
			found = 1;
			break;
		}

	if (!found) {
		spin_unlock_irqrestore(&res_lock, flags);
		return 0;
	}

	dmac = d;

	/* Remove all channels that are managed only by this DMAC.
	 * Use the _safe iterator since entries may be freed as we go. */
	list_for_each_entry_safe(ch, cht, &chan_list, node) {

		/* Only channels that are handled by this DMAC */
		if (iface_of_dmac(dmac, ch->id))
			del = 1;
		else
			continue;

		/* Don't remove if some other DMAC has it too */
		list_for_each_entry(d, &dmac_list, node)
			if (d != dmac && iface_of_dmac(d, ch->id)) {
				del = 0;
				break;
			}

		if (del) {
			spin_unlock_irqrestore(&res_lock, flags);
			s3c2410_dma_free(ch->id, ch->client);
			spin_lock_irqsave(&res_lock, flags);
			list_del(&ch->node);
			kfree(ch);
		}
	}

	/* Remove the DMAC */
	list_del(&dmac->node);
	kfree(dmac);

	spin_unlock_irqrestore(&res_lock, flags);

	return 0;
}

static struct platform_driver pl330_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "s3c-pl330",
	},
	.probe		= pl330_probe,
	.remove		= pl330_remove,
};

static int __init pl330_init(void)
{
	return platform_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	platform_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
MODULE_LICENSE("GPL");