/* linux/arch/arm/plat-s3c64xx/dma.c
 *
 * Copyright 2009 Openmoko, Inc.
 * Copyright 2009 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * S3C64XX DMA core
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>

#include <mach/dma.h>
#include <mach/map.h>
#include <mach/irqs.h>

#include <asm/hardware/pl080.h>

#include "regs-sys.h"

/* dma channel state information */

struct s3c64xx_dmac {
	struct device		dev;
	struct clk		*clk;
	void __iomem		*regs;
	struct s3c2410_dma_chan	*channels;
	enum dma_ch		chanbase;
};

/* pool to provide LLI buffers */
static struct dma_pool *dma_pool;

/* Debug configuration and code */

static unsigned char debug_show_buffs = 0;

static void dbg_showchan(struct s3c2410_dma_chan *chan)
{
	pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n",
		 chan->number,
		 readl(chan->regs + PL080_CH_SRC_ADDR),
		 readl(chan->regs + PL080_CH_DST_ADDR),
		 readl(chan->regs + PL080_CH_LLI),
		 readl(chan->regs + PL080_CH_CONTROL),
		 readl(chan->regs + PL080S_CH_CONTROL2),
		 readl(chan->regs + PL080S_CH_CONFIG));
}

static void show_lli(struct pl080s_lli *lli)
{
	pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n",
		 lli, lli->src_addr, lli->dst_addr, lli->next_lli,
		 lli->control0, lli->control1);
}

static void dbg_showbuffs(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dma_buff *ptr;
	struct s3c64xx_dma_buff *end;

	pr_debug("DMA%d: buffs next %p, curr %p, end %p\n",
		 chan->number, chan->next, chan->curr, chan->end);

	ptr = chan->next;
	end = chan->end;

	if (debug_show_buffs) {
		for (; ptr != NULL; ptr = ptr->next) {
			pr_debug("DMA%d: %08x ",
				 chan->number, ptr->lli_dma);
			show_lli(ptr->lli);
		}
	}
}

/* End of Debug */

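/*
 * s3c64xx_dma_map_channel
 *
 * Find a free hardware channel for the given virtual channel number.
 * Requests below DMACH_PCM1_TX are served from the first controller
 * (channels 0-7), everything else from the second (channels 8-15);
 * the chosen channel is recorded in s3c_dma_chan_map[].
 */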
static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel)
{
	struct s3c2410_dma_chan *chan;
	unsigned int start, offs;

	start = 0;

	if (channel >= DMACH_PCM1_TX)
		start = 8;

	for (offs = 0; offs < 8; offs++) {
		chan = &s3c2410_chans[start + offs];
		if (!chan->in_use)
			goto found;
	}

	return NULL;

found:
	s3c_dma_chan_map[channel] = chan;
	return chan;
}

int s3c2410_dma_config(enum dma_ch channel, int xferunit)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	if (chan == NULL)
		return -EINVAL;

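	/* hw_width holds log2 of the transfer unit in bytes (0, 1 or 2),
	 * which is the encoding the PL080 SWIDTH/DWIDTH control fields
	 * expect when the LLI is built in s3c64xx_dma_fill_lli(). */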
	switch (xferunit) {
	case 1:
		chan->hw_width = 0;
		break;
	case 2:
		chan->hw_width = 1;
		break;
	case 4:
		chan->hw_width = 2;
		break;
	default:
		printk(KERN_ERR "%s: illegal width %d\n", __func__, xferunit);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_config);

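/*
 * s3c64xx_dma_fill_lli
 *
 * Build a single PL080S linked-list item for one transfer. The channel's
 * configured direction decides which side is the fixed device address on
 * AHB2 and which side is incrementing memory; control1 carries the
 * transfer count expressed in units of the configured bus width.
 */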
static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
				 struct pl080s_lli *lli,
				 dma_addr_t data, int size)
{
	dma_addr_t src, dst;
	u32 control0, control1;

	switch (chan->source) {
	case DMA_FROM_DEVICE:
		src = chan->dev_addr;
		dst = data;
		control0 = PL080_CONTROL_SRC_AHB2;
		control0 |= PL080_CONTROL_DST_INCR;
		break;

	case DMA_TO_DEVICE:
		src = data;
		dst = chan->dev_addr;
		control0 = PL080_CONTROL_DST_AHB2;
		control0 |= PL080_CONTROL_SRC_INCR;
		break;
	default:
		BUG();
	}

	/* note, we do not currently setup any of the burst controls */

	control1 = size >> chan->hw_width;	/* size in no of xfers */
	control0 |= PL080_CONTROL_PROT_SYS;	/* always in priv. mode */
	control0 |= PL080_CONTROL_TC_IRQ_EN;	/* always fire IRQ */
	control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT;
	control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT;

	lli->src_addr = src;
	lli->dst_addr = dst;
	lli->next_lli = 0;
	lli->control0 = control0;
	lli->control1 = control1;
}

static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan,
				struct pl080s_lli *lli)
{
	void __iomem *regs = chan->regs;

	pr_debug("%s: LLI %p => regs\n", __func__, lli);
	show_lli(lli);

	writel(lli->src_addr, regs + PL080_CH_SRC_ADDR);
	writel(lli->dst_addr, regs + PL080_CH_DST_ADDR);
	writel(lli->next_lli, regs + PL080_CH_LLI);
	writel(lli->control0, regs + PL080_CH_CONTROL);
	writel(lli->control1, regs + PL080S_CH_CONTROL2);
}

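/*
 * s3c64xx_dma_start
 *
 * Clear any pending terminal-count and error status for the channel,
 * then set ENABLE and clear HALT in the channel configuration register
 * so the transfer programmed into the channel registers begins.
 */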
static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dmac *dmac = chan->dmac;
	u32 config;
	u32 bit = chan->bit;

	dbg_showchan(chan);

	pr_debug("%s: clearing interrupts\n", __func__);

	/* clear interrupts */
	writel(bit, dmac->regs + PL080_TC_CLEAR);
	writel(bit, dmac->regs + PL080_ERR_CLEAR);

	pr_debug("%s: starting channel\n", __func__);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config |= PL080_CONFIG_ENABLE;
	config &= ~PL080_CONFIG_HALT;

	pr_debug("%s: writing config %08x\n", __func__, config);
	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}

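/*
 * s3c64xx_dma_stop
 *
 * Halt the channel first, then poll the ACTIVE bit (for up to ~10ms)
 * to let any in-flight data drain before clearing ENABLE. Returns an
 * error if the channel is still marked active after the timeout.
 */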
static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan)
{
	u32 config;
	int timeout;

	pr_debug("%s: stopping channel\n", __func__);

	dbg_showchan(chan);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config |= PL080_CONFIG_HALT;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	timeout = 1000;
	do {
		config = readl(chan->regs + PL080S_CH_CONFIG);
		pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
		if (config & PL080_CONFIG_ACTIVE)
			udelay(10);
		else
			break;
	} while (--timeout > 0);

	if (config & PL080_CONFIG_ACTIVE) {
		printk(KERN_ERR "%s: channel still active\n", __func__);
		return -EFAULT;
	}

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config &= ~PL080_CONFIG_ENABLE;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}

static inline void s3c64xx_dma_bufffdone(struct s3c2410_dma_chan *chan,
					 struct s3c64xx_dma_buff *buf,
					 enum s3c2410_dma_buffresult result)
{
	if (chan->callback_fn != NULL)
		(chan->callback_fn)(chan, buf->pw, 0, result);
}

static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff)
{
	dma_pool_free(dma_pool, buff->lli, buff->lli_dma);
	kfree(buff);
}

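/*
 * s3c64xx_dma_flush
 *
 * Disable the channel and walk the list of queued buffers, completing
 * each one with S3C2410_RES_ABORT and returning its LLI to the pool,
 * then reset the channel's queue pointers.
 */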
static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dma_buff *buff, *next;
	u32 config;

	dbg_showchan(chan);

	pr_debug("%s: flushing channel\n", __func__);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config &= ~PL080_CONFIG_ENABLE;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	/* dump all the buffers associated with this channel */

	for (buff = chan->curr; buff != NULL; buff = next) {
		next = buff->next;
		pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next);

		s3c64xx_dma_bufffdone(chan, buff, S3C2410_RES_ABORT);
		s3c64xx_dma_freebuff(buff);
	}

	chan->curr = chan->next = chan->end = NULL;

	return 0;
}

int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	switch (op) {
	case S3C2410_DMAOP_START:
		return s3c64xx_dma_start(chan);

	case S3C2410_DMAOP_STOP:
		return s3c64xx_dma_stop(chan);

	case S3C2410_DMAOP_FLUSH:
		return s3c64xx_dma_flush(chan);

	/* believe PAUSE/RESUME are no-ops */
	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_STARTED:
	case S3C2410_DMAOP_TIMEOUT:
		return 0;
	}

	return -ENOENT;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);

/* s3c2410_dma_enqueue
 *
 * Queue a buffer for transfer on the given channel, appending it to the
 * channel's linked list of LLIs, or programming it straight into the
 * channel registers if the channel is currently idle.
 */

int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	struct s3c64xx_dma_buff *next;
	struct s3c64xx_dma_buff *buff;
	struct pl080s_lli *lli;
	unsigned long flags;
	int ret;

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC);
	if (!buff) {
		printk(KERN_ERR "%s: no memory for buffer\n", __func__);
		return -ENOMEM;
	}

	lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma);
	if (!lli) {
		printk(KERN_ERR "%s: no memory for lli\n", __func__);
		ret = -ENOMEM;
		goto err_buff;
	}

	pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
		 __func__, buff, data, lli, (u32)buff->lli_dma, size);

	buff->lli = lli;
	buff->pw = id;

	s3c64xx_dma_fill_lli(chan, lli, data, size);

	local_irq_save(flags);

	if ((next = chan->next) != NULL) {
		struct s3c64xx_dma_buff *end = chan->end;
		struct pl080s_lli *endlli = end->lli;

		pr_debug("enqueuing onto channel\n");

		end->next = buff;
		endlli->next_lli = buff->lli_dma;

		if (chan->flags & S3C2410_DMAF_CIRCULAR) {
			struct s3c64xx_dma_buff *curr = chan->curr;
			lli->next_lli = curr->lli_dma;
		}

		if (next == chan->curr) {
			writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
			chan->next = buff;
		}

		show_lli(endlli);
		chan->end = buff;
	} else {
		pr_debug("enqueuing onto empty channel\n");

		chan->curr = buff;
		chan->next = buff;
		chan->end = buff;

		s3c64xx_lli_to_regs(chan, lli);
	}

	local_irq_restore(flags);

	show_lli(lli);

	dbg_showchan(chan);
	dbg_showbuffs(chan);
	return 0;

err_buff:
	kfree(buff);
	return ret;
}

EXPORT_SYMBOL(s3c2410_dma_enqueue);


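/*
 * s3c2410_dma_devconfig
 *
 * Configure the channel for a given transfer direction and device
 * address: the direction selects the PL080 flow-control mode and
 * whether the peripheral number goes into the source or destination
 * select field, and TC/ERR interrupts are unmasked for the channel.
 */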
int s3c2410_dma_devconfig(enum dma_ch channel,
			  enum dma_data_direction source,
			  unsigned long devaddr)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	u32 peripheral;
	u32 config = 0;

	pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
		 __func__, channel, source, devaddr, chan);

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	peripheral = (chan->peripheral & 0xf);
	chan->source = source;
	chan->dev_addr = devaddr;

	pr_debug("%s: peripheral %d\n", __func__, peripheral);

	switch (source) {
	case DMA_FROM_DEVICE:
		config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
		break;
	case DMA_TO_DEVICE:
		config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
		break;
	default:
		printk(KERN_ERR "%s: bad source\n", __func__);
		return -EINVAL;
	}

	/* allow TC and ERR interrupts */
	config |= PL080_CONFIG_TC_IRQ_MASK;
	config |= PL080_CONFIG_ERR_IRQ_MASK;

	pr_debug("%s: config %08x\n", __func__, config);

	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);


int s3c2410_dma_getposition(enum dma_ch channel,
			    dma_addr_t *src, dma_addr_t *dst)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	if (src != NULL)
		*src = readl(chan->regs + PL080_CH_SRC_ADDR);

	if (dst != NULL)
		*dst = readl(chan->regs + PL080_CH_DST_ADDR);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_getposition);

/* s3c2410_dma_request
 *
 * get control of a dma channel
*/

int s3c2410_dma_request(enum dma_ch channel,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c2410_dma_chan *chan;
	unsigned long flags;

	pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
		 channel, client->name, dev);

	local_irq_save(flags);

	chan = s3c64xx_dma_map_channel(channel);
	if (chan == NULL) {
		local_irq_restore(flags);
		return -EBUSY;
	}

	dbg_showchan(chan);

	chan->client = client;
	chan->in_use = 1;
	chan->peripheral = channel;

	local_irq_restore(flags);

	/* need to setup */

	pr_debug("%s: channel initialised, %p\n", __func__, chan);

	return chan->number | DMACH_LOW_LEVEL;
}

EXPORT_SYMBOL(s3c2410_dma_request);

/* s3c2410_dma_free
 *
 * release the given channel back to the system, will stop and flush
 * any outstanding transfers, and ensure the channel is ready for the
 * next claimant.
 *
 * Note, although a warning is currently printed if the freeing client
 * info is not the same as the registrant's client info, the free is still
 * allowed to go through.
*/

int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	local_irq_save(flags);

	if (chan->client != client) {
		printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
		       channel, chan->client, client);
	}

	/* sort out stopping and freeing the channel */


	chan->client = NULL;
	chan->in_use = 0;

	if (!(channel & DMACH_LOW_LEVEL))
		s3c_dma_chan_map[channel] = NULL;

	local_irq_restore(flags);

	return 0;
}

EXPORT_SYMBOL(s3c2410_dma_free);

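/*
 * s3c64xx_dma_irq
 *
 * Interrupt handler shared by all eight channels of one controller.
 * Reads the terminal-count and error status, completes the buffer that
 * just finished on each flagged channel, frees it for non-circular
 * queues, and advances the channel's curr/next/end pointers.
 */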
static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
{
	struct s3c64xx_dmac *dmac = pw;
	struct s3c2410_dma_chan *chan;
	enum s3c2410_dma_buffresult res;
	u32 tcstat, errstat;
	u32 bit;
	int offs;

	tcstat = readl(dmac->regs + PL080_TC_STATUS);
	errstat = readl(dmac->regs + PL080_ERR_STATUS);

	for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) {
		struct s3c64xx_dma_buff *buff;

		if (!(errstat & bit) && !(tcstat & bit))
			continue;

		chan = dmac->channels + offs;
		res = S3C2410_RES_ERR;

		if (tcstat & bit) {
			writel(bit, dmac->regs + PL080_TC_CLEAR);
			res = S3C2410_RES_OK;
		}

		if (errstat & bit)
			writel(bit, dmac->regs + PL080_ERR_CLEAR);

		/* 'next' points to the buffer that is next to the
		 * currently active buffer.
		 * For CIRCULAR queues, 'next' will be same as 'curr'
		 * when 'end' is the active buffer.
		 */
		buff = chan->curr;
		while (buff && buff != chan->next
				&& buff->next != chan->next)
			buff = buff->next;

		if (!buff)
			BUG();

		if (buff == chan->next)
			buff = chan->end;

		s3c64xx_dma_bufffdone(chan, buff, res);

		/* Free the node and update curr, if non-circular queue */
		if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) {
			chan->curr = buff->next;
			s3c64xx_dma_freebuff(buff);
		}

		/* Update 'next' */
		buff = chan->next;
		if (chan->next == chan->end) {
			chan->next = chan->curr;
			if (!(chan->flags & S3C2410_DMAF_CIRCULAR))
				chan->end = NULL;
		} else {
			chan->next = buff->next;
		}
	}

	return IRQ_HANDLED;
}

static struct bus_type dma_subsys = {
	.name		= "s3c64xx-dma",
	.dev_name	= "s3c64xx-dma",
};

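/*
 * s3c64xx_dma_init1
 *
 * Bring up one PL080S controller: register its device on the
 * s3c64xx-dma subsystem, map its registers, enable its clock, claim
 * its interrupt and initialise the eight channel structures it
 * provides, starting at channel number 'chno'.
 */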
static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
			     int irq, unsigned int base)
{
	struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno];
	struct s3c64xx_dmac *dmac;
	char clkname[16];
	void __iomem *regs;
	void __iomem *regptr;
	int err, ch;

	dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL);
	if (!dmac) {
		printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
		return -ENOMEM;
	}

	dmac->dev.id = chno / 8;
	dmac->dev.bus = &dma_subsys;

	err = device_register(&dmac->dev);
	if (err) {
		printk(KERN_ERR "%s: failed to register device\n", __func__);
		goto err_alloc;
	}

	regs = ioremap(base, 0x200);
	if (!regs) {
		printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
		err = -ENXIO;
		goto err_dev;
	}

	snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id);

	dmac->clk = clk_get(NULL, clkname);
	if (IS_ERR(dmac->clk)) {
		printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname);
		err = PTR_ERR(dmac->clk);
		goto err_map;
	}

	clk_enable(dmac->clk);

	dmac->regs = regs;
	dmac->chanbase = chbase;
	dmac->channels = chptr;

	err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac);
	if (err < 0) {
		printk(KERN_ERR "%s: failed to get irq\n", __func__);
		goto err_clk;
	}

	regptr = regs + PL080_Cx_BASE(0);

	for (ch = 0; ch < 8; ch++, chptr++) {
		pr_debug("%s: registering DMA %d (%p)\n",
			 __func__, chno + ch, regptr);

		chptr->bit = 1 << ch;
		chptr->number = chno + ch;
		chptr->dmac = dmac;
		chptr->regs = regptr;
		regptr += PL080_Cx_STRIDE;
	}

	/* for the moment, permanently enable the controller */
	writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);

	printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
	       irq, regs, chno, chno+8);

	return 0;

err_clk:
	clk_disable(dmac->clk);
	clk_put(dmac->clk);
err_map:
	iounmap(regs);
err_dev:
	device_unregister(&dmac->dev);
err_alloc:
	kfree(dmac);
	return err;
}

static int __init s3c64xx_dma_init(void)
{
	int ret;

	printk(KERN_INFO "%s: Registering DMA channels\n", __func__);

	dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
	if (!dma_pool) {
		printk(KERN_ERR "%s: failed to create pool\n", __func__);
		return -ENOMEM;
	}

	ret = subsys_system_register(&dma_subsys, NULL);
	if (ret) {
		printk(KERN_ERR "%s: failed to create subsys\n", __func__);
		return -ENOMEM;
	}

	/* Set all DMA configuration to be DMA, not SDMA */
	writel(0xffffff, S3C64XX_SDMA_SEL);

	/* Register standard DMA controllers */
	s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
	s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);

	return 0;
}

arch_initcall(s3c64xx_dma_init);