/*
 * arch/sh/drivers/dma/dma-sh.c
 *
 * SuperH On-chip DMAC Support
 *
 * Copyright (C) 2000 Takashi YOSHII
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2005 Andriy Skulysh
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/dreamcast/dma.h>
#include <asm/dma.h>
#include <asm/io.h>
#include "dma-sh.h"

static inline unsigned int get_dmte_irq(unsigned int chan)
{
	unsigned int irq = 0;

	/*
	 * Normally we could just do DMTE0_IRQ + chan outright, though in the
	 * case of the 7751R, the DMTE IRQs for channels 4 and up start right
	 * above the SCIF.
	 */
	if (chan < 4) {
		irq = DMTE0_IRQ + chan;
	} else {
#ifdef DMTE4_IRQ
		irq = DMTE4_IRQ + chan - 4;
#endif
	}

	return irq;
}

/*
 * We determine the correct shift size based on the CHCR transmit size
 * for the given channel, since we know that it will take
 *
 *	chan->count >> ts_shift[transmit_size]
 *
 * iterations to complete the transfer.
 */
static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
	u32 chcr = ctrl_inl(CHCR[chan->chan]);

	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}

/*
 * The transfer end interrupt must read the CHCR register to end the
 * hardware interrupt active condition. Besides that, it needs to wake
 * any waiting process, which should handle setting up the next transfer.
 */
static irqreturn_t dma_tei(int irq, void *dev_id, struct pt_regs *regs)
{
	struct dma_channel *chan = (struct dma_channel *)dev_id;
	u32 chcr;

	chcr = ctrl_inl(CHCR[chan->chan]);

	if (!(chcr & CHCR_TE))
		return IRQ_NONE;

	chcr &= ~(CHCR_IE | CHCR_DE);
	ctrl_outl(chcr, CHCR[chan->chan]);

	wake_up(&chan->wait_queue);

	return IRQ_HANDLED;
}

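/*
 * Grab the transfer end IRQ for the channel; channels that aren't
 * TEI capable don't need one at all.
 */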
static int sh_dmac_request_dma(struct dma_channel *chan)
{
	char name[32];

	if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
		return 0;

	snprintf(name, sizeof(name), "DMAC Transfer End (Channel %d)",
		 chan->chan);

	return request_irq(get_dmte_irq(chan->chan), dma_tei,
			   IRQF_DISABLED, name, chan);
}

static void sh_dmac_free_dma(struct dma_channel *chan)
{
	free_irq(get_dmte_irq(chan->chan), chan);
}

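/*
 * Program CHCR for the channel. A chcr of zero selects the defaults
 * (RS_DUAL | CHCR_IE). If interrupt generation was requested, we mark
 * the channel TEI capable but leave CHCR_IE clear until the transfer
 * is actually started.
 */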
static void
sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
{
	if (!chcr)
		chcr = RS_DUAL | CHCR_IE;

	if (chcr & CHCR_IE) {
		chcr &= ~CHCR_IE;
		chan->flags |= DMA_TEI_CAPABLE;
	} else {
		chan->flags &= ~DMA_TEI_CAPABLE;
	}

	ctrl_outl(chcr, CHCR[chan->chan]);

	chan->flags |= DMA_CONFIGURED;
}

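/*
 * Start the transfer: set CHCR_DE (plus CHCR_IE for TEI-capable
 * channels) and unmask the transfer end IRQ.
 */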
static void sh_dmac_enable_dma(struct dma_channel *chan)
{
	int irq;
	u32 chcr;

	chcr = ctrl_inl(CHCR[chan->chan]);
	chcr |= CHCR_DE;

	if (chan->flags & DMA_TEI_CAPABLE)
		chcr |= CHCR_IE;

	ctrl_outl(chcr, CHCR[chan->chan]);

	if (chan->flags & DMA_TEI_CAPABLE) {
		irq = get_dmte_irq(chan->chan);
		enable_irq(irq);
	}
}

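/*
 * Stop the channel: mask the transfer end IRQ first (if one was
 * requested), then clear CHCR_DE/CHCR_TE/CHCR_IE.
 */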
static void sh_dmac_disable_dma(struct dma_channel *chan)
{
	int irq;
	u32 chcr;

	if (chan->flags & DMA_TEI_CAPABLE) {
		irq = get_dmte_irq(chan->chan);
		disable_irq(irq);
	}

	chcr = ctrl_inl(CHCR[chan->chan]);
	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	ctrl_outl(chcr, CHCR[chan->chan]);
}

static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
	/*
	 * If we haven't pre-configured the channel with special flags, use
	 * the defaults.
	 */
	if (unlikely(!(chan->flags & DMA_CONFIGURED)))
		sh_dmac_configure_channel(chan, 0);

	sh_dmac_disable_dma(chan);

	/*
	 * Single-address mode usage note!
	 *
	 * It's important that we don't accidentally write any value to SAR/DAR
	 * (this includes 0) that hasn't been directly specified by the user if
	 * we're in single-address mode.
	 *
	 * In this case, only one address can be defined; anything else will
	 * result in a DMA address error interrupt (at least on the SH-4),
	 * which will subsequently halt the transfer.
	 *
	 * Channel 2 on the Dreamcast is a special case, as this is used for
	 * cascading to the PVR2 DMAC. In this case, we still need to write
	 * SAR and DAR, regardless of value, in order for cascading to work.
	 */
	if (chan->sar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		ctrl_outl(chan->sar, SAR[chan->chan]);
	if (chan->dar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		ctrl_outl(chan->dar, DAR[chan->chan]);

	ctrl_outl(chan->count >> calc_xmit_shift(chan), DMATCR[chan->chan]);

	sh_dmac_enable_dma(chan);

	return 0;
}

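/*
 * The residue is whatever remains in DMATCR, scaled back up by the
 * channel's transfer size shift; an idle channel (DE clear) has none.
 */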
static int sh_dmac_get_dma_residue(struct dma_channel *chan)
{
	if (!(ctrl_inl(CHCR[chan->chan]) & CHCR_DE))
		return 0;

	return ctrl_inl(DMATCR[chan->chan]) << calc_xmit_shift(chan);
}

#ifdef CONFIG_CPU_SUBTYPE_SH7780
#define dmaor_read_reg()	ctrl_inw(DMAOR)
#define dmaor_write_reg(data)	ctrl_outw(data, DMAOR)
#else
#define dmaor_read_reg()	ctrl_inl(DMAOR)
#define dmaor_write_reg(data)	ctrl_outl(data, DMAOR)
#endif

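/*
 * Clear any pending NMI/address error flags and re-initialize DMAOR,
 * bailing out if the error bits refuse to clear.
 */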
static inline int dmaor_reset(void)
{
	unsigned long dmaor = dmaor_read_reg();

	/* Try to clear the error flags first, in case they are set */
	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(dmaor);

	dmaor |= DMAOR_INIT;
	dmaor_write_reg(dmaor);

	/* See if we got an error again */
	if ((dmaor_read_reg() & (DMAOR_AE | DMAOR_NMIF))) {
		printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}

	return 0;
}

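/*
 * On SH-4, the DMAC address error interrupt is handled by simply
 * resetting DMAOR and masking the IRQ.
 */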
#if defined(CONFIG_CPU_SH4)
static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
{
	dmaor_reset();
	disable_irq(irq);

	return IRQ_HANDLED;
}
#endif

static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};

static struct dma_info sh_dmac_info = {
	.name		= "sh_dmac",
	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};

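/*
 * Hook up the IPR interrupts for each DMTE channel (plus the SH-4
 * address error IRQ), reset DMAOR and register the controller with
 * the DMA core.
 */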
static int __init sh_dmac_init(void)
{
	struct dma_info *info = &sh_dmac_info;
	int i;

#ifdef CONFIG_CPU_SH4
	make_ipr_irq(DMAE_IRQ, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
	i = request_irq(DMAE_IRQ, dma_err, IRQF_DISABLED,
			"DMAC Address Error", NULL);
	if (unlikely(i < 0))
		return i;
#endif

	for (i = 0; i < info->nr_channels; i++) {
		int irq = get_dmte_irq(i);

		make_ipr_irq(irq, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
	}

	/*
	 * Initialize DMAOR, and clean up any error flags that may have
	 * been set.
	 */
	i = dmaor_reset();
	if (unlikely(i != 0))
		return i;

	return register_dmac(info);
}

static void __exit sh_dmac_exit(void)
{
#ifdef CONFIG_CPU_SH4
	free_irq(DMAE_IRQ, NULL);
#endif
	unregister_dmac(&sh_dmac_info);
}

subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL");