/* $Id: dma.c,v 1.7 1994/12/28 03:35:33 root Exp root $
 * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
 *
 * Written by Hennus Bergman, 1992.
 *
 * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma.
 *   In the previous version the reported device could end up being wrong,
 *   if a device requested a DMA channel that was already in use.
 *   [It also happened to remove the sizeof(char *) == sizeof(int)
 *   assumption introduced because of those /proc/dma patches. -- Hennus]
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <asm/dma.h>
#include <asm/system.h>


/* A note on resource allocation:
 *
 * All drivers needing DMA channels should allocate and release them
 * through the public routines `request_dma()' and `free_dma()'.
 *
 * In order to avoid problems, all processes should allocate resources in
 * the same sequence and release them in the reverse order.
 *
 * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
 * When releasing them, first release the DMA, then release the IRQ.
 * If you don't, you may cause allocation requests to fail unnecessarily.
 * This doesn't really matter now, but it will once we get real semaphores
 * in the kernel.
 */

41DEFINE_SPINLOCK(dma_spin_lock);
42
/*
 * If our port doesn't define MAX_DMA_CHANNELS it has no PC-like DMA.
 */

#ifdef MAX_DMA_CHANNELS

50/* Channel n is busy iff dma_chan_busy[n].lock != 0.
51 * DMA0 used to be reserved for DRAM refresh, but apparently not any more...
52 * DMA4 is reserved for cascading.
53 */
54
55struct dma_chan {
56 int lock;
57 const char *device_id;
58};
59
60static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] = {
61 [4] = { 1, "cascade" },
62};
63
64
65int request_dma(unsigned int dmanr, const char * device_id)
66{
67 if (dmanr >= MAX_DMA_CHANNELS)
68 return -EINVAL;
69
70 if (xchg(&dma_chan_busy[dmanr].lock, 1) != 0)
71 return -EBUSY;
72
73 dma_chan_busy[dmanr].device_id = device_id;
74
75 /* old flag was 0, now contains 1 to indicate busy */
76 return 0;
77} /* request_dma */
78
79
80void free_dma(unsigned int dmanr)
81{
82 if (dmanr >= MAX_DMA_CHANNELS) {
83 printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
84 return;
85 }
86
87 if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) {
88 printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
89 return;
90 }
91
92} /* free_dma */
93
#else

96int request_dma(unsigned int dmanr, const char *device_id)
97{
98 return -EINVAL;
99}
100
101void free_dma(unsigned int dmanr)
102{
103}
104
#endif

#ifdef CONFIG_PROC_FS

#ifdef MAX_DMA_CHANNELS
110static int proc_dma_show(struct seq_file *m, void *v)
111{
112 int i;
113
114 for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
115 if (dma_chan_busy[i].lock) {
116 seq_printf(m, "%2d: %s\n", i,
117 dma_chan_busy[i].device_id);
118 }
119 }
120 return 0;
121}
#else
123static int proc_dma_show(struct seq_file *m, void *v)
124{
125 seq_puts(m, "No DMA\n");
126 return 0;
127}
#endif /* MAX_DMA_CHANNELS */

130static int proc_dma_open(struct inode *inode, struct file *file)
131{
132 return single_open(file, proc_dma_show, NULL);
133}
134
135static struct file_operations proc_dma_operations = {
136 .open = proc_dma_open,
137 .read = seq_read,
138 .llseek = seq_lseek,
139 .release = single_release,
140};
141
142static int __init proc_dma_init(void)
143{
144 struct proc_dir_entry *e;
145
146 e = create_proc_entry("dma", 0, NULL);
147 if (e)
148 e->proc_fops = &proc_dma_operations;
149
150 return 0;
151}
152
153__initcall(proc_dma_init);
#endif

156EXPORT_SYMBOL(request_dma);
157EXPORT_SYMBOL(free_dma);
158EXPORT_SYMBOL(dma_spin_lock);