#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

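/*
 * i386 is fully cache-coherent, so the "noncoherent" variants simply
 * alias the coherent ones.
 */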
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

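/*
 * Coherent (consistent) allocations are implemented out of line in the
 * architecture's DMA support code.
 */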
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

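/*
 * Streaming mappings: on i386 a bus address is simply the physical
 * address, so mapping flushes the CPU write buffers and translates the
 * pointer with virt_to_phys().  Illustrative use (hypothetical driver
 * code, not part of this header):
 *
 *      dma_addr_t bus = dma_map_single(&pdev->dev, buf, len,
 *                                      DMA_TO_DEVICE);
 *      ... point the device at "bus" and start the transfer ...
 *      dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
 */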
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        if (direction == DMA_NONE)
                BUG();
        WARN_ON(size == 0);
        flush_write_buffers();
        return virt_to_phys(ptr);
}

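/*
 * Nothing to undo on i386; only the direction argument is sanity-checked.
 */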
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        if (direction == DMA_NONE)
                BUG();
}

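/*
 * Map a scatterlist: each entry's bus address is the physical address
 * of its page plus the offset within that page.
 */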
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        if (direction == DMA_NONE)
                BUG();
        WARN_ON(nents == 0 || sg[0].length == 0);

        for (i = 0; i < nents; i++) {
                BUG_ON(!sg[i].page);

                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        flush_write_buffers();
        return nents;
}

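/*
 * Page-based variant of dma_map_single(), for buffers described by a
 * struct page (e.g. highmem) rather than a kernel virtual address.
 */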
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return page_to_phys(page) + offset;
}

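/*
 * As with dma_unmap_single(), unmapping pages and scatterlists needs no
 * work here beyond the DMA_NONE sanity check.
 */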
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

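/*
 * Streaming sync helpers: handing a buffer back to the CPU needs no work
 * on i386, while handing it to the device only requires flushing the CPU
 * write buffers.
 */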
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        flush_write_buffers();
}

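/*
 * Mappings are plain address translations and cannot fail, so there is
 * never an error to report.
 */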
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

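/*
 * 0x00ffffff is the 24-bit ISA limit: GFP_DMA memory is only guaranteed
 * to lie below 16MB, so masks narrower than that cannot be honoured.
 */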
static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA..
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

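/*
 * Record the device's addressing capability once it has been checked
 * against dma_supported().
 */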
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

static inline int
dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return (1 << INTERNODE_CACHE_SHIFT);
}

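/*
 * All memory is DMA-consistent on i386, so dma_cache_sync() only needs
 * to flush the CPU write buffers.
 */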
#define dma_is_consistent(d)    (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

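/*
 * Optional per-device coherent memory pools (e.g. device-local RAM);
 * implemented out of line.
 */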
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);

#endif