#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

/*
 * Consistent ("coherent") allocations: memory that both the CPU and
 * the device can access without explicit sync calls.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);
	/*
	 * i386 has no IOMMU and all memory is DMA-addressable, so the
	 * bus address is simply the physical address.  Flush the CPU
	 * write buffers so the device sees up-to-date data.
	 */
	flush_write_buffers();
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	/* Nothing to tear down: the mapping was a pure address translation. */
	BUG_ON(!valid_dma_direction(direction));
}
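
/*
 * Usage sketch (illustrative only, not part of this header): a driver
 * doing a one-shot transfer to a device.  "mydev" and "buf" are
 * hypothetical names.
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *	... tell the hardware to DMA from "bus" and wait for it ...
 *	dma_unmap_single(mydev, bus, len, DMA_TO_DEVICE);
 */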

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sg[0].length == 0);

	for (i = 0; i < nents; i++) {
		BUG_ON(!sg[i].page);

		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	flush_write_buffers();
	return nents;
}
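
/*
 * Usage sketch (illustrative, hypothetical names): mapping a
 * scatterlist and programming each resulting segment.  sg_dma_address()
 * comes from <asm/scatterlist.h>.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(mydev, sglist, nents, DMA_FROM_DEVICE);
 *	for (i = 0; i < count; i++)
 *		... hand sg_dma_address(&sglist[i]) and sglist[i].length
 *		    to the device's descriptor ring ...
 *	dma_unmap_sg(mydev, sglist, nents, DMA_FROM_DEVICE);
 */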

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

/*
 * i386 DMA is cache-coherent, so the "for_cpu" syncs are no-ops; the
 * "for_device" syncs only need to drain the CPU write buffers.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	flush_write_buffers();
}
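
/*
 * Usage sketch (illustrative, hypothetical names): peeking at a
 * long-lived streaming mapping between device transfers.
 *
 *	dma_sync_single_for_cpu(mydev, bus, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(mydev, bus, len, DMA_FROM_DEVICE);
 *	... the device may now DMA into it again ...
 */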
91
92static inline void
93dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
94 unsigned long offset, size_t size,
95 enum dma_data_direction direction)
96{
97}
98
99static inline void
100dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
101 unsigned long offset, size_t size,
102 enum dma_data_direction direction)
103{
104 flush_write_buffers();
105}
106
107static inline void
108dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
109 enum dma_data_direction direction)
110{
111}
112
113static inline void
114dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
115 enum dma_data_direction direction)
116{
117 flush_write_buffers();
118}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	/* Mapping never fails on i386: there is no IOMMU to run out of. */
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA (the low 16MB ISA DMA zone).
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
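
/*
 * Usage sketch (illustrative, hypothetical names): a probe routine
 * declaring that the device can address all 32-bit physical memory.
 *
 *	if (dma_set_mask(mydev, 0xffffffffULL))
 *		return -EIO;
 */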

static inline int
dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get the cache size on all x86, so return the
	 * maximum possible, to be safe.
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}

#define dma_is_consistent(d) (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	/* Memory is always consistent here; just drain the write buffers. */
	flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
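
/*
 * Usage sketch (illustrative; the names and the DMA_MEMORY_MAP flag
 * value are assumptions based on the generic coherent-memory API):
 * handing the allocator a chunk of device-local memory at probe time.
 *
 *	if (!dma_declare_coherent_memory(mydev, bus_addr, dev_addr,
 *					 region_size, DMA_MEMORY_MAP))
 *		return -ENOMEM;
 */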

#endif /* _ASM_I386_DMA_MAPPING_H */