#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern struct bus_type pci_bus_type;

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);

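/*
 * All DMA masks are accepted: dma_supported() unconditionally
 * evaluates to 1, so dma_set_mask() can only fail for a device that
 * has no dma_mask pointer at all.
 */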
#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

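/*
 * Coherent allocations first try the board's machine vector hooks
 * (sh_mv.mv_consistent_alloc/mv_consistent_free) and fall back to the
 * generic allocator in arch/sh/mm/consistent.c when no hook is set or
 * the hook declines the request.
 */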
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	if (sh_mv.mv_consistent_alloc) {
		void *ret;

		ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
		if (ret != NULL)
			return ret;
	}

	return consistent_alloc(flag, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	if (sh_mv.mv_consistent_free) {
		int ret;

		ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
		if (ret == 0)
			return;
	}

	consistent_free(vaddr, size);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	consistent_sync(vaddr, size, (int)dir);
}

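/*
 * Mapping is a simple virtual-to-physical conversion here. On a
 * cache-coherent PCI bus the cache sync is skipped entirely; in all
 * other cases the buffer is synchronized via dma_cache_sync() before
 * the physical address is handed to the device.
 */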
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

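/*
 * Scatterlist mapping follows the same pattern per entry: sync the
 * entry's buffer unless the bus is coherent PCI, then record the
 * physical address of page + offset in dma_address. Unmapping is a
 * no-op, so there is no state to tear down.
 */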
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
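
/*
 * Minimal usage sketch for the streaming API (illustrative only; the
 * "dev", "buf" and "len" names are hypothetical driver state, not
 * anything defined by this header):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... point the device at "handle" and run the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */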

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

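/*
 * The dma_sync_*() helpers below all reduce to dma_cache_sync() on the
 * kernel virtual address behind the handle, and become no-ops on
 * coherent PCI. The _for_cpu and _for_device variants are identical on
 * this port: the same cache operation covers both directions of
 * ownership transfer.
 */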
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

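/*
 * A DMA address of zero is treated as the mapping-error sentinel on
 * this port.
 */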
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
#endif /* __ASM_SH_DMA_MAPPING_H */