#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

struct pci_dev;

/*
 * Low-level consistent-memory primitives; dma_alloc_coherent() and
 * dma_free_coherent() below pass NULL for hwdev.
 */
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

/* Claim support for any DMA mask; no addressing restrictions are enforced. */
#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
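
/*
 * Example (sketch only, not part of this header): a driver's probe routine
 * would typically set its DMA mask before creating any mappings. "pdev" is
 * a hypothetical struct pci_dev pointer:
 *
 *	if (dma_set_mask(&pdev->dev, 0xffffffffULL))
 *		return -EIO;
 */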

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t flag)
{
	/* The gfp flag is ignored; the buffer always comes from consistent_alloc(). */
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
					 void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}
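
/*
 * Example (sketch; "dev", "ring" and RING_SIZE are hypothetical): allocate
 * a buffer the CPU and device can share without explicit cache maintenance,
 * and free it later with the same size and handle:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */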

static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	/* The direction is ignored: writeback-and-invalidate covers all cases. */
	dma_cache_wback_inv((unsigned long)vaddr, size);
}

static inline dma_addr_t dma_map_single(struct device *dev,
					 void *ptr, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	/* PCI DMA is cache-coherent in this configuration; skip the flush. */
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(ptr, size, dir);

	return virt_to_bus(ptr);
}
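
/*
 * Example (sketch; "dev", "buf" and "len" are hypothetical): a streaming
 * mapping of a single buffer. The unmap is a no-op on SH, but callers
 * should still pair map/unmap for portability:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...device reads from "handle"...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */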

/* Nothing to tear down on this architecture. */
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}
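
/*
 * Example (sketch; "dev", "sglist", "count" and program_descriptor() are
 * hypothetical): map a scatterlist and hand each element's bus address and
 * length to the device:
 *
 *	int i, n = dma_map_sg(dev, sglist, count, DMA_FROM_DEVICE);
 *	for (i = 0; i < n; i++)
 *		program_descriptor(sglist[i].dma_address, sglist[i].length);
 *	dma_unmap_sg(dev, sglist, count, DMA_FROM_DEVICE);
 */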

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

/*
 * The _for_cpu/_for_device variants need no extra work over a plain sync
 * on this architecture, so forward to dma_sync_single()/dma_sync_sg() via
 * real wrappers rather than the alias attribute, which is not valid on a
 * static inline declaration and does not reliably compile.
 */
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}
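
/*
 * Example (sketch; "dev", "buf", "len" and "handle" are hypothetical): for
 * a long-lived streaming mapping, bracket CPU accesses with the sync calls
 * so the cache is maintained around device activity:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	...CPU examines buf...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */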

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family defines its own L1_CACHE_SHIFT, and
	 * L1_CACHE_BYTES is derived from it, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}
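
/*
 * Example (sketch; "len" is hypothetical): round a buffer size up so DMA
 * data does not share a cache line with unrelated data:
 *
 *	size_t sz = ALIGN(len, dma_get_cache_alignment());
 */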

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	/* A bus address of zero is used as the error marker. */
	return dma_addr == 0;
}
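
/*
 * Example (sketch; "dev", "buf" and "len" are hypothetical): check a
 * streaming mapping before handing it to the device:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 */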

#endif /* __ASM_SH_DMA_MAPPING_H */