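/*
 * DMA mapping support for SuperH.
 *
 * Coherent buffers come from the consistent_alloc() pool; streaming
 * mappings rely on explicit cache writeback/invalidate, since the SH
 * CPU caches are not coherent with respect to DMA.
 */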
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

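/*
 * The consistent allocator takes a struct pci_dev for historical
 * reasons; the wrappers below always pass NULL.
 */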
struct pci_dev;
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

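/*
 * Any DMA mask is accepted: DMA addresses here are plain physical bus
 * addresses, so there is no addressing restriction to enforce.
 */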
#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

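/*
 * Coherent allocations come straight from the consistent pool; the
 * gfp flags are ignored by the underlying allocator.
 */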
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}

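/*
 * The caches are not DMA coherent, so "syncing" a buffer means writing
 * dirty lines back to memory and invalidating them, regardless of the
 * transfer direction.
 */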
static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	dma_cache_wback_inv((unsigned long)vaddr, size);
}

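/*
 * Streaming mappings are a straight virt_to_bus() translation; the
 * cache flush is skipped when the device sits behind a coherent PCI
 * bridge (CONFIG_PCI set, CONFIG_SH_PCIDMA_NONCOHERENT not set).
 */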
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(ptr, size, dir);

	return virt_to_bus(ptr);
}

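/* There is no per-mapping state to tear down, so unmapping is a no-op. */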
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

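/*
 * Map each scatterlist entry by flushing it (on non-coherent
 * configurations) and recording its physical address.
 */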
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

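/* As with dma_unmap_single(), sg unmapping is a no-op. */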
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

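/*
 * Page mappings are expressed in terms of the single-buffer calls;
 * page_address() is used directly, which assumes no highmem.
 */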
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

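/*
 * Sync operations mirror dma_map_single(): skip the flush for devices
 * on a coherent PCI bus, otherwise write back and invalidate the
 * buffer behind the bus address.
 */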
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}

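/*
 * Sync a whole scatterlist; the dma_address of each entry is
 * recomputed, so a resync behaves like a fresh dma_map_sg().
 */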
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

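/*
 * Nothing direction-specific happens on this CPU, so the _for_cpu and
 * _for_device variants are plain wrappers around dma_sync_single() and
 * dma_sync_sg(). (GCC's "alias" attribute cannot be used for this: it
 * needs an emitted symbol to point at, which a static inline function
 * never guarantees.)
 */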
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

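/* Bus address 0 is reserved as the error marker. */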
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */