blob: 3e6b8445af6a07285255fc5ce0fa80ab10003d8b [file] [log] [blame]
Greg Ungerer9a4048a2009-01-29 15:35:34 +10001#ifndef _M68K_DMA_MAPPING_H
2#define _M68K_DMA_MAPPING_H
3
4#include <asm/cache.h>
5
6struct scatterlist;
7
8#ifndef CONFIG_MMU_SUN3
9static inline int dma_supported(struct device *dev, u64 mask)
10{
11 return 1;
12}
13
14static inline int dma_set_mask(struct device *dev, u64 mask)
15{
16 return 0;
17}
18
/*
 * Coherent DMA buffer allocation and release; implemented out of line
 * in the arch DMA support code.
 */
extern void *dma_alloc_coherent(struct device *, size_t,
				dma_addr_t *, gfp_t);
extern void dma_free_coherent(struct device *, size_t,
			      void *, dma_addr_t);
23
Geert Uytterhoeven4a09ab62012-12-16 18:27:33 +010024static inline void *dma_alloc_attrs(struct device *dev, size_t size,
25 dma_addr_t *dma_handle, gfp_t flag,
26 struct dma_attrs *attrs)
27{
28 /* attrs is not supported and ignored */
29 return dma_alloc_coherent(dev, size, dma_handle, flag);
30}
31
32static inline void dma_free_attrs(struct device *dev, size_t size,
33 void *cpu_addr, dma_addr_t dma_handle,
34 struct dma_attrs *attrs)
35{
36 /* attrs is not supported and ignored */
37 dma_free_coherent(dev, size, cpu_addr, dma_handle);
38}
39
Greg Ungerer9a4048a2009-01-29 15:35:34 +100040static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
41 dma_addr_t *handle, gfp_t flag)
42{
43 return dma_alloc_coherent(dev, size, handle, flag);
44}
45static inline void dma_free_noncoherent(struct device *dev, size_t size,
46 void *addr, dma_addr_t handle)
47{
48 dma_free_coherent(dev, size, addr, handle);
49}
50static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
51 enum dma_data_direction dir)
52{
53 /* we use coherent allocation, so not much to do here. */
54}
55
56extern dma_addr_t dma_map_single(struct device *, void *, size_t,
57 enum dma_data_direction);
58static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
59 size_t size, enum dma_data_direction dir)
60{
61}
62
63extern dma_addr_t dma_map_page(struct device *, struct page *,
64 unsigned long, size_t size,
65 enum dma_data_direction);
66static inline void dma_unmap_page(struct device *dev, dma_addr_t address,
67 size_t size, enum dma_data_direction dir)
68{
69}
70
71extern int dma_map_sg(struct device *, struct scatterlist *, int,
72 enum dma_data_direction);
73static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
74 int nhwentries, enum dma_data_direction dir)
75{
76}
77
78extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t,
79 enum dma_data_direction);
80extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
81 enum dma_data_direction);
82
83static inline void dma_sync_single_range_for_device(struct device *dev,
84 dma_addr_t dma_handle, unsigned long offset, size_t size,
85 enum dma_data_direction direction)
86{
87 /* just sync everything for now */
88 dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
89}
90
91static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
92 size_t size, enum dma_data_direction dir)
93{
94}
95
96static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
97 int nents, enum dma_data_direction dir)
98{
99}
100
101static inline void dma_sync_single_range_for_cpu(struct device *dev,
102 dma_addr_t dma_handle, unsigned long offset, size_t size,
103 enum dma_data_direction direction)
104{
105 /* just sync everything for now */
106 dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
107}
108
109static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
110{
111 return 0;
112}
113
Sam Ravnborg49148022009-01-16 21:58:10 +1000114#else
Greg Ungerer9a4048a2009-01-29 15:35:34 +1000115#include <asm-generic/dma-mapping-broken.h>
Sam Ravnborg49148022009-01-16 21:58:10 +1000116#endif
Greg Ungerer9a4048a2009-01-29 15:35:34 +1000117
118#endif /* _M68K_DMA_MAPPING_H */