#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>

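/*
 * An all-ones bus address can never be a valid mapping, so it doubles
 * as the error sentinel: the map ops return it on failure and
 * dma_mapping_error() below tests for it.
 */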
#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)

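/*
 * Both are implemented in the platform code: dma_supported() reports
 * whether the platform can honour a given address mask, and
 * dma_set_mask() installs that mask for the device.
 */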
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);

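/*
 * Every allocation made through this API is consistent, so the
 * non-coherent variants simply alias the coherent ones and
 * dma_is_consistent() is unconditionally true.
 */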
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

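/*
 * A single global ops table serves all devices: get_dma_ops() ignores
 * its device argument and returns whatever table the platform code
 * installed in dma_ops.
 */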
extern struct dma_map_ops *dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return dma_ops;
}

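/*
 * The generic header implements dma_map_single(), dma_map_sg() and the
 * dma_sync_*() helpers in terms of get_dma_ops(), which is why it is
 * included only after get_dma_ops() has been defined.
 */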
#include <asm-generic/dma-mapping-common.h>

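/*
 * Coherent allocation is not provided by the generic header, so
 * dispatch it through the ops table by hand.
 */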
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

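/* A failed mapping is signalled by the DMA_ERROR_CODE sentinel. */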
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}

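/*
 * Example usage (a hedged sketch, not part of this header): a PCI
 * driver setting its mask and managing one coherent buffer.  The
 * variables pdev, buf and dma_handle are hypothetical.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *				 &dma_handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, dma_handle);
 */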
#endif