/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc,
 * for use by the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle);

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

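/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * a driver on a non-snooping core that allocated its buffer "normally"
 * keeps it consistent by hand around each device access, e.g.
 *
 *	memcpy(buf, data, len);
 *	__dma_sync(buf, len, DMA_TO_DEVICE);
 *
 * where buf, data and len are hypothetical driver-side names.
 */
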
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)			((void)0)
#define __dma_sync(addr, size, rw)			((void)0)
#define __dma_sync_page(pg, off, sz, rw)		((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
extern struct dma_map_ops dma_direct_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
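
/*
 * Illustrative sketch (hypothetical platform code, not from this
 * header): a platform without an iommu would typically install the
 * direct ops when it sets up a device, e.g.
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 *
 * where pdev is a hypothetical struct pci_dev being probed.
 */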

/*
 * get_dma_offset()
 *
 * Get the dma offset on configurations where the dma address can be determined
 * from the physical address by looking at a simple offset.  Direct dma and
 * swiotlb use this function, but it is typically not used by implementations
 * with an iommu.
 */
static inline dma_addr_t get_dma_offset(struct device *dev)
{
	if (dev)
		return dev->archdata.dma_data.dma_offset;

	return PCI_DRAM_OFFSET;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	if (dev)
		dev->archdata.dma_data.dma_offset = off;
}
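
/*
 * Illustrative sketch (hypothetical platform code): a host bridge that
 * sees system memory at a fixed bus offset would record it once per
 * device, e.g.
 *
 *	set_dma_offset(&pdev->dev, 0x80000000ull);
 *
 * after which the direct ops add/subtract that offset on every map.
 */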

/* this will be removed soon */
#define flush_write_buffers()

#include <asm-generic/dma-mapping-common.h>

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

extern int dma_set_mask(struct device *dev, u64 dma_mask);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!dma_ops);

	cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag);

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);

	return cpu_addr;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);

	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
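
/*
 * Illustrative usage sketch (hypothetical driver code): a typical
 * coherent allocation pairs the two helpers above, e.g.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 *
 * where ring and ring_dma are hypothetical names.
 */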

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dev, dma_addr);

#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
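
/*
 * Illustrative sketch (hypothetical driver code): streaming mappings
 * made via the common ops included above should be checked before use,
 * e.g.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 */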

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return 0;
#endif

	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}
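
/*
 * Worked example (illustrative numbers): for a device with a 32-bit
 * dma_mask, addr 0xfffff000 with size 0x1000 passes (last byte is
 * 0xffffffff), while size 0x1001 fails the mask check above.
 */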

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_offset(dev);
}
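
/*
 * Worked example (illustrative numbers): with a dma_offset of
 * 0x80000000, phys_to_dma() maps physical 0x00001000 to bus address
 * 0x80001000, and dma_to_phys() maps it back.
 */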

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */