/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc, covering
 * the PCI and VIO busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       struct dma_attrs *attrs);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     struct dma_attrs *attrs);


#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
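/*
 * Illustrative only: a minimal sketch, not taken from any in-tree driver,
 * of how a non-snooping platform typically uses these helpers.  A buffer
 * from the normal allocator has to be flushed or invalidated around each
 * transfer with __dma_sync(), whereas memory from __dma_alloc_coherent()
 * is mapped uncached and needs no per-transfer maintenance:
 *
 *	void example_prepare_tx(void *buf, size_t len)
 *	{
 *		__dma_sync(buf, len, DMA_TO_DEVICE);
 *		... hand the bus address of buf to the DMA engine ...
 *	}
 */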

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
extern struct dma_map_ops dma_direct_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out-of-line call but it is not needed yet. The
	 * only ISA DMA device we support is the floppy, and the floppy
	 * driver has a hack to get hold of a device for us directly.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

/*
 * get_dma_offset()
 *
 * Get the dma offset on configurations where the dma address can be determined
 * from the physical address by looking at a simple offset. Direct dma and
 * swiotlb use this function, but it is typically not used by implementations
 * with an iommu.
 */
static inline dma_addr_t get_dma_offset(struct device *dev)
{
	if (dev)
		return dev->archdata.dma_data.dma_offset;

	return PCI_DRAM_OFFSET;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	if (dev)
		dev->archdata.dma_data.dma_offset = off;
}
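/*
 * Illustrative only: hypothetical platform setup code (not from this
 * tree) for a bus that has no iommu.  It installs the direct ops and,
 * when its RAM does not appear 1:1 on the bus, records the fixed offset
 * that get_dma_offset() will later return:
 *
 *	static void example_bus_setup_dma(struct device *dev)
 *	{
 *		set_dma_ops(dev, &dma_direct_ops);
 *		set_dma_offset(dev, 0x80000000ull);
 *	}
 */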

/* this will be removed soon */
#define flush_write_buffers()

#include <asm-generic/dma-mapping-common.h>

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

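/*
 * Illustrative only: a typical (hypothetical) driver probe negotiates its
 * addressing width with dma_set_mask(), which consults the dma_supported()
 * hook of the installed ops.  dma_set_mask() returns 0 on success:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */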
extern int dma_set_mask(struct device *dev, u64 dma_mask);

#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!dma_ops);

	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);

	return cpu_addr;
}

#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);

	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
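/*
 * Illustrative only: hypothetical consumer code.  Drivers go through the
 * dma_alloc_coherent()/dma_free_coherent() wrappers above rather than
 * calling the ops directly:
 *
 *	dma_addr_t bus;
 *	void *va = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	... program the device with "bus", access the buffer through "va" ...
 *	dma_free_coherent(dev, PAGE_SIZE, va, bus);
 */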

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dev, dma_addr);

#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return 0;
#endif

	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_offset(dev);
}
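/*
 * For example (hypothetical numbers): with get_dma_offset() returning
 * 0x80000000, a buffer at physical address 0x01000000 is seen by the
 * device at bus address 0x81000000, and dma_to_phys() undoes the
 * translation.
 */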

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
			     void *, dma_addr_t, size_t);
#define ARCH_HAS_DMA_MMAP_COHERENT


static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */