/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc
 * (the PCI and VIO busses).
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_PPC64

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}

/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

extern unsigned long dma_direct_offset;

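/*
 * Illustrative sketch, not part of this header: platform code points a
 * device at one of these tables before any mapping is attempted.  The
 * set_dma_ops() helper is assumed to exist elsewhere in this header,
 * and dev_is_behind_iommu() is a hypothetical predicate:
 *
 *	if (dev_is_behind_iommu(dev))
 *		set_dma_ops(dev, &dma_iommu_ops);
 *	else
 *		set_dma_ops(dev, &dma_direct_ops);
 */
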
#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
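
/*
 * Usage sketch (illustrative): a driver's probe routine negotiates its
 * addressing limit before mapping anything.  DMA_32BIT_MASK is assumed
 * to come from linux/dma-mapping.h in kernels of this vintage:
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;
 */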

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}
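
/*
 * Usage sketch (illustrative): coherent memory suits long-lived
 * structures shared between CPU and device, such as a descriptor ring.
 * The names ring, ring_dma and RING_BYTES are hypothetical:
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	(... program the device with ring_dma, use ring from the CPU ...)
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */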

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* We do nothing. */
}
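
/*
 * Usage sketch (illustrative): streaming mappings are taken per
 * transfer and released once the DMA has completed.  The handle should
 * be checked with dma_mapping_error(), defined later in this header;
 * buf and len are hypothetical:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	(... start the transfer and wait for it to finish ...)
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */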

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg->page);
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg->page) + sg->offset;
	}

	return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	/* We don't do anything here. */
}
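
/*
 * Usage sketch (illustrative): the caller fills a scatterlist, hands it
 * to dma_map_sg(), then reads device addresses back with
 * sg_dma_address()/sg_dma_len().  Fields are set directly to match the
 * sg->page usage above; sg_init_table() is assumed to exist in this
 * kernel, and page0/page1/len1 are hypothetical:
 *
 *	struct scatterlist sgl[2];
 *	int n;
 *
 *	sg_init_table(sgl, 2);
 *	sgl[0].page = page0; sgl[0].offset = 0; sgl[0].length = PAGE_SIZE;
 *	sgl[1].page = page1; sgl[1].offset = 0; sgl[1].length = len1;
 *
 *	n = dma_map_sg(dev, sgl, 2, DMA_FROM_DEVICE);
 *	if (n == 0)
 *		return -EIO;
 *	(... DMA into the n mapped segments ...)
 *	dma_unmap_sg(dev, sgl, 2, DMA_FROM_DEVICE);
 */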

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * and L1_CACHE_BYTES is derived from it, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
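
/*
 * Usage sketch (illustrative): a buffer destined for streaming DMA
 * should not share a cache line with unrelated data on non-coherent
 * hardware, so its allocation can be padded with the value returned
 * here.  ALIGN() is the standard kernel macro; payload_len is
 * hypothetical:
 *
 *	size_t sz = ALIGN(payload_len, dma_get_cache_alignment());
 *	buf = kmalloc(sz, GFP_KERNEL);
 */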

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
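
/*
 * Usage sketch (illustrative): with a long-lived mapping, only the
 * bytes the device actually wrote need handing back to the CPU.  The
 * implementations above still sync from the start of the mapping, but
 * callers should pass the precise range; handle and pkt_len are
 * hypothetical:
 *
 *	dma_sync_single_range_for_cpu(dev, handle, 0, pkt_len,
 *				      DMA_FROM_DEVICE);
 *	(... CPU examines the packet ...)
 *	dma_sync_single_range_for_device(dev, handle, 0, pkt_len,
 *				      DMA_FROM_DEVICE);
 */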

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */