#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern struct bus_type pci_bus_type;

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);

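/*
 * Note: the machvec hooks (sh_mv.mv_consistent_{alloc,free}) take
 * precedence in dma_{alloc,free}_coherent() below; the consistent_*()
 * routines above are the generic fallback.
 */
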
#define dma_supported(dev, mask) (1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

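/*
 * Hypothetical usage sketch (illustrative only, not part of this API):
 * a driver restricting its device to 32-bit bus addresses.
 *
 *	if (dma_set_mask(dev, 0xffffffffULL) < 0)
 *		return -EIO;
 */
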
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	if (sh_mv.mv_consistent_alloc) {
		void *ret;

		ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
		if (ret != NULL)
			return ret;
	}

	return consistent_alloc(flag, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	if (sh_mv.mv_consistent_free) {
		int ret;

		ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
		if (ret == 0)
			return;
	}

	consistent_free(vaddr, size);
}

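/*
 * Hypothetical usage sketch (names are illustrative only): allocating
 * and releasing a coherent descriptor ring.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
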
static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	consistent_sync(vaddr, size, (int)dir);
}

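/*
 * consistent_sync() performs the cache writeback and/or invalidation
 * implied by @dir; see arch/sh/mm/consistent.c for the exact
 * per-direction behaviour.
 */
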
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(ptr, size, dir);

	return virt_to_bus(ptr);
}

#define dma_unmap_single(dev, addr, size, dir) do { } while (0)

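/*
 * Hypothetical usage sketch (illustrative only): streaming DMA on a
 * buffer the device will read.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... start the transfer, wait for completion ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */
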
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)

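/*
 * Hypothetical usage sketch (hw_set_entry() is illustrative only):
 * dma_map_sg() returns the number of usable entries, which in this
 * implementation is always @nents.
 *
 *	int i, count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 *	for (i = 0; i < count; i++)
 *		hw_set_entry(i, sglist[i].dma_address, sglist[i].length);
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
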
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

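/*
 * dma_{map,unmap}_page() are thin wrappers around the single-buffer
 * calls; note that page_address() assumes the page is mapped in the
 * kernel's linear address space.
 */
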
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}

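/*
 * Hypothetical usage sketch (process_rx() and buf are illustrative
 * only): letting the CPU look at a streaming buffer the device has
 * written to.
 *
 *	dma_sync_single(dev, addr, len, DMA_FROM_DEVICE);
 *	process_rx(buf, len);
 */
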
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

/*
 * The *_for_cpu() and *_for_device() variants are identical here; both
 * perform the same cache maintenance.  Plain inline wrappers are used
 * rather than __attribute__((alias)), which is not reliable for static
 * inline targets in a header.
 */
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family defines its own L1_CACHE_SHIFT, and
	 * L1_CACHE_BYTES is derived from it, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

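/*
 * Hypothetical usage sketch (illustrative only); note that this
 * implementation treats only a bus address of zero as a failure.
 *
 *	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(addr))
 *		return -ENOMEM;
 */
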
#endif /* __ASM_SH_DMA_MAPPING_H */