/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>

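/*
 * Make a buffer safe for the device to access: for DMA_FROM_DEVICE,
 * invalidate the dcache so no dirty line can be written back over data
 * the device is about to provide; for DMA_TO_DEVICE and DMA_BIDIRECTIONAL,
 * flush the CPU's writes out to memory.
 */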
static inline void __dma_sync_for_device(void *vaddr, size_t size,
				enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * We only need to write back the caches here, but the
		 * Nios2 flush instruction does both writeback and
		 * invalidate, so fall through.
		 */
	case DMA_BIDIRECTIONAL: /* flush and invalidate */
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}

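/*
 * Make a buffer visible to the CPU again once the device is done with it:
 * invalidate any lines the cache may have filled while the device owned
 * the buffer. Nothing needs to be done for DMA_TO_DEVICE.
 */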
static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
				enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		break;
	default:
		BUG();
	}
}

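/*
 * Allocate a coherent buffer: get zeroed pages, report their physical
 * address through *dma_handle, flush any cached copies out of the dcache
 * and hand back an uncached alias of the allocation.
 */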
static void *nios2_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* optimized page clearing */
	gfp |= __GFP_ZERO;

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		*dma_handle = virt_to_phys(ret);
		flush_dcache_range((unsigned long) ret,
			(unsigned long) ret + size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

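/* Convert the uncached alias back to the cached address and free the pages. */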
static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}

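/*
 * Map a scatterlist for DMA: record each entry's physical address and do
 * the device-direction cache maintenance, unless the caller asked us to
 * skip it with DMA_ATTR_SKIP_CPU_SYNC.
 */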
static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;

	for_each_sg(sg, sg, nents, i) {
		void *addr = sg_virt(sg);

		if (!addr)
			continue;

		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync_for_device(addr, sg->length, direction);
	}

	return nents;
}

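/* Map a single page for DMA; the bus address is just the physical address. */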
static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction,
			unsigned long attrs)
{
	void *addr = page_address(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_for_device(addr, size, direction);

	return page_to_phys(page) + offset;
}

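/* Tear down a single-page mapping, handing the buffer back to the CPU. */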
static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}

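/*
 * Tear down a scatterlist mapping. Only DMA_FROM_DEVICE and
 * DMA_BIDIRECTIONAL transfers need any CPU-side cache maintenance here.
 */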
static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction,
		unsigned long attrs)
{
	void *addr;
	int i;

	if (direction == DMA_TO_DEVICE)
		return;

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	for_each_sg(sg, sg, nhwentries, i) {
		addr = sg_virt(sg);
		if (addr)
			__dma_sync_for_cpu(addr, sg->length, direction);
	}
}

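/* Give ownership of a mapped buffer back to the CPU. */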
static void nios2_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}

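/* Give ownership of a mapped buffer back to the device. */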
static void nios2_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}

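/* Scatterlist variant of the CPU-ownership sync. */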
static void nios2_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}

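/* Scatterlist variant of the device-ownership sync. */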
static void nios2_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_device(sg_virt(sg), sg->length, direction);
}

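/*
 * The dma_map_ops table for Nios2; the generic DMA API dispatches to
 * these callbacks.
 */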
struct dma_map_ops nios2_dma_ops = {
	.alloc = nios2_dma_alloc,
	.free = nios2_dma_free,
	.map_page = nios2_dma_map_page,
	.unmap_page = nios2_dma_unmap_page,
	.map_sg = nios2_dma_map_sg,
	.unmap_sg = nios2_dma_unmap_sg,
	.sync_single_for_device = nios2_dma_sync_single_for_device,
	.sync_single_for_cpu = nios2_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu,
	.sync_sg_for_device = nios2_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(nios2_dma_ops);