/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * the Cache bit off in the TLB entry.
 *
 * The default DMA address == Phys address, which is 0x8000_0000 based.
 */

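/*
 * Illustration (addresses below are examples only, not fixed values): a
 * coherent buffer backed by a page at phys 0x8020_0000 is handed to the
 * device as dma_handle 0x8020_0000 (bus addr == paddr), while the CPU
 * accesses it through an uncached alias obtained via ioremap_nocache(),
 * which lives in the 0x7000_0000 based kernel virtual region.
 */
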
#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	/* For now the bus address is exactly the same as paddr */
	*dma_handle = paddr;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache.
	 * Thus allocate normal cached memory.
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, it elides the need for cache maintenance,
	 *    saving cycles in flush code and bus bandwidth, as otherwise all
	 *    the lines of a buffer would need to be flushed out to memory.
	 *   -For coherent data, Reads/Writes to buffers terminate early in
	 *    the cache (vs. always going to memory), and thus are faster.
	 */
	if ((is_isa_arcv2() && ioc_exists) ||
	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		/* paddr doubles as the kernel linear address here */
		return (void *)(unsigned long)paddr;

	/* This is kernel Virtual address (0x7000_0000 based) */
	kvaddr = ioremap_nocache((unsigned long)paddr, size);
	if (kvaddr == NULL) {
		__free_pages(page, order);
		return NULL;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets kvaddr,
	 * hence can't be used to efficiently flush L1 and/or L2 which need
	 * paddr. Currently flush_cache_vmap() nukes the L1 cache completely,
	 * which will be optimized as a separate commit.
	 */
	dma_cache_wback_inv((unsigned long)paddr, size);

	return kvaddr;
}

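/*
 * Example usage (hypothetical driver code, not part of this file): drivers
 * never call arc_dma_alloc()/arc_dma_free() directly; they go through the
 * generic DMA API, which dispatches to arc_dma_ops:
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	(program 'dma' into the device; the CPU accesses the buffer via 'buf')
 *	dma_free_coherent(dev, PAGE_SIZE, buf, dma);
 */
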
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	/* bus addr == paddr == kernel linear addr, so virt_to_page() works */
	struct page *page = virt_to_page(dma_handle);

	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
	    !(is_isa_arcv2() && ioc_exists))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

/*
 * streaming DMA Mapping API...
 * CPU accesses the page via its normal paddr, thus it needs to be explicitly
 * made consistent before each use.
 */
static void _dma_cache_sync(unsigned long paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
	}
}

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long paddr = page_to_phys(page) + offset;
	_dma_cache_sync(paddr, size, dir);
	return (dma_addr_t)paddr;
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

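/*
 * Example usage (hypothetical driver code, not part of this file): a
 * streaming mapping for a buffer the device will read looks like:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * dma_map_single() resolves to arc_dma_map_page() above, whose
 * _dma_cache_sync() writes the buffer's cache lines back to memory before
 * the device fetches them.
 */
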
static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	/* Buffer ownership returns to CPU: invalidate any stale lines */
	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	/* Buffer ownership passes to device: write back any dirty lines */
	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync((unsigned long)sg_virt(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync((unsigned long)sg_virt(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

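/*
 * Example (hypothetical driver code, not part of this file): a driver
 * negotiating its DMA mask ends up in arc_dma_supported() via dma_set_mask():
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * Anything other than a full 32-bit mask is rejected.
 */
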
struct dma_map_ops arc_dma_ops = {
	.alloc			= arc_dma_alloc,
	.free			= arc_dma_free,
	.map_page		= arc_dma_map_page,
	.map_sg			= arc_dma_map_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
	.dma_supported		= arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);