// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

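/*
 * Worked example (illustrative, assuming the default of 24 bits):
 * DMA_BIT_MASK(ARCH_ZONE_DMA_BITS) expands to (1ULL << 24) - 1 = 0x00ffffff,
 * the highest address within the first 16 MiB.  A device whose
 * coherent_dma_mask is at or below this value can only be satisfied from
 * ZONE_DMA, which is why dma_direct_alloc() below adds GFP_DMA for such
 * devices.
 */
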
/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
        return sev_active();
}

static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
                const char *caller)
{
        if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
                if (!dev->dma_mask) {
                        dev_err(dev,
                                "%s: call on device without dma_mask\n",
                                caller);
                        return false;
                }

                if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
                        dev_err(dev,
                                "%s: overflow %pad+%zu of device mask %llx\n",
                                caller, &dma_addr, size, *dev->dma_mask);
                }
                return false;
        }
        return true;
}

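/*
 * Worked example for check_addr() (hypothetical numbers): a device with
 * *dev->dma_mask = DMA_BIT_MASK(24) asked to map 0x1000 bytes at bus address
 * 0x2000000 fails dma_capable(); because the mask is below DMA_BIT_MASK(32)
 * no overflow message is logged, and the function simply returns false, which
 * the mapping helpers below turn into DIRECT_MAPPING_ERROR or a zero
 * scatterlist count.
 */
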
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        dma_addr_t addr = force_dma_unencrypted() ?
                __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
        return addr + size - 1 <= dev->coherent_dma_mask;
}

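/*
 * Worked example for dma_coherent_ok() (hypothetical numbers, assuming the
 * common case where phys_to_dma() applies no bus offset): for a device with
 * coherent_dma_mask = DMA_BIT_MASK(32) and a 16 KiB allocation starting at
 * physical address 0x100000000, addr + size - 1 is 0x100003fff, which is
 * above 0xffffffff, so the check fails and dma_direct_alloc() retries from a
 * lower zone.
 */
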
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int page_order = get_order(size);
        struct page *page = NULL;
        void *ret;

        /* we always manually zero the memory once we are done: */
        gfp &= ~__GFP_ZERO;

        /* GFP_DMA32 and GFP_DMA are no-ops without the corresponding zones: */
        if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
                gfp |= GFP_DMA;
        if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;

again:
        /* CMA can only be used in a context that permits sleeping: */
        if (gfpflags_allow_blocking(gfp)) {
                page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                        dma_release_from_contiguous(dev, page, count);
                        page = NULL;
                }
        }
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                __free_pages(page, page_order);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) &&
                    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
                    !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        if (!page)
                return NULL;
        ret = page_address(page);
        if (force_dma_unencrypted()) {
                set_memory_decrypted((unsigned long)ret, 1 << page_order);
                *dma_handle = __phys_to_dma(dev, page_to_phys(page));
        } else {
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
        }
        memset(ret, 0, size);
        return ret;
}

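/*
 * Worked example of the retry logic above (hypothetical device with a 30-bit
 * coherent mask, both DMA zones configured): the mask is above
 * DMA_BIT_MASK(24) but at most DMA_BIT_MASK(32), so the first attempt uses
 * GFP_DMA32.  If the page that comes back lies above 1 GiB,
 * dma_coherent_ok() fails, the page is freed, and the second "goto again"
 * branch swaps GFP_DMA32 for GFP_DMA so the retry is satisfied from ZONE_DMA.
 */
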
/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned int page_order = get_order(size);

        if (force_dma_unencrypted())
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
        if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
                free_pages((unsigned long)cpu_addr, page_order);
}

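/*
 * Usage sketch (illustrative only; the exact dispatch path depends on the
 * kernel's dma-mapping glue): drivers do not call dma_direct_alloc() or
 * dma_direct_free() directly but go through the generic coherent API, which
 * invokes the .alloc/.free members of dma_direct_ops when no IOMMU is in use:
 *
 *      void *buf;
 *      dma_addr_t dma_handle;
 *
 *      buf = dma_alloc_coherent(dev, SZ_64K, &dma_handle, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, SZ_64K, buf, dma_handle);
 */
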
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

        if (!check_addr(dev, dma_addr, size, __func__))
                return DIRECT_MAPPING_ERROR;
        return dma_addr;
}

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
                if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
                        return 0;
                sg_dma_len(sg) = sg->length;
        }

        return nents;
}

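/*
 * Usage sketch for the streaming scatterlist path (illustrative only):
 * dma_map_sg() dispatches to .map_sg above; a return value of 0 signals
 * failure, anything else is the number of mapped entries:
 *
 *      int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *      if (!mapped)
 *              return -EIO;
 *      ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
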
int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
        if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
                return 0;
#else
        /*
         * Because 32-bit DMA masks are so common we expect every architecture
         * to be able to satisfy them - either by not supporting more physical
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
        if (mask < DMA_BIT_MASK(32))
                return 0;
#endif
        /*
         * Various PCI/PCIe bridges have broken support for > 32bit DMA even
         * if the device itself might support it.
         */
        if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
                return 0;
        return 1;
}

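/*
 * Usage sketch (illustrative only): dma_direct_supported() is what accepts or
 * rejects a mask that a driver requests through the generic mask-setting
 * helpers, e.g.:
 *
 *      if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
 *              dev_warn(dev, "64-bit DMA mask not supported\n");
 *
 * dma_set_mask_and_coherent() returns 0 on success and a negative errno when
 * the mask cannot be supported.
 */
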
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == DIRECT_MAPPING_ERROR;
}

const struct dma_map_ops dma_direct_ops = {
        .alloc = dma_direct_alloc,
        .free = dma_direct_free,
        .map_page = dma_direct_map_page,
        .map_sg = dma_direct_map_sg,
        .dma_supported = dma_direct_supported,
        .mapping_error = dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);
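
/*
 * Usage sketch for error handling (illustrative only): a streaming mapping
 * made through dma_map_single()/dma_map_page() must be checked with
 * dma_mapping_error(), which lands in dma_direct_mapping_error() above and
 * compares the handle against DIRECT_MAPPING_ERROR:
 *
 *      dma_addr_t dma_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(dev, dma_addr))
 *              return -EIO;
 */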