// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>

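/*
 * Bus address 0 is handed back as the error cookie by the map methods
 * below and checked for in dma_direct_mapping_error().
 */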
#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

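/*
 * Check that a bus address is reachable under the device's DMA mask.  Only
 * complain for devices with at least a 32-bit mask, where a failure points
 * at a real addressing problem rather than an expected small-mask device.
 */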
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}

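/*
 * True if the last byte of the allocation is still addressable under the
 * device's coherent DMA mask.
 */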
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
}

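/*
 * Allocate coherent memory for the device: pick a GFP zone that can satisfy
 * the coherent DMA mask, prefer a CMA area when the context allows sleeping,
 * fall back to the page allocator, and if the pages still are not addressable
 * retry once in the narrower GFP_DMA zone before giving up.
 */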
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;

	/* GFP_DMA32 and GFP_DMA are no-ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	/*
	 * The page allocator may have given us pages above the coherent
	 * mask; free them and retry in ZONE_DMA unless we already did.
	 */
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	memset(page_address(page), 0, size);
	return page_address(page);
}

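/*
 * Free memory obtained from dma_direct_alloc(): give CMA the first chance
 * to reclaim the pages, otherwise return them to the page allocator.
 */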
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, get_order(size));
}

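/*
 * Map a single page: for the direct mapping this is just phys_to_dma()
 * plus a sanity check against the device's DMA mask.
 */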
static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}

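/*
 * Map a scatterlist entry by entry; a zero return tells the caller that
 * one of the segments did not fit the device's DMA mask.
 */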
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

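/* Match the DIRECT_MAPPING_ERROR cookie handed out by the map methods. */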
static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

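/*
 * The exported ops table; architectures that rely on the direct mapping
 * point a device's dma_map_ops at this.
 */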
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.mapping_error		= dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);
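/*
 * Usage sketch (illustrative only, not part of this file; error handling
 * is trimmed and the device pointer is assumed to come from the caller):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_direct_alloc(dev, PAGE_SIZE, &handle, GFP_KERNEL, 0);
 *
 *	if (buf) {
 *		// the device DMAs to/from 'handle' while the CPU uses 'buf'
 *		dma_direct_free(dev, PAGE_SIZE, buf, handle, 0);
 *	}
 */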