// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>

#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes (hence the
 * 24 address bits below), but some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

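/*
 * Check that a mapping of @size bytes at @dma_addr fits the device's DMA
 * mask.  The overflow is only reported for devices with a mask of at least
 * 32 bits, to avoid flooding the log for devices that are expected to be
 * unable to address most of memory.
 */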
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}

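/*
 * Check that the last byte of a buffer at @phys is still covered by the
 * device's coherent DMA mask once translated to a bus address.
 */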
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
}

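/*
 * Allocate a zeroed, physically contiguous buffer for @dev and return both
 * its kernel address and (via @dma_handle) its bus address: pick GFP zone
 * flags matching the coherent DMA mask, try CMA first when sleeping is
 * allowed, fall back to the page allocator, and retry from ZONE_DMA if the
 * result is not addressable by the device.
 */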
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;

	/* GFP_DMA32 and GFP_DMA are no-ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can only be used in a context that permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

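	/*
	 * A coherent mask between ARCH_ZONE_DMA_BITS and 32 bits (say 30
	 * bits) may not cover everything the zone we allocated from can
	 * hand out, so re-check the page and retry once from ZONE_DMA
	 * before giving up.
	 */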
68 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
69 __free_pages(page, page_order);
70 page = NULL;
71
72 if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
73 !(gfp & GFP_DMA)) {
74 gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
75 goto again;
76 }
77 }
78
Christoph Hellwig080321d2017-12-22 11:51:44 +010079 if (!page)
80 return NULL;
Christoph Hellwig002e6742018-01-09 16:30:23 +010081
Christoph Hellwig080321d2017-12-22 11:51:44 +010082 *dma_handle = phys_to_dma(dev, page_to_phys(page));
83 memset(page_address(page), 0, size);
84 return page_address(page);
Christoph Hellwig002e6742018-01-09 16:30:23 +010085}
86
Christoph Hellwig42ed6452018-02-02 09:51:14 +010087/*
88 * NOTE: this function must never look at the dma_addr argument, because we want
89 * to be able to use it as a helper for iommu implementations as well.
90 */
Christoph Hellwig19dca8c2017-12-23 13:46:06 +010091void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
Christoph Hellwig002e6742018-01-09 16:30:23 +010092 dma_addr_t dma_addr, unsigned long attrs)
93{
Christoph Hellwig080321d2017-12-22 11:51:44 +010094 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
95
96 if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
97 free_pages((unsigned long)cpu_addr, get_order(size));
Christoph Hellwig002e6742018-01-09 16:30:23 +010098}
99
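/*
 * Streaming mapping of a single page: the bus address is just the physical
 * address translated by phys_to_dma(), with no cache maintenance.  Addresses
 * that do not fit the device's mask are rejected with DIRECT_MAPPING_ERROR.
 */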
static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}

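/*
 * Map a scatterlist entry by entry.  Per the DMA API convention the whole
 * call fails (returns 0) as soon as one entry does not fit the device's
 * mask; on success the number of mapped entries is returned.
 */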
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

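/*
 * Report whether the direct mapping can satisfy @mask at all: with
 * CONFIG_ZONE_DMA anything down to ARCH_ZONE_DMA_BITS can be served,
 * otherwise masks below 32 bits are refused and left to an IOMMU.
 */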
int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < DMA_BIT_MASK(32))
		return 0;
#endif
	return 1;
}

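/* A mapping has failed iff dma_direct_map_page() returned DIRECT_MAPPING_ERROR. */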
static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

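/*
 * The exported ops table.  Architectures that need neither an IOMMU nor
 * cache flushing can use this as their default struct dma_map_ops, e.g. by
 * having their get_arch_dma_ops() return &dma_direct_ops.
 */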
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
	.is_phys		= 1,
};
EXPORT_SYMBOL(dma_direct_ops);