// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}

static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}

static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t addr = force_dma_unencrypted() ?
		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
	return addr + size - 1 <= dev->coherent_dma_mask;
}

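/*
 * Allocate zeroed coherent memory: prefer the CMA area when the caller
 * may sleep, fall back to the page allocator, and retry in a more
 * restrictive GFP zone when the result is not addressable.
 */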
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	void *ret;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;

	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

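	/*
	 * The zone hints above are only best effort: the page allocator
	 * may still return memory above the coherent mask, so free the
	 * pages and retry once more from ZONE_DMA where possible.
	 */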
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;
	ret = page_address(page);
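	/*
	 * For SEV, remap the allocation as decrypted in the kernel page
	 * tables and hand out the bus address without the encryption bit.
	 */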
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << page_order);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}

/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int page_order = get_order(size);

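	/* Restore the encrypted mapping before the pages are reused. */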
	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, page_order);
}

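/*
 * Streaming mappings need no per-mapping state here: translate the
 * physical address to a bus address and fail if it exceeds the mask.
 */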
static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}

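/*
 * Map each scatterlist segment independently; a single segment outside
 * the device's mask fails the whole list.
 */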
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

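/*
 * The direct mapping cannot do any bounce buffering, so a mask is only
 * supported if the zone allocator can reliably satisfy it.
 */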
int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < DMA_BIT_MASK(32))
		return 0;
#endif
	return 1;
}

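/*
 * A successful mapping never returns bus address 0, which leaves
 * DIRECT_MAPPING_ERROR (0) free to act as the error cookie.
 */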
static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
	.is_phys		= 1,
};
EXPORT_SYMBOL(dma_direct_ops);