/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

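/*
 * Choose the kernel/userspace mapping attributes for a DMA buffer: a
 * non-coherent device (or an explicit DMA_ATTR_WRITE_COMBINE request)
 * gets a write-combine, i.e. Normal non-cacheable, mapping so that the
 * CPU's view of the buffer matches what the device sees.
 */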
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

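/*
 * Atomic (non-blocking) allocations for non-coherent devices cannot
 * create new non-cacheable mappings, so they are carved out of the
 * pre-mapped pool above instead.  Its size defaults to 256 KiB and can
 * be changed on the kernel command line, e.g. "coherent_pool=2M".
 */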
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

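/*
 * Allocate the backing pages for a coherent buffer: restrict to
 * ZONE_DMA when the device's coherent mask requires 32-bit addresses,
 * and prefer CMA over the swiotlb coherent allocator whenever the
 * caller is allowed to sleep.
 */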
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), flags);
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

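/*
 * Top-level coherent allocation.  Coherent devices can use the lowmem
 * address returned by __dma_alloc_coherent() directly; for non-coherent
 * devices the linear alias is flushed and a non-cacheable remap of the
 * same pages is handed back.  A driver would typically land here via
 * the generic DMA API (a sketch; the device and size are hypothetical):
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, buf, dma);
 */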
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

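/*
 * Streaming mappings: swiotlb_map_page() handles any bounce buffering,
 * and these wrappers add the cache maintenance that non-coherent
 * devices require.  Callers that do their own maintenance can pass
 * DMA_ATTR_SKIP_CPU_SYNC to suppress it.  Typical (hypothetical)
 * driver-side usage:
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 */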
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

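/*
 * Scatter-gather variants of the above.  The map side walks only the
 * 'ret' entries actually mapped (the DMA API allows this to differ
 * from nelems), applying the same per-element cache maintenance.
 */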
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

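/*
 * Ownership transfers for an existing mapping: syncing for the CPU
 * invalidates cache lines that would shadow device-written data, while
 * syncing for the device cleans dirty lines out to memory.  A
 * (hypothetical) driver reusing a mapping might do:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */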
static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
			    PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}

static const struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};

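/*
 * swiotlb_dma_ops above is the default set of arm64 DMA operations; it
 * is installed per device by arch_setup_dma_ops() at the end of this
 * file.  The atomic pool initialised below backs its non-blocking,
 * non-coherent allocations.
 */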
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, GFP_KERNEL);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

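/*
 * Some devices are described by firmware as unable to perform DMA at
 * all (e.g. via ACPI, when a usable _CCA attribute is absent).  Such
 * devices are given the "dummy" operations below, so every DMA API
 * call fails cleanly instead of silently corrupting memory.
 */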
/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

const struct dma_map_ops dummy_dma_ops = {
	.alloc = __dummy_alloc,
	.free = __dummy_free,
	.mmap = __dummy_mmap,
	.map_page = __dummy_map_page,
	.unmap_page = __dummy_unmap_page,
	.map_sg = __dummy_map_sg,
	.unmap_sg = __dummy_unmap_sg,
	.sync_single_for_cpu = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu = __dummy_sync_sg,
	.sync_sg_for_device = __dummy_sync_sg,
	.mapping_error = __dummy_mapping_error,
	.dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

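/*
 * dma_debug_init() preallocates the entries used to track mappings;
 * the checking itself only takes effect when the kernel is built with
 * CONFIG_DMA_API_DEBUG.
 */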
static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

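/*
 * IOMMU-backed allocation: when the caller may sleep, build the buffer
 * from individual pages that are contiguous in IOVA space and remapped
 * contiguously in the kernel.  In atomic context nothing can be
 * remapped, so fall back to a single physically contiguous allocation
 * (taken from the atomic pool for non-coherent devices).
 */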
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp)) {
		struct page **pages;
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	} else {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 3 things depending on how it was allocated:
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

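/*
 * Streaming ops on top of the IOMMU: iommu_dma_map_page() and friends
 * manage the IOVA space and page tables, while these wrappers apply
 * the same cache-maintenance rules as the swiotlb path above.
 * DMA_ATTR_SKIP_CPU_SYNC again skips that maintenance, e.g. in a
 * (hypothetical) caller:
 *
 *	dma = dma_map_page_attrs(dev, page, 0, size, DMA_TO_DEVICE,
 *				 DMA_ATTR_SKIP_CPU_SYNC);
 */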
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif /* CONFIG_IOMMU_DMA */

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}
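/*
 * arch_setup_dma_ops() is called by the firmware glue once a device's
 * DMA configuration is known (of_dma_configure() for devicetree,
 * acpi_dma_configure() for ACPI), which supplies the DMA window, any
 * IOMMU, and the coherency flag seen above.
 */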