/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

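/*
 * Allocate from the pre-reserved atomic pool, for callers that cannot
 * block (see atomic_pool_init() below).
 */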
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

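/*
 * swiotlb-backed coherent allocation: coherent devices can use the kernel
 * linear alias directly; non-coherent devices get a non-cacheable remapping,
 * or a chunk of the atomic pool when the caller cannot block.
 */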
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, __builtin_return_address(0));
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
no_mem:
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
}

static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}


static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return __swiotlb_mmap_pfn(vma, pfn, size);
}

static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));

	return __swiotlb_get_sgtable_page(sgt, page, size);
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}

static const struct dma_map_ops arm64_swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};

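/*
 * Reserve, zero and remap (non-cacheable) the pool backing
 * __alloc_from_pool(), taken from CMA when available and from
 * alloc_pages() otherwise.
 */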
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, GFP_KERNEL);
	else
		page = alloc_pages(GFP_DMA32, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

const struct dma_map_ops dummy_dma_ops = {
	.alloc = __dummy_alloc,
	.free = __dummy_free,
	.mmap = __dummy_mmap,
	.map_page = __dummy_map_page,
	.unmap_page = __dummy_unmap_page,
	.map_sg = __dummy_map_sg,
	.unmap_sg = __dummy_unmap_sg,
	.sync_single_for_cpu = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu = __dummy_sync_sg,
	.sync_sg_for_device = __dummy_sync_sg,
	.mapping_error = __dummy_mapping_error,
	.dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
		   TAINT_CPU_OUT_OF_SPEC,
		   "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
		   ARCH_DMA_MINALIGN, cache_line_size());

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

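/*
 * IOMMU-backed DMA ops, built on top of the generic dma-iommu layer.
 */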
#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

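/*
 * IOMMU counterpart of __dma_alloc(): a single physically contiguous
 * allocation (atomic pool or alloc_pages()) for non-blocking callers,
 * CMA for DMA_ATTR_FORCE_CONTIGUOUS, and iommu_dma_alloc() otherwise.
 */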
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), gfp);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (addr) {
			memset(addr, 0, size);
			if (!coherent)
				__dma_flush_area(page_to_virt(page), iosize);
		} else {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif	/* CONFIG_IOMMU_DMA */

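/*
 * Called when a device's DMA configuration is set up: install the default
 * swiotlb ops, switch to the IOMMU ops if an IOMMU translates this device,
 * and to the Xen ops when running as a Xen initial domain.
 */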
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &arm64_swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;
	}
#endif
}