/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

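/*
 * Non-zero once we know SWIOTLB bouncing may actually be needed, i.e.
 * swiotlb=force was passed or RAM extends beyond the 32-bit DMA limit
 * (see arm64_dma_init()).
 */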
static int swiotlb __ro_after_init;

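/*
 * Derive the pgprot for a DMA mapping: non-coherent devices (or an
 * explicit DMA_ATTR_WRITE_COMBINE request) get a write-combining
 * mapping; coherent devices keep the cacheable one.
 */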
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
                return pgprot_writecombine(prot);
        return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);

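/*
 * Carve @size bytes out of the pre-remapped atomic pool; this is the
 * only allocation path that is safe in non-blocking context. On
 * success, *ret_page points at the first backing page and the returned
 * pointer is the zeroed non-cacheable alias.
 */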
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                memset(ptr, 0, size);
        }

        return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}

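/*
 * Allocate the backing memory for a coherent buffer: CMA when it is
 * available and we may block, otherwise fall back to the SWIOTLB
 * allocator. The returned kernel alias is still cacheable; callers
 * needing a non-cacheable view must remap it themselves.
 */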
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  unsigned long attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
                struct page *page;
                void *addr;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size), flags);
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
                memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}

static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                unsigned long attrs)
{
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);

        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        freed = dma_release_from_contiguous(dev,
                                        phys_to_page(paddr),
                                        size >> PAGE_SHIFT);
        if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

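/*
 * dma_map_ops .alloc for the SWIOTLB path. Three cases:
 *  - non-coherent device, atomic context: hand out a chunk of the
 *    pre-allocated, pre-remapped atomic pool;
 *  - coherent device: return the cacheable lowmem alias directly;
 *  - non-coherent device, blocking: allocate, clean the kernel alias
 *    out of the caches and create a non-cacheable vmap alias.
 */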
static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         unsigned long attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        bool coherent = is_device_dma_coherent(dev);
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

        size = PAGE_ALIGN(size);

        if (!coherent && !gfpflags_allow_blocking(flags)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* no need for non-cacheable mapping if coherent */
        if (coherent)
                return ptr;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_area(ptr, size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot, NULL);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        *dma_handle = DMA_ERROR_CODE;
        return NULL;
}

static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       unsigned long attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        size = PAGE_ALIGN(size);

        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
                vunmap(vaddr);
        }
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

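/*
 * The streaming DMA wrappers below delegate the actual (possibly
 * bounced) mapping to the generic SWIOTLB code and add the cache
 * maintenance that non-coherent devices require, unless the caller
 * opted out with DMA_ATTR_SKIP_CPU_SYNC.
 */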
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
{
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  unsigned long attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);

        return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
}

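/*
 * Map @pfn + vma->vm_pgoff into a userspace vma, after checking that
 * the requested window actually lies within the allocation.
 */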
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
                              unsigned long pfn, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
                                        PAGE_SHIFT;
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}

static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          unsigned long attrs)
{
        int ret;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        return __swiotlb_mmap_pfn(vma, pfn, size);
}

static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
                                      struct page *page, size_t size)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

        return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t handle, size_t size,
                                 unsigned long attrs)
{
        struct page *page = phys_to_page(dma_to_phys(dev, handle));

        return __swiotlb_get_sgtable_page(sgt, page, size);
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        if (swiotlb)
                return swiotlb_dma_supported(hwdev, mask);
        return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
        if (swiotlb)
                return swiotlb_dma_mapping_error(hwdev, addr);
        return 0;
}

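/*
 * The default dma_map_ops installed by arch_setup_dma_ops() when no
 * IOMMU translation is in use. Drivers never call these directly; a
 * purely illustrative sketch of the generic API they sit behind:
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (buf) {
 *		...
 *		dma_free_coherent(dev, SZ_4K, buf, dma);
 *	}
 *
 * which dispatches to __dma_alloc()/__dma_free() below via dev->dma_ops.
 */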
static const struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
        .mmap = __swiotlb_mmap,
        .get_sgtable = __swiotlb_get_sgtable,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = __swiotlb_dma_supported,
        .mapping_error = __swiotlb_dma_mapping_error,
};

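/*
 * Set up the atomic pool at boot: allocate coherent_pool bytes (from
 * CMA if available, GFP_DMA pages otherwise), flush the cacheable
 * alias, remap the pages non-cacheable and hand them to a genpool so
 * that __alloc_from_pool() can serve atomic allocations later.
 */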
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                 pool_size_order, GFP_KERNEL);
        else
                page = alloc_pages(GFP_DMA, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_area(page_addr, atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);

                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  (void *)PAGE_SHIFT);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
               atomic_pool_size / 1024);
        return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        unsigned long attrs)
{
        return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
                          unsigned long attrs)
{
        return 0;
}

static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
                             unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
                                dma_addr_t dev_addr, size_t size,
                                enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
                            struct scatterlist *sgl, int nelems,
                            enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
        return 0;
}

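/*
 * Ops that fail every operation, for devices that must not perform
 * DMA at all (e.g. when the firmware description says DMA is not
 * supported).
 */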
const struct dma_map_ops dummy_dma_ops = {
        .alloc = __dummy_alloc,
        .free = __dummy_free,
        .mmap = __dummy_mmap,
        .map_page = __dummy_map_page,
        .unmap_page = __dummy_unmap_page,
        .map_sg = __dummy_map_sg,
        .unmap_sg = __dummy_unmap_sg,
        .sync_single_for_cpu = __dummy_sync_single,
        .sync_single_for_device = __dummy_sync_single,
        .sync_sg_for_cpu = __dummy_sync_sg,
        .sync_sg_for_device = __dummy_sync_sg,
        .mapping_error = __dummy_mapping_error,
        .dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

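/*
 * Decide once at boot whether SWIOTLB bouncing can ever be required:
 * either it was forced on the command line, or some RAM lies beyond
 * the reach of 32-bit DMA masks.
 */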
static int __init arm64_dma_init(void)
{
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb = 1;

        return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
        __dma_flush_area(virt, PAGE_SIZE);
}

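/*
 * dma_map_ops .alloc for devices behind an IOMMU. Atomic callers get
 * physically contiguous memory (lowmem or the atomic pool) mapped into
 * IOVA space; DMA_ATTR_FORCE_CONTIGUOUS callers get a remapped CMA
 * allocation; everyone else gets discontiguous pages from
 * iommu_dma_alloc() remapped into a contiguous kernel VA range.
 */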
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
                                 unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        size_t iosize = size;
        void *addr;

        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;

        size = PAGE_ALIGN(size);

        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
         */
        gfp |= __GFP_ZERO;

        if (!gfpflags_allow_blocking(gfp)) {
                struct page *page;
                /*
                 * In atomic context we can't remap anything, so we'll only
                 * get the virtually contiguous buffer we need by way of a
                 * physically contiguous allocation.
                 */
                if (coherent) {
                        page = alloc_pages(gfp, get_order(size));
                        addr = page ? page_address(page) : NULL;
                } else {
                        addr = __alloc_from_pool(size, &page, gfp);
                }
                if (!addr)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        if (coherent)
                                __free_pages(page, get_order(size));
                        else
                                __free_from_pool(addr, size);
                        addr = NULL;
                }
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
                struct page *page;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size), gfp);
                if (!page)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
                if (!coherent)
                        __dma_flush_area(page_to_virt(page), iosize);

                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
                if (!addr) {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                }
        } else {
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
                struct page **pages;

                pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
                                        handle, flush_page);
                if (!pages)
                        return NULL;

                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
                        iommu_dma_free(dev, pages, iosize, handle);
        }
        return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, unsigned long attrs)
{
        size_t iosize = size;

        size = PAGE_ALIGN(size);
        /*
         * @cpu_addr will be one of 4 things depending on how it was allocated:
         * - A remapped array of pages for contiguous allocations.
         * - A remapped array of pages from iommu_dma_alloc(), for all
         *   non-atomic allocations.
         * - A non-cacheable alias from the atomic pool, for atomic
         *   allocations by non-coherent devices.
         * - A normal lowmem address, for atomic allocations by
         *   coherent devices.
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_from_pool(cpu_addr, size);
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                struct page *page = vmalloc_to_page(cpu_addr);

                iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)) {
                struct vm_struct *area = find_vm_area(cpu_addr);

                if (WARN_ON(!area || !area->pages))
                        return;
                iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              unsigned long attrs)
{
        struct vm_struct *area;
        int ret;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                unsigned long pfn = vmalloc_to_pfn(cpu_addr);
                return __swiotlb_mmap_pfn(vma, pfn, size);
        }

        area = find_vm_area(cpu_addr);
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
                               size_t size, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                struct page *page = vmalloc_to_page(cpu_addr);
                return __swiotlb_get_sgtable_page(sgt, page, size);
        }

        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
                                         GFP_KERNEL);
}

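/*
 * Unlike the plain SWIOTLB path, the device address here is an IOVA,
 * so the sync routines must translate it back to a physical address
 * via the IOMMU before doing cache maintenance on the CPU alias.
 */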
static void __iommu_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dev_addr, size_t size,
                                        enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
                                           dma_addr_t dev_addr, size_t size,
                                           enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

        if (!iommu_dma_mapping_error(dev, dev_addr) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_device(dev, dev_addr, size, dir);

        return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
                                    struct scatterlist *sgl, int nelems,
                                    enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
                                       struct scatterlist *sgl, int nelems,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
                                unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);

        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);

        return iommu_dma_map_sg(dev, sgl, nelems,
                                dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
        .alloc = __iommu_alloc_attrs,
        .free = __iommu_free_attrs,
        .mmap = __iommu_mmap_attrs,
        .get_sgtable = __iommu_get_sgtable,
        .map_page = __iommu_map_page,
        .unmap_page = __iommu_unmap_page,
        .map_sg = __iommu_map_sg_attrs,
        .unmap_sg = __iommu_unmap_sg_attrs,
        .sync_single_for_cpu = __iommu_sync_single_for_cpu,
        .sync_single_for_device = __iommu_sync_single_for_device,
        .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
        .sync_sg_for_device = __iommu_sync_sg_for_device,
        .map_resource = iommu_dma_map_resource,
        .unmap_resource = iommu_dma_unmap_resource,
        .mapping_error = iommu_dma_mapping_error,
};

/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
        struct list_head list;
        struct device *dev;
        const struct iommu_ops *ops;
        u64 dma_base;
        u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);

static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
                            u64 dma_base, u64 size)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        /*
         * If the IOMMU driver has the DMA domain support that we require,
         * then the IOMMU core will have already configured a group for this
         * device, and allocated the default domain for that group.
         */
        if (!domain)
                goto out_err;

        if (domain->type == IOMMU_DOMAIN_DMA) {
                if (iommu_dma_init_domain(domain, dma_base, size, dev))
                        goto out_err;

                dev->dma_ops = &iommu_dma_ops;
        }

        return true;
out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
        return false;
}

static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
                               u64 dma_base, u64 size)
{
        struct iommu_dma_notifier_data *iommudata;

        iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
        if (!iommudata)
                return;

        iommudata->dev = dev;
        iommudata->ops = ops;
        iommudata->dma_base = dma_base;
        iommudata->size = size;

        mutex_lock(&iommu_dma_notifier_lock);
        list_add(&iommudata->list, &iommu_dma_masters);
        mutex_unlock(&iommu_dma_notifier_lock);
}

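/*
 * Bus notifier callback: when a deferred device is about to bind a
 * driver, retry the attachment queued above and drop it from the list
 * on success.
 */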
static int __iommu_attach_notifier(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        struct iommu_dma_notifier_data *master, *tmp;

        if (action != BUS_NOTIFY_BIND_DRIVER)
                return 0;

        mutex_lock(&iommu_dma_notifier_lock);
        list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
                if (data == master->dev && do_iommu_attach(master->dev,
                                master->ops, master->dma_base, master->size)) {
                        list_del(&master->list);
                        kfree(master);
                        break;
                }
        }
        mutex_unlock(&iommu_dma_notifier_lock);
        return 0;
}

static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
        struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
        int ret;

        if (!nb)
                return -ENOMEM;

        nb->notifier_call = __iommu_attach_notifier;

        ret = bus_register_notifier(bus, nb);
        if (ret) {
                pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
                        bus->name);
                kfree(nb);
        }
        return ret;
}

static int __init __iommu_dma_init(void)
{
        int ret;

        ret = iommu_dma_init();
        if (!ret)
                ret = register_iommu_dma_ops_notifier(&platform_bus_type);
        if (!ret)
                ret = register_iommu_dma_ops_notifier(&amba_bustype);
#ifdef CONFIG_PCI
        if (!ret)
                ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif
        return ret;
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_group *group;

        if (!ops)
                return;
        /*
         * TODO: As a concession to the future, we're ready to handle being
         * called both early and late (i.e. after bus_add_device). Once all
         * the platform bus code is reworked to call us late and the notifier
         * junk above goes away, move the body of do_iommu_attach here.
         */
        group = iommu_group_get(dev);
        if (group) {
                do_iommu_attach(dev, ops, dma_base, size);
                iommu_group_put(group);
        } else {
                queue_iommu_attach(dev, ops, dma_base, size);
        }
}

void arch_teardown_dma_ops(struct device *dev)
{
        dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

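/*
 * Main entry point, called by the bus code as devices are created:
 * install the SWIOTLB ops as the default, record device coherency,
 * and let the IOMMU path above override the ops where applicable.
 */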
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        if (!dev->dma_ops)
                dev->dma_ops = &swiotlb_dma_ops;

        dev->archdata.dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);
}