/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
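
/*
 * For reference (illustrative only, not used in this file): a driver using
 * the streaming API typically performs a sequence along the lines of
 *
 *	dma_addr_t dma = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... let the device perform the transfer ...
 *	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 *
 * On ARM those calls end up in the arm_dma_* implementations below via
 * arm_dma_ops, which is where the cache maintenance described above
 * actually happens.
 */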

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!arch_is_coherent())
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!arch_is_coherent())
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	if (!arch_is_coherent())
		__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	if (!arch_is_coherent())
		__dma_page_cpu_to_dev(page, offset, size, dir);
}

static int arm_dma_set_mask(struct device *dev, u64 dma_mask);

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
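
/*
 * arm_dma_ops is the default set of DMA operations for ARM devices; the
 * arm_dma_alloc(), arm_dma_free(), arm_dma_mmap() and scatter-gather
 * helpers it references are defined later in this file.  An IOMMU-backed
 * alternative, iommu_ops, is provided at the end of this file for devices
 * attached to a dma_iommu_mapping.
 */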

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & (u64)arm_dma_limit) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (u64)arm_dma_limit);
			return 0;
		}
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	void *ptr;
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	if (ptr) {
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocations can be mapped to user space, so let's
	 * set the VM_USERMAP flag too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}

struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page *page;
};

static struct dma_pool atomic_pool = {
	.size = SZ_256K,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
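
/*
 * The atomic pool backs coherent allocations made with GFP_ATOMIC (see
 * __alloc_from_pool() and __dma_alloc() below), where neither vmalloc
 * remapping nor CMA can be used.  Its size can be changed on the kernel
 * command line, e.g. "coherent_pool=4M"; memparse() accepts the usual
 * K/M/G suffixes.
 */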

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	if (IS_ENABLED(CONFIG_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
					   &page, NULL);
	if (ptr) {
		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->page = page;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
			(unsigned)pool->size / 1024);
		return 0;
	}
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			return;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size. This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->page + pageno;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}

static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (start < pool->vaddr || start > pool->vaddr + pool->size)
		return 0;

	if (start + size > pool->vaddr + pool->size) {
		WARN(1, "freeing wrong coherent size from pool\n");
		return 0;
	}

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}
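
/*
 * CMA-backed allocations: the buffer comes from the device's contiguous
 * memory area, and the kernel linear mapping covering it is rewritten
 * with the requested (uncached/writecombine) attributes via __dma_remap()
 * so that no conflicting cacheable alias remains while the device owns
 * the buffer.
 */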

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);
	__dma_remap(page, size, prot);

	*ret_page = page;
	return page_address(page);
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   size_t size)
{
	__dma_remap(page, size, pgprot_kernel);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret)		NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, size)			do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			 size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (arch_is_coherent() || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (gfp & GFP_ATOMIC)
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}
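
/*
 * Summary of the backend selection performed by __dma_alloc() above:
 *  - coherent or !MMU systems:	plain page allocation, no remapping;
 *  - GFP_ATOMIC requests:	served from the preallocated atomic pool;
 *  - CONFIG_CMA disabled:	buddy pages remapped with DMA attributes;
 *  - otherwise:		contiguous (CMA) allocation.
 */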

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot,
			   __builtin_return_address(0));
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	ret = remap_pfn_range(vma, vma->vm_start,
			      pfn + vma->vm_pgoff,
			      vma->vm_end - vma->vm_start,
			      vma->vm_page_prot);
#endif	/* CONFIG_MMU */

	return ret;
}
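
/*
 * For reference: a driver normally reaches arm_dma_mmap() by calling
 * dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size) from its own
 * mmap file operation, passing the values previously returned by
 * dma_alloc_coherent().
 */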

/*
 * Free a buffer as defined by the above mapping.
 */
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (arch_is_coherent() || nommu()) {
		__dma_free_buffer(page, size);
	} else if (!IS_ENABLED(CONFIG_CMA)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		if (__free_from_pool(cpu_addr, size))
			return;
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, size);
	}
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask < (u64)arm_dma_limit)
		return 0;
	return 1;
}
EXPORT_SYMBOL(dma_supported);
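
/*
 * For reference: drivers typically pick their DMA mask with, e.g.,
 *	dma_set_mask(dev, DMA_BIT_MASK(32));
 * which is validated against arm_dma_limit by dma_supported() above and
 * wired up here as arm_dma_ops.set_dma_mask.
 */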

static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */
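
/*
 * The two helpers below implement a simple bitmap-based allocator for the
 * device's IO virtual address space: each bit represents (1 << order)
 * pages starting at mapping->base, and mapping->lock protects the bitmap.
 */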

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	unsigned long flags;

	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << mapping->order) - 1) >> mapping->order;

	if (order > mapping->order)
		align = (1 << (order - mapping->order)) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
					   count, align);
	if (start > mapping->bits) {
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);

	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start = (addr - mapping->base) >>
			     (mapping->order + PAGE_SHIFT);
	unsigned int count = ((size >> PAGE_SHIFT) +
			      (1 << mapping->order) - 1) >> mapping->order;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
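
/*
 * Allocate the backing pages for an IOMMU buffer.  Physical contiguity is
 * not required, so the allocator opportunistically grabs the largest
 * power-of-two chunk it can (falling back to smaller orders on failure)
 * and records every page in the returned array.
 */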
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
		if (!pages[i])
			goto error;

		if (order)
			split_page(pages[i], order);
		j = 1 << order;
		while (--j)
			pages[i + j] = pages[i] + j;

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
{
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i;
	for (i = 0; i < count; i++)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return 0;
}

/*
 * Create a CPU mapping for the specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;

	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;

	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

static struct page **__iommu_get_pages(void *cpu_addr)
{
	struct vm_struct *area;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	pages = __iommu_alloc_buffer(dev, size, gfp);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size);
	return NULL;
}

static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}

/*
 * Free a buffer as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr);
	size = PAGE_ALIGN(size);

	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!arch_is_coherent())
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!arch_is_coherent())
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		if (!arch_is_coherent())
			__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		if (!arch_is_coherent())
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, len = PAGE_ALIGN(size + offset);

	if (!arch_is_coherent())
		__dma_page_cpu_to_dev(page, offset, size, dir);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!arch_is_coherent())
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	if (!arch_is_coherent())
		__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: size of the valid IO address space
 * @order: accuracy of the IO addresses allocations
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device() function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
			 int order)
{
	unsigned int count = size >> (PAGE_SHIFT + order);
	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int err = -ENOMEM;

	if (!count)
		return ERR_PTR(-EINVAL);

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmap)
		goto err2;

	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;
	mapping->order = order;
	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err3;

	kref_init(&mapping->kref);
	return mapping;
err3:
	kfree(mapping->bitmap);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}

static void release_iommu_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	kfree(mapping->bitmap);
	kfree(mapping);
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	set_dma_ops(dev, &iommu_ops);

	pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}
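
/*
 * Typical usage (illustrative only; the base, size and order values are
 * platform specific):
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M, 4);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	err = arm_iommu_attach_device(dev, mapping);
 *
 * After a successful attach, every dma_map_ops call made on the device is
 * handled by iommu_ops above.
 */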

#endif