/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/dma-mapping-fast.h>
#include <linux/io-pgtable-fast.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>
#include <trace/events/iommu.h>

#include <soc/qcom/secure_buffer.h>
#include <linux/arm-smmu-errata.h>

/* some redundant definitions... :( TODO: move to io-pgtable-fast.h */
#define FAST_PAGE_SHIFT		12
#define FAST_PAGE_SIZE		(1UL << FAST_PAGE_SHIFT)
#define FAST_PAGE_MASK		(~(FAST_PAGE_SIZE - 1))
#define FAST_PTE_ADDR_MASK	((av8l_fast_iopte)0xfffffffff000)
#define FAST_MAIR_ATTR_IDX_CACHE	1
#define FAST_PTE_ATTRINDX_SHIFT		2
#define FAST_PTE_ATTRINDX_MASK		0x7
#define FAST_PTE_SH_SHIFT		8
#define FAST_PTE_SH_MASK	(((av8l_fast_iopte)0x3) << FAST_PTE_SH_SHIFT)
#define FAST_PTE_SH_OS		(((av8l_fast_iopte)2) << FAST_PTE_SH_SHIFT)
#define FAST_PTE_SH_IS		(((av8l_fast_iopte)3) << FAST_PTE_SH_SHIFT)

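/*
 * Translate DMA attributes into the CPU page protections used when the
 * buffer is remapped for the kernel or for userspace. Strongly-ordered
 * requests take precedence over write-combine.
 */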
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (attrs & DMA_ATTR_STRONGLY_ORDERED)
		return pgprot_noncached(prot);
	else if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static int __get_iommu_pgprot(unsigned long attrs, int prot,
			      bool coherent)
{
	if (!(attrs & DMA_ATTR_EXEC_MAPPING))
		prot |= IOMMU_NOEXEC;
	if (attrs & DMA_ATTR_STRONGLY_ORDERED)
		prot |= IOMMU_MMIO;
	if (coherent)
		prot |= IOMMU_CACHE;

	return prot;
}

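/*
 * Clean a range of page-table memory to the point of coherency so that the
 * SMMU's table walker observes the update; a no-op when the walker itself
 * is cache-coherent.
 */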
static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping,
				  void *start, void *end)
{
	if (!mapping->is_smmu_pt_coherent)
		dmac_clean_range(start, end);
}

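/*
 * Recover coherency from an installed PTE: a cacheable memory-attribute
 * index combined with inner- or outer-shareable attributes means the page
 * was mapped coherent.
 */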
static bool __fast_is_pte_coherent(av8l_fast_iopte *ptep)
{
	int attr_idx = (*ptep & (FAST_PTE_ATTRINDX_MASK <<
				 FAST_PTE_ATTRINDX_SHIFT)) >>
			FAST_PTE_ATTRINDX_SHIFT;

	if ((attr_idx == FAST_MAIR_ATTR_IDX_CACHE) &&
	    (((*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_IS) ||
	     (*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_OS))
		return true;

	return false;
}

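/*
 * Decide coherency for a request: the explicit FORCE_COHERENT and
 * FORCE_NON_COHERENT attributes override whatever the device itself
 * advertises.
 */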
static bool is_dma_coherent(struct device *dev, unsigned long attrs)
{
	bool is_coherent;

	if (attrs & DMA_ATTR_FORCE_COHERENT)
		is_coherent = true;
	else if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
		is_coherent = false;
	else if (is_device_dma_coherent(dev))
		is_coherent = true;
	else
		is_coherent = false;

	return is_coherent;
}

/*
 * Checks if the allocated range (ending at @end) covered the upcoming
 * stale bit. We don't need to know exactly where the range starts since
 * we already know where the candidate search range started. If, starting
 * from the beginning of the candidate search range, we had to step over
 * (or landed directly on top of) the upcoming stale bit, then we return
 * true.
 *
 * Due to wrapping, there are two scenarios we'll need to check: (1) if the
 * range [search_start, upcoming_stale] spans 0 (i.e. search_start >
 * upcoming_stale), and, (2) if the range [search_start, upcoming_stale]
 * does *not* span 0 (i.e. search_start <= upcoming_stale). And for each
 * of those two scenarios we need to handle two cases: (1) the allocation
 * ended without wrapping, and (2) the allocation wrapped past the top of
 * the bitmap.
 */
static bool __bit_covered_stale(unsigned long upcoming_stale,
				unsigned long search_start,
				unsigned long end)
{
	if (search_start > upcoming_stale) {
		if (end >= search_start) {
			/*
			 * We started searching above upcoming_stale and we
			 * didn't wrap, so we couldn't have crossed
			 * upcoming_stale.
			 */
			return false;
		}
		/*
		 * We wrapped. Did we cross (or land on top of)
		 * upcoming_stale?
		 */
		return end >= upcoming_stale;
	}

	if (search_start <= upcoming_stale) {
		if (end >= search_start) {
			/*
			 * We didn't wrap. Did we cross (or land on top
			 * of) upcoming_stale?
			 */
			return end >= upcoming_stale;
		}
		/*
		 * We wrapped. So we must have crossed upcoming_stale
		 * (since we started searching below it).
		 */
		return true;
	}

	/* we should have covered all logical combinations... */
	WARN_ON(1);
	return true;
}

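/*
 * Allocate a run of 4K IOVA pages from the mapping's bitmap, starting the
 * search at next_start and wrapping around once before giving up. When a
 * minimum IOVA alignment is in effect (MMU-500 errata), the allocation is
 * padded and the shared guard page is mapped over the padding so that no
 * other buffer can be placed inside the alignment window.
 */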
static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
					 unsigned long attrs,
					 size_t size)
{
	unsigned long bit, prev_search_start, nbits;
	unsigned long align;
	unsigned long guard_len;
	dma_addr_t iova;

	if (mapping->min_iova_align)
		guard_len = ALIGN(size, mapping->min_iova_align) - size;
	else
		guard_len = 0;

	nbits = (size + guard_len) >> FAST_PAGE_SHIFT;
	align = (1 << get_order(size + guard_len)) - 1;
	bit = bitmap_find_next_zero_area(
		mapping->bitmap, mapping->num_4k_pages, mapping->next_start,
		nbits, align);
	if (unlikely(bit > mapping->num_4k_pages)) {
		/* try wrapping */
		mapping->next_start = 0; /* TODO: SHOULD I REALLY DO THIS?!? */
		bit = bitmap_find_next_zero_area(
			mapping->bitmap, mapping->num_4k_pages, 0, nbits,
			align);
		if (unlikely(bit > mapping->num_4k_pages))
			return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, bit, nbits);
	prev_search_start = mapping->next_start;
	mapping->next_start = bit + nbits;
	if (unlikely(mapping->next_start >= mapping->num_4k_pages))
		mapping->next_start = 0;

	/*
	 * If we just re-allocated a VA whose TLB hasn't been invalidated
	 * since it was last used and unmapped, we need to invalidate it
	 * here. We actually invalidate the entire TLB so that we don't
	 * have to invalidate the TLB again until we wrap back around.
	 */
	if (mapping->have_stale_tlbs &&
	    __bit_covered_stale(mapping->upcoming_stale_bit,
				prev_search_start,
				bit + nbits - 1)) {
		bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);

		iommu_tlbiall(mapping->domain);
		mapping->have_stale_tlbs = false;
		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync);
	}

	iova = (bit << FAST_PAGE_SHIFT) + mapping->base;
	if (guard_len &&
	    iommu_map(mapping->domain, iova + size,
		      page_to_phys(mapping->guard_page),
		      guard_len, ARM_SMMU_GUARD_PROT)) {
		bitmap_clear(mapping->bitmap, bit, nbits);
		return DMA_ERROR_CODE;
	}
	return iova;
}

/*
 * Checks whether the candidate bit will be allocated sooner than the
 * current upcoming stale bit. We can say candidate will be upcoming
 * sooner than the current upcoming stale bit if it lies between the
 * starting bit of the next search range and the upcoming stale bit
 * (allowing for wrap-around).
 *
 * Stated differently, we're checking the relative ordering of three
 * unsigned numbers. So we need to check all 6 (i.e. 3!) permutations,
 * namely:
 *
 *     0 |---A---B---C---| TOP (Case 1)
 *     0 |---A---C---B---| TOP (Case 2)
 *     0 |---B---A---C---| TOP (Case 3)
 *     0 |---B---C---A---| TOP (Case 4)
 *     0 |---C---A---B---| TOP (Case 5)
 *     0 |---C---B---A---| TOP (Case 6)
 *
 * Note that since we're allowing numbers to wrap, the following three
 * scenarios are all equivalent for Case 1:
 *
 *     0 |---A---B---C---| TOP
 *     0 |---C---A---B---| TOP (C has wrapped. This is Case 5.)
 *     0 |---B---C---A---| TOP (C and B have wrapped. This is Case 4.)
 *
 * In any of these cases, if we start searching from A, we will find B
 * before we find C.
 *
 * We can also find two equivalent cases for Case 2:
 *
 *     0 |---A---C---B---| TOP
 *     0 |---B---A---C---| TOP (B has wrapped. This is Case 3.)
 *     0 |---C---B---A---| TOP (B and C have wrapped. This is Case 6.)
 *
 * In any of these cases, if we start searching from A, we will find C
 * before we find B.
 */
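/*
 * A concrete (hypothetical) example: with next_start A = 10 and
 * upcoming_stale_bit C = 4, a candidate B = 13 gives C < A < B (Case 5),
 * so the candidate is sooner: searching upward from bit 10 reaches 13
 * before wrapping around to 4. A candidate B = 3 instead gives
 * B < C < A (Case 4), which is also sooner, since the search wraps and
 * reaches bit 3 before bit 4.
 */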
static bool __bit_is_sooner(unsigned long candidate,
			    struct dma_fast_smmu_mapping *mapping)
{
	unsigned long A = mapping->next_start;
	unsigned long B = candidate;
	unsigned long C = mapping->upcoming_stale_bit;

	if ((A < B && B < C) ||	/* Case 1 */
	    (C < A && A < B) ||	/* Case 5 */
	    (B < C && C < A))	/* Case 4 */
		return true;

	if ((A < C && C < B) ||	/* Case 2 */
	    (B < A && A < C) ||	/* Case 3 */
	    (C < B && B < A))	/* Case 6 */
		return false;

	/*
	 * For simplicity, we've been ignoring the possibility of any of
	 * our three numbers being equal. Handle those cases here (they
	 * shouldn't happen very often).
	 */

	/*
	 * If candidate is the next bit to be searched then it's definitely
	 * sooner.
	 */
	if (A == B)
		return true;

	/*
	 * If candidate is the next upcoming stale bit we'll return false
	 * to avoid doing `upcoming = candidate' in the caller (which would
	 * be useless since they're already equal).
	 */
	if (B == C)
		return false;

	/*
	 * If next start is the upcoming stale bit then candidate can't
	 * possibly be sooner. The "soonest" bit is already selected.
	 */
	if (A == C)
		return false;

	/* We should have covered all logical combinations. */
	WARN(1, "Well, that's awkward. A=%lu, B=%lu, C=%lu\n", A, B, C);
	return true;
}

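/*
 * Return an IOVA run to the bitmap. TLBs are deliberately not invalidated
 * here; we record that stale entries exist and remember whichever freed
 * bit is closest to being re-allocated, so the allocator knows when a full
 * invalidate is due.
 */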
static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
				  dma_addr_t iova, size_t size)
{
	unsigned long start_bit = (iova - mapping->base) >> FAST_PAGE_SHIFT;
	unsigned long nbits;
	unsigned long guard_len;

	if (mapping->min_iova_align) {
		guard_len = ALIGN(size, mapping->min_iova_align) - size;
		iommu_unmap(mapping->domain, iova + size, guard_len);
	} else {
		guard_len = 0;
	}
	nbits = (size + guard_len) >> FAST_PAGE_SHIFT;

	/*
	 * We don't invalidate TLBs on unmap. We invalidate TLBs on map
	 * when we're about to re-allocate a VA that was previously
	 * unmapped but hasn't yet been invalidated. So we need to keep
	 * track of which bit is the closest to being re-allocated here.
	 */
	if (__bit_is_sooner(start_bit, mapping))
		mapping->upcoming_stale_bit = start_bit;

	bitmap_clear(mapping->bitmap, start_bit, nbits);
	mapping->have_stale_tlbs = true;
}

static void __fast_dma_page_cpu_to_dev(struct page *page, unsigned long off,
				       size_t size, enum dma_data_direction dir)
{
	__dma_map_area(page_address(page) + off, size, dir);
}

static void __fast_dma_page_dev_to_cpu(struct page *page, unsigned long off,
				       size_t size, enum dma_data_direction dir)
{
	__dma_unmap_area(page_address(page) + off, size, dir);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}

static int __fast_dma_direction_to_prot(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return IOMMU_READ;
	case DMA_FROM_DEVICE:
		return IOMMU_WRITE;
	default:
		return 0;
	}
}

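/*
 * dma_map_ops entry point for single pages: perform any CPU cache
 * maintenance the buffer needs, allocate an IOVA run under the mapping
 * lock, then write the PTEs directly via the io-pgtable-fast helpers and
 * clean them for the table walker.
 */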
static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	dma_addr_t iova;
	unsigned long flags;
	av8l_fast_iopte *pmd;
	phys_addr_t phys_plus_off = page_to_phys(page) + offset;
	phys_addr_t phys_to_map = round_down(phys_plus_off, FAST_PAGE_SIZE);
	unsigned long offset_from_phys_to_map = phys_plus_off & ~FAST_PAGE_MASK;
	size_t len = ALIGN(size + offset_from_phys_to_map, FAST_PAGE_SIZE);
	int nptes = len >> FAST_PAGE_SHIFT;
	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
	int prot = __fast_dma_direction_to_prot(dir);
	bool is_coherent = is_dma_coherent(dev, attrs);

	prot = __get_iommu_pgprot(attrs, prot, is_coherent);

	if (!skip_sync && !is_coherent)
		__fast_dma_page_cpu_to_dev(phys_to_page(phys_to_map),
					   offset_from_phys_to_map, size, dir);

	spin_lock_irqsave(&mapping->lock, flags);

	iova = __fast_smmu_alloc_iova(mapping, attrs, len);

	if (unlikely(iova == DMA_ERROR_CODE))
		goto fail;

	pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);

	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
		goto fail_free_iova;

	fast_dmac_clean_range(mapping, pmd, pmd + nptes);

	spin_unlock_irqrestore(&mapping->lock, flags);

	trace_map(mapping->domain, iova, phys_to_map, len, prot);
	return iova + offset_from_phys_to_map;

fail_free_iova:
	__fast_smmu_free_iova(mapping, iova, len);
fail:
	spin_unlock_irqrestore(&mapping->lock, flags);
	return DMA_ERROR_CODE;
}

static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	unsigned long flags;
	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
	int nptes = len >> FAST_PAGE_SHIFT;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
	bool is_coherent = is_dma_coherent(dev, attrs);

	if (!skip_sync && !is_coherent)
		__fast_dma_page_dev_to_cpu(page, offset, size, dir);

	spin_lock_irqsave(&mapping->lock, flags);
	av8l_fast_unmap_public(pmd, len);
	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
	__fast_smmu_free_iova(mapping, iova - offset, len);
	spin_unlock_irqrestore(&mapping->lock, flags);

	trace_unmap(mapping->domain, iova - offset, len, len);
}

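/*
 * The partial-sync hooks receive no DMA attributes, so coherency is read
 * back from the installed PTE rather than recomputed from attrs.
 */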
static void fast_smmu_sync_single_for_cpu(struct device *dev,
		dma_addr_t iova, size_t size, enum dma_data_direction dir)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));

	if (!__fast_is_pte_coherent(pmd))
		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
}

static void fast_smmu_sync_single_for_device(struct device *dev,
		dma_addr_t iova, size_t size, enum dma_data_direction dir)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));

	if (!__fast_is_pte_coherent(pmd))
		__fast_dma_page_cpu_to_dev(page, offset, size, dir);
}

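/*
 * Scatter-gather is not supported by the fast allocator. map_sg reports
 * failure by returning 0, and the remaining sg callbacks warn if they are
 * ever reached.
 */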
static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	/* 0 indicates error */
	return 0;
}

static void fast_smmu_unmap_sg(struct device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	WARN_ON_ONCE(1);
}

static void fast_smmu_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir)
{
	WARN_ON_ONCE(1);
}

static void fast_smmu_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir)
{
	WARN_ON_ONCE(1);
}

static void __fast_smmu_free_pages(struct page **pages, int count)
{
	int i;

	for (i = 0; i < count; i++)
		__free_page(pages[i]);
	kvfree(pages);
}

static struct page **__fast_smmu_alloc_pages(unsigned int count, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	for (i = 0; i < count; ++i) {
		struct page *page = alloc_page(gfp);

		if (!page) {
			__fast_smmu_free_pages(pages, i);
			return NULL;
		}
		pages[i] = page;
	}
	return pages;
}

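/*
 * Coherent allocation: gather single pages, flush them if the device is
 * non-coherent, carve one IOVA run out of the bitmap, install the PTEs one
 * scatterlist segment at a time, and finally remap the pages into a
 * contiguous kernel VA.
 */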
static void *fast_smmu_alloc(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp,
			     unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	struct sg_table sgt;
	dma_addr_t dma_addr, iova_iter;
	void *addr;
	av8l_fast_iopte *ptep;
	unsigned long flags;
	struct sg_mapping_iter miter;
	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
	int prot = IOMMU_READ | IOMMU_WRITE; /* TODO: extract from attrs */
	bool is_coherent = is_dma_coherent(dev, attrs);
	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
	struct page **pages;

	prot = __get_iommu_pgprot(attrs, prot, is_coherent);

	*handle = DMA_ERROR_CODE;

	pages = __fast_smmu_alloc_pages(count, gfp);
	if (!pages) {
		dev_err(dev, "no pages\n");
		return NULL;
	}

	size = ALIGN(size, SZ_4K);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, gfp)) {
		dev_err(dev, "no sg table\n");
		goto out_free_pages;
	}

	if (!is_coherent) {
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
			       SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			__dma_flush_area(miter.addr, miter.length);
		sg_miter_stop(&miter);
	}

	spin_lock_irqsave(&mapping->lock, flags);
	dma_addr = __fast_smmu_alloc_iova(mapping, attrs, size);
	if (dma_addr == DMA_ERROR_CODE) {
		dev_err(dev, "no iova\n");
		spin_unlock_irqrestore(&mapping->lock, flags);
		goto out_free_sg;
	}
	iova_iter = dma_addr;
	sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	while (sg_miter_next(&miter)) {
		int nptes = miter.length >> FAST_PAGE_SHIFT;

		ptep = iopte_pmd_offset(mapping->pgtbl_pmds, iova_iter);
		if (unlikely(av8l_fast_map_public(
				     ptep, page_to_phys(miter.page),
				     miter.length, prot))) {
			dev_err(dev, "no map public\n");
			/* TODO: unwind previously successful mappings */
			goto out_free_iova;
		}
		fast_dmac_clean_range(mapping, ptep, ptep + nptes);
		iova_iter += miter.length;
	}
	sg_miter_stop(&miter);
	spin_unlock_irqrestore(&mapping->lock, flags);

	addr = dma_common_pages_remap(pages, size, VM_USERMAP, remap_prot,
				      __builtin_return_address(0));
	if (!addr) {
		dev_err(dev, "no common pages\n");
		goto out_unmap;
	}

	*handle = dma_addr;
	sg_free_table(&sgt);
	return addr;

out_unmap:
	/* need to take the lock again for page tables and iova */
	spin_lock_irqsave(&mapping->lock, flags);
	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr);
	av8l_fast_unmap_public(ptep, size);
	fast_dmac_clean_range(mapping, ptep, ptep + count);
out_free_iova:
	__fast_smmu_free_iova(mapping, dma_addr, size);
	spin_unlock_irqrestore(&mapping->lock, flags);
out_free_sg:
	sg_free_table(&sgt);
out_free_pages:
	__fast_smmu_free_pages(pages, count);
	return NULL;
}

static void fast_smmu_free(struct device *dev, size_t size,
			   void *vaddr, dma_addr_t dma_handle,
			   unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	struct vm_struct *area;
	struct page **pages;
	size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT;
	av8l_fast_iopte *ptep;
	unsigned long flags;

	size = ALIGN(size, SZ_4K);

	area = find_vm_area(vaddr);
	if (WARN_ON_ONCE(!area))
		return;

	pages = area->pages;
	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle);
	spin_lock_irqsave(&mapping->lock, flags);
	av8l_fast_unmap_public(ptep, size);
	fast_dmac_clean_range(mapping, ptep, ptep + count);
	__fast_smmu_free_iova(mapping, dma_handle, size);
	spin_unlock_irqrestore(&mapping->lock, flags);
	__fast_smmu_free_pages(pages, count);
}

static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
				void *cpu_addr, dma_addr_t dma_addr,
				size_t size, unsigned long attrs)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	struct page **pages;
	int i, nr_pages, ret = 0;
	bool coherent = is_dma_coherent(dev, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     coherent);
	area = find_vm_area(cpu_addr);
	if (!area)
		return -EINVAL;

	pages = area->pages;
	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	for (i = vma->vm_pgoff; i < nr_pages && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}

	return ret;
}

static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, unsigned long attrs)
{
	unsigned int n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;

	area = find_vm_area(cpu_addr);
	if (!area || !area->pages)
		return -EINVAL;

	return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, size,
					 GFP_KERNEL);
}

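/*
 * Map device MMIO (a physical resource rather than system memory): carve
 * out an IOVA run, then map it through the generic iommu_map() path with
 * IOMMU_MMIO set, since no struct page backs the target.
 */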
static dma_addr_t fast_smmu_dma_map_resource(
			struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	size_t offset = phys_addr & ~FAST_PAGE_MASK;
	size_t len = round_up(size + offset, FAST_PAGE_SIZE);
	dma_addr_t dma_addr;
	int prot;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	dma_addr = __fast_smmu_alloc_iova(mapping, attrs, len);
	spin_unlock_irqrestore(&mapping->lock, flags);

	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __fast_dma_direction_to_prot(dir);
	prot |= IOMMU_MMIO;

	if (iommu_map(mapping->domain, dma_addr, phys_addr - offset,
		      len, prot)) {
		spin_lock_irqsave(&mapping->lock, flags);
		__fast_smmu_free_iova(mapping, dma_addr, len);
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}
	return dma_addr + offset;
}

static void fast_smmu_dma_unmap_resource(
			struct device *dev, dma_addr_t addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	size_t offset = addr & ~FAST_PAGE_MASK;
	size_t len = round_up(size + offset, FAST_PAGE_SIZE);
	unsigned long flags;

	iommu_unmap(mapping->domain, addr - offset, len);
	spin_lock_irqsave(&mapping->lock, flags);
	__fast_smmu_free_iova(mapping, addr - offset, len);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

static int fast_smmu_mapping_error(struct device *dev,
				   dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

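/*
 * Debug path: io-pgtable-fast notifies us when a PTE is written over an
 * entry whose TLB was never invalidated. Dump enough state (the offending
 * bitmap index plus the whole allocation bitmap) to reconstruct what
 * happened.
 */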
static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast,
					  void *data)
{
	av8l_fast_iopte *ptep = data;
	dma_addr_t iova;
	unsigned long bitmap_idx;

	bitmap_idx = (unsigned long)(ptep - fast->pgtbl_pmds);
	iova = bitmap_idx << FAST_PAGE_SHIFT;
	dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
	dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
	dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
		fast->pgtbl_pmds, bitmap_idx);
	print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
		       32, 8, fast->bitmap, fast->bitmap_size, false);
}

static int fast_smmu_notify(struct notifier_block *self,
			    unsigned long action, void *data)
{
	struct dma_fast_smmu_mapping *fast = container_of(
		self, struct dma_fast_smmu_mapping, notifier);

	switch (action) {
	case MAPPED_OVER_STALE_TLB:
		__fast_smmu_mapped_over_stale(fast, data);
		return NOTIFY_OK;
	default:
		WARN(1, "Unhandled notifier action");
		return NOTIFY_DONE;
	}
}

static const struct dma_map_ops fast_smmu_dma_ops = {
	.alloc = fast_smmu_alloc,
	.free = fast_smmu_free,
	.mmap = fast_smmu_mmap_attrs,
	.get_sgtable = fast_smmu_get_sgtable,
	.map_page = fast_smmu_map_page,
	.unmap_page = fast_smmu_unmap_page,
	.sync_single_for_cpu = fast_smmu_sync_single_for_cpu,
	.sync_single_for_device = fast_smmu_sync_single_for_device,
	.map_sg = fast_smmu_map_sg,
	.unmap_sg = fast_smmu_unmap_sg,
	.sync_sg_for_cpu = fast_smmu_sync_sg_for_cpu,
	.sync_sg_for_device = fast_smmu_sync_sg_for_device,
	.map_resource = fast_smmu_dma_map_resource,
	.unmap_resource = fast_smmu_dma_unmap_resource,
	.mapping_error = fast_smmu_mapping_error,
};

/**
 * __fast_smmu_create_mapping_sized
 * @base: bottom of the VA range
 * @size: size of the VA range in bytes
 *
 * Creates a mapping structure which holds information about used/unused IO
 * address ranges, which is required to perform mapping with IOMMU aware
 * functions. The only VA range supported is [0, 4GB).
 *
 * The client device needs to be attached to the mapping; the first attach
 * invokes fast_smmu_init_mapping().
 */
static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
	dma_addr_t base, u64 size)
{
	struct dma_fast_smmu_mapping *fast;

	fast = kzalloc(sizeof(struct dma_fast_smmu_mapping), GFP_KERNEL);
	if (!fast)
		goto err;

	fast->base = base;
	fast->size = size;
	fast->num_4k_pages = size >> FAST_PAGE_SHIFT;
	fast->bitmap_size = BITS_TO_LONGS(fast->num_4k_pages) * sizeof(long);

	fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL | __GFP_NOWARN |
			       __GFP_NORETRY);
	if (!fast->bitmap)
		fast->bitmap = vzalloc(fast->bitmap_size);

	if (!fast->bitmap)
		goto err2;

	spin_lock_init(&fast->lock);

	return fast;
err2:
	kfree(fast);
err:
	return ERR_PTR(-ENOMEM);
}

/*
 * Based on similar code from dma-iommu.c, but modified to use a different
 * iova allocator.
 */
static void fast_smmu_reserve_pci_windows(struct device *dev,
			struct dma_fast_smmu_mapping *mapping)
{
	struct pci_host_bridge *bridge;
	struct resource_entry *window;
	phys_addr_t start, end;
	struct pci_dev *pci_dev;
	unsigned long flags;

	if (!dev_is_pci(dev))
		return;

	pci_dev = to_pci_dev(dev);
	bridge = pci_find_host_bridge(pci_dev->bus);

	spin_lock_irqsave(&mapping->lock, flags);
	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM &&
		    resource_type(window->res) != IORESOURCE_IO)
			continue;

		start = round_down(window->res->start - window->offset,
				   FAST_PAGE_SIZE);
		end = round_up(window->res->end - window->offset,
			       FAST_PAGE_SIZE);
		start = max_t(unsigned long, mapping->base, start);
		end = min_t(unsigned long, mapping->base + mapping->size, end);
		if (start >= end)
			continue;

		dev_dbg(dev, "iova allocator reserved 0x%pa-0x%pa\n",
			&start, &end);

		start = (start - mapping->base) >> FAST_PAGE_SHIFT;
		end = (end - mapping->base) >> FAST_PAGE_SHIFT;
		bitmap_set(mapping->bitmap, start, end - start);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);
}

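/*
 * Query the domain for the MMU-500 errata minimum-alignment attribute and,
 * when it is set, grab the per-VMID guard page that __fast_smmu_alloc_iova
 * maps over each allocation's padding.
 */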
static int fast_smmu_errata_init(struct dma_iommu_mapping *mapping)
{
	struct dma_fast_smmu_mapping *fast = mapping->fast;
	int vmid = VMID_HLOS;
	int min_iova_align = 0;

	iommu_domain_get_attr(mapping->domain,
			      DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
			      &min_iova_align);
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
	if (vmid >= VMID_LAST || vmid < 0)
		vmid = VMID_HLOS;

	if (min_iova_align) {
		fast->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
		fast->guard_page = arm_smmu_errata_get_guard_page(vmid);
		if (!fast->guard_page)
			return -ENOMEM;
	}
	return 0;
}

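/*
 * Typical client flow, as a hedged sketch (helper names follow the
 * asm/dma-iommu.h glue in this kernel family and may differ elsewhere;
 * DOMAIN_ATTR_FAST is the attribute that routes a domain to this
 * allocator):
 *
 *	int fast = 1;
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_4G);
 *	iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast);
 *	arm_iommu_attach_device(dev, mapping);	// first attach ends up in
 *						// fast_smmu_init_mapping()
 */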
/**
 * fast_smmu_init_mapping
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 * arm_iommu_create_mapping)
 *
 * Called the first time a device is attached to this mapping.
 * Not for dma client use.
 */
int fast_smmu_init_mapping(struct device *dev,
			   struct dma_iommu_mapping *mapping)
{
	int err = 0;
	struct iommu_domain *domain = mapping->domain;
	struct iommu_pgtbl_info info;
	u64 size = (u64)mapping->bits << PAGE_SHIFT;

	if (mapping->base + size > (SZ_1G * 4ULL)) {
		dev_err(dev, "Iova end address too large\n");
		return -EINVAL;
	}

	mapping->fast = __fast_smmu_create_mapping_sized(mapping->base, size);
	if (IS_ERR(mapping->fast))
		return -ENOMEM;
	mapping->fast->domain = domain;
	mapping->fast->dev = dev;

	if (fast_smmu_errata_init(mapping)) {
		err = -ENOMEM;
		goto release_mapping;
	}

	fast_smmu_reserve_pci_windows(dev, mapping->fast);

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
				  &info)) {
		dev_err(dev, "Couldn't get page table info\n");
		err = -EINVAL;
		goto release_mapping;
	}
	mapping->fast->pgtbl_pmds = info.pmds;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
				  &mapping->fast->is_smmu_pt_coherent)) {
		err = -EINVAL;
		goto release_mapping;
	}

	mapping->fast->notifier.notifier_call = fast_smmu_notify;
	av8l_register_notify(&mapping->fast->notifier);

	mapping->ops = &fast_smmu_dma_ops;
	return 0;

release_mapping:
	kvfree(mapping->fast->bitmap);
	kfree(mapping->fast);
	return err;
}

/**
 * fast_smmu_release_mapping
 * @kref: dma_iommu_mapping->kref
 *
 * Cleans up the given iommu mapping.
 */
void fast_smmu_release_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	kvfree(mapping->fast->bitmap);
	kfree(mapping->fast);
	iommu_domain_free(mapping->domain);
	kfree(mapping);
}