/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/dma-mapping-fast.h>
#include <linux/io-pgtable-fast.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>
#include <linux/slab.h>
#include <trace/events/iommu.h>

#include <soc/qcom/secure_buffer.h>
#include <linux/arm-smmu-errata.h>

/* some redundant definitions... :( TODO: move to io-pgtable-fast.h */
#define FAST_PAGE_SHIFT			12
#define FAST_PAGE_SIZE			(1UL << FAST_PAGE_SHIFT)
#define FAST_PAGE_MASK			(~(PAGE_SIZE - 1))
#define FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
#define FAST_MAIR_ATTR_IDX_CACHE	1
#define FAST_PTE_ATTRINDX_SHIFT		2
#define FAST_PTE_ATTRINDX_MASK		0x7
#define FAST_PTE_SH_SHIFT		8
#define FAST_PTE_SH_MASK		(((av8l_fast_iopte)0x3) << FAST_PTE_SH_SHIFT)
#define FAST_PTE_SH_OS			(((av8l_fast_iopte)2) << FAST_PTE_SH_SHIFT)
#define FAST_PTE_SH_IS			(((av8l_fast_iopte)3) << FAST_PTE_SH_SHIFT)

static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (attrs & DMA_ATTR_STRONGLY_ORDERED)
		return pgprot_noncached(prot);
	else if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static int __get_iommu_pgprot(unsigned long attrs, int prot,
			      bool coherent)
{
	if (!(attrs & DMA_ATTR_EXEC_MAPPING))
		prot |= IOMMU_NOEXEC;
	if ((attrs & DMA_ATTR_STRONGLY_ORDERED))
		prot |= IOMMU_MMIO;
	if (coherent)
		prot |= IOMMU_CACHE;

	return prot;
}

static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping,
				  void *start, void *end)
{
	if (!mapping->is_smmu_pt_coherent)
		dmac_clean_range(start, end);
}

static bool __fast_is_pte_coherent(av8l_fast_iopte *ptep)
{
	int attr_idx = (*ptep & (FAST_PTE_ATTRINDX_MASK <<
				 FAST_PTE_ATTRINDX_SHIFT)) >>
				 FAST_PTE_ATTRINDX_SHIFT;

	if ((attr_idx == FAST_MAIR_ATTR_IDX_CACHE) &&
	    (((*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_IS) ||
	     (*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_OS))
		return true;

	return false;
}

static bool is_dma_coherent(struct device *dev, unsigned long attrs)
{
	bool is_coherent;

	if (attrs & DMA_ATTR_FORCE_COHERENT)
		is_coherent = true;
	else if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
		is_coherent = false;
	else if (is_device_dma_coherent(dev))
		is_coherent = true;
	else
		is_coherent = false;

	return is_coherent;
}

/*
 * Checks if the allocated range (ending at @end) covered the upcoming
 * stale bit. We don't need to know exactly where the range starts since
 * we already know where the candidate search range started. If, starting
 * from the beginning of the candidate search range, we had to step over
 * (or landed directly on top of) the upcoming stale bit, then we return
 * true.
 *
 * Due to wrapping, there are two scenarios we'll need to check: (1) if the
 * range [search_start, upcoming_stale] spans 0 (i.e. search_start >
 * upcoming_stale), and, (2) if the range [search_start, upcoming_stale]
 * does *not* span 0 (i.e. search_start <= upcoming_stale). And for each
 * of those two scenarios we need to handle three cases: (1) the allocation
 * ended before wrapping, (2) it wrapped and still crossed (or landed on)
 * the stale bit, or (3) it wrapped without crossing it.
 */
static bool __bit_covered_stale(unsigned long upcoming_stale,
				unsigned long search_start,
				unsigned long end)
{
	if (search_start > upcoming_stale) {
		if (end >= search_start) {
			/*
			 * We started searching above upcoming_stale and we
			 * didn't wrap, so we couldn't have crossed
			 * upcoming_stale.
			 */
			return false;
		}
		/*
		 * We wrapped. Did we cross (or land on top of)
		 * upcoming_stale?
		 */
		return end >= upcoming_stale;
	}

	if (search_start <= upcoming_stale) {
		if (end >= search_start) {
			/*
			 * We didn't wrap. Did we cross (or land on top
			 * of) upcoming_stale?
			 */
			return end >= upcoming_stale;
		}
		/*
		 * We wrapped. So we must have crossed upcoming_stale
		 * (since we started searching below it).
		 */
		return true;
	}

	/* we should have covered all logical combinations... */
	WARN_ON(1);
	return true;
}
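
/*
 * Worked example for __bit_covered_stale() (the numbers are illustrative
 * only, not taken from any real configuration): with upcoming_stale = 10,
 * a search that started at bit 250 and an allocation that ended at bit 12,
 * end (12) < search_start (250) means the search wrapped past the top of
 * the bitmap, and end (12) >= upcoming_stale (10) means it stepped over the
 * stale bit, so the function returns true and the caller performs a full
 * TLB invalidate before reusing the range.
 */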

static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
					 unsigned long attrs,
					 size_t size)
{
	unsigned long bit, prev_search_start, nbits;
	unsigned long align;
	unsigned long guard_len;
	dma_addr_t iova;

	if (mapping->min_iova_align)
		guard_len = ALIGN(size + mapping->force_guard_page_len,
				  mapping->min_iova_align) - size;
	else
		guard_len = 0;

	nbits = (size + guard_len) >> FAST_PAGE_SHIFT;
	align = (1 << get_order(size + guard_len)) - 1;
	bit = bitmap_find_next_zero_area(
		mapping->bitmap, mapping->num_4k_pages, mapping->next_start,
		nbits, align);
	if (unlikely(bit > mapping->num_4k_pages)) {
		/* try wrapping */
		mapping->next_start = 0; /* TODO: SHOULD I REALLY DO THIS?!? */
		bit = bitmap_find_next_zero_area(
			mapping->bitmap, mapping->num_4k_pages, 0, nbits,
			align);
		if (unlikely(bit > mapping->num_4k_pages))
			return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, bit, nbits);
	prev_search_start = mapping->next_start;
	mapping->next_start = bit + nbits;
	if (unlikely(mapping->next_start >= mapping->num_4k_pages))
		mapping->next_start = 0;

	/*
	 * If we just re-allocated a VA whose TLB hasn't been invalidated
	 * since it was last used and unmapped, we need to invalidate it
	 * here. We actually invalidate the entire TLB so that we don't
	 * have to invalidate the TLB again until we wrap back around.
	 */
	if (mapping->have_stale_tlbs &&
	    __bit_covered_stale(mapping->upcoming_stale_bit,
				prev_search_start,
				bit + nbits - 1)) {
		bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);

		iommu_tlbiall(mapping->domain);
		mapping->have_stale_tlbs = false;
		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync);
	}

	iova = (bit << FAST_PAGE_SHIFT) + mapping->base;
	if (guard_len &&
	    iommu_map(mapping->domain, iova + size,
		      page_to_phys(mapping->guard_page),
		      guard_len, ARM_SMMU_GUARD_PROT)) {
		bitmap_clear(mapping->bitmap, bit, nbits);
		return DMA_ERROR_CODE;
	}
	return iova;
}
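
/*
 * Illustrative guard-length arithmetic (hypothetical values, assuming
 * ARM_SMMU_MIN_IOVA_ALIGN works out to 64K): for size = 0x4000,
 * min_iova_align = 0x10000 and force_guard_page_len = 0x1000,
 * guard_len = ALIGN(0x4000 + 0x1000, 0x10000) - 0x4000 = 0xc000, so
 * nbits = (0x4000 + 0xc000) >> 12 = 16 bitmap bits are reserved and the
 * guard page is mapped at iova + 0x4000 for 0xc000 bytes.
 */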

/*
 * Checks whether the candidate bit will be allocated sooner than the
 * current upcoming stale bit. We can say candidate will be upcoming
 * sooner than the current upcoming stale bit if it lies between the
 * starting bit of the next search range and the upcoming stale bit
 * (allowing for wrap-around).
 *
 * Stated differently, we're checking the relative ordering of three
 * unsigned numbers. So we need to check all 6 (i.e. 3!) permutations,
 * namely:
 *
 *     0 |---A---B---C---| TOP (Case 1)
 *     0 |---A---C---B---| TOP (Case 2)
 *     0 |---B---A---C---| TOP (Case 3)
 *     0 |---B---C---A---| TOP (Case 4)
 *     0 |---C---A---B---| TOP (Case 5)
 *     0 |---C---B---A---| TOP (Case 6)
 *
 * Note that since we're allowing numbers to wrap, the following three
 * scenarios are all equivalent for Case 1:
 *
 *     0 |---A---B---C---| TOP
 *     0 |---C---A---B---| TOP (C has wrapped. This is Case 5.)
 *     0 |---B---C---A---| TOP (C and B have wrapped. This is Case 4.)
 *
 * In any of these cases, if we start searching from A, we will find B
 * before we find C.
 *
 * We can also find two equivalent cases for Case 2:
 *
 *     0 |---A---C---B---| TOP
 *     0 |---B---A---C---| TOP (B has wrapped. This is Case 3.)
 *     0 |---C---B---A---| TOP (B and C have wrapped. This is Case 6.)
 *
 * In any of these cases, if we start searching from A, we will find C
 * before we find B.
 */
static bool __bit_is_sooner(unsigned long candidate,
			    struct dma_fast_smmu_mapping *mapping)
{
	unsigned long A = mapping->next_start;
	unsigned long B = candidate;
	unsigned long C = mapping->upcoming_stale_bit;

	if ((A < B && B < C) ||	/* Case 1 */
	    (C < A && A < B) ||	/* Case 5 */
	    (B < C && C < A))	/* Case 4 */
		return true;

	if ((A < C && C < B) ||	/* Case 2 */
	    (B < A && A < C) ||	/* Case 3 */
	    (C < B && B < A))	/* Case 6 */
		return false;

	/*
	 * For simplicity, we've been ignoring the possibility of any of
	 * our three numbers being equal. Handle those cases here (they
	 * shouldn't happen very often, (I think?)).
	 */

	/*
	 * If candidate is the next bit to be searched then it's definitely
	 * sooner.
	 */
	if (A == B)
		return true;

	/*
	 * If candidate is the next upcoming stale bit we'll return false
	 * to avoid doing `upcoming = candidate' in the caller (which would
	 * be useless since they're already equal)
	 */
	if (B == C)
		return false;

	/*
	 * If next start is the upcoming stale bit then candidate can't
	 * possibly be sooner. The "soonest" bit is already selected.
	 */
	if (A == C)
		return false;

	/* We should have covered all logical combinations. */
	WARN(1, "Well, that's awkward. A=%ld, B=%ld, C=%ld\n", A, B, C);
	return true;
}
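
/*
 * Worked example for __bit_is_sooner() (illustrative values): with
 * next_start A = 100 and upcoming_stale_bit C = 200, freeing an IOVA whose
 * start_bit B = 150 gives A < B < C (Case 1), so the function returns true
 * and the caller moves upcoming_stale_bit to 150; freeing B = 250 gives
 * A < C < B (Case 2), so it returns false and the stale bit stays at 200.
 */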

static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
				  dma_addr_t iova, size_t size)
{
	unsigned long start_bit = (iova - mapping->base) >> FAST_PAGE_SHIFT;
	unsigned long nbits;
	unsigned long guard_len;

	if (mapping->min_iova_align)
		guard_len = ALIGN(size + mapping->force_guard_page_len,
				  mapping->min_iova_align) - size;
	else
		guard_len = 0;

	if (guard_len)
		iommu_unmap(mapping->domain, iova + size, guard_len);

	nbits = (size + guard_len) >> FAST_PAGE_SHIFT;

	/*
	 * We don't invalidate TLBs on unmap. We invalidate TLBs on map
	 * when we're about to re-allocate a VA that was previously
	 * unmapped but hasn't yet been invalidated. So we need to keep
	 * track of which bit is the closest to being re-allocated here.
	 */
	if (__bit_is_sooner(start_bit, mapping))
		mapping->upcoming_stale_bit = start_bit;

	bitmap_clear(mapping->bitmap, start_bit, nbits);
	mapping->have_stale_tlbs = true;
}

static void __fast_dma_page_cpu_to_dev(struct page *page, unsigned long off,
				       size_t size, enum dma_data_direction dir)
{
	__dma_map_area(page_address(page) + off, size, dir);
}

static void __fast_dma_page_dev_to_cpu(struct page *page, unsigned long off,
				       size_t size, enum dma_data_direction dir)
{
	__dma_unmap_area(page_address(page) + off, size, dir);

	/* TODO: WHAT IS THIS? */
	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}

static int __fast_dma_direction_to_prot(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return IOMMU_READ;
	case DMA_FROM_DEVICE:
		return IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	dma_addr_t iova;
	unsigned long flags;
	av8l_fast_iopte *pmd;
	phys_addr_t phys_plus_off = page_to_phys(page) + offset;
	phys_addr_t phys_to_map = round_down(phys_plus_off, FAST_PAGE_SIZE);
	unsigned long offset_from_phys_to_map = phys_plus_off & ~FAST_PAGE_MASK;
	size_t len = ALIGN(size + offset_from_phys_to_map, FAST_PAGE_SIZE);
	int nptes = len >> FAST_PAGE_SHIFT;
	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
	int prot = __fast_dma_direction_to_prot(dir);
	bool is_coherent = is_dma_coherent(dev, attrs);

	prot = __get_iommu_pgprot(attrs, prot, is_coherent);

	if (!skip_sync && !is_coherent)
		__fast_dma_page_cpu_to_dev(phys_to_page(phys_to_map),
					   offset_from_phys_to_map, size, dir);

	spin_lock_irqsave(&mapping->lock, flags);

	iova = __fast_smmu_alloc_iova(mapping, attrs, len);

	if (unlikely(iova == DMA_ERROR_CODE))
		goto fail;

	pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);

	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
		goto fail_free_iova;

	fast_dmac_clean_range(mapping, pmd, pmd + nptes);

	spin_unlock_irqrestore(&mapping->lock, flags);

	trace_map(mapping->domain, iova, phys_to_map, len, prot);
	return iova + offset_from_phys_to_map;

fail_free_iova:
	__fast_smmu_free_iova(mapping, iova, len);
fail:
	spin_unlock_irqrestore(&mapping->lock, flags);
	return DMA_ERROR_CODE;
}
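
/*
 * Address-rounding example for fast_smmu_map_page() (illustrative values):
 * for page_to_phys(page) = 0x80000000, offset = 0x1234 and size = 0x2000,
 * phys_plus_off = 0x80001234, phys_to_map = 0x80001000,
 * offset_from_phys_to_map = 0x234, len = ALIGN(0x2000 + 0x234, 0x1000) =
 * 0x3000 and nptes = 3; the returned handle is the allocated iova plus
 * 0x234, and fast_smmu_unmap_page() below rounds the same way so that all
 * three PTEs are torn down.
 */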

static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	unsigned long flags;
	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
	int nptes = len >> FAST_PAGE_SHIFT;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
	bool is_coherent = is_dma_coherent(dev, attrs);

	if (!skip_sync && !is_coherent)
		__fast_dma_page_dev_to_cpu(page, offset, size, dir);

	spin_lock_irqsave(&mapping->lock, flags);
	av8l_fast_unmap_public(pmd, len);
	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
	__fast_smmu_free_iova(mapping, iova - offset, len);
	spin_unlock_irqrestore(&mapping->lock, flags);

	trace_unmap(mapping->domain, iova - offset, len, len);
}

static void fast_smmu_sync_single_for_cpu(struct device *dev,
		dma_addr_t iova, size_t size, enum dma_data_direction dir)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));

	if (!__fast_is_pte_coherent(pmd))
		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
}

static void fast_smmu_sync_single_for_device(struct device *dev,
		dma_addr_t iova, size_t size, enum dma_data_direction dir)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));

	if (!__fast_is_pte_coherent(pmd))
		__fast_dma_page_cpu_to_dev(page, offset, size, dir);
}

static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	/* 0 indicates error */
	return 0;
}

static void fast_smmu_unmap_sg(struct device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	WARN_ON_ONCE(1);
}

static void fast_smmu_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir)
{
	WARN_ON_ONCE(1);
}

static void fast_smmu_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir)
{
	WARN_ON_ONCE(1);
}

static void __fast_smmu_free_pages(struct page **pages, int count)
{
	int i;

	for (i = 0; i < count; i++)
		__free_page(pages[i]);
	kvfree(pages);
}

static struct page **__fast_smmu_alloc_pages(unsigned int count, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	for (i = 0; i < count; ++i) {
		struct page *page = alloc_page(gfp);

		if (!page) {
			__fast_smmu_free_pages(pages, i);
			return NULL;
		}
		pages[i] = page;
	}
	return pages;
}

static void *fast_smmu_alloc(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp,
			     unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	struct sg_table sgt;
	dma_addr_t dma_addr, iova_iter;
	void *addr;
	av8l_fast_iopte *ptep;
	unsigned long flags;
	struct sg_mapping_iter miter;
	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
	int prot = IOMMU_READ | IOMMU_WRITE; /* TODO: extract from attrs */
	bool is_coherent = is_dma_coherent(dev, attrs);
	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
	struct page **pages;

	prot = __get_iommu_pgprot(attrs, prot, is_coherent);

	*handle = DMA_ERROR_CODE;

	pages = __fast_smmu_alloc_pages(count, gfp);
	if (!pages) {
		dev_err(dev, "no pages\n");
		return NULL;
	}

	size = ALIGN(size, SZ_4K);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, gfp)) {
		dev_err(dev, "no sg table\n");
		goto out_free_pages;
	}

	if (!is_coherent) {
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
			       SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			__dma_flush_area(miter.addr, miter.length);
		sg_miter_stop(&miter);
	}

	spin_lock_irqsave(&mapping->lock, flags);
	dma_addr = __fast_smmu_alloc_iova(mapping, attrs, size);
	if (dma_addr == DMA_ERROR_CODE) {
		dev_err(dev, "no iova\n");
		spin_unlock_irqrestore(&mapping->lock, flags);
		goto out_free_sg;
	}
	iova_iter = dma_addr;
	sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	while (sg_miter_next(&miter)) {
		int nptes = miter.length >> FAST_PAGE_SHIFT;

		ptep = iopte_pmd_offset(mapping->pgtbl_pmds, iova_iter);
		if (unlikely(av8l_fast_map_public(
				     ptep, page_to_phys(miter.page),
				     miter.length, prot))) {
			dev_err(dev, "no map public\n");
			/* TODO: unwind previously successful mappings */
			goto out_free_iova;
		}
		fast_dmac_clean_range(mapping, ptep, ptep + nptes);
		iova_iter += miter.length;
	}
	sg_miter_stop(&miter);
	spin_unlock_irqrestore(&mapping->lock, flags);

	addr = dma_common_pages_remap(pages, size, VM_USERMAP, remap_prot,
				      __builtin_return_address(0));
	if (!addr) {
		dev_err(dev, "no common pages\n");
		goto out_unmap;
	}

	*handle = dma_addr;
	sg_free_table(&sgt);
	return addr;

out_unmap:
	/* need to take the lock again for page tables and iova */
	spin_lock_irqsave(&mapping->lock, flags);
	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr);
	av8l_fast_unmap_public(ptep, size);
	fast_dmac_clean_range(mapping, ptep, ptep + count);
out_free_iova:
	__fast_smmu_free_iova(mapping, dma_addr, size);
	spin_unlock_irqrestore(&mapping->lock, flags);
out_free_sg:
	sg_free_table(&sgt);
out_free_pages:
	__fast_smmu_free_pages(pages, count);
	return NULL;
}

static void fast_smmu_free(struct device *dev, size_t size,
			   void *vaddr, dma_addr_t dma_handle,
			   unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	struct vm_struct *area;
	struct page **pages;
	size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT;
	av8l_fast_iopte *ptep;
	unsigned long flags;

	size = ALIGN(size, SZ_4K);

	area = find_vm_area(vaddr);
	if (WARN_ON_ONCE(!area))
		return;

	pages = area->pages;
	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle);
	spin_lock_irqsave(&mapping->lock, flags);
	av8l_fast_unmap_public(ptep, size);
	fast_dmac_clean_range(mapping, ptep, ptep + count);
	__fast_smmu_free_iova(mapping, dma_handle, size);
	spin_unlock_irqrestore(&mapping->lock, flags);
	__fast_smmu_free_pages(pages, count);
}

static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
				void *cpu_addr, dma_addr_t dma_addr,
				size_t size, unsigned long attrs)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	struct page **pages;
	int i, nr_pages, ret = 0;
	bool coherent = is_dma_coherent(dev, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     coherent);
	area = find_vm_area(cpu_addr);
	if (!area)
		return -EINVAL;

	pages = area->pages;
	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	for (i = vma->vm_pgoff; i < nr_pages && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}

	return ret;
}

static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, unsigned long attrs)
{
	unsigned int n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;

	area = find_vm_area(cpu_addr);
	if (!area || !area->pages)
		return -EINVAL;

	return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, size,
					 GFP_KERNEL);
}

static dma_addr_t fast_smmu_dma_map_resource(
			struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	size_t offset = phys_addr & ~FAST_PAGE_MASK;
	size_t len = round_up(size + offset, FAST_PAGE_SIZE);
	dma_addr_t dma_addr;
	int prot;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	dma_addr = __fast_smmu_alloc_iova(mapping, attrs, len);
	spin_unlock_irqrestore(&mapping->lock, flags);

	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __fast_dma_direction_to_prot(dir);
	prot |= IOMMU_MMIO;

	if (iommu_map(mapping->domain, dma_addr, phys_addr - offset,
		      len, prot)) {
		spin_lock_irqsave(&mapping->lock, flags);
		__fast_smmu_free_iova(mapping, dma_addr, len);
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}
	return dma_addr + offset;
}

static void fast_smmu_dma_unmap_resource(
			struct device *dev, dma_addr_t addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	size_t offset = addr & ~FAST_PAGE_MASK;
	size_t len = round_up(size + offset, FAST_PAGE_SIZE);
	unsigned long flags;

	iommu_unmap(mapping->domain, addr - offset, len);
	spin_lock_irqsave(&mapping->lock, flags);
	__fast_smmu_free_iova(mapping, addr - offset, len);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

static int fast_smmu_mapping_error(struct device *dev,
				   dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast,
					  void *data)
{
	av8l_fast_iopte *ptep = data;
	dma_addr_t iova;
	unsigned long bitmap_idx;

	bitmap_idx = (unsigned long)(ptep - fast->pgtbl_pmds);
	iova = bitmap_idx << FAST_PAGE_SHIFT;
	dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
	dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
	dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
		fast->pgtbl_pmds, bitmap_idx);
	print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
		       32, 8, fast->bitmap, fast->bitmap_size, false);
}

static int fast_smmu_notify(struct notifier_block *self,
			    unsigned long action, void *data)
{
	struct dma_fast_smmu_mapping *fast = container_of(
		self, struct dma_fast_smmu_mapping, notifier);

	switch (action) {
	case MAPPED_OVER_STALE_TLB:
		__fast_smmu_mapped_over_stale(fast, data);
		return NOTIFY_OK;
	default:
		WARN(1, "Unhandled notifier action");
		return NOTIFY_DONE;
	}
}

static const struct dma_map_ops fast_smmu_dma_ops = {
	.alloc = fast_smmu_alloc,
	.free = fast_smmu_free,
	.mmap = fast_smmu_mmap_attrs,
	.get_sgtable = fast_smmu_get_sgtable,
	.map_page = fast_smmu_map_page,
	.unmap_page = fast_smmu_unmap_page,
	.sync_single_for_cpu = fast_smmu_sync_single_for_cpu,
	.sync_single_for_device = fast_smmu_sync_single_for_device,
	.map_sg = fast_smmu_map_sg,
	.unmap_sg = fast_smmu_unmap_sg,
	.sync_sg_for_cpu = fast_smmu_sync_sg_for_cpu,
	.sync_sg_for_device = fast_smmu_sync_sg_for_device,
	.map_resource = fast_smmu_dma_map_resource,
	.unmap_resource = fast_smmu_dma_unmap_resource,
	.mapping_error = fast_smmu_mapping_error,
};
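
/*
 * Illustrative client-side view (not part of this file; "my_dev" and "buf"
 * are hypothetical names): once a device has been attached to a fast
 * mapping (see fast_smmu_init_mapping() below), the generic DMA API is
 * routed through the ops above, e.g.:
 *
 *	dma_addr_t iova = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, iova))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(my_dev, iova, len, DMA_TO_DEVICE);
 *
 * which lands in fast_smmu_map_page(), fast_smmu_mapping_error() and
 * fast_smmu_unmap_page() respectively.
 */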

/**
 * __fast_smmu_create_mapping_sized
 * @base: bottom of the VA range
 * @size: size of the VA range in bytes
 *
 * Creates a mapping structure which holds information about used/unused IO
 * address ranges, which is required to perform mapping with IOMMU aware
 * functions. The only VA range supported is [0, 4GB).
 *
 * The client device needs to be attached to the mapping with the
 * fast_smmu_attach_device function.
 */
static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
	dma_addr_t base, u64 size)
{
	struct dma_fast_smmu_mapping *fast;

	fast = kzalloc(sizeof(struct dma_fast_smmu_mapping), GFP_KERNEL);
	if (!fast)
		goto err;

	fast->base = base;
	fast->size = size;
	fast->num_4k_pages = size >> FAST_PAGE_SHIFT;
	fast->bitmap_size = BITS_TO_LONGS(fast->num_4k_pages) * sizeof(long);

	fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL | __GFP_NOWARN |
				__GFP_NORETRY);
	if (!fast->bitmap)
		fast->bitmap = vzalloc(fast->bitmap_size);

	if (!fast->bitmap)
		goto err2;

	spin_lock_init(&fast->lock);

	return fast;
err2:
	kfree(fast);
err:
	return ERR_PTR(-ENOMEM);
}
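
/*
 * Sizing example (illustrative): for the maximum supported 4GB VA range,
 * num_4k_pages = SZ_4G >> FAST_PAGE_SHIFT = 1048576 bits, so bitmap_size =
 * BITS_TO_LONGS(1048576) * sizeof(long) = 128KB on a 64-bit kernel, which
 * is why the allocation above falls back to vzalloc() when the kmalloc
 * attempt fails.
 */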

/*
 * Based off of similar code from dma-iommu.c, but modified to use a different
 * iova allocator
 */
static void fast_smmu_reserve_pci_windows(struct device *dev,
					  struct dma_fast_smmu_mapping *mapping)
{
	struct pci_host_bridge *bridge;
	struct resource_entry *window;
	phys_addr_t start, end;
	struct pci_dev *pci_dev;
	unsigned long flags;

	if (!dev_is_pci(dev))
		return;

	pci_dev = to_pci_dev(dev);
	bridge = pci_find_host_bridge(pci_dev->bus);

	spin_lock_irqsave(&mapping->lock, flags);
	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM &&
		    resource_type(window->res) != IORESOURCE_IO)
			continue;

		start = round_down(window->res->start - window->offset,
				   FAST_PAGE_SIZE);
		end = round_up(window->res->end - window->offset,
			       FAST_PAGE_SIZE);
		start = max_t(unsigned long, mapping->base, start);
		end = min_t(unsigned long, mapping->base + mapping->size, end);
		if (start >= end)
			continue;

		dev_dbg(dev, "iova allocator reserved 0x%pa-0x%pa\n",
			&start, &end);

		start = (start - mapping->base) >> FAST_PAGE_SHIFT;
		end = (end - mapping->base) >> FAST_PAGE_SHIFT;
		bitmap_set(mapping->bitmap, start, end - start);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);
}

static int fast_smmu_errata_init(struct dma_iommu_mapping *mapping)
{
	struct dma_fast_smmu_mapping *fast = mapping->fast;
	int vmid = VMID_HLOS;
	int min_iova_align = 0;
	int force_iova_guard_page = 0;

	iommu_domain_get_attr(mapping->domain,
			      DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
			      &min_iova_align);
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
	iommu_domain_get_attr(mapping->domain,
			      DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
			      &force_iova_guard_page);

	if (vmid >= VMID_LAST || vmid < 0)
		vmid = VMID_HLOS;

	fast->min_iova_align = (min_iova_align) ? ARM_SMMU_MIN_IOVA_ALIGN :
						  PAGE_SIZE;

	if (force_iova_guard_page)
		fast->force_guard_page_len = PAGE_SIZE;

	fast->guard_page = arm_smmu_errata_get_guard_page(vmid);
	if (!fast->guard_page)
		return -ENOMEM;

	return 0;
}

/**
 * fast_smmu_init_mapping
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Called the first time a device is attached to this mapping.
 * Not for dma client use.
 */
int fast_smmu_init_mapping(struct device *dev,
			   struct dma_iommu_mapping *mapping)
{
	int err = 0;
	struct iommu_domain *domain = mapping->domain;
	struct iommu_pgtbl_info info;
	u64 size = (u64)mapping->bits << PAGE_SHIFT;

	if (mapping->base + size > (SZ_1G * 4ULL)) {
		dev_err(dev, "Iova end address too large\n");
		return -EINVAL;
	}

	mapping->fast = __fast_smmu_create_mapping_sized(mapping->base, size);
	if (IS_ERR(mapping->fast))
		return -ENOMEM;
	mapping->fast->domain = domain;
	mapping->fast->dev = dev;

	err = fast_smmu_errata_init(mapping);
	if (err)
		goto release_mapping;

	fast_smmu_reserve_pci_windows(dev, mapping->fast);

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
				  &info)) {
		dev_err(dev, "Couldn't get page table info\n");
		err = -EINVAL;
		goto release_mapping;
	}
	mapping->fast->pgtbl_pmds = info.pmds;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
				  &mapping->fast->is_smmu_pt_coherent)) {
		err = -EINVAL;
		goto release_mapping;
	}

	mapping->fast->notifier.notifier_call = fast_smmu_notify;
	av8l_register_notify(&mapping->fast->notifier);

	mapping->ops = &fast_smmu_dma_ops;
	return 0;

release_mapping:
	kvfree(mapping->fast->bitmap);
	kfree(mapping->fast);
	return err;
}

/**
 * fast_smmu_release_mapping
 * @kref: dma_iommu_mapping->kref
 *
 * Cleans up the given iommu mapping.
 */
void fast_smmu_release_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	kvfree(mapping->fast->bitmap);
	kfree(mapping->fast);
	iommu_domain_free(mapping->domain);
	kfree(mapping);
}