/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	"io-pgtable-fast: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/io-pgtable-fast.h>
#include <asm/cacheflush.h>

#include "io-pgtable.h"

#define AV8L_FAST_MAX_ADDR_BITS		48

/* Struct accessors */
#define iof_pgtable_to_data(x)						\
	container_of((x), struct av8l_fast_io_pgtable, iop)

#define iof_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define iof_pgtable_ops_to_data(x)					\
	iof_pgtable_to_data(iof_pgtable_ops_to_pgtable(x))

struct av8l_fast_io_pgtable {
	struct io_pgtable	iop;
	av8l_fast_iopte		*pgd;
	av8l_fast_iopte		*puds[4];
	av8l_fast_iopte		*pmds;
	struct page		**pages; /* page table memory */
};
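
/*
 * Geometry (AArch64 long-descriptor format, 4K granule, 32-bit IAS),
 * as built by av8l_fast_prepopulate_pgtables() below: the pgd is a
 * single level-1 page of which only the first 4 entries are used
 * (1GB of IOVA each); they point at the 4 "pud" (level-2) pages,
 * whose 512 entries each point at one of the 2048 "pmd" (level-3)
 * pages holding the 4K leaf ptes.
 */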

/* Page table bits */
#define AV8L_FAST_PTE_TYPE_SHIFT	0
#define AV8L_FAST_PTE_TYPE_MASK		0x3

#define AV8L_FAST_PTE_TYPE_BLOCK	1
#define AV8L_FAST_PTE_TYPE_TABLE	3
#define AV8L_FAST_PTE_TYPE_PAGE		3

#define AV8L_FAST_PTE_NSTABLE		(((av8l_fast_iopte)1) << 63)
#define AV8L_FAST_PTE_XN		(((av8l_fast_iopte)3) << 53)
#define AV8L_FAST_PTE_AF		(((av8l_fast_iopte)1) << 10)
#define AV8L_FAST_PTE_SH_NS		(((av8l_fast_iopte)0) << 8)
#define AV8L_FAST_PTE_SH_OS		(((av8l_fast_iopte)2) << 8)
#define AV8L_FAST_PTE_SH_IS		(((av8l_fast_iopte)3) << 8)
#define AV8L_FAST_PTE_NS		(((av8l_fast_iopte)1) << 5)
#define AV8L_FAST_PTE_VALID		(((av8l_fast_iopte)1) << 0)

#define AV8L_FAST_PTE_ATTR_LO_MASK	(((av8l_fast_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define AV8L_FAST_PTE_ATTR_HI_MASK	(((av8l_fast_iopte)6) << 52)
#define AV8L_FAST_PTE_ATTR_MASK		(AV8L_FAST_PTE_ATTR_LO_MASK |	\
					 AV8L_FAST_PTE_ATTR_HI_MASK)
#define AV8L_FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)


/* Stage-1 PTE */
#define AV8L_FAST_PTE_AP_PRIV_RW	(((av8l_fast_iopte)0) << 6)
#define AV8L_FAST_PTE_AP_RW		(((av8l_fast_iopte)1) << 6)
#define AV8L_FAST_PTE_AP_PRIV_RO	(((av8l_fast_iopte)2) << 6)
#define AV8L_FAST_PTE_AP_RO		(((av8l_fast_iopte)3) << 6)
#define AV8L_FAST_PTE_ATTRINDX_SHIFT	2
#define AV8L_FAST_PTE_nG		(((av8l_fast_iopte)1) << 11)

/* Stage-2 PTE */
#define AV8L_FAST_PTE_HAP_FAULT		(((av8l_fast_iopte)0) << 6)
#define AV8L_FAST_PTE_HAP_READ		(((av8l_fast_iopte)1) << 6)
#define AV8L_FAST_PTE_HAP_WRITE		(((av8l_fast_iopte)2) << 6)
#define AV8L_FAST_PTE_MEMATTR_OIWB	(((av8l_fast_iopte)0xf) << 2)
#define AV8L_FAST_PTE_MEMATTR_NC	(((av8l_fast_iopte)0x5) << 2)
#define AV8L_FAST_PTE_MEMATTR_DEV	(((av8l_fast_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define AV8L_FAST_TCR_TG0_4K		(0 << 14)
#define AV8L_FAST_TCR_TG0_64K		(1 << 14)
#define AV8L_FAST_TCR_TG0_16K		(2 << 14)

#define AV8L_FAST_TCR_SH0_SHIFT		12
#define AV8L_FAST_TCR_SH0_MASK		0x3
#define AV8L_FAST_TCR_SH_NS		0
#define AV8L_FAST_TCR_SH_OS		2
#define AV8L_FAST_TCR_SH_IS		3

#define AV8L_FAST_TCR_ORGN0_SHIFT	10
#define AV8L_FAST_TCR_IRGN0_SHIFT	8
#define AV8L_FAST_TCR_RGN_MASK		0x3
#define AV8L_FAST_TCR_RGN_NC		0
#define AV8L_FAST_TCR_RGN_WBWA		1
#define AV8L_FAST_TCR_RGN_WT		2
#define AV8L_FAST_TCR_RGN_WB		3

#define AV8L_FAST_TCR_SL0_SHIFT		6
#define AV8L_FAST_TCR_SL0_MASK		0x3

#define AV8L_FAST_TCR_T0SZ_SHIFT	0
#define AV8L_FAST_TCR_SZ_MASK		0xf

#define AV8L_FAST_TCR_PS_SHIFT		16
#define AV8L_FAST_TCR_PS_MASK		0x7

#define AV8L_FAST_TCR_IPS_SHIFT		32
#define AV8L_FAST_TCR_IPS_MASK		0x7

#define AV8L_FAST_TCR_PS_32_BIT		0x0ULL
#define AV8L_FAST_TCR_PS_36_BIT		0x1ULL
#define AV8L_FAST_TCR_PS_40_BIT		0x2ULL
#define AV8L_FAST_TCR_PS_42_BIT		0x3ULL
#define AV8L_FAST_TCR_PS_44_BIT		0x4ULL
#define AV8L_FAST_TCR_PS_48_BIT		0x5ULL

#define AV8L_FAST_TCR_EPD1_SHIFT	23
#define AV8L_FAST_TCR_EPD1_FAULT	1

#define AV8L_FAST_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define AV8L_FAST_MAIR_ATTR_MASK	0xff
#define AV8L_FAST_MAIR_ATTR_DEVICE	0x04
#define AV8L_FAST_MAIR_ATTR_NC		0x44
#define AV8L_FAST_MAIR_ATTR_WBRWA	0xff
#define AV8L_FAST_MAIR_ATTR_UPSTREAM	0xf4
#define AV8L_FAST_MAIR_ATTR_IDX_NC	0
#define AV8L_FAST_MAIR_ATTR_IDX_CACHE	1
#define AV8L_FAST_MAIR_ATTR_IDX_DEV	2
#define AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM	3

#define AV8L_FAST_PAGE_SHIFT		12


#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB

#include <asm/cacheflush.h>
#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(av8l_notifier_list);

void av8l_register_notify(struct notifier_block *nb)
{
	atomic_notifier_chain_register(&av8l_notifier_list, nb);
}
EXPORT_SYMBOL(av8l_register_notify);
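
/*
 * Hypothetical client sketch (names below are illustrative, not part of
 * this file): a driver that wants to be told when a map lands on a pte
 * whose TLB entry may still be live can hook the chain above, e.g.:
 *
 *	static int stale_tlb_cb(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		if (action == MAPPED_OVER_STALE_TLB)
 *			pr_warn("mapped over stale pte at %p\n", data);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block stale_tlb_nb = {
 *		.notifier_call = stale_tlb_cb,
 *	};
 *
 *	av8l_register_notify(&stale_tlb_nb);
 */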

static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
{
	if (unlikely(*ptep)) {
		atomic_notifier_call_chain(
			&av8l_notifier_list, MAPPED_OVER_STALE_TLB,
			(void *) ptep);
		pr_err("Tried to map over a non-vacant pte: 0x%llx @ %p\n",
		       *ptep, ptep);
		pr_err("Nearby memory:\n");
		print_hex_dump(KERN_ERR, "pgtbl: ", DUMP_PREFIX_ADDRESS,
			       32, 8, ptep - 16, 32 * sizeof(*ptep), false);
	}
}

void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, bool skip_sync)
{
	int i;
	av8l_fast_iopte *pmdp = pmds;

	for (i = 0; i < ((SZ_1G * 4UL) >> AV8L_FAST_PAGE_SHIFT); ++i) {
		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
			*pmdp = 0;
			if (!skip_sync)
				dmac_clean_range(pmdp, pmdp + 1);
		}
		pmdp++;
	}
}
#else
static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
{
}
#endif

/* caller must take care of cache maintenance on *ptep */
int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
			 int prot)
{
	int i, nptes = size >> AV8L_FAST_PAGE_SHIFT;
	av8l_fast_iopte pte = AV8L_FAST_PTE_XN
		| AV8L_FAST_PTE_TYPE_PAGE
		| AV8L_FAST_PTE_AF
		| AV8L_FAST_PTE_nG
		| AV8L_FAST_PTE_SH_OS;

	if (prot & IOMMU_MMIO)
		pte |= (AV8L_FAST_MAIR_ATTR_IDX_DEV
			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);
	else if (prot & IOMMU_CACHE)
		pte |= (AV8L_FAST_MAIR_ATTR_IDX_CACHE
			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);
	else if (prot & IOMMU_USE_UPSTREAM_HINT)
		pte |= (AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM
			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);

	if (!(prot & IOMMU_WRITE))
		pte |= AV8L_FAST_PTE_AP_RO;
	else
		pte |= AV8L_FAST_PTE_AP_RW;

	paddr &= AV8L_FAST_PTE_ADDR_MASK;
	for (i = 0; i < nptes; i++, paddr += SZ_4K) {
		__av8l_check_for_stale_tlb(ptep + i);
		*(ptep + i) = pte | paddr;
	}

	return 0;
}
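
/*
 * Sketch of the expected calling pattern (caller-side names here are
 * illustrative; the real external client obtains the virtually
 * contiguous pte array from cfg->av8l_fast_cfg.pmds):
 *
 *	av8l_fast_iopte *ptep = pmds + (iova >> AV8L_FAST_PAGE_SHIFT);
 *
 *	av8l_fast_map_public(ptep, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	dmac_clean_range(ptep, ptep + 1);	// the cache maintenance
 *						// required of the caller
 */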

static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;

	av8l_fast_map_public(ptep, paddr, size, prot);
	dmac_clean_range(ptep, ptep + nptes);

	return 0;
}

static void __av8l_fast_unmap(av8l_fast_iopte *ptep, size_t size,
			      bool need_stale_tlb_tracking)
{
	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
	int val = need_stale_tlb_tracking
		? AV8L_FAST_PTE_UNMAPPED_NEED_TLBI
		: 0;

	memset(ptep, val, sizeof(*ptep) * nptes);
}

/* caller must take care of cache maintenance on *ptep */
void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size)
{
	__av8l_fast_unmap(ptep, size, true);
}

/* upper layer must take care of TLB invalidation */
static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			      size_t size)
{
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;

	__av8l_fast_unmap(ptep, size, false);
	dmac_clean_range(ptep, ptep + nptes);

	return size;
}

static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
					  unsigned long iova)
{
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	av8l_fast_iopte pte, *pgdp, *pudp, *pmdp;
	phys_addr_t phys;
	const unsigned long pts = AV8L_FAST_PTE_TYPE_SHIFT;
	const unsigned long ptm = AV8L_FAST_PTE_TYPE_MASK;
	const unsigned long ptt = AV8L_FAST_PTE_TYPE_TABLE;
	const unsigned long ptp = AV8L_FAST_PTE_TYPE_PAGE;
	const av8l_fast_iopte am = AV8L_FAST_PTE_ADDR_MASK;

	/* TODO: clean up some of these magic numbers... */
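	/*
	 * How the masks below decompose the iova (4K granule):
	 * bits [38:30] index the pgd, bits [29:21] the pud and bits
	 * [20:12] the pmd. Each index is shifted left by 3 (entries
	 * are 8 bytes) before being ORed into the table base, so e.g.
	 * (iova & 0x7fc0000000) >> 27 == ((iova >> 30) & 0x1ff) * 8.
	 */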

	pgdp = (av8l_fast_iopte *)
		(((unsigned long)data->pgd) | ((iova & 0x7fc0000000) >> 27));
	pte = *pgdp;
	if (((pte >> pts) & ptm) != ptt)
		return 0;
	pudp = phys_to_virt((pte & am) | ((iova & 0x3fe00000) >> 18));

	pte = *pudp;
	if (((pte >> pts) & ptm) != ptt)
		return 0;
	pmdp = phys_to_virt((pte & am) | ((iova & 0x1ff000) >> 9));

	pte = *pmdp;
	if (((pte >> pts) & ptm) != ptp)
		return 0;
	phys = pte & am;

	return phys | (iova & 0xfff);
}

static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, size_t *size)
{
	return -ENODEV;
}

static struct av8l_fast_io_pgtable *
av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
{
	struct av8l_fast_io_pgtable *data;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= av8l_fast_map,
		.map_sg		= av8l_fast_map_sg,
		.unmap		= av8l_fast_unmap,
		.iova_to_phys	= av8l_fast_iova_to_phys,
	};

	return data;
}

/*
 * We need 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and
 * 2048 pages for pmds (each pud page contains 512 table entries, each
 * pointing to a pmd).
 */
#define NUM_PGD_PAGES 1
#define NUM_PUD_PAGES 4
#define NUM_PMD_PAGES 2048
#define NUM_PGTBL_PAGES (NUM_PGD_PAGES + NUM_PUD_PAGES + NUM_PMD_PAGES)
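
/*
 * Worked out: 1 + 4 + 2048 = 2053 4K pages, i.e. a fixed cost of a
 * little over 8MB of page table memory to cover the full 4GB (2^20 pte)
 * space, all allocated up front so that map/unmap never allocate.
 */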

static int
av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data,
			       struct io_pgtable_cfg *cfg, void *cookie)
{
	int i, j, pg = 0;
	struct page **pages, *page;

	pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES,
			GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);

	if (!pages)
		pages = vmalloc(sizeof(*pages) * NUM_PGTBL_PAGES);

	if (!pages)
		return -ENOMEM;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto err_free_pages_arr;
	pages[pg++] = page;
	data->pgd = page_address(page);

	/*
	 * We need 2048 entries at level 2 to map 4GB of VA space. A page
	 * can hold 512 entries, so we need 4 pages.
	 */
	for (i = 0; i < 4; ++i) {
		av8l_fast_iopte pte, *ptep;

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto err_free_pages;
		pages[pg++] = page;
		data->puds[i] = page_address(page);
		pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE;
		ptep = ((av8l_fast_iopte *)data->pgd) + i;
		*ptep = pte;
	}
	dmac_clean_range(data->pgd, data->pgd + 4);

	/*
	 * We have 4 puds, each of which can point to 512 pmds, so we'll
	 * have 2048 pmds, each of which can hold 512 ptes, for a grand
	 * total of 2048*512 = 1048576 PTEs.
	 */
	for (i = 0; i < 4; ++i) {
		for (j = 0; j < 512; ++j) {
			av8l_fast_iopte pte, *pudp;

			page = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!page)
				goto err_free_pages;
			pages[pg++] = page;
			pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE;
			pudp = data->puds[i] + j;
			*pudp = pte;
		}
		dmac_clean_range(data->puds[i], data->puds[i] + 512);
	}

	if (WARN_ON(pg != NUM_PGTBL_PAGES))
		goto err_free_pages;

	/*
	 * We map the pmds into a virtually contiguous space so that we
	 * don't have to traverse the first two levels of the page tables
	 * to find the appropriate pmd page. Instead, it will be a simple
	 * offset from the virtual base of the pmds.
	 */
	data->pmds = vmap(&pages[NUM_PGD_PAGES + NUM_PUD_PAGES], NUM_PMD_PAGES,
			  VM_IOREMAP, PAGE_KERNEL);
	if (!data->pmds)
		goto err_free_pages;

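	/*
	 * From here on, the leaf pte for any iova in the 32-bit space is
	 * plain arithmetic: pmds + (iova >> AV8L_FAST_PAGE_SHIFT), which
	 * is the flat lookup iopte_pmd_offset() performs for the map and
	 * unmap paths above.
	 */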
	data->pages = pages;
	return 0;

err_free_pages:
	for (i = 0; i < pg; ++i)
		__free_page(pages[i]);
err_free_pages_arr:
	kvfree(pages);
	return -ENOMEM;
}

static struct io_pgtable *
av8l_fast_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct av8l_fast_io_pgtable *data =
		av8l_fast_alloc_pgtable_data(cfg);

	if (!data)
		return NULL;

	/* restrict according to the fast map requirements */
	cfg->ias = 32;
	cfg->pgsize_bitmap = SZ_4K;

	/* TCR */
	if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT)
		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT);
	else if (cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT)
		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_IRGN0_SHIFT) |
			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT);
	else
		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_ORGN0_SHIFT);

	reg |= AV8L_FAST_TCR_TG0_4K;

	switch (cfg->oas) {
	case 32:
		reg |= (AV8L_FAST_TCR_PS_32_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (AV8L_FAST_TCR_PS_36_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (AV8L_FAST_TCR_PS_40_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (AV8L_FAST_TCR_PS_42_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (AV8L_FAST_TCR_PS_44_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (AV8L_FAST_TCR_PS_48_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

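	/*
	 * T0SZ = 64 - IAS = 32, i.e. a 4GB IOVA space; EPD1 makes any
	 * TTBR1 walk fault, since only TTBR0 is used here.
	 */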
	reg |= (64ULL - cfg->ias) << AV8L_FAST_TCR_T0SZ_SHIFT;
	reg |= AV8L_FAST_TCR_EPD1_FAULT << AV8L_FAST_TCR_EPD1_SHIFT;
	cfg->av8l_fast_cfg.tcr = reg;

	/* MAIRs */
	reg = (AV8L_FAST_MAIR_ATTR_NC
	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_NC)) |
	      (AV8L_FAST_MAIR_ATTR_WBRWA
	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_CACHE)) |
	      (AV8L_FAST_MAIR_ATTR_DEVICE
	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_DEV)) |
	      (AV8L_FAST_MAIR_ATTR_UPSTREAM
	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM));

	cfg->av8l_fast_cfg.mair[0] = reg;
	cfg->av8l_fast_cfg.mair[1] = 0;

	/* Allocate all page table memory! */
	if (av8l_fast_prepopulate_pgtables(data, cfg, cookie))
		goto out_free_data;

	cfg->av8l_fast_cfg.pmds = data->pmds;

	/* TTBRs */
	cfg->av8l_fast_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->av8l_fast_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static void av8l_fast_free_pgtable(struct io_pgtable *iop)
{
	int i;
	struct av8l_fast_io_pgtable *data = iof_pgtable_to_data(iop);

	vunmap(data->pmds);
	for (i = 0; i < NUM_PGTBL_PAGES; ++i)
		__free_page(data->pages[i]);
	kvfree(data->pages);
	kfree(data);
}

struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns = {
	.alloc	= av8l_fast_alloc_pgtable,
	.free	= av8l_fast_free_pgtable,
};
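
/*
 * These init functions are hooked up under the ARM_V8L_FAST format (the
 * enum value the selftest below passes to alloc_io_pgtable_ops()); the
 * registration itself presumably lives in io-pgtable.c alongside the
 * other formats.
 */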


#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST

#include <linux/dma-contiguous.h>

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, size_t granule,
				bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

/*
 * Returns true if the iova range is successfully mapped to the contiguous
 * phys range in ops.
 */
static bool av8l_fast_range_has_specific_mapping(struct io_pgtable_ops *ops,
						 const unsigned long iova_start,
						 const phys_addr_t phys_start,
						 const size_t size)
{
	unsigned long iova = iova_start;
	phys_addr_t phys = phys_start;

	while (iova < (iova_start + size)) {
		/* + 42 just to make sure offsetting is working */
		if (ops->iova_to_phys(ops, iova + 42) != (phys + 42))
			return false;
		iova += SZ_4K;
		phys += SZ_4K;
	}
	return true;
}

static int __init av8l_fast_positive_testing(void)
{
	int failed = 0;
	unsigned long iova;
	struct io_pgtable_ops *ops;
	struct io_pgtable_cfg cfg;
	struct av8l_fast_io_pgtable *data;
	av8l_fast_iopte *pmds;

	cfg = (struct io_pgtable_cfg) {
		.quirks = 0,
		.tlb = &dummy_tlb_ops,
		.ias = 32,
		.oas = 32,
		.pgsize_bitmap = SZ_4K,
	};

	cfg_cookie = &cfg;
	ops = alloc_io_pgtable_ops(ARM_V8L_FAST, &cfg, &cfg);

	if (WARN_ON(!ops))
		return 1;

	data = iof_pgtable_ops_to_data(ops);
	pmds = data->pmds;

	/* map the entire 4GB VA space with 4K map calls */
	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
		if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) {
			failed++;
			continue;
		}
	}

	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
							  SZ_1G * 4UL)))
		failed++;

	/* unmap it all */
	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
		if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K))
			failed++;
	}

	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(pmds, false);

	/* map the entire 4GB VA space with 8K map calls */
	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
		if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) {
			failed++;
			continue;
		}
	}

	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
							  SZ_1G * 4UL)))
		failed++;

	/* unmap it all with 8K unmap calls */
	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
		if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K))
			failed++;
	}

	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(pmds, false);

	/* map the entire 4GB VA space with 16K map calls */
	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
		if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) {
			failed++;
			continue;
		}
	}

	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
							  SZ_1G * 4UL)))
		failed++;

	/* unmap it all */
	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
		if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K))
			failed++;
	}

	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(pmds, false);

	/* map the entire 4GB VA space with 64K map calls */
	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_64K) {
		if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) {
			failed++;
			continue;
		}
	}

	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
							  SZ_1G * 4UL)))
		failed++;

	/* unmap it all at once */
	if (WARN_ON(ops->unmap(ops, 0, SZ_1G * 4UL) != SZ_1G * 4UL))
		failed++;

	free_io_pgtable_ops(ops);
	return failed;
}

static int __init av8l_fast_do_selftests(void)
{
	int failed = 0;

	failed += av8l_fast_positive_testing();

	pr_err("selftest: completed with %d failures\n", failed);

	return 0;
}
subsys_initcall(av8l_fast_do_selftests);
#endif