/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	"io-pgtable-fast: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/io-pgtable-fast.h>
#include <asm/cacheflush.h>
#include <linux/vmalloc.h>

#include "io-pgtable.h"

#define AV8L_FAST_MAX_ADDR_BITS		48

/* Struct accessors */
#define iof_pgtable_to_data(x)						\
	container_of((x), struct av8l_fast_io_pgtable, iop)

#define iof_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define iof_pgtable_ops_to_data(x)					\
	iof_pgtable_to_data(iof_pgtable_ops_to_pgtable(x))

struct av8l_fast_io_pgtable {
	struct io_pgtable	  iop;
	av8l_fast_iopte		 *pgd;
	av8l_fast_iopte		 *puds[4];
	av8l_fast_iopte		 *pmds;
	struct page		**pages; /* page table memory */
};
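
/*
 * Note on the geometry above: it is fixed. One pgd page has its first
 * four entries pointing at the four pud pages, each pud covering 1GB of
 * the 4GB IOVA space this format supports (av8l_fast_alloc_pgtable()
 * below forces cfg->ias = 32).
 */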

/* Page table bits */
#define AV8L_FAST_PTE_TYPE_SHIFT	0
#define AV8L_FAST_PTE_TYPE_MASK		0x3

#define AV8L_FAST_PTE_TYPE_BLOCK	1
#define AV8L_FAST_PTE_TYPE_TABLE	3
#define AV8L_FAST_PTE_TYPE_PAGE		3

#define AV8L_FAST_PTE_NSTABLE		(((av8l_fast_iopte)1) << 63)
#define AV8L_FAST_PTE_XN		(((av8l_fast_iopte)3) << 53)
#define AV8L_FAST_PTE_AF		(((av8l_fast_iopte)1) << 10)
#define AV8L_FAST_PTE_SH_NS		(((av8l_fast_iopte)0) << 8)
#define AV8L_FAST_PTE_SH_OS		(((av8l_fast_iopte)2) << 8)
#define AV8L_FAST_PTE_SH_IS		(((av8l_fast_iopte)3) << 8)
#define AV8L_FAST_PTE_NS		(((av8l_fast_iopte)1) << 5)
#define AV8L_FAST_PTE_VALID		(((av8l_fast_iopte)1) << 0)

#define AV8L_FAST_PTE_ATTR_LO_MASK	(((av8l_fast_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define AV8L_FAST_PTE_ATTR_HI_MASK	(((av8l_fast_iopte)6) << 52)
#define AV8L_FAST_PTE_ATTR_MASK		(AV8L_FAST_PTE_ATTR_LO_MASK |	\
					 AV8L_FAST_PTE_ATTR_HI_MASK)
#define AV8L_FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
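/*
 * 0xfffffffff000 selects bits [47:12]: the output-address field of a
 * 4K-granule descriptor with a 48-bit PA (AV8L_FAST_MAX_ADDR_BITS).
 */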

/* Stage-1 PTE */
#define AV8L_FAST_PTE_AP_PRIV_RW	(((av8l_fast_iopte)0) << 6)
#define AV8L_FAST_PTE_AP_RW		(((av8l_fast_iopte)1) << 6)
#define AV8L_FAST_PTE_AP_PRIV_RO	(((av8l_fast_iopte)2) << 6)
#define AV8L_FAST_PTE_AP_RO		(((av8l_fast_iopte)3) << 6)
#define AV8L_FAST_PTE_ATTRINDX_SHIFT	2
#define AV8L_FAST_PTE_nG		(((av8l_fast_iopte)1) << 11)

/* Stage-2 PTE */
#define AV8L_FAST_PTE_HAP_FAULT		(((av8l_fast_iopte)0) << 6)
#define AV8L_FAST_PTE_HAP_READ		(((av8l_fast_iopte)1) << 6)
#define AV8L_FAST_PTE_HAP_WRITE		(((av8l_fast_iopte)2) << 6)
#define AV8L_FAST_PTE_MEMATTR_OIWB	(((av8l_fast_iopte)0xf) << 2)
#define AV8L_FAST_PTE_MEMATTR_NC	(((av8l_fast_iopte)0x5) << 2)
#define AV8L_FAST_PTE_MEMATTR_DEV	(((av8l_fast_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define AV8L_FAST_TCR_TG0_4K		(0 << 14)
#define AV8L_FAST_TCR_TG0_64K		(1 << 14)
#define AV8L_FAST_TCR_TG0_16K		(2 << 14)

#define AV8L_FAST_TCR_SH0_SHIFT		12
#define AV8L_FAST_TCR_SH0_MASK		0x3
#define AV8L_FAST_TCR_SH_NS		0
#define AV8L_FAST_TCR_SH_OS		2
#define AV8L_FAST_TCR_SH_IS		3

#define AV8L_FAST_TCR_ORGN0_SHIFT	10
#define AV8L_FAST_TCR_IRGN0_SHIFT	8
#define AV8L_FAST_TCR_RGN_MASK		0x3
#define AV8L_FAST_TCR_RGN_NC		0
#define AV8L_FAST_TCR_RGN_WBWA		1
#define AV8L_FAST_TCR_RGN_WT		2
#define AV8L_FAST_TCR_RGN_WB		3

#define AV8L_FAST_TCR_SL0_SHIFT		6
#define AV8L_FAST_TCR_SL0_MASK		0x3

#define AV8L_FAST_TCR_T0SZ_SHIFT	0
#define AV8L_FAST_TCR_SZ_MASK		0xf

#define AV8L_FAST_TCR_PS_SHIFT		16
#define AV8L_FAST_TCR_PS_MASK		0x7

#define AV8L_FAST_TCR_IPS_SHIFT		32
#define AV8L_FAST_TCR_IPS_MASK		0x7

#define AV8L_FAST_TCR_PS_32_BIT		0x0ULL
#define AV8L_FAST_TCR_PS_36_BIT		0x1ULL
#define AV8L_FAST_TCR_PS_40_BIT		0x2ULL
#define AV8L_FAST_TCR_PS_42_BIT		0x3ULL
#define AV8L_FAST_TCR_PS_44_BIT		0x4ULL
#define AV8L_FAST_TCR_PS_48_BIT		0x5ULL

#define AV8L_FAST_TCR_EPD1_SHIFT	23
#define AV8L_FAST_TCR_EPD1_FAULT	1

#define AV8L_FAST_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define AV8L_FAST_MAIR_ATTR_MASK	0xff
#define AV8L_FAST_MAIR_ATTR_DEVICE	0x04
#define AV8L_FAST_MAIR_ATTR_NC		0x44
#define AV8L_FAST_MAIR_ATTR_WBRWA	0xff
#define AV8L_FAST_MAIR_ATTR_UPSTREAM	0xf4
#define AV8L_FAST_MAIR_ATTR_IDX_NC	0
#define AV8L_FAST_MAIR_ATTR_IDX_CACHE	1
#define AV8L_FAST_MAIR_ATTR_IDX_DEV	2
#define AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM 3

#define AV8L_FAST_PAGE_SHIFT		12


#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB

#include <asm/cacheflush.h>
#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(av8l_notifier_list);

void av8l_register_notify(struct notifier_block *nb)
{
	atomic_notifier_chain_register(&av8l_notifier_list, nb);
}
EXPORT_SYMBOL(av8l_register_notify);

static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
{
	if (unlikely(*ptep)) {
		atomic_notifier_call_chain(
			&av8l_notifier_list, MAPPED_OVER_STALE_TLB,
			(void *) ptep);
		pr_err("Tried to map over a non-vacant pte: 0x%llx @ %p\n",
		       *ptep, ptep);
		pr_err("Nearby memory:\n");
		print_hex_dump(KERN_ERR, "pgtbl: ", DUMP_PREFIX_ADDRESS,
			       32, 8, ptep - 16, 32 * sizeof(*ptep), false);
	}
}

void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, bool skip_sync)
{
	int i;
	av8l_fast_iopte *pmdp = pmds;

	for (i = 0; i < ((SZ_1G * 4UL) >> AV8L_FAST_PAGE_SHIFT); ++i) {
		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
			*pmdp = 0;
			if (!skip_sync)
				dmac_clean_range(pmdp, pmdp + 1);
		}
		pmdp++;
	}
}
#else
static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
{
}
#endif
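
/*
 * Illustrative (hypothetical) consumer of the stale-TLB notifier chain
 * above, assuming CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB=y. The callback
 * and notifier_block names are made up for the sketch; the chain passes
 * the offending pte pointer as the data argument:
 *
 *	static int my_stale_tlb_cb(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		if (action == MAPPED_OVER_STALE_TLB)
 *			pr_err("mapped over stale pte @ %p\n", data);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_stale_tlb_cb,
 *	};
 *
 *	av8l_register_notify(&my_nb);
 */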

/* caller must take care of cache maintenance on *ptep */
int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
			 int prot)
{
	int i, nptes = size >> AV8L_FAST_PAGE_SHIFT;
	av8l_fast_iopte pte = AV8L_FAST_PTE_XN
		| AV8L_FAST_PTE_TYPE_PAGE
		| AV8L_FAST_PTE_AF
		| AV8L_FAST_PTE_nG
		| AV8L_FAST_PTE_SH_OS;

	if (prot & IOMMU_MMIO)
		pte |= (AV8L_FAST_MAIR_ATTR_IDX_DEV
			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);
	else if (prot & IOMMU_CACHE)
		pte |= (AV8L_FAST_MAIR_ATTR_IDX_CACHE
			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);
	else if (prot & IOMMU_USE_UPSTREAM_HINT)
		pte |= (AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM
			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);

	if (!(prot & IOMMU_WRITE))
		pte |= AV8L_FAST_PTE_AP_RO;
	else
		pte |= AV8L_FAST_PTE_AP_RW;

	paddr &= AV8L_FAST_PTE_ADDR_MASK;
	for (i = 0; i < nptes; i++, paddr += SZ_4K) {
		__av8l_check_for_stale_tlb(ptep + i);
		*(ptep + i) = pte | paddr;
	}

	return 0;
}

static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;

	av8l_fast_map_public(ptep, paddr, size, prot);
	dmac_clean_range(ptep, ptep + nptes);

	return 0;
}

static void __av8l_fast_unmap(av8l_fast_iopte *ptep, size_t size,
			      bool need_stale_tlb_tracking)
{
	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
	int val = need_stale_tlb_tracking
		? AV8L_FAST_PTE_UNMAPPED_NEED_TLBI
		: 0;

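	/*
	 * Note: memset() writes its value argument a byte at a time, so
	 * this assumes AV8L_FAST_PTE_UNMAPPED_NEED_TLBI (defined in
	 * linux/io-pgtable-fast.h) is a single-byte pattern whose
	 * repetition still reads as a non-present (!VALID) PTE.
	 */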
	memset(ptep, val, sizeof(*ptep) * nptes);
}

/* caller must take care of cache maintenance on *ptep */
void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size)
{
	__av8l_fast_unmap(ptep, size, true);
}

static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			      size_t size)
{
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;

	__av8l_fast_unmap(ptep, size, false);
	dmac_clean_range(ptep, ptep + nptes);
	io_pgtable_tlb_flush_all(iop);

	return size;
}

#if defined(CONFIG_ARM64)
#define FAST_PGDNDX(va)	(((va) & 0x7fc0000000) >> 27)
#elif defined(CONFIG_ARM)
#define FAST_PGDNDX(va)	(((va) & 0xc0000000) >> 27)
#endif

static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
					  unsigned long iova)
{
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	av8l_fast_iopte pte, *pgdp, *pudp, *pmdp;
	unsigned long pgd;
	phys_addr_t phys;
	const unsigned long pts = AV8L_FAST_PTE_TYPE_SHIFT;
	const unsigned long ptm = AV8L_FAST_PTE_TYPE_MASK;
	const unsigned long ptt = AV8L_FAST_PTE_TYPE_TABLE;
	const unsigned long ptp = AV8L_FAST_PTE_TYPE_PAGE;
	const av8l_fast_iopte am = AV8L_FAST_PTE_ADDR_MASK;

	/* TODO: clean up some of these magic numbers... */

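	/*
	 * Decoding the magic numbers: each lookup below ORs a byte offset
	 * into a table base address. FAST_PGDNDX() extracts IOVA bits
	 * [38:30] (the level-1 index) pre-scaled to index * 8; likewise
	 * (iova & 0x3fe00000) >> 18 turns bits [29:21] into a byte offset
	 * into the pud page, and (iova & 0x1ff000) >> 9 turns bits [20:12]
	 * into a byte offset into the pmd page, each scaled by
	 * sizeof(av8l_fast_iopte) == 8.
	 */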
	pgd = (unsigned long)data->pgd | FAST_PGDNDX(iova);
	pgdp = (av8l_fast_iopte *)pgd;

	pte = *pgdp;
	if (((pte >> pts) & ptm) != ptt)
		return 0;
	pudp = phys_to_virt((pte & am) | ((iova & 0x3fe00000) >> 18));

	pte = *pudp;
	if (((pte >> pts) & ptm) != ptt)
		return 0;
	pmdp = phys_to_virt((pte & am) | ((iova & 0x1ff000) >> 9));

	pte = *pmdp;
	if (((pte >> pts) & ptm) != ptp)
		return 0;
	phys = pte & am;

	return phys | (iova & 0xfff);
}

static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, size_t *size)
{
	return -ENODEV;
}

static struct av8l_fast_io_pgtable *
av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
{
	struct av8l_fast_io_pgtable *data;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->iop.ops = (struct io_pgtable_ops) {
		.map = av8l_fast_map,
		.map_sg = av8l_fast_map_sg,
		.unmap = av8l_fast_unmap,
		.iova_to_phys = av8l_fast_iova_to_phys,
	};

	return data;
}

/*
 * We need 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and
 * 2048 pages for pmds (each pud page contains 512 table entries, each
 * pointing to a pmd).
 */
#define NUM_PGD_PAGES 1
#define NUM_PUD_PAGES 4
#define NUM_PMD_PAGES 2048
#define NUM_PGTBL_PAGES (NUM_PGD_PAGES + NUM_PUD_PAGES + NUM_PMD_PAGES)
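/*
 * Sanity check on the arithmetic: 4GB of VA at a 4K granule needs
 * 4GB / 4K = 1048576 leaf PTEs; at 512 8-byte entries per page that is
 * 1048576 / 512 = 2048 pmd pages, so the fully prepopulated tables cost
 * 2053 pages (~8MB) up front.
 */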

static int
av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data,
			       struct io_pgtable_cfg *cfg, void *cookie)
{
	int i, j, pg = 0;
	struct page **pages, *page;

	pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, __GFP_NOWARN |
							  __GFP_NORETRY);

	if (!pages)
		pages = vmalloc(sizeof(*pages) * NUM_PGTBL_PAGES);

	if (!pages)
		return -ENOMEM;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto err_free_pages_arr;
	pages[pg++] = page;
	data->pgd = page_address(page);

	/*
	 * We need 2048 entries at level 2 to map 4GB of VA space. A page
	 * can hold 512 entries, so we need 4 pages.
	 */
	for (i = 0; i < 4; ++i) {
		av8l_fast_iopte pte, *ptep;

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto err_free_pages;
		pages[pg++] = page;
		data->puds[i] = page_address(page);
		pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE;
		ptep = ((av8l_fast_iopte *)data->pgd) + i;
		*ptep = pte;
	}
	dmac_clean_range(data->pgd, data->pgd + 4);

	/*
	 * We have 4 puds, each of which can point to 512 pmds, so we'll
	 * have 2048 pmds, each of which can hold 512 ptes, for a grand
	 * total of 2048*512=1048576 PTEs.
	 */
	for (i = 0; i < 4; ++i) {
		for (j = 0; j < 512; ++j) {
			av8l_fast_iopte pte, *pudp;
			void *addr;

			page = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!page)
				goto err_free_pages;
			pages[pg++] = page;

			addr = page_address(page);
			dmac_clean_range(addr, addr + SZ_4K);

			pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE;
			pudp = data->puds[i] + j;
			*pudp = pte;
		}
		dmac_clean_range(data->puds[i], data->puds[i] + 512);
	}

	if (WARN_ON(pg != NUM_PGTBL_PAGES))
		goto err_free_pages;

	/*
	 * We map the pmds into a virtually contiguous space so that we
	 * don't have to traverse the first two levels of the page tables
	 * to find the appropriate pmd. Instead, it will be a simple
	 * offset from the virtual base of the pmds.
	 */
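	/*
	 * Sketch of what this buys us: iopte_pmd_offset() (declared in
	 * linux/io-pgtable-fast.h) can presumably reduce to plain pointer
	 * arithmetic, something like
	 *
	 *	return pmds + (iova >> AV8L_FAST_PAGE_SHIFT);
	 *
	 * with no pgd/pud walk on the map/unmap fast path.
	 */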
	data->pmds = vmap(&pages[NUM_PGD_PAGES + NUM_PUD_PAGES], NUM_PMD_PAGES,
			  VM_IOREMAP, PAGE_KERNEL);
	if (!data->pmds)
		goto err_free_pages;

	data->pages = pages;
	return 0;

err_free_pages:
	for (i = 0; i < pg; ++i)
		__free_page(pages[i]);
err_free_pages_arr:
	kvfree(pages);
	return -ENOMEM;
}

static struct io_pgtable *
av8l_fast_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct av8l_fast_io_pgtable *data =
		av8l_fast_alloc_pgtable_data(cfg);

	if (!data)
		return NULL;

	/* restrict according to the fast map requirements */
	cfg->ias = 32;
	cfg->pgsize_bitmap = SZ_4K;

	/* TCR */
	if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT)
		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT);
	else if (cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT)
		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_IRGN0_SHIFT) |
			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT);
	else
		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_ORGN0_SHIFT);

	reg |= AV8L_FAST_TCR_TG0_4K;

	switch (cfg->oas) {
	case 32:
		reg |= (AV8L_FAST_TCR_PS_32_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (AV8L_FAST_TCR_PS_36_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (AV8L_FAST_TCR_PS_40_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (AV8L_FAST_TCR_PS_42_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (AV8L_FAST_TCR_PS_44_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (AV8L_FAST_TCR_PS_48_BIT << AV8L_FAST_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << AV8L_FAST_TCR_T0SZ_SHIFT;
	reg |= AV8L_FAST_TCR_EPD1_FAULT << AV8L_FAST_TCR_EPD1_SHIFT;
#if defined(CONFIG_ARM)
	reg |= ARM_32_LPAE_TCR_EAE;
#endif
	cfg->av8l_fast_cfg.tcr = reg;

	/* MAIRs */
	reg = (AV8L_FAST_MAIR_ATTR_NC
	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_NC)) |
	      (AV8L_FAST_MAIR_ATTR_WBRWA
	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_CACHE)) |
	      (AV8L_FAST_MAIR_ATTR_DEVICE
	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_DEV)) |
	      (AV8L_FAST_MAIR_ATTR_UPSTREAM
	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM));

	cfg->av8l_fast_cfg.mair[0] = reg;
	cfg->av8l_fast_cfg.mair[1] = 0;

	/* Allocate all page table memory! */
	if (av8l_fast_prepopulate_pgtables(data, cfg, cookie))
		goto out_free_data;

	cfg->av8l_fast_cfg.pmds = data->pmds;

	/* TTBRs */
	cfg->av8l_fast_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->av8l_fast_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static void av8l_fast_free_pgtable(struct io_pgtable *iop)
{
	int i;
	struct av8l_fast_io_pgtable *data = iof_pgtable_to_data(iop);

	vunmap(data->pmds);
	for (i = 0; i < NUM_PGTBL_PAGES; ++i)
		__free_page(data->pages[i]);
	kvfree(data->pages);
	kfree(data);
}

struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns = {
	.alloc	= av8l_fast_alloc_pgtable,
	.free	= av8l_fast_free_pgtable,
};
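
/*
 * Illustrative allocation flow for a driver, assuming ARM_V8L_FAST is
 * registered with the io-pgtable core (the gather ops and cookie are
 * caller-provided; see the selftest below for a complete in-tree
 * example):
 *
 *	struct io_pgtable_cfg cfg = {
 *		.quirks		= 0,
 *		.tlb		= &my_tlb_ops,
 *		.ias		= 32,
 *		.oas		= 32,
 *		.pgsize_bitmap	= SZ_4K,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_V8L_FAST, &cfg, cookie);
 *	if (ops)
 *		ops->map(ops, iova, paddr, SZ_4K,
 *			 IOMMU_READ | IOMMU_WRITE);
 */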


#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST

#include <linux/dma-contiguous.h>

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, size_t granule,
				bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

/*
 * Returns true if the iova range is successfully mapped to the contiguous
 * phys range in ops.
 */
static bool av8l_fast_range_has_specific_mapping(struct io_pgtable_ops *ops,
						 const unsigned long iova_start,
						 const phys_addr_t phys_start,
						 const size_t size)
{
	u64 iova = iova_start;
	phys_addr_t phys = phys_start;

	while (iova < (iova_start + size)) {
		/* + 42 just to make sure offsetting is working */
		if (ops->iova_to_phys(ops, iova + 42) != (phys + 42))
			return false;
		iova += SZ_4K;
		phys += SZ_4K;
	}
	return true;
}

static int __init av8l_fast_positive_testing(void)
{
	int failed = 0;
	u64 iova;
	struct io_pgtable_ops *ops;
	struct io_pgtable_cfg cfg;
	struct av8l_fast_io_pgtable *data;
	av8l_fast_iopte *pmds;
	u64 max = SZ_1G * 4ULL - 1;

	cfg = (struct io_pgtable_cfg) {
		.quirks = 0,
		.tlb = &dummy_tlb_ops,
		.ias = 32,
		.oas = 32,
		.pgsize_bitmap = SZ_4K,
	};

	cfg_cookie = &cfg;
	ops = alloc_io_pgtable_ops(ARM_V8L_FAST, &cfg, &cfg);

	if (WARN_ON(!ops))
		return 1;

	data = iof_pgtable_ops_to_data(ops);
	pmds = data->pmds;

	/* map the entire 4GB VA space with 4K map calls */
	for (iova = 0; iova < max; iova += SZ_4K) {
		if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) {
			failed++;
			continue;
		}
	}
	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
							  max)))
		failed++;

	/* unmap it all */
	for (iova = 0; iova < max; iova += SZ_4K) {
		if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K))
			failed++;
	}

	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(pmds, false);

	/* map the entire 4GB VA space with 8K map calls */
	for (iova = 0; iova < max; iova += SZ_8K) {
		if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) {
			failed++;
			continue;
		}
	}

	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
							  max)))
		failed++;

	/* unmap it all with 8K unmap calls */
	for (iova = 0; iova < max; iova += SZ_8K) {
		if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K))
			failed++;
	}

	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(pmds, false);

	/* map the entire 4GB VA space with 16K map calls */
	for (iova = 0; iova < max; iova += SZ_16K) {
		if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) {
			failed++;
			continue;
		}
	}

	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
							  max)))
		failed++;

	/* unmap it all */
	for (iova = 0; iova < max; iova += SZ_16K) {
		if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K))
			failed++;
	}

	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(pmds, false);

	/* map the entire 4GB VA space with 64K map calls */
	for (iova = 0; iova < max; iova += SZ_64K) {
		if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) {
			failed++;
			continue;
		}
	}

	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
							  max)))
		failed++;

	/* unmap it all at once */
	if (WARN_ON(ops->unmap(ops, 0, max) != max))
		failed++;

	free_io_pgtable_ops(ops);
	return failed;
}

static int __init av8l_fast_do_selftests(void)
{
	int failed = 0;

	failed += av8l_fast_positive_testing();

	pr_err("selftest: completed with %d failures\n", failed);

	return 0;
}
subsys_initcall(av8l_fast_do_selftests);
#endif