blob: 952a8fd298e43d470429306be2d2aa724b71ac49 [file] [log] [blame]
Will Deacone1d3c0f2014-11-14 17:18:23 +00001/*
2 * CPU-agnostic ARM page table allocator.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) 2014 ARM Limited
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
22
23#include <linux/iommu.h>
24#include <linux/kernel.h>
Mitchel Humpherysdaab0412015-04-23 16:19:05 -070025#include <linux/scatterlist.h>
Will Deacone1d3c0f2014-11-14 17:18:23 +000026#include <linux/sizes.h>
27#include <linux/slab.h>
28#include <linux/types.h>
Lada Trimasova8f6aff92016-01-27 11:10:32 +000029#include <linux/dma-mapping.h>
Will Deacone1d3c0f2014-11-14 17:18:23 +000030
Robin Murphy87a91b12015-07-29 19:46:09 +010031#include <asm/barrier.h>
32
Will Deacone1d3c0f2014-11-14 17:18:23 +000033#include "io-pgtable.h"
34
35#define ARM_LPAE_MAX_ADDR_BITS 48
36#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
37#define ARM_LPAE_MAX_LEVELS 4
38
39/* Struct accessors */
40#define io_pgtable_to_data(x) \
41 container_of((x), struct arm_lpae_io_pgtable, iop)
42
Will Deacone1d3c0f2014-11-14 17:18:23 +000043#define io_pgtable_ops_to_data(x) \
44 io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
45
46/*
47 * For consistency with the architecture, we always consider
48 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
49 */
50#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
51
52/*
53 * Calculate the right shift amount to get to the portion describing level l
54 * in a virtual address mapped by the pagetable in d.
55 */
56#define ARM_LPAE_LVL_SHIFT(l,d) \
57 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
58 * (d)->bits_per_level) + (d)->pg_shift)
59
Robin Murphy06c610e2015-12-07 18:18:53 +000060#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift)
61
Will Deacon367bd972015-02-16 18:38:20 +000062#define ARM_LPAE_PAGES_PER_PGD(d) \
Robin Murphy06c610e2015-12-07 18:18:53 +000063 DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
Will Deacone1d3c0f2014-11-14 17:18:23 +000064
65/*
66 * Calculate the index at level l used to map virtual address a using the
67 * pagetable in d.
68 */
69#define ARM_LPAE_PGD_IDX(l,d) \
70 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
71
72#define ARM_LPAE_LVL_IDX(a,l,d) \
Will Deacon367bd972015-02-16 18:38:20 +000073 (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
Will Deacone1d3c0f2014-11-14 17:18:23 +000074 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
75
76/* Calculate the block/page mapping size at level l for pagetable in d. */
77#define ARM_LPAE_BLOCK_SIZE(l,d) \
78 (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
79 ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
80
81/* Page table bits */
82#define ARM_LPAE_PTE_TYPE_SHIFT 0
83#define ARM_LPAE_PTE_TYPE_MASK 0x3
84
85#define ARM_LPAE_PTE_TYPE_BLOCK 1
86#define ARM_LPAE_PTE_TYPE_TABLE 3
87#define ARM_LPAE_PTE_TYPE_PAGE 3
88
Laurent Pinchartc896c132014-12-14 23:34:50 +020089#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
Will Deacone1d3c0f2014-11-14 17:18:23 +000090#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
91#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
92#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
93#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
94#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
Laurent Pinchartc896c132014-12-14 23:34:50 +020095#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
Will Deacone1d3c0f2014-11-14 17:18:23 +000096#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
97
98#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
99/* Ignore the contiguous bit for block splitting */
100#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
101#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
102 ARM_LPAE_PTE_ATTR_HI_MASK)
103
104/* Stage-1 PTE */
Jeremy Gebbenf96739f2015-09-16 14:04:42 -0600105#define ARM_LPAE_PTE_AP_PRIV_RW (((arm_lpae_iopte)0) << 6)
106#define ARM_LPAE_PTE_AP_RW (((arm_lpae_iopte)1) << 6)
107#define ARM_LPAE_PTE_AP_PRIV_RO (((arm_lpae_iopte)2) << 6)
108#define ARM_LPAE_PTE_AP_RO (((arm_lpae_iopte)3) << 6)
Will Deacone1d3c0f2014-11-14 17:18:23 +0000109#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
110#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
111
112/* Stage-2 PTE */
113#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
114#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
115#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
116#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
117#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
118#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
119
120/* Register bits */
121#define ARM_32_LPAE_TCR_EAE (1 << 31)
122#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
123
Will Deacon63979b82015-03-18 10:22:18 +0000124#define ARM_LPAE_TCR_EPD1 (1 << 23)
125
Will Deacone1d3c0f2014-11-14 17:18:23 +0000126#define ARM_LPAE_TCR_TG0_4K (0 << 14)
127#define ARM_LPAE_TCR_TG0_64K (1 << 14)
128#define ARM_LPAE_TCR_TG0_16K (2 << 14)
129
130#define ARM_LPAE_TCR_SH0_SHIFT 12
131#define ARM_LPAE_TCR_SH0_MASK 0x3
132#define ARM_LPAE_TCR_SH_NS 0
133#define ARM_LPAE_TCR_SH_OS 2
134#define ARM_LPAE_TCR_SH_IS 3
135
136#define ARM_LPAE_TCR_ORGN0_SHIFT 10
137#define ARM_LPAE_TCR_IRGN0_SHIFT 8
138#define ARM_LPAE_TCR_RGN_MASK 0x3
139#define ARM_LPAE_TCR_RGN_NC 0
140#define ARM_LPAE_TCR_RGN_WBWA 1
141#define ARM_LPAE_TCR_RGN_WT 2
142#define ARM_LPAE_TCR_RGN_WB 3
143
144#define ARM_LPAE_TCR_SL0_SHIFT 6
145#define ARM_LPAE_TCR_SL0_MASK 0x3
146
147#define ARM_LPAE_TCR_T0SZ_SHIFT 0
148#define ARM_LPAE_TCR_SZ_MASK 0xf
149
150#define ARM_LPAE_TCR_PS_SHIFT 16
151#define ARM_LPAE_TCR_PS_MASK 0x7
152
153#define ARM_LPAE_TCR_IPS_SHIFT 32
154#define ARM_LPAE_TCR_IPS_MASK 0x7
155
156#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
157#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
158#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
159#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
160#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
161#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
162
163#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
164#define ARM_LPAE_MAIR_ATTR_MASK 0xff
165#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
166#define ARM_LPAE_MAIR_ATTR_NC 0x44
167#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
168#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
169#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
170#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
171
172/* IOPTE accessors */
Mitchel Humpherysdeb3e832015-07-14 16:41:29 -0700173#define iopte_deref(pte, d) \
174 (__va(iopte_val(pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
Robin Murphy06c610e2015-12-07 18:18:53 +0000175 & ~(ARM_LPAE_GRANULE(d) - 1ULL)))
Will Deacone1d3c0f2014-11-14 17:18:23 +0000176
177#define iopte_type(pte,l) \
178 (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
179
180#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
181
182#define iopte_leaf(pte,l) \
183 (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
184 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
185 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
186
187#define iopte_to_pfn(pte,d) \
188 (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
189
190#define pfn_to_iopte(pfn,d) \
191 (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
192
193struct arm_lpae_io_pgtable {
194 struct io_pgtable iop;
195
196 int levels;
197 size_t pgd_size;
198 unsigned long pg_shift;
199 unsigned long bits_per_level;
200
201 void *pgd;
202};
203
204typedef u64 arm_lpae_iopte;
205
Mitchel Humpherysdeb3e832015-07-14 16:41:29 -0700206/*
207 * We'll use some ignored bits in table entries to keep track of the number
208 * of page mappings beneath the table. The maximum number of entries
209 * beneath any table mapping in armv8 is 8192 (which is possible at the
210 * 2nd- and 3rd-level when using a 64K granule size). The bits at our
211 * disposal are:
212 *
213 * 4k granule: [58..52], [11..2]
214 * 64k granule: [58..52], [15..2]
215 *
216 * [58..52], [11..2] is enough bits for tracking table mappings at any
217 * level for any granule, so we'll use those.
218 */
219#define BOTTOM_IGNORED_MASK 0x3ff
220#define BOTTOM_IGNORED_SHIFT 2
221#define BOTTOM_IGNORED_NUM_BITS 10
222#define TOP_IGNORED_MASK 0x7fULL
223#define TOP_IGNORED_SHIFT 52
224#define IOPTE_RESERVED_MASK ((BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT) | \
225 (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT))
226
227static arm_lpae_iopte iopte_val(arm_lpae_iopte table_pte)
228{
229 return table_pte & ~IOPTE_RESERVED_MASK;
230}
231
232static arm_lpae_iopte _iopte_bottom_ignored_val(arm_lpae_iopte table_pte)
233{
234 return (table_pte & (BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT))
235 >> BOTTOM_IGNORED_SHIFT;
236}
237
238static arm_lpae_iopte _iopte_top_ignored_val(arm_lpae_iopte table_pte)
239{
240 return (table_pte & (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT))
241 >> TOP_IGNORED_SHIFT;
242}
243
244static int iopte_tblcnt(arm_lpae_iopte table_pte)
245{
246 return (_iopte_bottom_ignored_val(table_pte) |
247 (_iopte_top_ignored_val(table_pte) << BOTTOM_IGNORED_NUM_BITS));
248}
249
/*
 * Store @val (the count of live page mappings beneath a table entry)
 * into the ignored bits of *@table_pte.  The low 10 bits of @val land in
 * PTE bits [11..2], the remaining high bits in PTE bits [58..52].  Any
 * previous count is cleared first via iopte_val().  Note: plain memory
 * write only — callers are responsible for any DMA sync of the PTE.
 */
static void iopte_tblcnt_set(arm_lpae_iopte *table_pte, int val)
{
	arm_lpae_iopte pte = iopte_val(*table_pte);

	pte |= ((val & BOTTOM_IGNORED_MASK) << BOTTOM_IGNORED_SHIFT) |
		(((val & (TOP_IGNORED_MASK << BOTTOM_IGNORED_NUM_BITS))
		  >> BOTTOM_IGNORED_NUM_BITS) << TOP_IGNORED_SHIFT);
	*table_pte = pte;
}
259
260static void iopte_tblcnt_sub(arm_lpae_iopte *table_ptep, int cnt)
261{
262 arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep);
263
264 current_cnt -= cnt;
265 iopte_tblcnt_set(table_ptep, current_cnt);
266}
267
268static void iopte_tblcnt_add(arm_lpae_iopte *table_ptep, int cnt)
269{
270 arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep);
271
272 current_cnt += cnt;
273 iopte_tblcnt_set(table_ptep, current_cnt);
274}
275
Will Deaconfe4b9912014-11-17 23:31:12 +0000276static bool selftest_running = false;
277
Robin Murphyffcb6d12015-09-17 17:42:16 +0100278static dma_addr_t __arm_lpae_dma_addr(void *pages)
Robin Murphyf8d54962015-07-29 19:46:04 +0100279{
Robin Murphyffcb6d12015-09-17 17:42:16 +0100280 return (dma_addr_t)virt_to_phys(pages);
Robin Murphyf8d54962015-07-29 19:46:04 +0100281}
282
/*
 * Allocate a zeroed, physically-contiguous chunk for a page table and,
 * outside of selftests, map it for the device with dma_map_single().
 * Returns the CPU virtual address, or NULL on failure.
 *
 * iopte_deref()/__arm_lpae_dma_addr() assume DMA address == physical
 * address, so if the DMA layer returns anything else (offset, bounce
 * buffer, truncation) the allocation is rejected.
 */
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}
315
/*
 * Free a page-table chunk allocated by __arm_lpae_alloc_pages(),
 * undoing its DMA mapping first (skipped while selftests run, matching
 * the allocation path).
 */
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}
324
/*
 * Write a single PTE and, outside of selftests, push it out to the
 * device via the DMA API so the hardware walker observes the update.
 * No barrier is issued here; callers that need ordering against new
 * table walks add their own (see the wmb() in arm_lpae_map()).
 */
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!selftest_running)
		dma_sync_single_for_device(cfg->iommu_dev,
					   __arm_lpae_dma_addr(ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}
335
/*
 * Install a leaf (page or block) entry at level @lvl mapping @iova to
 * @paddr with attribute bits @prot.  If @prev_ptep is non-NULL it is the
 * parent table entry, whose software mapping count is incremented.
 *
 * Returns 0 on success, or -EEXIST if a valid entry is already present
 * (callers must unmap before remapping; outside the selftest this is a
 * BUG).
 */
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* We require an unmap first */
	if (*ptep & ARM_LPAE_PTE_VALID) {
		BUG_ON(!selftest_running);
		return -EEXIST;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	/* Last-level leaves use the PAGE descriptor type, others BLOCK */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	/* Pre-set the Access Flag; inner-shareable */
	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg);

	if (prev_ptep)
		iopte_tblcnt_add(prev_ptep, 1);
	return 0;
}
367
/*
 * Recursive worker for map: map @size bytes at @iova -> @paddr starting
 * from level @lvl.  If @size equals this level's block size (and that
 * size is supported by the config), install a leaf entry here; otherwise
 * allocate the next-level table on demand and recurse.  @prev_ptep is
 * the parent table entry, threaded down for mapping-count accounting.
 *
 * Returns 0 on success or a negative errno (-EEXIST, -EINVAL, -ENOMEM).
 */
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep,
			  arm_lpae_iopte *prev_ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep,
					 prev_ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		/* GFP_ATOMIC: may run under the caller's spinlock */
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep,
			      ptep);
}
409
/*
 * Translate generic IOMMU_* protection flags into PTE attribute bits for
 * the pagetable's format: stage-1 formats get AP[2:1] permissions, nG
 * and a MAIR attribute index; stage-2 formats get HAP read/write bits
 * and direct memory attributes.  Execute-never applies to both stages.
 */
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		/* IOMMU_PRIV selects the privileged-only AP encodings */
		if (prot & IOMMU_WRITE)
			pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RW
					: ARM_LPAE_PTE_AP_RW;
		else
			pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RO
					: ARM_LPAE_PTE_AP_RO;

		/* MMIO beats CACHE; neither set means attr index 0 (NC) */
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
451
/*
 * io_pgtable_ops::map entry point.  @size must be a single supported
 * page/block size.  A request with neither read nor write access is a
 * successful no-op.  Returns 0 on success or a negative errno.
 */
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, NULL);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
474
/*
 * io_pgtable_ops::map_sg entry point.  Map a scatterlist of @nents
 * entries contiguously at @iova, using the largest supported page size
 * for each step.  Returns the total bytes mapped on success; on failure
 * returns 0 and stores the partially-mapped length in *@size so the
 * caller can undo it.
 *
 * NOTE(review): a no-access @iommu_prot takes the error path here,
 * whereas arm_lpae_map() treats it as a successful no-op — confirm
 * callers expect this asymmetry.
 */
static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents,
			   int iommu_prot, size_t *size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;
	struct scatterlist *s;
	size_t mapped = 0;
	int i, ret;
	unsigned int min_pagesz;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		goto out_err;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);

	min_pagesz = 1 << __ffs(data->iop.cfg.pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
		size_t size = s->length;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		while (size) {
			size_t pgsize = iommu_pgsize(
				data->iop.cfg.pgsize_bitmap, iova | phys, size);
			ret = __arm_lpae_map(data, iova, phys, pgsize, prot,
					     lvl, ptep, NULL);
			if (ret)
				goto out_err;

			iova += pgsize;
			mapped += pgsize;
			phys += pgsize;
			size -= pgsize;
		}
	}

	return mapped;

out_err:
	/* Return the size of the partial mapping so that they can be undone */
	*size = mapped;
	return 0;
}
531
/*
 * Recursively free the page table rooted at @ptep (which is at level
 * @lvl), descending through table entries to free lower-level tables
 * first.  Leaf entries own no allocations and are simply skipped.
 */
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only the top level (pgd) can differ in size from the granule */
	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}
562
563static void arm_lpae_free_pgtable(struct io_pgtable *iop)
564{
565 struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
566
567 __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
568 kfree(data);
569}
570
/*
 * Unmap part of an existing block mapping by replacing the block with a
 * next-level table that re-creates the covered region piecewise, leaving
 * a hole at @iova.  The new table is staged in a local PTE (@table) and
 * only installed — with a TLB flush of the old block — once populated,
 * so the walker never sees a half-built table.
 *
 * Returns the next-level block size on success (the bytes unmapped by
 * this pass), or 0 if building the replacement table failed.
 */
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep,
				    arm_lpae_iopte *prev_ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
	/* Re-map in units of the next level's block size */
	size = ARM_LPAE_BLOCK_SIZE(lvl + 1, data);

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep, prev_ptep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, &data->iop.cfg);
	iova &= ~(blk_size - 1);
	io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
	return size;
}
611
/*
 * Recursive worker for unmap: remove up to @size bytes at @iova from
 * level @lvl.  Four cases:
 *   1. @size equals this level's block size: clear the entry, flush, and
 *      (for table entries) free the now-orphaned subtree;
 *   2. 2nd-to-last-level table entry: the level below is all pages, so
 *      bulk-clear a contiguous run of them and drop the table once its
 *      software mapping count reaches zero;
 *   3. leaf block larger than @size: split the block (see
 *      arm_lpae_split_blk_unmap());
 *   4. otherwise descend one level.
 * Returns the number of bytes actually unmapped, 0 on failure.
 */
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if ((lvl == ARM_LPAE_MAX_LEVELS - 2) && !iopte_leaf(pte, lvl)) {
		arm_lpae_iopte *table = iopte_deref(pte, data);
		arm_lpae_iopte *table_base = table;
		int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data);
		int entry_size = ARM_LPAE_GRANULE(data);
		int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) / entry_size;
		/* Clamp the run to the end of this last-level table */
		int entries = min_t(int, size / entry_size,
				    max_entries - tl_offset);
		int table_len = entries * sizeof(*table);

		/*
		 * This isn't a block mapping so it must be a table mapping
		 * and since it's the 2nd-to-last level the next level has
		 * to be all page mappings. Zero them all out in one fell
		 * swoop.
		 */

		table += tl_offset;

		memset(table, 0, table_len);
		dma_sync_single_for_device(iop->cfg.iommu_dev,
					   __arm_lpae_dma_addr(table),
					   table_len, DMA_TO_DEVICE);

		iopte_tblcnt_sub(ptep, entries);
		if (!iopte_tblcnt(*ptep)) {
			/* no valid mappings left under this table. free it. */
			__arm_lpae_set_pte(ptep, 0, &iop->cfg);
			io_pgtable_tlb_add_flush(iop, iova,
						 entries * entry_size,
						 ARM_LPAE_GRANULE(data),
						 false);
			__arm_lpae_free_pgtable(data, lvl + 1, table_base);
		} else {
			io_pgtable_tlb_add_flush(iop, iova,
						 entries * entry_size,
						 ARM_LPAE_GRANULE(data),
						 true);
		}

		return entries * entry_size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						prev_ptep, blk_size);
	}

	/* Keep on walkin' */
	prev_ptep = ptep;
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep, prev_ptep);
}
701
/*
 * io_pgtable_ops::unmap entry point.  Walk the range in chunks sized by
 * iommu_pgsize(); chunks below 2MB are additionally clamped so a single
 * pass never crosses a 2MB-aligned boundary, keeping each recursion
 * inside one last-level table.  Issues one TLB sync at the end if any
 * bytes were unmapped.  Returns the total bytes unmapped.
 */
static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	size_t unmapped = 0;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	while (unmapped < size) {
		size_t ret, size_to_unmap, remaining;

		remaining = (size - unmapped);
		size_to_unmap = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova,
					     remaining);
		size_to_unmap = size_to_unmap >= SZ_2M ?
				size_to_unmap :
				min_t(unsigned long, remaining,
				      (ALIGN(iova + 1, SZ_2M) - iova));
		ret = __arm_lpae_unmap(data, iova, size_to_unmap, lvl, ptep,
				       NULL);
		if (ret == 0)
			break;
		unmapped += ret;
		iova += ret;
	}
	if (unmapped)
		io_pgtable_tlb_sync(&data->iop);

	return unmapped;
}
732
733static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
734 unsigned long iova)
735{
736 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
737 arm_lpae_iopte pte, *ptep = data->pgd;
738 int lvl = ARM_LPAE_START_LVL(data);
739
740 do {
741 /* Valid IOPTE pointer? */
742 if (!ptep)
743 return 0;
744
745 /* Grab the IOPTE we're interested in */
746 pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
747
748 /* Valid entry? */
749 if (!pte)
750 return 0;
751
752 /* Leaf entry? */
753 if (iopte_leaf(pte,lvl))
754 goto found_translation;
755
756 /* Take it to the next level */
757 ptep = iopte_deref(pte, data);
758 } while (++lvl < ARM_LPAE_MAX_LEVELS);
759
760 /* Ran out of page tables to walk */
761 return 0;
762
763found_translation:
Will Deacon7c6d90e2016-06-16 18:21:19 +0100764 iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
Will Deacone1d3c0f2014-11-14 17:18:23 +0000765 return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
766}
767
768static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
769{
770 unsigned long granule;
771
772 /*
773 * We need to restrict the supported page sizes to match the
774 * translation regime for a particular granule. Aim to match
775 * the CPU page size if possible, otherwise prefer smaller sizes.
776 * While we're at it, restrict the block sizes to match the
777 * chosen granule.
778 */
779 if (cfg->pgsize_bitmap & PAGE_SIZE)
780 granule = PAGE_SIZE;
781 else if (cfg->pgsize_bitmap & ~PAGE_MASK)
782 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
783 else if (cfg->pgsize_bitmap & PAGE_MASK)
784 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
785 else
786 granule = 0;
787
788 switch (granule) {
789 case SZ_4K:
790 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
791 break;
792 case SZ_16K:
793 cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
794 break;
795 case SZ_64K:
796 cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
797 break;
798 default:
799 cfg->pgsize_bitmap = 0;
800 }
801}
802
/*
 * Allocate and initialise the format-agnostic parts of an LPAE
 * pagetable: validate/restrict the configured page sizes, derive the
 * granule shift, number of levels and pgd size from the config, and
 * install the io_pgtable_ops.  data->pgd is NOT allocated here — that
 * is left to the format-specific caller.  Returns NULL if the
 * configuration cannot be supported.
 */
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	/* The table code assumes DMA address == physical address */
	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	/* Granule is the smallest supported page size */
	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.map_sg		= arm_lpae_map_sg,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
848
849static struct io_pgtable *
850arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
851{
852 u64 reg;
Robin Murphy3850db42016-02-12 17:09:46 +0000853 struct arm_lpae_io_pgtable *data;
Will Deacone1d3c0f2014-11-14 17:18:23 +0000854
Robin Murphy3850db42016-02-12 17:09:46 +0000855 if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS)
856 return NULL;
857
858 data = arm_lpae_alloc_pgtable(cfg);
Will Deacone1d3c0f2014-11-14 17:18:23 +0000859 if (!data)
860 return NULL;
861
862 /* TCR */
863 reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
864 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
865 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
866
Robin Murphy06c610e2015-12-07 18:18:53 +0000867 switch (ARM_LPAE_GRANULE(data)) {
Will Deacone1d3c0f2014-11-14 17:18:23 +0000868 case SZ_4K:
869 reg |= ARM_LPAE_TCR_TG0_4K;
870 break;
871 case SZ_16K:
872 reg |= ARM_LPAE_TCR_TG0_16K;
873 break;
874 case SZ_64K:
875 reg |= ARM_LPAE_TCR_TG0_64K;
876 break;
877 }
878
879 switch (cfg->oas) {
880 case 32:
881 reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
882 break;
883 case 36:
884 reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
885 break;
886 case 40:
887 reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
888 break;
889 case 42:
890 reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
891 break;
892 case 44:
893 reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
894 break;
895 case 48:
896 reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
897 break;
898 default:
899 goto out_free_data;
900 }
901
902 reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
Will Deacon63979b82015-03-18 10:22:18 +0000903
904 /* Disable speculative walks through TTBR1 */
905 reg |= ARM_LPAE_TCR_EPD1;
Will Deacone1d3c0f2014-11-14 17:18:23 +0000906 cfg->arm_lpae_s1_cfg.tcr = reg;
907
908 /* MAIRs */
909 reg = (ARM_LPAE_MAIR_ATTR_NC
910 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
911 (ARM_LPAE_MAIR_ATTR_WBRWA
912 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
913 (ARM_LPAE_MAIR_ATTR_DEVICE
914 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
915
916 cfg->arm_lpae_s1_cfg.mair[0] = reg;
917 cfg->arm_lpae_s1_cfg.mair[1] = 0;
918
919 /* Looking good; allocate a pgd */
Robin Murphyf8d54962015-07-29 19:46:04 +0100920 data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
Will Deacone1d3c0f2014-11-14 17:18:23 +0000921 if (!data->pgd)
922 goto out_free_data;
923
Robin Murphy87a91b12015-07-29 19:46:09 +0100924 /* Ensure the empty pgd is visible before any actual TTBR write */
925 wmb();
Will Deacone1d3c0f2014-11-14 17:18:23 +0000926
927 /* TTBRs */
928 cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
929 cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
930 return &data->iop;
931
932out_free_data:
933 kfree(data);
934 return NULL;
935}
936
937static struct io_pgtable *
938arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
939{
940 u64 reg, sl;
Robin Murphy3850db42016-02-12 17:09:46 +0000941 struct arm_lpae_io_pgtable *data;
Will Deacone1d3c0f2014-11-14 17:18:23 +0000942
Robin Murphy3850db42016-02-12 17:09:46 +0000943 /* The NS quirk doesn't apply at stage 2 */
944 if (cfg->quirks)
945 return NULL;
946
947 data = arm_lpae_alloc_pgtable(cfg);
Will Deacone1d3c0f2014-11-14 17:18:23 +0000948 if (!data)
949 return NULL;
950
951 /*
952 * Concatenate PGDs at level 1 if possible in order to reduce
953 * the depth of the stage-2 walk.
954 */
955 if (data->levels == ARM_LPAE_MAX_LEVELS) {
956 unsigned long pgd_pages;
957
958 pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
959 if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
960 data->pgd_size = pgd_pages << data->pg_shift;
961 data->levels--;
962 }
963 }
964
965 /* VTCR */
966 reg = ARM_64_LPAE_S2_TCR_RES1 |
967 (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
968 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
969 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
970
971 sl = ARM_LPAE_START_LVL(data);
972
Robin Murphy06c610e2015-12-07 18:18:53 +0000973 switch (ARM_LPAE_GRANULE(data)) {
Will Deacone1d3c0f2014-11-14 17:18:23 +0000974 case SZ_4K:
975 reg |= ARM_LPAE_TCR_TG0_4K;
976 sl++; /* SL0 format is different for 4K granule size */
977 break;
978 case SZ_16K:
979 reg |= ARM_LPAE_TCR_TG0_16K;
980 break;
981 case SZ_64K:
982 reg |= ARM_LPAE_TCR_TG0_64K;
983 break;
984 }
985
986 switch (cfg->oas) {
987 case 32:
988 reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
989 break;
990 case 36:
991 reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
992 break;
993 case 40:
994 reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
995 break;
996 case 42:
997 reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
998 break;
999 case 44:
1000 reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
1001 break;
1002 case 48:
1003 reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
1004 break;
1005 default:
1006 goto out_free_data;
1007 }
1008
1009 reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
1010 reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
1011 cfg->arm_lpae_s2_cfg.vtcr = reg;
1012
1013 /* Allocate pgd pages */
Robin Murphyf8d54962015-07-29 19:46:04 +01001014 data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
Will Deacone1d3c0f2014-11-14 17:18:23 +00001015 if (!data->pgd)
1016 goto out_free_data;
1017
Robin Murphy87a91b12015-07-29 19:46:09 +01001018 /* Ensure the empty pgd is visible before any actual TTBR write */
1019 wmb();
Will Deacone1d3c0f2014-11-14 17:18:23 +00001020
1021 /* VTTBR */
1022 cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
1023 return &data->iop;
1024
1025out_free_data:
1026 kfree(data);
1027 return NULL;
1028}
1029
1030static struct io_pgtable *
1031arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
1032{
1033 struct io_pgtable *iop;
1034
1035 if (cfg->ias > 32 || cfg->oas > 40)
1036 return NULL;
1037
1038 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1039 iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
1040 if (iop) {
1041 cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
1042 cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
1043 }
1044
1045 return iop;
1046}
1047
1048static struct io_pgtable *
1049arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
1050{
1051 struct io_pgtable *iop;
1052
1053 if (cfg->ias > 40 || cfg->oas > 40)
1054 return NULL;
1055
1056 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1057 iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
1058 if (iop)
1059 cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
1060
1061 return iop;
1062}
1063
/*
 * Per-format constructor/destructor hooks registered with the generic
 * io-pgtable core; the 32-bit variants wrap the 64-bit allocators above.
 */
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc = arm_64_lpae_alloc_pgtable_s1,
	.free = arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc = arm_64_lpae_alloc_pgtable_s2,
	.free = arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc = arm_32_lpae_alloc_pgtable_s1,
	.free = arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc = arm_32_lpae_alloc_pgtable_s2,
	.free = arm_lpae_free_pgtable,
};
Will Deaconfe4b9912014-11-17 23:31:12 +00001083
1084#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
1085
/* Config under test; the dummy TLB hooks check it comes back as the cookie */
static struct io_pgtable_cfg *cfg_cookie;

/* Selftest TLB hook: no hardware to flush, just verify the cookie wiring */
static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}
1092
/*
 * Selftest TLB hook: verify the cookie and that the core only asks us to
 * flush a size that is one of the supported page/block sizes.
 */
static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
1099
/* Selftest TLB hook: nothing to synchronise, just verify the cookie */
static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}
1104
/* No-op TLB management used when exercising the page-table code standalone */
static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all = dummy_tlb_flush_all,
	.tlb_add_flush = dummy_tlb_add_flush,
	.tlb_sync = dummy_tlb_sync,
};
1110
/* Dump the configuration and derived table geometry when a selftest fails */
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}
1122
/*
 * Report a failure for format index @i, dump the offending table geometry,
 * stop the selftest and evaluate to -EFAULT so callers can return it directly.
 */
#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
1129
1130static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
1131{
1132 static const enum io_pgtable_fmt fmts[] = {
1133 ARM_64_LPAE_S1,
1134 ARM_64_LPAE_S2,
1135 };
1136
Mitchel Humpherysdf18a9a2015-04-23 13:41:31 -07001137 int i, j, k;
Will Deaconfe4b9912014-11-17 23:31:12 +00001138 unsigned long iova;
1139 size_t size;
1140 struct io_pgtable_ops *ops;
Will Deaconfe4b9912014-11-17 23:31:12 +00001141 selftest_running = true;
1142
1143 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
Mitchel Humpherysdf18a9a2015-04-23 13:41:31 -07001144 unsigned long test_sg_sizes[] = { SZ_4K, SZ_64K, SZ_2M,
1145 SZ_1M * 12, SZ_1M * 20 };
1146
Will Deaconfe4b9912014-11-17 23:31:12 +00001147 cfg_cookie = cfg;
1148 ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
1149 if (!ops) {
1150 pr_err("selftest: failed to allocate io pgtable ops\n");
1151 return -ENOMEM;
1152 }
1153
1154 /*
1155 * Initial sanity checks.
1156 * Empty page tables shouldn't provide any translations.
1157 */
1158 if (ops->iova_to_phys(ops, 42))
1159 return __FAIL(ops, i);
1160
1161 if (ops->iova_to_phys(ops, SZ_1G + 42))
1162 return __FAIL(ops, i);
1163
1164 if (ops->iova_to_phys(ops, SZ_2G + 42))
1165 return __FAIL(ops, i);
1166
1167 /*
1168 * Distinct mappings of different granule sizes.
1169 */
1170 iova = 0;
1171 j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
1172 while (j != BITS_PER_LONG) {
1173 size = 1UL << j;
1174
1175 if (ops->map(ops, iova, iova, size, IOMMU_READ |
1176 IOMMU_WRITE |
1177 IOMMU_NOEXEC |
1178 IOMMU_CACHE))
1179 return __FAIL(ops, i);
1180
1181 /* Overlapping mappings */
1182 if (!ops->map(ops, iova, iova + size, size,
1183 IOMMU_READ | IOMMU_NOEXEC))
1184 return __FAIL(ops, i);
1185
1186 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1187 return __FAIL(ops, i);
1188
1189 iova += SZ_1G;
1190 j++;
1191 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
1192 }
1193
1194 /* Partial unmap */
1195 size = 1UL << __ffs(cfg->pgsize_bitmap);
1196 if (ops->unmap(ops, SZ_1G + size, size) != size)
1197 return __FAIL(ops, i);
1198
1199 /* Remap of partial unmap */
1200 if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
1201 return __FAIL(ops, i);
1202
1203 if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
1204 return __FAIL(ops, i);
1205
1206 /* Full unmap */
1207 iova = 0;
1208 j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
1209 while (j != BITS_PER_LONG) {
1210 size = 1UL << j;
1211
1212 if (ops->unmap(ops, iova, size) != size)
1213 return __FAIL(ops, i);
1214
1215 if (ops->iova_to_phys(ops, iova + 42))
1216 return __FAIL(ops, i);
1217
1218 /* Remap full block */
1219 if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
1220 return __FAIL(ops, i);
1221
1222 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1223 return __FAIL(ops, i);
1224
Mitchel Humpherysdf18a9a2015-04-23 13:41:31 -07001225 if (ops->unmap(ops, iova, size) != size)
1226 return __FAIL(ops, i);
1227
Will Deaconfe4b9912014-11-17 23:31:12 +00001228 iova += SZ_1G;
1229 j++;
1230 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
1231 }
1232
Mitchel Humpherysdf18a9a2015-04-23 13:41:31 -07001233 /* map_sg */
1234 for (j = 0; j < ARRAY_SIZE(test_sg_sizes); ++j) {
1235 size_t mapped;
1236 size_t unused;
1237 struct page *page;
1238 phys_addr_t page_phys;
1239 struct sg_table table;
1240 struct scatterlist *sg;
1241 unsigned long total_size = test_sg_sizes[j];
1242 int chunk_size = 1UL << find_first_bit(
1243 &cfg->pgsize_bitmap, BITS_PER_LONG);
1244 int nents = total_size / chunk_size;
1245
1246 if (total_size < chunk_size)
1247 continue;
1248
1249 page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
1250 page_phys = page_to_phys(page);
1251
1252 iova = 0;
1253 BUG_ON(sg_alloc_table(&table, nents, GFP_KERNEL));
1254 BUG_ON(!page);
1255 for_each_sg(table.sgl, sg, table.nents, k)
1256 sg_set_page(sg, page, chunk_size, 0);
1257
1258 mapped = ops->map_sg(ops, iova, table.sgl, table.nents,
1259 IOMMU_READ | IOMMU_WRITE, &unused);
1260
1261 if (mapped != total_size)
1262 return __FAIL(ops, i);
1263
1264 for_each_sg(table.sgl, sg, table.nents, k) {
1265 dma_addr_t newphys =
1266 ops->iova_to_phys(ops, iova + 42);
1267 if (newphys != (page_phys + 42))
1268 return __FAIL(ops, i);
1269 iova += chunk_size;
1270 }
1271
1272 if (ops->unmap(ops, 0, total_size) != total_size)
1273 return __FAIL(ops, i);
1274
1275 sg_free_table(&table);
1276 __free_pages(page, get_order(chunk_size));
1277 }
1278
Will Deaconfe4b9912014-11-17 23:31:12 +00001279 free_io_pgtable_ops(ops);
1280 }
1281
1282 selftest_running = false;
1283 return 0;
1284}
1285
1286static int __init arm_lpae_do_selftests(void)
1287{
1288 static const unsigned long pgsize[] = {
1289 SZ_4K | SZ_2M | SZ_1G,
1290 SZ_16K | SZ_32M,
1291 SZ_64K | SZ_512M,
1292 };
1293
1294 static const unsigned int ias[] = {
1295 32, 36, 40, 42, 44, 48,
1296 };
1297
1298 int i, j, pass = 0, fail = 0;
1299 struct io_pgtable_cfg cfg = {
1300 .tlb = &dummy_tlb_ops,
1301 .oas = 48,
1302 };
1303
1304 for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
1305 for (j = 0; j < ARRAY_SIZE(ias); ++j) {
1306 cfg.pgsize_bitmap = pgsize[i];
1307 cfg.ias = ias[j];
1308 pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
1309 pgsize[i], ias[j]);
1310 if (arm_lpae_run_tests(&cfg))
1311 fail++;
1312 else
1313 pass++;
1314 }
1315 }
1316
1317 pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
1318 return fail ? -EFAULT : 0;
1319}
1320subsys_initcall(arm_lpae_do_selftests);
1321#endif