/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
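
/*
 * Worked example (illustrative only): with a 4K granule, pg_shift = 12 and
 * bits_per_level = 9, so a 4-level walk (levels 0..3) gives shifts of
 * 39, 30, 21 and 12, i.e. the familiar AArch64 translation-table split.
 */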
59
Robin Murphy06c610e2015-12-07 18:18:53 +000060#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift)
61
Will Deacon367bd972015-02-16 18:38:20 +000062#define ARM_LPAE_PAGES_PER_PGD(d) \
Robin Murphy06c610e2015-12-07 18:18:53 +000063 DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
Will Deacone1d3c0f2014-11-14 17:18:23 +000064
65/*
66 * Calculate the index at level l used to map virtual address a using the
67 * pagetable in d.
68 */
69#define ARM_LPAE_PGD_IDX(l,d) \
70 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
71
Patrick Dalyd35ec7c2016-11-23 15:04:24 -080072#define ARM_LPAE_LVL_MASK(l, d) \
73 ((l) == ARM_LPAE_START_LVL(d) ? (1 << (d)->pgd_bits) - 1 : \
74 (1 << (d)->bits_per_level) - 1)
Will Deacone1d3c0f2014-11-14 17:18:23 +000075#define ARM_LPAE_LVL_IDX(a,l,d) \
Will Deacon367bd972015-02-16 18:38:20 +000076 (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
Patrick Dalyd35ec7c2016-11-23 15:04:24 -080077 ARM_LPAE_LVL_MASK(l, d))
Will Deacone1d3c0f2014-11-14 17:18:23 +000078
79/* Calculate the block/page mapping size at level l for pagetable in d. */
80#define ARM_LPAE_BLOCK_SIZE(l,d) \
Patrick Daly3b264572017-04-03 18:20:37 -070081 (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \
Will Deacone1d3c0f2014-11-14 17:18:23 +000082 ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
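
/*
 * With a 4K granule this works out to 1G blocks at level 1, 2M blocks at
 * level 2 and 4K pages at level 3, since 1 << (3 + (4 - l) * 9).
 */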

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_SH_MASK		(((arm_lpae_iopte)0x3) << 8)
#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_PRIV_RW		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_AP_RW		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_PRIV_RO		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_AP_RO		(((arm_lpae_iopte)3) << 6)
#define ARM_LPAE_PTE_ATTRINDX_MASK	0x7
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_UPSTREAM	0xf4
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM	3

/* IOPTE accessors */
#define iopte_deref(pte, d)						\
	(__va(iopte_val(pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	unsigned int		pgd_bits;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
	void			*pgd_ttbr1;
};

typedef u64 arm_lpae_iopte;

/*
 * We'll use some ignored bits in table entries to keep track of the number
 * of page mappings beneath the table. The maximum number of entries
 * beneath any table mapping in armv8 is 8192 (which is possible at the
 * 2nd- and 3rd-level when using a 64K granule size). The bits at our
 * disposal are:
 *
 *	4k granule:  [58..52], [11..2]
 *	64k granule: [58..52], [15..2]
 *
 * [58..52], [11..2] is enough bits for tracking table mappings at any
 * level for any granule, so we'll use those.
 */
#define BOTTOM_IGNORED_MASK 0x3ff
#define BOTTOM_IGNORED_SHIFT 2
#define BOTTOM_IGNORED_NUM_BITS 10
#define TOP_IGNORED_MASK 0x7fULL
#define TOP_IGNORED_SHIFT 52
#define IOPTE_RESERVED_MASK ((BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT) | \
			     (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT))

static arm_lpae_iopte iopte_val(arm_lpae_iopte table_pte)
{
	return table_pte & ~IOPTE_RESERVED_MASK;
}

static arm_lpae_iopte _iopte_bottom_ignored_val(arm_lpae_iopte table_pte)
{
	return (table_pte & (BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT))
		>> BOTTOM_IGNORED_SHIFT;
}

static arm_lpae_iopte _iopte_top_ignored_val(arm_lpae_iopte table_pte)
{
	return (table_pte & (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT))
		>> TOP_IGNORED_SHIFT;
}

static int iopte_tblcnt(arm_lpae_iopte table_pte)
{
	return (_iopte_bottom_ignored_val(table_pte) |
		(_iopte_top_ignored_val(table_pte) << BOTTOM_IGNORED_NUM_BITS));
}

static void iopte_tblcnt_set(arm_lpae_iopte *table_pte, int val)
{
	arm_lpae_iopte pte = iopte_val(*table_pte);

	pte |= ((val & BOTTOM_IGNORED_MASK) << BOTTOM_IGNORED_SHIFT) |
		(((val & (TOP_IGNORED_MASK << BOTTOM_IGNORED_NUM_BITS))
		  >> BOTTOM_IGNORED_NUM_BITS) << TOP_IGNORED_SHIFT);
	*table_pte = pte;
}
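
/*
 * Example of the encoding above: a count of 0x1234 is stored as its low
 * ten bits (0x234) in PTE bits [11:2] and the remaining bits (0x4) in
 * PTE bits [58:52]; iopte_tblcnt() reassembles the same 0x1234.
 */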

static void iopte_tblcnt_sub(arm_lpae_iopte *table_ptep, int cnt)
{
	arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep);

	current_cnt -= cnt;
	iopte_tblcnt_set(table_ptep, current_cnt);
}

static void iopte_tblcnt_add(arm_lpae_iopte *table_ptep, int cnt)
{
	arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep);

	current_cnt += cnt;
	iopte_tblcnt_set(table_ptep, current_cnt);
}

static bool selftest_running = false;
static bool suppress_map_failures;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static inline void pgtable_dma_sync_single_for_device(
				struct io_pgtable_cfg *cfg,
				dma_addr_t addr, size_t size,
				enum dma_data_direction dir)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT))
		dma_sync_single_for_device(cfg->iommu_dev, addr, size,
					   dir);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg, void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = io_pgtable_alloc_pages_exact(cfg, cookie, size,
						   gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	io_pgtable_free_pages_exact(cfg, cookie, pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg, void *cookie)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	io_pgtable_free_pages_exact(cfg, cookie, pages, size);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!selftest_running)
		pgtable_dma_sync_single_for_device(cfg,
						   __arm_lpae_dma_addr(ptep),
						   sizeof(pte), DMA_TO_DEVICE);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep,
			     bool flush)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* We require an unmap first */
	if (*ptep & ARM_LPAE_PTE_VALID) {
		BUG_ON(!suppress_map_failures);
		return -EEXIST;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_OS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	if (flush)
		__arm_lpae_set_pte(ptep, pte, cfg);
	else
		*ptep = pte;

	if (prev_ptep)
		iopte_tblcnt_add(prev_ptep, 1);
	return 0;
}

struct map_state {
	unsigned long iova_end;
	unsigned int pgsize;
	arm_lpae_iopte *pgtable;
	arm_lpae_iopte *prev_pgtable;
	arm_lpae_iopte *pte_start;
	unsigned int num_pte;
};
/* map state optimization works at level 3 (the last level, which holds the
 * page mappings) */
#define MAP_STATE_LVL 3
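
/*
 * How the map state is used (see arm_lpae_map_sg): while successive page
 * mappings land in the same last-level table, arm_lpae_init_pte() is called
 * with flush == false and ms->num_pte is bumped instead, so a whole run of
 * PTEs can be cleaned to the point of coherency with a single
 * pgtable_dma_sync_single_for_device() call rather than one sync per entry.
 */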

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep,
			  arm_lpae_iopte *prev_ptep, struct map_state *ms)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	void *cookie = data->iop.cookie;
	arm_lpae_iopte *pgtable = ptep;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap)) {
		if (!ms)
			return arm_lpae_init_pte(data, iova, paddr, prot, lvl,
						 ptep, prev_ptep, true);

		if (lvl == MAP_STATE_LVL) {
			if (ms->pgtable)
				pgtable_dma_sync_single_for_device(cfg,
					__arm_lpae_dma_addr(ms->pte_start),
					ms->num_pte * sizeof(*ptep),
					DMA_TO_DEVICE);

			ms->iova_end = round_down(iova, SZ_2M) + SZ_2M;
			ms->pgtable = pgtable;
			ms->prev_pgtable = prev_ptep;
			ms->pgsize = size;
			ms->pte_start = ptep;
			ms->num_pte = 1;
		} else {
			/*
			 * We have some map state from previous page
			 * mappings, but we're about to set up a block
			 * mapping. Flush out the previous page mappings.
			 */
			if (ms->pgtable)
				pgtable_dma_sync_single_for_device(cfg,
					__arm_lpae_dma_addr(ms->pte_start),
					ms->num_pte * sizeof(*ptep),
					DMA_TO_DEVICE);
			memset(ms, 0, sizeof(*ms));
			ms = NULL;
		}

		return arm_lpae_init_pte(data, iova, paddr, prot, lvl,
					 ptep, prev_ptep, ms == NULL);
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
					       GFP_ATOMIC, cfg, cookie);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else if (!iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep,
			      ptep, ms);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (prot & IOMMU_WRITE)
			pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RW
					: ARM_LPAE_PTE_AP_RW;
		else
			pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RO
					: ARM_LPAE_PTE_AP_RO;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_USE_UPSTREAM_HINT)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
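
/*
 * For instance, a stage-1 mapping requested with IOMMU_READ | IOMMU_WRITE |
 * IOMMU_CACHE comes out as ARM_LPAE_PTE_nG | ARM_LPAE_PTE_AP_RW with MAIR
 * index ARM_LPAE_MAIR_ATTR_IDX_CACHE; adding IOMMU_NOEXEC would also set
 * ARM_LPAE_PTE_XN.
 */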

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, NULL,
			     NULL);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents,
			   int iommu_prot, size_t *size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;
	struct scatterlist *s;
	size_t mapped = 0;
	int i, ret;
	unsigned int min_pagesz;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct map_state ms;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		goto out_err;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);

	min_pagesz = 1 << __ffs(cfg->pgsize_bitmap);

	memset(&ms, 0, sizeof(ms));

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
		size_t size = s->length;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		while (size) {
			size_t pgsize = iommu_pgsize(
				cfg->pgsize_bitmap, iova | phys, size);

			if (ms.pgtable && (iova < ms.iova_end)) {
				arm_lpae_iopte *ptep = ms.pgtable +
					ARM_LPAE_LVL_IDX(iova, MAP_STATE_LVL,
							 data);
				arm_lpae_init_pte(
					data, iova, phys, prot, MAP_STATE_LVL,
					ptep, ms.prev_pgtable, false);
				ms.num_pte++;
			} else {
				ret = __arm_lpae_map(data, iova, phys, pgsize,
						     prot, lvl, ptep, NULL,
						     &ms);
				if (ret)
					goto out_err;
			}

			iova += pgsize;
			mapped += pgsize;
			phys += pgsize;
			size -= pgsize;
		}
	}

	if (ms.pgtable)
		pgtable_dma_sync_single_for_device(cfg,
			__arm_lpae_dma_addr(ms.pte_start),
			ms.num_pte * sizeof(*ms.pte_start),
			DMA_TO_DEVICE);

	return mapped;

out_err:
	/* Return the size of the partial mapping so that it can be undone */
	*size = mapped;
	return 0;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;
	void *cookie = data->iop.cookie;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data),
				data->pgd_ttbr1);
	kfree(data);
}

static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep,
				    arm_lpae_iopte *prev_ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
	size = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova, size);

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep, prev_ptep, NULL) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, &data->iop.cfg);
	return size;
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		}

		return size;
	} else if ((lvl == ARM_LPAE_MAX_LEVELS - 2) && !iopte_leaf(pte, lvl)) {
		arm_lpae_iopte *table = iopte_deref(pte, data);
		arm_lpae_iopte *table_base = table;
		int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data);
		int entry_size = ARM_LPAE_GRANULE(data);
		int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) >>
				data->pg_shift;
		int entries = min_t(int, size / entry_size,
				    max_entries - tl_offset);
		int table_len = entries * sizeof(*table);

		/*
		 * This isn't a block mapping so it must be a table mapping
		 * and since it's the 2nd-to-last level the next level has
		 * to be all page mappings. Zero them all out in one fell
		 * swoop.
		 */

		table += tl_offset;

		memset(table, 0, table_len);
		pgtable_dma_sync_single_for_device(&iop->cfg,
						   __arm_lpae_dma_addr(table),
						   table_len, DMA_TO_DEVICE);

		iopte_tblcnt_sub(ptep, entries);
		if (!iopte_tblcnt(*ptep)) {
			/* no valid mappings left under this table. free it. */
			__arm_lpae_set_pte(ptep, 0, &iop->cfg);
			__arm_lpae_free_pgtable(data, lvl + 1, table_base);
		}

		return entries * entry_size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						prev_ptep, blk_size);
	}

	/* Keep on walkin' */
	prev_ptep = ptep;
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep, prev_ptep);
}

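/*
 * Unmap requests are carved up (below) into chunks that never straddle a 2M
 * boundary, so that each __arm_lpae_unmap() walk can take the table-level
 * shortcut above, i.e. zero a whole run of last-level PTEs in one go.
 */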
static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	size_t unmapped = 0;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	while (unmapped < size) {
		size_t ret, size_to_unmap, remaining;

		remaining = (size - unmapped);
		size_to_unmap = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova,
					     remaining);
		size_to_unmap = size_to_unmap >= SZ_2M ?
				size_to_unmap :
				min_t(unsigned long, remaining,
				      (ALIGN(iova + 1, SZ_2M) - iova));
		ret = __arm_lpae_unmap(data, iova, size_to_unmap, lvl, ptep,
				       NULL);
		if (ret == 0)
			break;
		unmapped += ret;
		iova += ret;
	}
	if (unmapped)
		io_pgtable_tlb_flush_all(&data->iop);

	return unmapped;
}

static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data,
				unsigned long iova, int *plvl_ret,
				arm_lpae_iopte *ptep_ret)
{
	arm_lpae_iopte pte, *ptep = data->pgd;
	*plvl_ret = ARM_LPAE_START_LVL(data);
	*ptep_ret = 0;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return -EINVAL;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, *plvl_ret, data));

		/* Valid entry? */
		if (!pte)
			return -EINVAL;

		/* Leaf entry? */
		if (iopte_leaf(pte, *plvl_ret))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++(*plvl_ret) < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return -EINVAL;

found_translation:
	*ptep_ret = pte;
	return 0;
}

static uint64_t arm_lpae_iova_get_pte(struct io_pgtable_ops *ops,
				      unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte;
	int lvl;

	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte))
		return pte;

	return 0;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte;
	int lvl;
	phys_addr_t phys = 0;

	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte)) {
		iova &= ((1 << ARM_LPAE_LVL_SHIFT(lvl, data)) - 1);
		phys = ((phys_addr_t)iopte_to_pfn(pte, data)
			<< data->pg_shift) | iova;
	}

	return phys;
}

static bool __arm_lpae_is_iova_coherent(struct arm_lpae_io_pgtable *data,
					arm_lpae_iopte *ptep)
{
	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		int attr_idx = (*ptep & (ARM_LPAE_PTE_ATTRINDX_MASK <<
					 ARM_LPAE_PTE_ATTRINDX_SHIFT)) >>
				ARM_LPAE_PTE_ATTRINDX_SHIFT;
		if ((attr_idx == ARM_LPAE_MAIR_ATTR_IDX_CACHE) &&
		    (((*ptep & ARM_LPAE_PTE_SH_MASK) == ARM_LPAE_PTE_SH_IS) ||
		     (*ptep & ARM_LPAE_PTE_SH_MASK) == ARM_LPAE_PTE_SH_OS))
			return true;
	} else {
		if (*ptep & ARM_LPAE_PTE_MEMATTR_OIWB)
			return true;
	}

	return false;
}

static bool arm_lpae_is_iova_coherent(struct io_pgtable_ops *ops,
				      unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte;
	int lvl;
	bool ret = false;

	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte))
		ret = __arm_lpae_is_iova_coherent(data, &pte);

	return ret;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
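
/*
 * Example: pgsize_bitmap = SZ_4K | SZ_64K | SZ_2M | SZ_1G on a CPU with 4K
 * pages selects the 4K granule and is trimmed to SZ_4K | SZ_2M | SZ_1G; on
 * a 64K-page CPU the same bitmap selects the 64K granule and is trimmed to
 * just SZ_64K.
 */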

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_bits = pgd_bits;
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
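
	/*
	 * Example: ias = 32 with a 4K granule gives va_bits = 20, hence
	 * levels = 3, pgd_bits = 2 and a pgd of just four 8-byte entries
	 * (pgd_size = 32 bytes).
	 */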

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.map_sg		= arm_lpae_map_sg,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
		.is_iova_coherent = arm_lpae_is_iova_coherent,
		.iova_to_pte	= arm_lpae_iova_get_pte,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT)
		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
	else if ((cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
		 (cfg->quirks & IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE))
		reg = (ARM_LPAE_TCR_SH_NS << ARM_LPAE_TCR_SH0_SHIFT) |
			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
	else if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT)
		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
	else
		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_UPSTREAM
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	data->pgd_ttbr1 = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL,
						 cfg, cookie);
	if (!data->pgd_ttbr1)
		goto out_free_pgd;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = virt_to_phys(data->pgd_ttbr1);
	return &data->iop;

out_free_pgd:
	__arm_lpae_free_pages(data->pgd, data->pgd_size, cfg, cookie);

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
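
	/*
	 * Example: ias = 40 with a 4K granule would need a 4-level walk;
	 * the two top-level entries are instead folded into two concatenated
	 * level-1 tables (pgd_size = 8K) and the walk drops to 3 levels.
	 */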

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		suppress_map_failures = false;				\
		selftest_running = false;				\
		-EFAULT;						\
})

/*
 * Returns true if there's any mapping in the given iova range in ops.
 */
static bool arm_lpae_range_has_mapping(struct io_pgtable_ops *ops,
				       unsigned long iova_start, size_t size)
{
	unsigned long iova = iova_start;

	while (iova < (iova_start + size)) {
		if (ops->iova_to_phys(ops, iova + 42))
			return true;
		iova += SZ_4K;
	}
	return false;
}

/*
 * Returns true if the iova range is successfully mapped to the contiguous
 * phys range in ops.
 */
static bool arm_lpae_range_has_specific_mapping(struct io_pgtable_ops *ops,
						const unsigned long iova_start,
						const phys_addr_t phys_start,
						const size_t size)
{
	unsigned long iova = iova_start;
	phys_addr_t phys = phys_start;

	while (iova < (iova_start + size)) {
		if (ops->iova_to_phys(ops, iova + 42) != (phys + 42))
			return false;
		iova += SZ_4K;
		phys += SZ_4K;
	}
	return true;
}

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j, k;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		unsigned long test_sg_sizes[] = { SZ_4K, SZ_64K, SZ_2M,
						  SZ_1M * 12, SZ_1M * 20 };

		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks. Empty page tables shouldn't
		 * provide any translations. TODO: check entire supported
		 * range for these ops rather than first 2G
		 */
		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			suppress_map_failures = true;
			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);
			suppress_map_failures = false;

			if (!arm_lpae_range_has_specific_mapping(ops, iova,
								 iova, size))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		if (arm_lpae_range_has_mapping(ops, SZ_1G + size, size))
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (!arm_lpae_range_has_specific_mapping(ops, SZ_1G + size,
							 size, size))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
			return __FAIL(ops, i);

		if ((cfg->pgsize_bitmap & SZ_2M) &&
		    (cfg->pgsize_bitmap & SZ_4K)) {
			/* mixed block + page mappings */
			iova = 0;
			if (ops->map(ops, iova, iova, SZ_2M, IOMMU_READ))
				return __FAIL(ops, i);

			if (ops->map(ops, iova + SZ_2M, iova + SZ_2M, SZ_4K,
				     IOMMU_READ))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + SZ_2M + 42) !=
			    (iova + SZ_2M + 42))
				return __FAIL(ops, i);

			/* unmap both mappings at once */
			if (ops->unmap(ops, iova, SZ_2M + SZ_4K) !=
			    (SZ_2M + SZ_4K))
				return __FAIL(ops, i);

			if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
				return __FAIL(ops, i);
		}

		/* map_sg */
		for (j = 0; j < ARRAY_SIZE(test_sg_sizes); ++j) {
			size_t mapped;
			size_t unused;
			struct page *page;
			phys_addr_t page_phys;
			struct sg_table table;
			struct scatterlist *sg;
			unsigned long total_size = test_sg_sizes[j];
			int chunk_size = 1UL << find_first_bit(
				&cfg->pgsize_bitmap, BITS_PER_LONG);
			int nents = total_size / chunk_size;

			if (total_size < chunk_size)
				continue;

			page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
			page_phys = page_to_phys(page);

			iova = 0;
			BUG_ON(sg_alloc_table(&table, nents, GFP_KERNEL));
			BUG_ON(!page);
			for_each_sg(table.sgl, sg, table.nents, k)
				sg_set_page(sg, page, chunk_size, 0);

			mapped = ops->map_sg(ops, iova, table.sgl, table.nents,
					     IOMMU_READ | IOMMU_WRITE, &unused);

			if (mapped != total_size)
				return __FAIL(ops, i);

			if (!arm_lpae_range_has_mapping(ops, iova, total_size))
				return __FAIL(ops, i);

			if (arm_lpae_range_has_mapping(ops, iova + total_size,
					SZ_2G - (iova + total_size)))
				return __FAIL(ops, i);

			for_each_sg(table.sgl, sg, table.nents, k) {
				dma_addr_t newphys =
					ops->iova_to_phys(ops, iova + 42);
				if (newphys != (page_phys + 42))
					return __FAIL(ops, i);
				iova += chunk_size;
			}

			if (ops->unmap(ops, 0, total_size) != total_size)
				return __FAIL(ops, i);

			if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
				return __FAIL(ops, i);

			sg_free_table(&table);
			__free_pages(page, get_order(chunk_size));
		}

		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
			return __FAIL(ops, i);

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	suppress_map_failures = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif