/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
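
/*
 * Worked example (illustration only, not used by the driver): with a
 * 4K granule, pg_shift == 12 and bits_per_level == 9. For a 48-bit
 * IAS the walk needs all four levels, so ARM_LPAE_LVL_SHIFT() yields
 * 39, 30, 21 and 12 for levels 0-3: each level indexes the next
 * 9-bit slice of the IOVA below the one above it.
 */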

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_MASK(l, d)						\
	((l) == ARM_LPAE_START_LVL(d) ? (1 << (d)->pgd_bits) - 1 :	\
					(1 << (d)->bits_per_level) - 1)
#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ARM_LPAE_LVL_MASK(l, d))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
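
/*
 * Continuing the 4K-granule example, this evaluates to
 * 1 << (3 + 9 * (4 - l)): a 4K page at level 3, a 2M block at
 * level 2 and a 1G block at level 1, matching the sizes left in
 * pgsize_bitmap by arm_lpae_restrict_pgsizes() below.
 */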

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_PRIV_RW		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_AP_RW		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_PRIV_RO		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_AP_RO		(((arm_lpae_iopte)3) << 6)
#define ARM_LPAE_PTE_ATTRINDX_MASK	0x7
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_UPSTREAM	0xf4
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM	3

/* IOPTE accessors */
#define iopte_deref(pte, d)						\
	(__va(iopte_val(pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	unsigned int		pgd_bits;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

/*
 * We'll use some ignored bits in table entries to keep track of the number
 * of page mappings beneath the table.  The maximum number of entries
 * beneath any table mapping in armv8 is 8192 (which is possible at the
 * 2nd- and 3rd-level when using a 64K granule size).  The bits at our
 * disposal are:
 *
 *     4k granule: [58..52], [11..2]
 *    64k granule: [58..52], [15..2]
 *
 * [58..52], [11..2] is enough bits for tracking table mappings at any
 * level for any granule, so we'll use those.
 */
#define BOTTOM_IGNORED_MASK 0x3ff
#define BOTTOM_IGNORED_SHIFT 2
#define BOTTOM_IGNORED_NUM_BITS 10
#define TOP_IGNORED_MASK 0x7fULL
#define TOP_IGNORED_SHIFT 52
#define IOPTE_RESERVED_MASK ((BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT) | \
			     (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT))
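
/*
 * Packing sketch: a count is split across the two ignored fields,
 * its low 10 bits into PTE bits [11:2] and the remainder into
 * [58:52]. E.g. the worst-case count of 8192 (0x2000) stores 0 in
 * [11:2] and 8 in [58:52]; the 17 bits available cover it easily.
 */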

static arm_lpae_iopte iopte_val(arm_lpae_iopte table_pte)
{
	return table_pte & ~IOPTE_RESERVED_MASK;
}

static arm_lpae_iopte _iopte_bottom_ignored_val(arm_lpae_iopte table_pte)
{
	return (table_pte & (BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT))
		>> BOTTOM_IGNORED_SHIFT;
}

static arm_lpae_iopte _iopte_top_ignored_val(arm_lpae_iopte table_pte)
{
	return (table_pte & (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT))
		>> TOP_IGNORED_SHIFT;
}

static int iopte_tblcnt(arm_lpae_iopte table_pte)
{
	return (_iopte_bottom_ignored_val(table_pte) |
		(_iopte_top_ignored_val(table_pte) << BOTTOM_IGNORED_NUM_BITS));
}

static void iopte_tblcnt_set(arm_lpae_iopte *table_pte, int val)
{
	arm_lpae_iopte pte = iopte_val(*table_pte);

	pte |= ((val & BOTTOM_IGNORED_MASK) << BOTTOM_IGNORED_SHIFT) |
		(((val & (TOP_IGNORED_MASK << BOTTOM_IGNORED_NUM_BITS))
		  >> BOTTOM_IGNORED_NUM_BITS) << TOP_IGNORED_SHIFT);
	*table_pte = pte;
}

static void iopte_tblcnt_sub(arm_lpae_iopte *table_ptep, int cnt)
{
	arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep);

	current_cnt -= cnt;
	iopte_tblcnt_set(table_ptep, current_cnt);
}

static void iopte_tblcnt_add(arm_lpae_iopte *table_ptep, int cnt)
{
	arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep);

	current_cnt += cnt;
	iopte_tblcnt_set(table_ptep, current_cnt);
}

static bool selftest_running = false;
static bool suppress_map_failures;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static inline void pgtable_dma_sync_single_for_device(
				struct io_pgtable_cfg *cfg,
				dma_addr_t addr, size_t size,
				enum dma_data_direction dir)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT))
		dma_sync_single_for_device(cfg->iommu_dev, addr, size,
						dir);
}
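
/*
 * Note: with IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT the table walker
 * is assumed to snoop the CPU caches, so the helper above (and every
 * sync below) becomes a no-op; otherwise each PTE update has to be
 * pushed out to memory before the IOMMU can observe it.
 */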

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg, void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = io_pgtable_alloc_pages_exact(cfg, cookie, size,
						   gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	io_pgtable_free_pages_exact(cfg, cookie, pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg, void *cookie)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	io_pgtable_free_pages_exact(cfg, cookie, pages, size);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!selftest_running)
		pgtable_dma_sync_single_for_device(cfg,
						   __arm_lpae_dma_addr(ptep),
						   sizeof(pte), DMA_TO_DEVICE);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep,
			     bool flush)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* We require an unmap first */
	if (*ptep & ARM_LPAE_PTE_VALID) {
		BUG_ON(!suppress_map_failures);
		return -EEXIST;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_OS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	if (flush)
		__arm_lpae_set_pte(ptep, pte, cfg);
	else
		*ptep = pte;

	if (prev_ptep)
		iopte_tblcnt_add(prev_ptep, 1);
	return 0;
}

struct map_state {
	unsigned long iova_end;
	unsigned int pgsize;
	arm_lpae_iopte *pgtable;
	arm_lpae_iopte *prev_pgtable;
	arm_lpae_iopte *pte_start;
	unsigned int num_pte;
};
/* map state optimization works at level 3 (the 2nd-to-last level) */
#define MAP_STATE_LVL 3
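
/*
 * Illustration (assumed 4K granule): mapping a contiguous 2M range
 * as 4K pages through map_sg fills 512 level-3 PTEs via the map
 * state and then issues a single sync over pte_start..pte_start+511,
 * instead of one dma_sync per PTE as __arm_lpae_set_pte() would do.
 */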

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep,
			  arm_lpae_iopte *prev_ptep, struct map_state *ms)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	void *cookie = data->iop.cookie;
	arm_lpae_iopte *pgtable = ptep;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap)) {
		if (!ms)
			return arm_lpae_init_pte(data, iova, paddr, prot, lvl,
						 ptep, prev_ptep, true);

		if (lvl == MAP_STATE_LVL) {
			if (ms->pgtable)
				pgtable_dma_sync_single_for_device(cfg,
					__arm_lpae_dma_addr(ms->pte_start),
					ms->num_pte * sizeof(*ptep),
					DMA_TO_DEVICE);

			ms->iova_end = round_down(iova, SZ_2M) + SZ_2M;
			ms->pgtable = pgtable;
			ms->prev_pgtable = prev_ptep;
			ms->pgsize = size;
			ms->pte_start = ptep;
			ms->num_pte = 1;
		} else {
			/*
			 * We have some map state from previous page
			 * mappings, but we're about to set up a block
			 * mapping.  Flush out the previous page mappings.
			 */
			if (ms->pgtable)
				pgtable_dma_sync_single_for_device(cfg,
					__arm_lpae_dma_addr(ms->pte_start),
					ms->num_pte * sizeof(*ptep),
					DMA_TO_DEVICE);
			memset(ms, 0, sizeof(*ms));
			ms = NULL;
		}

		return arm_lpae_init_pte(data, iova, paddr, prot, lvl,
					 ptep, prev_ptep, ms == NULL);
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
					       GFP_ATOMIC, cfg, cookie);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep,
			      ptep, ms);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (prot & IOMMU_WRITE)
			pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RW
					: ARM_LPAE_PTE_AP_RW;
		else
			pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RO
					: ARM_LPAE_PTE_AP_RO;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_USE_UPSTREAM_HINT)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
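
/*
 * Example: at stage 1, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE yields
 * nG | AP_RW | (ATTRINDX_CACHE << 2); arm_lpae_init_pte() then adds
 * AF, the shareability bits and the page/block type when the PTE is
 * actually installed.
 */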

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, NULL,
			     NULL);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents,
			   int iommu_prot, size_t *size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;
	struct scatterlist *s;
	size_t mapped = 0;
	int i, ret;
	unsigned int min_pagesz;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct map_state ms;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		goto out_err;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);

	min_pagesz = 1 << __ffs(cfg->pgsize_bitmap);

	memset(&ms, 0, sizeof(ms));

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
		size_t size = s->length;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0.  However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		while (size) {
			size_t pgsize = iommu_pgsize(
				cfg->pgsize_bitmap, iova | phys, size);

			if (ms.pgtable && (iova < ms.iova_end)) {
				arm_lpae_iopte *ptep = ms.pgtable +
					ARM_LPAE_LVL_IDX(iova, MAP_STATE_LVL,
							 data);
				arm_lpae_init_pte(
					data, iova, phys, prot, MAP_STATE_LVL,
					ptep, ms.prev_pgtable, false);
				ms.num_pte++;
			} else {
				ret = __arm_lpae_map(data, iova, phys, pgsize,
						     prot, lvl, ptep, NULL, &ms);
				if (ret)
					goto out_err;
			}

			iova += pgsize;
			mapped += pgsize;
			phys += pgsize;
			size -= pgsize;
		}
	}

	if (ms.pgtable)
		pgtable_dma_sync_single_for_device(cfg,
			__arm_lpae_dma_addr(ms.pte_start),
			ms.num_pte * sizeof(*ms.pte_start),
			DMA_TO_DEVICE);

	return mapped;

out_err:
	/* Return the size of the partial mapping so that it can be undone */
	*size = mapped;
	return 0;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;
	void *cookie = data->iop.cookie;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep,
				    arm_lpae_iopte *prev_ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
	size = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova, size);

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep, prev_ptep, NULL) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, &data->iop.cfg);
	return size;
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		}

		return size;
	} else if ((lvl == ARM_LPAE_MAX_LEVELS - 2) && !iopte_leaf(pte, lvl)) {
		arm_lpae_iopte *table = iopte_deref(pte, data);
		arm_lpae_iopte *table_base = table;
		int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data);
		int entry_size = ARM_LPAE_GRANULE(data);
		int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) / entry_size;
		int entries = min_t(int, size / entry_size,
			max_entries - tl_offset);
		int table_len = entries * sizeof(*table);

		/*
		 * This isn't a block mapping so it must be a table mapping
		 * and since it's the 2nd-to-last level the next level has
		 * to be all page mappings.  Zero them all out in one fell
		 * swoop.
		 */

		table += tl_offset;

		memset(table, 0, table_len);
		pgtable_dma_sync_single_for_device(&iop->cfg,
						   __arm_lpae_dma_addr(table),
						   table_len, DMA_TO_DEVICE);

		iopte_tblcnt_sub(ptep, entries);
		if (!iopte_tblcnt(*ptep)) {
			/* no valid mappings left under this table. free it. */
			__arm_lpae_set_pte(ptep, 0, &iop->cfg);
			io_pgtable_tlb_add_flush(iop, iova,
						 entries * entry_size,
						 ARM_LPAE_GRANULE(data),
						 false);
			__arm_lpae_free_pgtable(data, lvl + 1, table_base);
		} else {
			io_pgtable_tlb_add_flush(iop, iova,
						 entries * entry_size,
						 ARM_LPAE_GRANULE(data),
						 true);
		}

		return entries * entry_size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						prev_ptep, blk_size);
	}

	/* Keep on walkin' */
	prev_ptep = ptep;
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep, prev_ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	size_t unmapped = 0;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	while (unmapped < size) {
		size_t ret, size_to_unmap, remaining;

		remaining = (size - unmapped);
		size_to_unmap = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova,
					     remaining);
		size_to_unmap = size_to_unmap >= SZ_2M ?
				size_to_unmap :
				min_t(unsigned long, remaining,
				      (ALIGN(iova + 1, SZ_2M) - iova));
		ret = __arm_lpae_unmap(data, iova, size_to_unmap, lvl, ptep,
				       NULL);
		if (ret == 0)
			break;
		unmapped += ret;
		iova += ret;
	}
	if (unmapped)
		io_pgtable_tlb_flush_all(&data->iop);

	return unmapped;
}

static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data,
				unsigned long iova, int *plvl_ret,
				arm_lpae_iopte *ptep_ret)
{
	arm_lpae_iopte pte, *ptep = data->pgd;
	*plvl_ret = ARM_LPAE_START_LVL(data);
	*ptep_ret = 0;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return -EINVAL;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, *plvl_ret, data));

		/* Valid entry? */
		if (!pte)
			return -EINVAL;

		/* Leaf entry? */
		if (iopte_leaf(pte, *plvl_ret))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++(*plvl_ret) < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return -EINVAL;

found_translation:
	*ptep_ret = pte;
	return 0;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte;
	int lvl;
	phys_addr_t phys = 0;

	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte)) {
		iova &= ((1 << ARM_LPAE_LVL_SHIFT(lvl, data)) - 1);
		phys = ((phys_addr_t)iopte_to_pfn(pte, data)
				<< data->pg_shift) | iova;
	}

	return phys;
}
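
/*
 * E.g. if the walk stops at a 2M block PTE at level 2 (4K granule,
 * ARM_LPAE_LVL_SHIFT == 21), the low 21 bits of the IOVA are kept
 * as the offset into the block when forming the physical address.
 */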

static bool __arm_lpae_is_iova_coherent(struct arm_lpae_io_pgtable *data,
					arm_lpae_iopte *ptep)
{
	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		int attr_idx = (*ptep & (ARM_LPAE_PTE_ATTRINDX_MASK <<
					 ARM_LPAE_PTE_ATTRINDX_SHIFT)) >>
				ARM_LPAE_PTE_ATTRINDX_SHIFT;
		if ((attr_idx == ARM_LPAE_MAIR_ATTR_IDX_CACHE) &&
		    ((*ptep & ARM_LPAE_PTE_SH_IS) ||
		     (*ptep & ARM_LPAE_PTE_SH_OS)))
			return true;
	} else {
		if (*ptep & ARM_LPAE_PTE_MEMATTR_OIWB)
			return true;
	}

	return false;
}

static bool arm_lpae_is_iova_coherent(struct io_pgtable_ops *ops,
				      unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte;
	int lvl;
	bool ret = false;

	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte))
		ret = __arm_lpae_is_iova_coherent(data, &pte);

	return ret;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
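
/*
 * Example: with a 4K CPU PAGE_SIZE, a caller passing
 * SZ_4K | SZ_64K | SZ_2M is left with SZ_4K | SZ_2M: the 4K granule
 * wins and 64K is dropped, since it isn't a valid block size in the
 * 4K translation regime.
 */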

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_bits = pgd_bits;
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.map_sg		= arm_lpae_map_sg,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
		.is_iova_coherent = arm_lpae_is_iova_coherent,
	};

	return data;
}
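
/*
 * Sizing sketch: ias == 48 with a 4K granule gives va_bits == 36,
 * levels == DIV_ROUND_UP(36, 9) == 4 and pgd_bits == 36 - 3 * 9 == 9,
 * so the pgd holds 512 eight-byte entries: exactly one 4K page.
 */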

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT)
		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
	else if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT)
		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
	else
		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_UPSTREAM
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
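
/*
 * Typical usage (sketch): an IOMMU driver calls
 * alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie) and programs the
 * returned cfg.arm_lpae_s1_cfg.{ttbr,tcr,mair} values into hardware.
 * The selftests below exercise the same path with dummy TLB ops.
 */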

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		suppress_map_failures = false;				\
		selftest_running = false;				\
		-EFAULT;						\
})

/*
 * Returns true if there's any mapping in the given iova range in ops.
 */
static bool arm_lpae_range_has_mapping(struct io_pgtable_ops *ops,
				       unsigned long iova_start, size_t size)
{
	unsigned long iova = iova_start;

	while (iova < (iova_start + size)) {
		if (ops->iova_to_phys(ops, iova + 42))
			return true;
		iova += SZ_4K;
	}
	return false;
}

/*
 * Returns true if the iova range is successfully mapped to the contiguous
 * phys range in ops.
 */
static bool arm_lpae_range_has_specific_mapping(struct io_pgtable_ops *ops,
						const unsigned long iova_start,
						const phys_addr_t phys_start,
						const size_t size)
{
	unsigned long iova = iova_start;
	phys_addr_t phys = phys_start;

	while (iova < (iova_start + size)) {
		if (ops->iova_to_phys(ops, iova + 42) != (phys + 42))
			return false;
		iova += SZ_4K;
		phys += SZ_4K;
	}
	return true;
}

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j, k;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		unsigned long test_sg_sizes[] = { SZ_4K, SZ_64K, SZ_2M,
						  SZ_1M * 12, SZ_1M * 20 };

		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.  Empty page tables shouldn't
		 * provide any translations.  TODO: check entire supported
		 * range for these ops rather than first 2G
		 */
		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			suppress_map_failures = true;
			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);
			suppress_map_failures = false;

			if (!arm_lpae_range_has_specific_mapping(ops, iova,
								 iova, size))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		if (arm_lpae_range_has_mapping(ops, SZ_1G + size, size))
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (!arm_lpae_range_has_specific_mapping(ops, SZ_1G + size,
							 size, size))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
			return __FAIL(ops, i);

		if ((cfg->pgsize_bitmap & SZ_2M) &&
		    (cfg->pgsize_bitmap & SZ_4K)) {
			/* mixed block + page mappings */
			iova = 0;
			if (ops->map(ops, iova, iova, SZ_2M, IOMMU_READ))
				return __FAIL(ops, i);

			if (ops->map(ops, iova + SZ_2M, iova + SZ_2M, SZ_4K,
				     IOMMU_READ))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + SZ_2M + 42) !=
			    (iova + SZ_2M + 42))
				return __FAIL(ops, i);

			/* unmap both mappings at once */
			if (ops->unmap(ops, iova, SZ_2M + SZ_4K) !=
			    (SZ_2M + SZ_4K))
				return __FAIL(ops, i);

			if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
				return __FAIL(ops, i);
		}

		/* map_sg */
		for (j = 0; j < ARRAY_SIZE(test_sg_sizes); ++j) {
			size_t mapped;
			size_t unused;
			struct page *page;
			phys_addr_t page_phys;
			struct sg_table table;
			struct scatterlist *sg;
			unsigned long total_size = test_sg_sizes[j];
			int chunk_size = 1UL << find_first_bit(
				&cfg->pgsize_bitmap, BITS_PER_LONG);
			int nents = total_size / chunk_size;

			if (total_size < chunk_size)
				continue;

			page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
			/* check the allocation before using the page */
			BUG_ON(!page);
			page_phys = page_to_phys(page);

			iova = 0;
			BUG_ON(sg_alloc_table(&table, nents, GFP_KERNEL));
			for_each_sg(table.sgl, sg, table.nents, k)
				sg_set_page(sg, page, chunk_size, 0);

			mapped = ops->map_sg(ops, iova, table.sgl, table.nents,
					     IOMMU_READ | IOMMU_WRITE, &unused);

			if (mapped != total_size)
				return __FAIL(ops, i);

			if (!arm_lpae_range_has_mapping(ops, iova, total_size))
				return __FAIL(ops, i);

			if (arm_lpae_range_has_mapping(ops, iova + total_size,
					SZ_2G - (iova + total_size)))
				return __FAIL(ops, i);

			for_each_sg(table.sgl, sg, table.nents, k) {
				dma_addr_t newphys =
					ops->iova_to_phys(ops, iova + 42);
				if (newphys != (page_phys + 42))
					return __FAIL(ops, i);
				iova += chunk_size;
			}

			if (ops->unmap(ops, 0, total_size) != total_size)
				return __FAIL(ops, i);

			if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
				return __FAIL(ops, i);

			sg_free_table(&table);
			__free_pages(page, get_order(chunk_size));
		}

		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
			return __FAIL(ops, i);

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	suppress_map_failures = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif