/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8 idx;
	u16 mask;
	u16 id;
};

struct arm_smmu_master_cfg {
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr *smrs;
};

struct arm_smmu_master {
	struct device_node *of_node;
	struct rb_node node;
	struct arm_smmu_master_cfg cfg;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;
	struct rb_root masters;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	int num_clocks;
	struct clk **clocks;

	struct regulator *gdsc;

	struct msm_bus_client_handle *bus_client;
	char *bus_client_name;

	/* Protects power_count */
	struct mutex power_lock;
	int power_count;
	/* Protects clock_refs_count */
	spinlock_t clock_refs_lock;
	int clock_refs_count;

	spinlock_t atos_lock;

	/* protects idr */
	struct mutex idr_mutex;
	struct idr asid_idr;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	u32 procid;
	u16 asid;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg)	((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	u32 attributes;
	u32 secure_vmid;
	struct list_head pte_info_list;
	struct list_head unassign_list;
	struct mutex assign_lock;
	struct list_head secure_pool_list;
	struct iommu_domain domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

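/*
 * Master devices are tracked in a per-SMMU rb-tree keyed by the master's
 * struct device_node pointer (see insert_smmu_master() below), so lookup
 * and insertion compare of_node pointer values rather than names.
 */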
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

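/*
 * Allocate a free index from a bitmap (context banks, SMR groups) without
 * holding a lock: retry with test_and_set_bit() until an index is claimed
 * atomically, or return -ENOSPC once the map is full.
 */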
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

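/*
 * Clock management is split in two: prepare/unprepare may sleep and is
 * driven from the mutex-protected slow power path, while enable/disable
 * is reference-counted under clock_refs_lock so it can also run in
 * atomic context.
 */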
static int arm_smmu_prepare_clocks(struct arm_smmu_device *smmu)
{
	int i, ret = 0;

	for (i = 0; i < smmu->num_clocks; ++i) {
		ret = clk_prepare(smmu->clocks[i]);
		if (ret) {
			dev_err(smmu->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(smmu->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_device *smmu)
{
	int i;

	for (i = smmu->num_clocks; i; --i)
		clk_unprepare(smmu->clocks[i - 1]);
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_enable_clocks_atomic(struct arm_smmu_device *smmu)
{
	int i, ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
	if (smmu->clock_refs_count > 0) {
		smmu->clock_refs_count++;
		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
		return 0;
	}

	for (i = 0; i < smmu->num_clocks; ++i) {
		ret = clk_enable(smmu->clocks[i]);
		if (ret) {
			dev_err(smmu->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(smmu->clocks[i]);
			break;
		}
	}

	if (!ret)
		smmu->clock_refs_count++;

	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_disable_clocks_atomic(struct arm_smmu_device *smmu)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
	WARN_ON(smmu->clock_refs_count == 0);
	if (smmu->clock_refs_count > 1) {
		smmu->clock_refs_count--;
		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
		return;
	}

	for (i = smmu->num_clocks; i; --i)
		clk_disable(smmu->clocks[i - 1]);

	smmu->clock_refs_count--;
	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
}

static int arm_smmu_enable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_enable(smmu->gdsc);
}

static int arm_smmu_disable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_disable(smmu->gdsc);
}

static int arm_smmu_request_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 1000);
}

static int arm_smmu_unrequest_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 0);
}

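/*
 * Bring up the sleepable power resources (GDSC regulator, bus vote, clock
 * prepare) with reference counting under power_lock; on failure they are
 * released in reverse order, and arm_smmu_power_off_slow() undoes them
 * once the last reference is dropped.
 */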
static int arm_smmu_power_on_slow(struct arm_smmu_device *smmu)
{
	int ret;

	mutex_lock(&smmu->power_lock);
	if (smmu->power_count > 0) {
		smmu->power_count += 1;
		mutex_unlock(&smmu->power_lock);
		return 0;
	}

	ret = arm_smmu_enable_regulators(smmu);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(smmu);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(smmu);
	if (ret)
		goto out_disable_bus;

	smmu->power_count += 1;
	mutex_unlock(&smmu->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(smmu);
out_disable_regulators:
	arm_smmu_disable_regulators(smmu);
out_unlock:
	mutex_unlock(&smmu->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_device *smmu)
{
	mutex_lock(&smmu->power_lock);
	smmu->power_count--;
	WARN_ON(smmu->power_count < 0);

	if (smmu->power_count > 0) {
		mutex_unlock(&smmu->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(smmu);
	arm_smmu_unrequest_bus(smmu);
	arm_smmu_disable_regulators(smmu);

	mutex_unlock(&smmu->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_power_on_slow(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_enable_clocks_atomic(smmu);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(smmu);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_device *smmu)
{
	arm_smmu_disable_clocks_atomic(smmu);
	arm_smmu_power_off_slow(smmu);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_enable_clocks_atomic(smmu);

	return arm_smmu_power_on(smmu);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_disable_clocks_atomic(smmu);
		return;
	}

	arm_smmu_power_off(smmu);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

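/* Global TLB sync: poll sTLBGSTATUS until the invalidation completes */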
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

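/*
 * Invalidate a range of IOVAs one granule at a time, using the stage 1
 * TLBIVA(L) registers tagged with the ASID, or the stage 2 TLBIIPAS2(L)
 * registers where available; older SMMUs fall back to invalidating the
 * whole VMID.
 */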
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

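/*
 * Freed page-table pages belonging to a secure domain are not returned to
 * the allocator immediately; they are zeroed and kept on a per-domain pool
 * so that later allocations of the same size can reuse them (see
 * arm_smmu_alloc_pages_exact()/arm_smmu_free_pages_exact() below).
 */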
static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				     void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		kfree(it);
	}
}

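/*
 * io-pgtable allocation hooks: non-secure domains use plain
 * alloc_pages_exact()/free_pages_exact(); secure domains first try the
 * recycling pool above, run newly allocated pages through
 * arm_smmu_prepare_pgtable(), and route frees through the pool or
 * arm_smmu_unprepare_pgtable().
 */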
static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

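/*
 * On an unhandled fault, retry the translation through the hardware ATOS
 * port while the SMMU is halted and stall mode is momentarily disabled,
 * to help tell bad page tables apart from missed TLB invalidations or
 * misbehaving hardware.
 */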
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}

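/*
 * Context bank fault handler: decode FSR/FSYNR, give the client's fault
 * handler a chance to deal with the fault via report_iommu_fault(), and
 * otherwise log both the software and hardware (ATOS) translations before
 * terminating any stalled transaction (unless the client returned -EBUSY).
 */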
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;
	bool non_fatal_fault = !!(smmu_domain->attributes &
					DOMAIN_ATTR_NON_FATAL_FAULTS);

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	ret = arm_smmu_power_on(smmu);
	if (ret)
		return IRQ_NONE;

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT)) {
		ret = IRQ_NONE;
		goto out_power_off;
	}

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			dev_err(smmu->dev,
				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
		if (!non_fatal_fault) {
			dev_err(smmu->dev,
				"Unhandled arm-smmu context fault!\n");
			BUG();
		}
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 * not before so that the fault remains outstanding. This ensures
	 * SCTLR.HUPCF has the desired effect if subsequent transactions also
	 * need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

out_power_off:
	arm_smmu_power_off(smmu);

	return ret;
}

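/*
 * Global fault handler: log sGFSR/sGFSYNRn for unexpected global faults
 * and clear sGFSR.
 */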
1327static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1328{
1329 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1330 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001331 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001332
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001333 if (arm_smmu_power_on(smmu))
1334 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001335
Will Deacon45ae7cf2013-06-24 18:31:25 +01001336 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1337 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1338 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1339 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1340
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001341 if (!gfsr) {
1342 arm_smmu_power_off(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001343 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001344 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001345
Will Deacon45ae7cf2013-06-24 18:31:25 +01001346 dev_err_ratelimited(smmu->dev,
1347 "Unexpected global fault, this could be serious\n");
1348 dev_err_ratelimited(smmu->dev,
1349 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1350 gfsr, gfsynr0, gfsynr1, gfsynr2);
1351
1352 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001353 arm_smmu_power_off(smmu);
Will Deaconadaba322013-07-31 19:21:26 +01001354 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001355}
1356
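/*
 * Program a context bank with the page table configuration produced by
 * io-pgtable. In order: CBA2R (register width and, where supported, the
 * 16-bit VMID), CBAR (translation type, IRQ index and, for stage 2 without
 * VMID16, the 8-bit VMID), TTBR0/TTBR1 plus the ASID for stage 1 (or the
 * VTTBR for stage 2), TTBCR/TTBCR2 (or VTCR), the MAIRs for stage 1, and
 * finally SCTLR to enable the context bank unless the domain asked for
 * stage-1 bypass.
 */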
Will Deacon518f7132014-11-14 17:17:54 +00001357static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1358 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001359{
1360 u32 reg;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001361 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001362 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001363 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1364 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001365 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001366
Will Deacon45ae7cf2013-06-24 18:31:25 +01001367 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001368 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1369 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001370
Will Deacon4a1c93c2015-03-04 12:21:03 +00001371 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001372 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1373 reg = CBA2R_RW64_64BIT;
1374 else
1375 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001376 /* 16-bit VMIDs live in CBA2R */
1377 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001378 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001379
Will Deacon4a1c93c2015-03-04 12:21:03 +00001380 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1381 }
1382
Will Deacon45ae7cf2013-06-24 18:31:25 +01001383 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001384 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001385 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001386 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001387
Will Deacon57ca90f2014-02-06 14:59:05 +00001388 /*
1389 * Use the weakest shareability/memory types, so they are
1390 * overridden by the ttbcr/pte.
1391 */
1392 if (stage1) {
1393 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1394 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001395 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1396 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001397 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001398 }
Will Deacon44680ee2014-06-25 11:29:12 +01001399 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001400
Will Deacon518f7132014-11-14 17:17:54 +00001401 /* TTBRs */
1402 if (stage1) {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001403 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001405 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001406 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001407
1408 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001409 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001410 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001411 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001412 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001413 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001414 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001415
Will Deacon518f7132014-11-14 17:17:54 +00001416 /* TTBCR */
1417 if (stage1) {
1418 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1419 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
1420 if (smmu->version > ARM_SMMU_V1) {
1421 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
Will Deacon5dc56162015-05-08 17:44:22 +01001422 reg |= TTBCR2_SEP_UPSTREAM;
Will Deacon518f7132014-11-14 17:17:54 +00001423 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001424 }
1425 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001426 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1427 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001428 }
1429
Will Deacon518f7132014-11-14 17:17:54 +00001430 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001431 if (stage1) {
Will Deacon518f7132014-11-14 17:17:54 +00001432 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
Will Deacon45ae7cf2013-06-24 18:31:25 +01001433 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Will Deacon518f7132014-11-14 17:17:54 +00001434 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1435 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001436 }
1437
Will Deacon45ae7cf2013-06-24 18:31:25 +01001438 /* SCTLR */
Patrick Dalye62d3362016-03-15 18:58:28 -07001439 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;
1440
1441 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
1442 !stage1)
1443 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001444 if (stage1)
1445 reg |= SCTLR_S1_ASIDPNE;
1446#ifdef __BIG_ENDIAN
1447 reg |= SCTLR_E;
1448#endif
Will Deacon25724842013-08-21 13:49:53 +01001449 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001450}
1451
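/*
 * ASID allocation policy: regular domains derive their ASID directly from
 * the context bank index (cbndx + 1), while dynamic domains draw theirs
 * from an IDR starting above num_context_banks + 1 so they can never
 * collide with the static assignments.
 */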
Patrick Dalyc190d932016-08-30 17:23:28 -07001452static int arm_smmu_init_asid(struct iommu_domain *domain,
1453 struct arm_smmu_device *smmu)
1454{
1455 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1456 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1457 bool dynamic = is_dynamic_domain(domain);
1458 int ret;
1459
1460 if (!dynamic) {
1461 cfg->asid = cfg->cbndx + 1;
1462 } else {
1463 mutex_lock(&smmu->idr_mutex);
1464 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1465 smmu->num_context_banks + 2,
1466 MAX_ASID + 1, GFP_KERNEL);
1467
1468 mutex_unlock(&smmu->idr_mutex);
1469 if (ret < 0) {
1470 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1471 ret);
1472 return ret;
1473 }
1474 cfg->asid = ret;
1475 }
1476 return 0;
1477}
1478
1479static void arm_smmu_free_asid(struct iommu_domain *domain)
1480{
1481 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1482 struct arm_smmu_device *smmu = smmu_domain->smmu;
1483 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1484 bool dynamic = is_dynamic_domain(domain);
1485
1486 if (cfg->asid == INVALID_ASID || !dynamic)
1487 return;
1488
1489 mutex_lock(&smmu->idr_mutex);
1490 idr_remove(&smmu->asid_idr, cfg->asid);
1491 mutex_unlock(&smmu->idr_mutex);
1492}
1493
Will Deacon45ae7cf2013-06-24 18:31:25 +01001494static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001495 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001496{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001497 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001498 unsigned long ias, oas;
1499 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001500 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001501 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001502 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Patrick Dalyc190d932016-08-30 17:23:28 -07001503 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001504
Will Deacon518f7132014-11-14 17:17:54 +00001505 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001506 if (smmu_domain->smmu)
1507 goto out_unlock;
1508
Patrick Dalyc190d932016-08-30 17:23:28 -07001509 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1510 smmu_domain->cfg.asid = INVALID_ASID;
1511
Robin Murphy98006992016-04-20 14:53:33 +01001512 /* We're bypassing these SIDs, so don't allocate an actual context */
1513 if (domain->type == IOMMU_DOMAIN_DMA) {
1514 smmu_domain->smmu = smmu;
1515 goto out_unlock;
1516 }
1517
Patrick Dalyc190d932016-08-30 17:23:28 -07001518 dynamic = is_dynamic_domain(domain);
1519 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1520 dev_err(smmu->dev, "dynamic domains not supported\n");
1521 ret = -EPERM;
1522 goto out_unlock;
1523 }
1524
Will Deaconc752ce42014-06-25 22:46:31 +01001525 /*
1526 * Mapping the requested stage onto what we support is surprisingly
1527 * complicated, mainly because the spec allows S1+S2 SMMUs without
1528 * support for nested translation. That means we end up with the
1529 * following table:
1530 *
1531 * Requested Supported Actual
1532 * S1 N S1
1533 * S1 S1+S2 S1
1534 * S1 S2 S2
1535 * S1 S1 S1
1536 * N N N
1537 * N S1+S2 S2
1538 * N S2 S2
1539 * N S1 S1
1540 *
1541 * Note that you can't actually request stage-2 mappings.
1542 */
1543 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1544 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1545 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1546 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1547
Robin Murphy7602b872016-04-28 17:12:09 +01001548 /*
1549 * Choosing a suitable context format is even more fiddly. Until we
1550 * grow some way for the caller to express a preference, and/or move
1551 * the decision into the io-pgtable code where it arguably belongs,
1552 * just aim for the closest thing to the rest of the system, and hope
1553 * that the hardware isn't esoteric enough that we can't assume AArch64
1554 * support to be a superset of AArch32 support...
1555 */
1556 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1557 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
1558 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1559 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1560 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1561 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1562 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1563
1564 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1565 ret = -EINVAL;
1566 goto out_unlock;
1567 }
1568
Will Deaconc752ce42014-06-25 22:46:31 +01001569 switch (smmu_domain->stage) {
1570 case ARM_SMMU_DOMAIN_S1:
1571 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1572 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001573 ias = smmu->va_size;
1574 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001575 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001576 fmt = ARM_64_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001577 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001578 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001579 ias = min(ias, 32UL);
1580 oas = min(oas, 40UL);
1581 }
Will Deaconc752ce42014-06-25 22:46:31 +01001582 break;
1583 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001584 /*
1585 * We will likely want to change this if/when KVM gets
1586 * involved.
1587 */
Will Deaconc752ce42014-06-25 22:46:31 +01001588 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001589 cfg->cbar = CBAR_TYPE_S2_TRANS;
1590 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001591 ias = smmu->ipa_size;
1592 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001593 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001594 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001595 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001596 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001597 ias = min(ias, 40UL);
1598 oas = min(oas, 40UL);
1599 }
Will Deaconc752ce42014-06-25 22:46:31 +01001600 break;
1601 default:
1602 ret = -EINVAL;
1603 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001604 }
1605
Patrick Dalyc190d932016-08-30 17:23:28 -07001606 /* Dynamic domains must set cbndx through domain attribute */
1607 if (!dynamic) {
1608 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001609 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001610 if (ret < 0)
1611 goto out_unlock;
1612 cfg->cbndx = ret;
1613 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001614 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001615 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1616 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001617 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001618 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001619 }
1620
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001621 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Robin Murphyd5466352016-05-09 17:20:09 +01001622 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001623 .ias = ias,
1624 .oas = oas,
1625 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001626 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001627 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001628
Will Deacon518f7132014-11-14 17:17:54 +00001629 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001630 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1631 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001632 if (!pgtbl_ops) {
1633 ret = -ENOMEM;
1634 goto out_clear_smmu;
1635 }
1636
Patrick Dalyc11d1082016-09-01 15:52:44 -07001637 /*
 1638	 * Assign any page table memory that might have been allocated
 1639	 * during alloc_io_pgtable_ops().
1640 */
Patrick Dalye271f212016-10-04 13:24:49 -07001641 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001642 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001643 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001644
Robin Murphyd5466352016-05-09 17:20:09 +01001645 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001646 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Will Deacon518f7132014-11-14 17:17:54 +00001647
Patrick Dalyc190d932016-08-30 17:23:28 -07001648 /* Assign an asid */
1649 ret = arm_smmu_init_asid(domain, smmu);
1650 if (ret)
1651 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001652
Patrick Dalyc190d932016-08-30 17:23:28 -07001653 if (!dynamic) {
1654 /* Initialise the context bank with our page table cfg */
1655 arm_smmu_init_context_bank(smmu_domain,
1656 &smmu_domain->pgtbl_cfg);
1657
1658 /*
1659 * Request context fault interrupt. Do this last to avoid the
1660 * handler seeing a half-initialised domain state.
1661 */
1662 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1663 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001664 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1665 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001666 if (ret < 0) {
1667 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1668 cfg->irptndx, irq);
1669 cfg->irptndx = INVALID_IRPTNDX;
1670 goto out_clear_smmu;
1671 }
1672 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001673 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001674 }
Will Deacon518f7132014-11-14 17:17:54 +00001675 mutex_unlock(&smmu_domain->init_mutex);
1676
1677 /* Publish page table ops for map/unmap */
1678 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001679 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001680
Will Deacon518f7132014-11-14 17:17:54 +00001681out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001682 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001683 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001684out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001685 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001686 return ret;
1687}
1688
1689static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1690{
Joerg Roedel1d672632015-03-26 13:43:10 +01001691 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001692 struct arm_smmu_device *smmu = smmu_domain->smmu;
1693 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001694 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001695 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001696 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001697 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001698
Robin Murphy98006992016-04-20 14:53:33 +01001699 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001700 return;
1701
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001702 ret = arm_smmu_power_on(smmu);
1703 if (ret) {
 1704		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1705 smmu);
1706 return;
1707 }
1708
Patrick Dalyc190d932016-08-30 17:23:28 -07001709 dynamic = is_dynamic_domain(domain);
1710 if (dynamic) {
1711 arm_smmu_free_asid(domain);
1712 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001713 arm_smmu_power_off(smmu);
Patrick Dalye271f212016-10-04 13:24:49 -07001714 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001715 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001716 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001717 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001718 return;
1719 }
1720
Will Deacon518f7132014-11-14 17:17:54 +00001721 /*
 1722	 * Disable the context bank and free the page tables before
 1723	 * releasing the context bank itself.
1724 */
Will Deacon44680ee2014-06-25 11:29:12 +01001725 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001726 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001727
Will Deacon44680ee2014-06-25 11:29:12 +01001728 if (cfg->irptndx != INVALID_IRPTNDX) {
1729 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001730 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001731 }
1732
Markus Elfring44830b02015-11-06 18:32:41 +01001733 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001734 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001735 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001736 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001737 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001738 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001739
1740 arm_smmu_power_off(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001741}
1742
Joerg Roedel1d672632015-03-26 13:43:10 +01001743static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001744{
1745 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001746
Patrick Daly09801312016-08-29 17:02:52 -07001747	/* Only IOMMU_DOMAIN_UNMANAGED is supported for now (no IOMMU_DOMAIN_DMA) */
1748 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001749 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001750 /*
1751 * Allocate the domain and initialise some of its data structures.
1752 * We can't really do anything meaningful until we've added a
1753 * master.
1754 */
1755 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1756 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001757 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001758
Robin Murphy9adb9592016-01-26 18:06:36 +00001759 if (type == IOMMU_DOMAIN_DMA &&
1760 iommu_get_dma_cookie(&smmu_domain->domain)) {
1761 kfree(smmu_domain);
1762 return NULL;
1763 }
1764
Will Deacon518f7132014-11-14 17:17:54 +00001765 mutex_init(&smmu_domain->init_mutex);
1766 spin_lock_init(&smmu_domain->pgtbl_lock);
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06001767 smmu_domain->cfg.cbndx = INVALID_CBNDX;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001768 smmu_domain->secure_vmid = VMID_INVAL;
1769 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1770 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001771 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001772 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Joerg Roedel1d672632015-03-26 13:43:10 +01001773
1774 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001775}
1776
Joerg Roedel1d672632015-03-26 13:43:10 +01001777static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001778{
Joerg Roedel1d672632015-03-26 13:43:10 +01001779 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001780
1781 /*
1782 * Free the domain resources. We assume that all devices have
1783 * already been detached.
1784 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001785 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001786 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001787 kfree(smmu_domain);
1788}
1789
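/*
 * On stream-matching SMMUs, claim one Stream Match Register per stream ID
 * owned by this master and program it as an exact-match entry (mask 0;
 * SMRs are not shared here). Stream-indexing SMMUs need no SMRs, so this
 * is a no-op for them.
 */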
1790static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001791 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001792{
1793 int i;
1794 struct arm_smmu_smr *smrs;
1795 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1796
1797 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1798 return 0;
1799
Will Deacona9a1b0b2014-05-01 18:05:08 +01001800 if (cfg->smrs)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001801 return -EEXIST;
1802
Mitchel Humpherys29073202014-07-08 09:52:18 -07001803 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001804 if (!smrs) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001805 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1806 cfg->num_streamids);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001807 return -ENOMEM;
1808 }
1809
Will Deacon44680ee2014-06-25 11:29:12 +01001810 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001811 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001812 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1813 smmu->num_mapping_groups);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001814 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001815 dev_err(smmu->dev, "failed to allocate free SMR\n");
1816 goto err_free_smrs;
1817 }
1818
1819 smrs[i] = (struct arm_smmu_smr) {
1820 .idx = idx,
1821 .mask = 0, /* We don't currently share SMRs */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001822 .id = cfg->streamids[i],
Will Deacon45ae7cf2013-06-24 18:31:25 +01001823 };
1824 }
1825
1826 /* It worked! Now, poke the actual hardware */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001827 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001828 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1829 smrs[i].mask << SMR_MASK_SHIFT;
1830 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1831 }
1832
Will Deacona9a1b0b2014-05-01 18:05:08 +01001833 cfg->smrs = smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001834 return 0;
1835
1836err_free_smrs:
1837 while (--i >= 0)
1838 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1839 kfree(smrs);
1840 return -ENOSPC;
1841}
1842
1843static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001844 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001845{
1846 int i;
1847 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001848 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001849
Will Deacon43b412b2014-07-15 11:22:24 +01001850 if (!smrs)
1851 return;
1852
Will Deacon45ae7cf2013-06-24 18:31:25 +01001853 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001854 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001855 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001856
Will Deacon45ae7cf2013-06-24 18:31:25 +01001857 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1858 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1859 }
1860
Will Deacona9a1b0b2014-05-01 18:05:08 +01001861 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001862 kfree(smrs);
1863}
1864
Will Deacon45ae7cf2013-06-24 18:31:25 +01001865static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001866 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001867{
1868 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001869 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001870 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1871
Will Deacon5f634952016-04-20 14:53:32 +01001872 /*
1873 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
 1874	 * for all devices behind the SMMU. Note that we need to take care
 1875	 * when configuring SMRs for devices that are both a platform_device
 1876	 * and a PCI device (i.e. a PCI host controller).
1877 */
1878 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1879 return 0;
1880
Will Deacon8f68f8e2014-07-15 11:27:08 +01001881 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001882 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001883 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001884 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001885
Will Deacona9a1b0b2014-05-01 18:05:08 +01001886 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001887 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001888
Will Deacona9a1b0b2014-05-01 18:05:08 +01001889 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Patrick Dalyf4930442016-06-27 20:50:14 -07001890 s2cr = S2CR_TYPE_TRANS |
Will Deacon44680ee2014-06-25 11:29:12 +01001891 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001892 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1893 }
1894
1895 return 0;
1896}
1897
1898static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001899 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900{
Will Deacon43b412b2014-07-15 11:22:24 +01001901 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001902 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001903 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001904
Will Deacon8f68f8e2014-07-15 11:27:08 +01001905 /* An IOMMU group is torn down by the first device to be removed */
1906 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1907 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001908
1909 /*
1910 * We *must* clear the S2CR first, because freeing the SMR means
1911 * that it can be re-allocated immediately.
1912 */
Will Deacon43b412b2014-07-15 11:22:24 +01001913 for (i = 0; i < cfg->num_streamids; ++i) {
1914 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001915 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001916
Robin Murphy25a1c962016-02-10 14:25:33 +00001917 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001918 }
1919
Will Deacona9a1b0b2014-05-01 18:05:08 +01001920 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001921}
1922
Patrick Daly09801312016-08-29 17:02:52 -07001923static void arm_smmu_detach_dev(struct iommu_domain *domain,
1924 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001925{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001926 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07001927 struct arm_smmu_device *smmu = smmu_domain->smmu;
1928 struct arm_smmu_master_cfg *cfg;
1929 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07001930 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07001931
1932 if (dynamic)
1933 return;
1934
1935 cfg = find_smmu_master_cfg(dev);
1936 if (!cfg)
1937 return;
1938
1939 if (!smmu) {
1940 dev_err(dev, "Domain not attached; cannot detach!\n");
1941 return;
1942 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001943
1944 dev->archdata.iommu = NULL;
1945 arm_smmu_domain_remove_master(smmu_domain, cfg);
Patrick Daly8befb662016-08-17 20:03:28 -07001946
1947 /* Remove additional vote for atomic power */
1948 if (atomic_domain) {
1949 WARN_ON(arm_smmu_enable_clocks_atomic(smmu));
1950 arm_smmu_power_off(smmu);
1951 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001952}
1953
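/*
 * For secure domains, hand the queued page-table pages over to the
 * hypervisor so they become shared between HLOS (read/write) and the
 * domain's secure VMID (read-only). Pages are queued on pte_info_list by
 * arm_smmu_prepare_pgtable() and the list is drained here.
 */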
Patrick Dalye271f212016-10-04 13:24:49 -07001954static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07001955{
Patrick Dalye271f212016-10-04 13:24:49 -07001956 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001957 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
1958 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
1959 int source_vmid = VMID_HLOS;
1960 struct arm_smmu_pte_info *pte_info, *temp;
1961
Patrick Dalye271f212016-10-04 13:24:49 -07001962 if (!arm_smmu_is_domain_secure(smmu_domain))
1963 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001964
Patrick Dalye271f212016-10-04 13:24:49 -07001965 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001966 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
1967 PAGE_SIZE, &source_vmid, 1,
1968 dest_vmids, dest_perms, 2);
1969 if (WARN_ON(ret))
1970 break;
1971 }
1972
1973 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
1974 entry) {
1975 list_del(&pte_info->entry);
1976 kfree(pte_info);
1977 }
Patrick Dalye271f212016-10-04 13:24:49 -07001978 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001979}
1980
1981static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
1982{
1983 int ret;
1984 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07001985 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001986 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
1987 struct arm_smmu_pte_info *pte_info, *temp;
1988
Patrick Dalye271f212016-10-04 13:24:49 -07001989 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07001990 return;
1991
1992 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
1993 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
1994 PAGE_SIZE, source_vmlist, 2,
1995 &dest_vmids, &dest_perms, 1);
1996 if (WARN_ON(ret))
1997 break;
1998 free_pages_exact(pte_info->virt_addr, pte_info->size);
1999 }
2000
2001 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2002 entry) {
2003 list_del(&pte_info->entry);
2004 kfree(pte_info);
2005 }
2006}
2007
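/*
 * The two helpers below take the io-pgtable cookie (the smmu_domain) and
 * are called in atomic context (note the GFP_ATOMIC allocations), so they
 * do not call into the hypervisor themselves. arm_smmu_prepare_pgtable()
 * queues newly allocated table pages on pte_info_list for a later
 * arm_smmu_assign_table(), and arm_smmu_unprepare_pgtable() queues freed
 * pages on unassign_list for a later arm_smmu_unassign_table().
 */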
2008static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2009{
2010 struct arm_smmu_domain *smmu_domain = cookie;
2011 struct arm_smmu_pte_info *pte_info;
2012
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002013 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002014
2015 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2016 if (!pte_info)
2017 return;
2018
2019 pte_info->virt_addr = addr;
2020 pte_info->size = size;
2021 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2022}
2023
2024static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2025{
2026 struct arm_smmu_domain *smmu_domain = cookie;
2027 struct arm_smmu_pte_info *pte_info;
2028
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002029 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002030
2031 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2032 if (!pte_info)
2033 return -ENOMEM;
2034 pte_info->virt_addr = addr;
2035 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2036 return 0;
2037}
2038
Will Deacon45ae7cf2013-06-24 18:31:25 +01002039static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2040{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002041 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01002042 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002043 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002044 struct arm_smmu_master_cfg *cfg;
Patrick Daly8befb662016-08-17 20:03:28 -07002045 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002046
Will Deacon8f68f8e2014-07-15 11:27:08 +01002047 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01002048 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002049 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2050 return -ENXIO;
2051 }
2052
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002053 /* Enable Clocks and Power */
2054 ret = arm_smmu_power_on(smmu);
2055 if (ret)
2056 return ret;
2057
Patrick Daly8befb662016-08-17 20:03:28 -07002058 /*
2059 * Keep an additional vote for non-atomic power until domain is
2060 * detached
2061 */
2062 if (atomic_domain) {
2063 ret = arm_smmu_power_on(smmu);
2064 if (ret)
2065 goto out_power_off;
2066
2067 arm_smmu_disable_clocks_atomic(smmu);
2068 }
2069
Will Deacon518f7132014-11-14 17:17:54 +00002070 /* Ensure that the domain is finalised */
2071 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002072 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002073 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002074
Patrick Dalyc190d932016-08-30 17:23:28 -07002075 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002076 if (is_dynamic_domain(domain)) {
2077 ret = 0;
2078 goto out_power_off;
2079 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002080
Will Deacon45ae7cf2013-06-24 18:31:25 +01002081 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002082 * Sanity check the domain. We don't support domains across
2083 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002084 */
Will Deacon518f7132014-11-14 17:17:54 +00002085 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002086 dev_err(dev,
2087 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002088 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002089 ret = -EINVAL;
2090 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002091 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002092
2093 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01002094 cfg = find_smmu_master_cfg(dev);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002095 if (!cfg) {
2096 ret = -ENODEV;
2097 goto out_power_off;
2098 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002099
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002100 /* Detach the dev from its current domain */
2101 if (dev->archdata.iommu)
Patrick Daly09801312016-08-29 17:02:52 -07002102 arm_smmu_detach_dev(dev->archdata.iommu, dev);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002103
Will Deacon844e35b2014-07-17 11:23:51 +01002104 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
2105 if (!ret)
2106 dev->archdata.iommu = domain;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002107
2108out_power_off:
2109 arm_smmu_power_off(smmu);
2110
Will Deacon45ae7cf2013-06-24 18:31:25 +01002111 return ret;
2112}
2113
Will Deacon45ae7cf2013-06-24 18:31:25 +01002114static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002115 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002116{
Will Deacon518f7132014-11-14 17:17:54 +00002117 int ret;
2118 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002119 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002120	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002121
Will Deacon518f7132014-11-14 17:17:54 +00002122 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002123 return -ENODEV;
2124
Patrick Dalye271f212016-10-04 13:24:49 -07002125 arm_smmu_secure_domain_lock(smmu_domain);
2126
Will Deacon518f7132014-11-14 17:17:54 +00002127 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2128 ret = ops->map(ops, iova, paddr, size, prot);
2129 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002130
2131 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002132 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002133
Will Deacon518f7132014-11-14 17:17:54 +00002134 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002135}
2136
2137static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2138 size_t size)
2139{
Will Deacon518f7132014-11-14 17:17:54 +00002140 size_t ret;
2141 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002142 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002143	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002144
Will Deacon518f7132014-11-14 17:17:54 +00002145 if (!ops)
2146 return 0;
2147
Patrick Daly8befb662016-08-17 20:03:28 -07002148 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002149 if (ret)
2150 return ret;
2151
Patrick Dalye271f212016-10-04 13:24:49 -07002152 arm_smmu_secure_domain_lock(smmu_domain);
2153
Will Deacon518f7132014-11-14 17:17:54 +00002154 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2155 ret = ops->unmap(ops, iova, size);
2156 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002157
Patrick Daly8befb662016-08-17 20:03:28 -07002158 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002159 /*
2160 * While splitting up block mappings, we might allocate page table
 2161	 * memory during unmap, so the VMIDs need to be assigned to the
2162 * memory here as well.
2163 */
2164 arm_smmu_assign_table(smmu_domain);
 2165	/* Also unassign any pages that were freed during unmap */
2166 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002167 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002168 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002169}
2170
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002171static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2172 struct scatterlist *sg, unsigned int nents, int prot)
2173{
2174 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002175 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002176 unsigned long flags;
2177 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2178 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2179
2180 if (!ops)
2181 return -ENODEV;
2182
Patrick Daly8befb662016-08-17 20:03:28 -07002183 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002184 if (ret)
2185 return ret;
2186
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002187 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002188 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002189 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002190
2191 if (!ret)
2192 arm_smmu_unmap(domain, iova, size);
2193
Patrick Daly8befb662016-08-17 20:03:28 -07002194 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002195 arm_smmu_assign_table(smmu_domain);
2196
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002197 return ret;
2198}
2199
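/*
 * Perform a hardware address translation (ATOS): optionally halt the SMMU,
 * write the input address to ATS1PR, poll ATSR until the translation
 * completes, then read the result from PAR. On timeout, the software table
 * walk result is reported for comparison; on a PAR fault the function
 * returns 0.
 */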
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002200static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002201 dma_addr_t iova, bool do_halt)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002202{
Joerg Roedel1d672632015-03-26 13:43:10 +01002203 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002204 struct arm_smmu_device *smmu = smmu_domain->smmu;
2205 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2206	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2207 struct device *dev = smmu->dev;
2208 void __iomem *cb_base;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002209 unsigned long flags;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002210 u32 tmp;
2211 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002212 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002213
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002214 spin_lock_irqsave(&smmu->atos_lock, flags);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002215 if (do_halt && arm_smmu_halt(smmu)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002216 phys = 0;
2217 goto out_unlock;
2218 }
2219
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002220 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2221
Robin Murphy661d9622015-05-27 17:09:34 +01002222 /* ATS1 registers can only be written atomically */
2223 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002224 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002225 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2226 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002227 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002228
2229 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2230 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002231 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002232 dev_err(dev,
2233 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2234 &iova, &phys);
2235 phys = 0;
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002236 goto out_resume;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002237 }
2238
Robin Murphyf9a05f02016-04-13 18:13:01 +01002239 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002240 if (phys & CB_PAR_F) {
2241 dev_err(dev, "translation fault!\n");
2242 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002243 phys = 0;
2244 } else {
2245 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002246 }
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002247out_resume:
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002248 if (do_halt)
2249 arm_smmu_resume(smmu);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002250out_unlock:
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002251 spin_unlock_irqrestore(&smmu->atos_lock, flags);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002252 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002253}
2254
Will Deacon45ae7cf2013-06-24 18:31:25 +01002255static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002256 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002257{
Will Deacon518f7132014-11-14 17:17:54 +00002258 phys_addr_t ret;
2259 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002260 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002261	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002262
Will Deacon518f7132014-11-14 17:17:54 +00002263 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002264 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002265
Will Deacon518f7132014-11-14 17:17:54 +00002266 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002267 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002268 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002269
Will Deacon518f7132014-11-14 17:17:54 +00002270 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002271}
2272
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002273/*
 2274 * This function can sleep, and so cannot be called from atomic context. It
 2275 * powers on the register block if required. This restriction does not apply to the
2276 * original iova_to_phys() op.
2277 */
2278static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2279 dma_addr_t iova)
2280{
2281 phys_addr_t ret = 0;
2282 unsigned long flags;
2283 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002284 int err;
2285
2286 err = arm_smmu_power_on(smmu_domain->smmu);
2287 if (err)
2288 return 0;
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002289
2290 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2291 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2292 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002293 ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002294
2295 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2296
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002297 arm_smmu_power_off(smmu_domain->smmu);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002298 return ret;
2299}
2300
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002301static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
2302 struct iommu_domain *domain, dma_addr_t iova)
2303{
2304 return __arm_smmu_iova_to_phys_hard(domain, iova, false);
2305}
2306
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002307static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002308{
Will Deacond0948942014-06-24 17:30:10 +01002309 switch (cap) {
2310 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002311 /*
2312 * Return true here as the SMMU can always send out coherent
2313 * requests.
2314 */
2315 return true;
Will Deacond0948942014-06-24 17:30:10 +01002316 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002317 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002318 case IOMMU_CAP_NOEXEC:
2319 return true;
Will Deacond0948942014-06-24 17:30:10 +01002320 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002321 return false;
Will Deacond0948942014-06-24 17:30:10 +01002322 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002323}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002324
Will Deacona9a1b0b2014-05-01 18:05:08 +01002325static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
2326{
2327 *((u16 *)data) = alias;
2328 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002329}
2330
Will Deacon8f68f8e2014-07-15 11:27:08 +01002331static void __arm_smmu_release_pci_iommudata(void *data)
2332{
2333 kfree(data);
2334}
2335
Joerg Roedelaf659932015-10-21 23:51:41 +02002336static int arm_smmu_init_pci_device(struct pci_dev *pdev,
2337 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002338{
Will Deacon03edb222015-01-19 14:27:33 +00002339 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02002340 u16 sid;
2341 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002342
Will Deacon03edb222015-01-19 14:27:33 +00002343 cfg = iommu_group_get_iommudata(group);
2344 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01002345 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002346 if (!cfg)
2347 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002348
Will Deacon03edb222015-01-19 14:27:33 +00002349 iommu_group_set_iommudata(group, cfg,
2350 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01002351 }
2352
Joerg Roedelaf659932015-10-21 23:51:41 +02002353 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
2354 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002355
Will Deacon03edb222015-01-19 14:27:33 +00002356 /*
2357 * Assume Stream ID == Requester ID for now.
2358 * We need a way to describe the ID mappings in FDT.
2359 */
2360 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
2361 for (i = 0; i < cfg->num_streamids; ++i)
2362 if (cfg->streamids[i] == sid)
2363 break;
2364
2365 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
2366 if (i == cfg->num_streamids)
2367 cfg->streamids[cfg->num_streamids++] = sid;
2368
2369 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002370}
2371
Joerg Roedelaf659932015-10-21 23:51:41 +02002372static int arm_smmu_init_platform_device(struct device *dev,
2373 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00002374{
Will Deacon03edb222015-01-19 14:27:33 +00002375 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002376 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00002377
2378 if (!smmu)
2379 return -ENODEV;
2380
2381 master = find_smmu_master(smmu, dev->of_node);
2382 if (!master)
2383 return -ENODEV;
2384
Will Deacon03edb222015-01-19 14:27:33 +00002385 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002386
2387 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002388}
2389
2390static int arm_smmu_add_device(struct device *dev)
2391{
Joerg Roedelaf659932015-10-21 23:51:41 +02002392 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00002393
Joerg Roedelaf659932015-10-21 23:51:41 +02002394 group = iommu_group_get_for_dev(dev);
2395 if (IS_ERR(group))
2396 return PTR_ERR(group);
2397
Peng Fan9a4a9d82015-11-20 16:56:18 +08002398 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02002399 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002400}
2401
Will Deacon45ae7cf2013-06-24 18:31:25 +01002402static void arm_smmu_remove_device(struct device *dev)
2403{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002404 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002405}
2406
Joerg Roedelaf659932015-10-21 23:51:41 +02002407static struct iommu_group *arm_smmu_device_group(struct device *dev)
2408{
2409 struct iommu_group *group;
2410 int ret;
2411
2412 if (dev_is_pci(dev))
2413 group = pci_device_group(dev);
2414 else
2415 group = generic_device_group(dev);
2416
2417 if (IS_ERR(group))
2418 return group;
2419
2420 if (dev_is_pci(dev))
2421 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
2422 else
2423 ret = arm_smmu_init_platform_device(dev, group);
2424
2425 if (ret) {
2426 iommu_group_put(group);
2427 group = ERR_PTR(ret);
2428 }
2429
2430 return group;
2431}
2432
Will Deaconc752ce42014-06-25 22:46:31 +01002433static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2434 enum iommu_attr attr, void *data)
2435{
Joerg Roedel1d672632015-03-26 13:43:10 +01002436 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002437 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002438
2439 switch (attr) {
2440 case DOMAIN_ATTR_NESTING:
2441 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2442 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002443 case DOMAIN_ATTR_PT_BASE_ADDR:
2444 *((phys_addr_t *)data) =
2445 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2446 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002447 case DOMAIN_ATTR_CONTEXT_BANK:
2448 /* context bank index isn't valid until we are attached */
2449 if (smmu_domain->smmu == NULL)
2450 return -ENODEV;
2451
2452 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2453 ret = 0;
2454 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002455 case DOMAIN_ATTR_TTBR0: {
2456 u64 val;
2457 struct arm_smmu_device *smmu = smmu_domain->smmu;
2458 /* not valid until we are attached */
2459 if (smmu == NULL)
2460 return -ENODEV;
2461
2462 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2463 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2464 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2465 << (TTBRn_ASID_SHIFT);
2466 *((u64 *)data) = val;
2467 ret = 0;
2468 break;
2469 }
2470 case DOMAIN_ATTR_CONTEXTIDR:
2471 /* not valid until attached */
2472 if (smmu_domain->smmu == NULL)
2473 return -ENODEV;
2474 *((u32 *)data) = smmu_domain->cfg.procid;
2475 ret = 0;
2476 break;
2477 case DOMAIN_ATTR_PROCID:
2478 *((u32 *)data) = smmu_domain->cfg.procid;
2479 ret = 0;
2480 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002481 case DOMAIN_ATTR_DYNAMIC:
2482 *((int *)data) = !!(smmu_domain->attributes
2483 & (1 << DOMAIN_ATTR_DYNAMIC));
2484 ret = 0;
2485 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002486 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2487 *((int *)data) = !!(smmu_domain->attributes
2488 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2489 ret = 0;
2490 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002491 case DOMAIN_ATTR_S1_BYPASS:
2492 *((int *)data) = !!(smmu_domain->attributes
2493 & (1 << DOMAIN_ATTR_S1_BYPASS));
2494 ret = 0;
2495 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002496 case DOMAIN_ATTR_SECURE_VMID:
2497 *((int *)data) = smmu_domain->secure_vmid;
2498 ret = 0;
2499 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002500 default:
2501 return -ENODEV;
2502 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002503 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002504}
2505
2506static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2507 enum iommu_attr attr, void *data)
2508{
Will Deacon518f7132014-11-14 17:17:54 +00002509 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002510 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002511
Will Deacon518f7132014-11-14 17:17:54 +00002512 mutex_lock(&smmu_domain->init_mutex);
2513
Will Deaconc752ce42014-06-25 22:46:31 +01002514 switch (attr) {
2515 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002516 if (smmu_domain->smmu) {
2517 ret = -EPERM;
2518 goto out_unlock;
2519 }
2520
Will Deaconc752ce42014-06-25 22:46:31 +01002521 if (*(int *)data)
2522 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2523 else
2524 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2525
Will Deacon518f7132014-11-14 17:17:54 +00002526 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002527 case DOMAIN_ATTR_PROCID:
2528 if (smmu_domain->smmu != NULL) {
2529 dev_err(smmu_domain->smmu->dev,
2530 "cannot change procid attribute while attached\n");
2531 ret = -EBUSY;
2532 break;
2533 }
2534 smmu_domain->cfg.procid = *((u32 *)data);
2535 ret = 0;
2536 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002537 case DOMAIN_ATTR_DYNAMIC: {
2538 int dynamic = *((int *)data);
2539
2540 if (smmu_domain->smmu != NULL) {
2541 dev_err(smmu_domain->smmu->dev,
2542 "cannot change dynamic attribute while attached\n");
2543 ret = -EBUSY;
2544 break;
2545 }
2546
2547 if (dynamic)
2548 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2549 else
2550 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2551 ret = 0;
2552 break;
2553 }
2554 case DOMAIN_ATTR_CONTEXT_BANK:
2555 /* context bank can't be set while attached */
2556 if (smmu_domain->smmu != NULL) {
2557 ret = -EBUSY;
2558 break;
2559 }
2560 /* ... and it can only be set for dynamic contexts. */
2561 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2562 ret = -EINVAL;
2563 break;
2564 }
2565
2566 /* this will be validated during attach */
2567 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2568 ret = 0;
2569 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002570 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2571 u32 non_fatal_faults = *((int *)data);
2572
2573 if (non_fatal_faults)
2574 smmu_domain->attributes |=
2575 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2576 else
2577 smmu_domain->attributes &=
2578 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2579 ret = 0;
2580 break;
2581 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002582 case DOMAIN_ATTR_S1_BYPASS: {
2583 int bypass = *((int *)data);
2584
2585 /* bypass can't be changed while attached */
2586 if (smmu_domain->smmu != NULL) {
2587 ret = -EBUSY;
2588 break;
2589 }
2590 if (bypass)
2591 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2592 else
2593 smmu_domain->attributes &=
2594 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2595
2596 ret = 0;
2597 break;
2598 }
Patrick Daly8befb662016-08-17 20:03:28 -07002599 case DOMAIN_ATTR_ATOMIC:
2600 {
2601 int atomic_ctx = *((int *)data);
2602
2603 /* can't be changed while attached */
2604 if (smmu_domain->smmu != NULL) {
2605 ret = -EBUSY;
2606 break;
2607 }
2608 if (atomic_ctx)
2609 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2610 else
2611 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2612 break;
2613 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002614 case DOMAIN_ATTR_SECURE_VMID:
2615 if (smmu_domain->secure_vmid != VMID_INVAL) {
2616 ret = -ENODEV;
2617 WARN(1, "secure vmid already set!\n");
2618 break;
2619 }
2620 smmu_domain->secure_vmid = *((int *)data);
2621 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002622 default:
Will Deacon518f7132014-11-14 17:17:54 +00002623 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002624 }
Will Deacon518f7132014-11-14 17:17:54 +00002625
2626out_unlock:
2627 mutex_unlock(&smmu_domain->init_mutex);
2628 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002629}
2630
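/*
 * Illustrative sketch (not compiled into this driver) of how a client could
 * drive the attribute hooks above.  The device pointer and the context bank
 * number are hypothetical; only the attribute names and the
 * iommu_domain_{set,get}_attr() calls come from this code base.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	int dynamic = 1;
 *	unsigned int cb = 4;		// hypothetical context bank index
 *	u64 ttbr0;
 *
 *	// Both attributes must be set before the domain is attached.
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &cb);
 *
 *	if (!iommu_attach_device(domain, dev))
 *		// TTBR0 is only meaningful while the domain is attached.
 *		iommu_domain_get_attr(domain, DOMAIN_ATTR_TTBR0, &ttbr0);
 */
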
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002631static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2632 unsigned long flags)
2633{
2634 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2635 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2636 struct arm_smmu_device *smmu;
2637 void __iomem *cb_base;
2638
2639 if (!smmu_domain->smmu) {
2640 pr_err("Can't trigger faults on non-attached domains\n");
2641 return;
2642 }
2643
2644 smmu = smmu_domain->smmu;
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002645 if (arm_smmu_power_on(smmu))
2646 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002647
2648 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2649 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2650 flags, cfg->cbndx);
2651 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002652 /* give the interrupt time to fire... */
2653 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002654
2655 arm_smmu_power_off(smmu);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002656}
2657
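/*
 * Debug accessors: arm_smmu_reg_read()/arm_smmu_reg_write() below touch the
 * domain's context bank register frame directly.  Offsets are limited to one
 * 4K page (the per-context-bank window), the SMMU is powered up around the
 * access, and the domain must be attached so that cfg->cbndx names a valid
 * context bank.
 */
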
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002658static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2659 unsigned long offset)
2660{
2661 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2662 struct arm_smmu_device *smmu;
2663 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2664 void __iomem *cb_base;
2665 unsigned long val;
2666
2667 if (offset >= SZ_4K) {
2668 pr_err("Invalid offset: 0x%lx\n", offset);
2669 return 0;
2670 }
2671
2672 smmu = smmu_domain->smmu;
2673 if (!smmu) {
2674 WARN(1, "Can't read registers of a detached domain\n");
2675 val = 0;
2676 return val;
2677 }
2678
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002679 if (arm_smmu_power_on(smmu))
2680 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002681
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002682 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2683 val = readl_relaxed(cb_base + offset);
2684
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002685 arm_smmu_power_off(smmu);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002686 return val;
2687}
2688
2689static void arm_smmu_reg_write(struct iommu_domain *domain,
2690 unsigned long offset, unsigned long val)
2691{
2692 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2693 struct arm_smmu_device *smmu;
2694 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2695 void __iomem *cb_base;
2696
2697 if (offset >= SZ_4K) {
2698 pr_err("Invalid offset: 0x%lx\n", offset);
2699 return;
2700 }
2701
2702 smmu = smmu_domain->smmu;
2703 if (!smmu) {
2704 WARN(1, "Can't write registers of a detached domain\n");
2705 return;
2706 }
2707
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002708 if (arm_smmu_power_on(smmu))
2709 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002710
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002711 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2712 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002713
2714 arm_smmu_power_off(smmu);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002715}
2716
Will Deacon518f7132014-11-14 17:17:54 +00002717static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002718 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002719 .domain_alloc = arm_smmu_domain_alloc,
2720 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002721 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002722 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002723 .map = arm_smmu_map,
2724 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002725 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002726 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002727 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002728 .add_device = arm_smmu_add_device,
2729 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002730 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002731 .domain_get_attr = arm_smmu_domain_get_attr,
2732 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00002733 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002734 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002735 .reg_read = arm_smmu_reg_read,
2736 .reg_write = arm_smmu_reg_write,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002737};
2738
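/*
 * Implementation-defined halt/resume handshake.  Setting LOCAL_HALT_REQ in
 * MICRO_MMU_CTRL asks the SMMU to stop accepting new transactions;
 * arm_smmu_wait_for_halt() then polls the IDLE bit for up to 30ms to confirm
 * it has drained.  arm_smmu_resume() clears the request again.  This bracket
 * is used while the attach-impl-defs registers are (re)programmed below.
 */
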
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002739static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002740{
2741 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002742 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002743
2744 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2745 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2746 0, 30000)) {
2747 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
2748 return -EBUSY;
2749 }
2750
2751 return 0;
2752}
2753
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002754static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
2755{
2756 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2757 u32 reg;
2758
2759 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2760 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2761 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2762
2763 return wait ? arm_smmu_wait_for_halt(smmu) : 0;
2764}
2765
2766static int arm_smmu_halt(struct arm_smmu_device *smmu)
2767{
2768 return __arm_smmu_halt(smmu, true);
2769}
2770
2771static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
2772{
2773 return __arm_smmu_halt(smmu, false);
2774}
2775
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002776static void arm_smmu_resume(struct arm_smmu_device *smmu)
2777{
2778 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2779 u32 reg;
2780
2781 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2782 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2783 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2784}
2785
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002786static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
2787{
2788 int i;
2789 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
2790
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002791 arm_smmu_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002792 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
2793 writel_relaxed(regs[i].value,
2794 ARM_SMMU_GR0(smmu) + regs[i].offset);
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002795 arm_smmu_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002796}
2797
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002798static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002799{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002800 int i;
2801 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002802 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002803 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002804
Peng Fan3ca37122016-05-03 21:50:30 +08002805 /*
2806 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
2807 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
2808 * bit is only present in MMU-500r2 onwards.
2809 */
2810 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
2811 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
2812 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
2813 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
2814 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
2815 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
2816 }
2817
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002818 /* Make sure all context banks are disabled and clear CB_FSR */
2819 for (i = 0; i < smmu->num_context_banks; ++i) {
2820 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2821 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
2822 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002823 /*
2824 * Disable MMU-500's not-particularly-beneficial next-page
2825 * prefetcher for the sake of errata #841119 and #826419.
2826 */
2827 if (smmu->model == ARM_MMU500) {
2828 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
2829 reg &= ~ARM_MMU500_ACTLR_CPRE;
2830 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
2831 }
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002832
2833 if (smmu->model == QCOM_SMMUV2) {
2834 reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2835 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2836 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2837 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
2838 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002839 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002840}
2841
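/*
 * arm_smmu_device_reset() below performs the global bring-up: clear the
 * global fault status, put every SMR/S2CR and context bank into a known
 * state (skipped when ARM_SMMU_OPT_SKIP_INIT is set), apply the
 * implementation-defined register programming, invalidate the TLBs and
 * finally write sCR0 to enable fault reporting and client access.
 */
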
2842static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
2843{
2844 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
2845 int i = 0;
2846 u32 reg;
2847
2848 /* clear global FSR */
2849 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
2850 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
2851
2852 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
2853 /*
2854 * Mark all SMRn as invalid and all S2CRn as bypass unless
2855 * overridden
2856 */
2857 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
2858 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2859 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
2860 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
2861 }
2862
2863 arm_smmu_context_bank_reset(smmu);
2864 }
Will Deacon1463fe42013-07-31 19:21:27 +01002865
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002866 /* Program implementation defined registers */
2867 arm_smmu_impl_def_programming(smmu);
2868
Will Deacon45ae7cf2013-06-24 18:31:25 +01002869 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002870 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
2871 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
2872
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002873 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002874
Will Deacon45ae7cf2013-06-24 18:31:25 +01002875 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002876 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002877
2878 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002879 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002880
Robin Murphy25a1c962016-02-10 14:25:33 +00002881 /* Enable client access, handling unmatched streams as appropriate */
2882 reg &= ~sCR0_CLIENTPD;
2883 if (disable_bypass)
2884 reg |= sCR0_USFCFG;
2885 else
2886 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002887
2888 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002889 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002890
2891 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002892 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002893
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08002894 if (smmu->features & ARM_SMMU_FEAT_VMID16)
2895 reg |= sCR0_VMID16EN;
2896
Will Deacon45ae7cf2013-06-24 18:31:25 +01002897 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00002898 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002899 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002900}
2901
2902static int arm_smmu_id_size_to_bits(int size)
2903{
2904 switch (size) {
2905 case 0:
2906 return 32;
2907 case 1:
2908 return 36;
2909 case 2:
2910 return 40;
2911 case 3:
2912 return 42;
2913 case 4:
2914 return 44;
2915 case 5:
2916 default:
2917 return 48;
2918 }
2919}
2920
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002921static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
2922{
2923 struct device *dev = smmu->dev;
2924 int i, ntuples, ret;
2925 u32 *tuples;
2926 struct arm_smmu_impl_def_reg *regs, *regit;
2927
2928 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
2929 return 0;
2930
2931 ntuples /= sizeof(u32);
2932 if (ntuples % 2) {
2933 dev_err(dev,
2934 "Invalid number of attach-impl-defs registers: %d\n",
2935 ntuples);
2936 return -EINVAL;
2937 }
2938
2939 regs = devm_kmalloc(
2940 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
2941 GFP_KERNEL);
2942 if (!regs)
2943 return -ENOMEM;
2944
2945 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
2946 if (!tuples)
2947 return -ENOMEM;
2948
2949 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
2950 tuples, ntuples);
2951 if (ret)
2952 return ret;
2953
2954 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
2955 regit->offset = tuples[i];
2956 regit->value = tuples[i + 1];
2957 }
2958
2959 devm_kfree(dev, tuples);
2960
2961 smmu->impl_def_attach_registers = regs;
2962 smmu->num_impl_def_attach_registers = ntuples / 2;
2963
2964 return 0;
2965}
2966
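/*
 * The "attach-impl-defs" property parsed above is a flat list of
 * <offset value> pairs; each offset is relative to the global register
 * space and is written while the SMMU is halted.  A hypothetical DT
 * fragment (offsets and values invented purely for illustration):
 *
 *	attach-impl-defs = <0x6000 0x70>,
 *			   <0x6060 0x1055>;
 */
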
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002967static int arm_smmu_init_clocks(struct arm_smmu_device *smmu)
2968{
2969 const char *cname;
2970 struct property *prop;
2971 int i;
2972 struct device *dev = smmu->dev;
2973
2974 smmu->num_clocks =
2975 of_property_count_strings(dev->of_node, "clock-names");
2976
2977 if (smmu->num_clocks < 1)
2978 return 0;
2979
2980 smmu->clocks = devm_kzalloc(
2981 dev, sizeof(*smmu->clocks) * smmu->num_clocks,
2982 GFP_KERNEL);
2983
2984 if (!smmu->clocks) {
2985 dev_err(dev,
2986 "Failed to allocate memory for clocks\n");
2987 return -ENOMEM;
2988 }
2989
2990 i = 0;
2991 of_property_for_each_string(dev->of_node, "clock-names",
2992 prop, cname) {
2993 struct clk *c = devm_clk_get(dev, cname);
2994
2995 if (IS_ERR(c)) {
2996 dev_err(dev, "Couldn't get clock: %s\n",
2997 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07002998 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002999 }
3000
3001 if (clk_get_rate(c) == 0) {
3002 long rate = clk_round_rate(c, 1000);
3003
3004 clk_set_rate(c, rate);
3005 }
3006
3007 smmu->clocks[i] = c;
3008
3009 ++i;
3010 }
3011 return 0;
3012}
3013
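/*
 * arm_smmu_init_clocks() above resolves the optional "clock-names" list.
 * Any clock that still reports a rate of zero is programmed to whatever
 * rate the clock framework rounds a nominal 1000Hz request to, so that
 * enabling it later runs the hardware at a usable frequency.
 */
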
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003014static int arm_smmu_init_regulators(struct arm_smmu_device *smmu)
3015{
3016 struct device *dev = smmu->dev;
3017
3018 if (!of_get_property(dev->of_node, "vdd-supply", NULL))
3019 return 0;
3020
3021 smmu->gdsc = devm_regulator_get(dev, "vdd");
3022 if (IS_ERR(smmu->gdsc))
3023 return PTR_ERR(smmu->gdsc);
3024
3025 return 0;
3026}
3027
Patrick Daly2764f952016-09-06 19:22:44 -07003028static int arm_smmu_init_bus_scaling(struct platform_device *pdev,
3029 struct arm_smmu_device *smmu)
3030{
3031 u32 master_id;
3032
3033 if (of_property_read_u32(pdev->dev.of_node, "qcom,bus-master-id",
3034 &master_id)) {
3035 dev_dbg(smmu->dev, "No bus scaling info\n");
3036 return 0;
3037 }
3038
3039 smmu->bus_client_name = devm_kasprintf(
3040 smmu->dev, GFP_KERNEL, "smmu-bus-client-%s",
3041 dev_name(smmu->dev));
3042
3043 if (!smmu->bus_client_name)
3044 return -ENOMEM;
3045
3046 smmu->bus_client = msm_bus_scale_register(
3047 master_id, MSM_BUS_SLAVE_EBI_CH0, smmu->bus_client_name, true);
3048 if (IS_ERR(smmu->bus_client)) {
3049 int ret = PTR_ERR(smmu->bus_client);
3050
3051 if (ret != -EPROBE_DEFER)
3052 dev_err(smmu->dev, "Bus client registration failed\n");
3053 return ret;
3054 }
3055
3056 return 0;
3057}
3058
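/*
 * Bus scaling support (above) is optional.  When "qcom,bus-master-id" is
 * present, a bus client is registered from that master to
 * MSM_BUS_SLAVE_EBI_CH0, which the driver can then use to vote for
 * bandwidth.  Hypothetical DT fragment (the exact master macro depends on
 * the SoC; the name below is illustrative only):
 *
 *	qcom,bus-master-id = <MSM_BUS_MASTER_EXAMPLE>;
 */
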
Will Deacon45ae7cf2013-06-24 18:31:25 +01003059static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3060{
3061 unsigned long size;
3062 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3063 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003064 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003065
3066 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01003067 dev_notice(smmu->dev, "SMMUv%d with:\n",
3068 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003069
3070 /* ID0 */
3071 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003072
3073 /* Restrict available stages based on module parameter */
3074 if (force_stage == 1)
3075 id &= ~(ID0_S2TS | ID0_NTS);
3076 else if (force_stage == 2)
3077 id &= ~(ID0_S1TS | ID0_NTS);
3078
Will Deacon45ae7cf2013-06-24 18:31:25 +01003079 if (id & ID0_S1TS) {
3080 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
3081 dev_notice(smmu->dev, "\tstage 1 translation\n");
3082 }
3083
3084 if (id & ID0_S2TS) {
3085 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
3086 dev_notice(smmu->dev, "\tstage 2 translation\n");
3087 }
3088
3089 if (id & ID0_NTS) {
3090 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
3091 dev_notice(smmu->dev, "\tnested translation\n");
3092 }
3093
3094 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003095 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003096 dev_err(smmu->dev, "\tno translation support!\n");
3097 return -ENODEV;
3098 }
3099
Robin Murphyb7862e32016-04-13 18:13:03 +01003100 if ((id & ID0_S1TS) &&
3101 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003102 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
3103 dev_notice(smmu->dev, "\taddress translation ops\n");
3104 }
3105
Robin Murphybae2c2d2015-07-29 19:46:05 +01003106 /*
3107 * In order for DMA API calls to work properly, we must defer to what
3108 * the DT says about coherency, regardless of what the hardware claims.
3109 * Fortunately, this also opens up a workaround for systems where the
3110 * ID register value has ended up configured incorrectly.
3111 */
3112 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3113 cttw_reg = !!(id & ID0_CTTW);
3114 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003115 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003116 if (cttw_dt || cttw_reg)
3117 dev_notice(smmu->dev, "\t%scoherent table walk\n",
3118 cttw_dt ? "" : "non-");
3119 if (cttw_dt != cttw_reg)
3120 dev_notice(smmu->dev,
3121 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003122
3123 if (id & ID0_SMS) {
3124 u32 smr, sid, mask;
3125
3126 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
3127 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
3128 ID0_NUMSMRG_MASK;
3129 if (smmu->num_mapping_groups == 0) {
3130 dev_err(smmu->dev,
3131 "stream-matching supported, but no SMRs present!\n");
3132 return -ENODEV;
3133 }
3134
Dhaval Patel031d7462015-05-09 14:47:29 -07003135 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
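			/*
			 * Sanity-check the SMR layout: write all-ones to the
			 * mask and ID fields of SMR0, read the value back,
			 * and make sure the implemented mask bits can cover
			 * every implemented ID bit; otherwise stream matching
			 * cannot be programmed reliably.
			 */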
3136 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
3137 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
3138 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3139 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
Will Deacon45ae7cf2013-06-24 18:31:25 +01003140
Dhaval Patel031d7462015-05-09 14:47:29 -07003141 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3142 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
3143 if ((mask & sid) != sid) {
3144 dev_err(smmu->dev,
3145 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
3146 mask, sid);
3147 return -ENODEV;
3148 }
3149
3150 dev_notice(smmu->dev,
3151 "\tstream matching with %u register groups, mask 0x%x",
3152 smmu->num_mapping_groups, mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003153 }
Olav Haugan3c8766d2014-08-22 17:12:32 -07003154 } else {
3155 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
3156 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003157 }
3158
Robin Murphy7602b872016-04-28 17:12:09 +01003159 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3160 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3161 if (!(id & ID0_PTFS_NO_AARCH32S))
3162 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3163 }
3164
Will Deacon45ae7cf2013-06-24 18:31:25 +01003165 /* ID1 */
3166 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003167 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003168
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003169 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003170 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003171 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003172 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003173 dev_warn(smmu->dev,
3174 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3175 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003176
Will Deacon518f7132014-11-14 17:17:54 +00003177 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003178 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3179 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3180 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3181 return -ENODEV;
3182 }
3183 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
3184 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003185 /*
3186 * Cavium CN88xx erratum #27704.
3187 * Ensure ASID and VMID allocation is unique across all SMMUs in
3188 * the system.
3189 */
3190 if (smmu->model == CAVIUM_SMMUV2) {
3191 smmu->cavium_id_base =
3192 atomic_add_return(smmu->num_context_banks,
3193 &cavium_smmu_context_count);
3194 smmu->cavium_id_base -= smmu->num_context_banks;
3195 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003196
3197 /* ID2 */
3198 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3199 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003200 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003201
Will Deacon518f7132014-11-14 17:17:54 +00003202 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003203 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003204 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003205
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003206 if (id & ID2_VMID16)
3207 smmu->features |= ARM_SMMU_FEAT_VMID16;
3208
Robin Murphyf1d84542015-03-04 16:41:05 +00003209 /*
3210 * What the page table walker can address actually depends on which
3211 * descriptor format is in use, but since a) we don't know that yet,
3212 * and b) it can vary per context bank, this will have to do...
3213 */
3214 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3215 dev_warn(smmu->dev,
3216 "failed to set DMA mask for table walker\n");
3217
Robin Murphyb7862e32016-04-13 18:13:03 +01003218 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003219 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003220 if (smmu->version == ARM_SMMU_V1_64K)
3221 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003222 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003223 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003224 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003225 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003226 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003227 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003228 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003229 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003230 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003231 }
3232
Robin Murphy7602b872016-04-28 17:12:09 +01003233 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003234 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003235 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003236 if (smmu->features &
3237 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003238 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003239 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003240 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003241 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003242 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003243
Robin Murphyd5466352016-05-09 17:20:09 +01003244 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3245 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3246 else
3247 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
3248 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
3249 smmu->pgsize_bitmap);
3250
Will Deacon518f7132014-11-14 17:17:54 +00003251
Will Deacon28d60072014-09-01 16:24:48 +01003252 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
3253 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00003254 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003255
3256 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
3257 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00003258 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003259
Will Deacon45ae7cf2013-06-24 18:31:25 +01003260 return 0;
3261}
3262
Robin Murphy67b65a32016-04-13 18:12:57 +01003263struct arm_smmu_match_data {
3264 enum arm_smmu_arch_version version;
3265 enum arm_smmu_implementation model;
3266};
3267
3268#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
3269static struct arm_smmu_match_data name = { .version = ver, .model = imp }
3270
3271ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
3272ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01003273ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003274ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01003275ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003276ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01003277
Joerg Roedel09b52692014-10-02 12:24:45 +02003278static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003279 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3280 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3281 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003282 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003283 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003284 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003285 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01003286 { },
3287};
3288MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3289
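/*
 * Hypothetical DT fragment for this (legacy, "mmu-masters" based) binding;
 * all names and numbers below are illustrative only:
 *
 *	smmu: iommu@d00000 {
 *		compatible = "qcom,smmu-v2";
 *		reg = <0xd00000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>;
 *		mmu-masters = <&gfx3d 0x0000>;
 *	};
 *
 * Each master node referenced from "mmu-masters" is expected to carry a
 * "#stream-id-cells" property giving the number of stream IDs that follow
 * its phandle.
 */
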
Will Deacon45ae7cf2013-06-24 18:31:25 +01003290static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3291{
Robin Murphy09360402014-08-28 17:51:59 +01003292 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01003293 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003294 struct resource *res;
3295 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003296 struct device *dev = &pdev->dev;
3297 struct rb_node *node;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003298 struct of_phandle_iterator it;
3299 struct arm_smmu_phandle_args *masterspec;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003300 int num_irqs, i, err;
3301
3302 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3303 if (!smmu) {
3304 dev_err(dev, "failed to allocate arm_smmu_device\n");
3305 return -ENOMEM;
3306 }
3307 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003308 spin_lock_init(&smmu->atos_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003309 mutex_init(&smmu->power_lock);
Patrick Daly8befb662016-08-17 20:03:28 -07003310 spin_lock_init(&smmu->clock_refs_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003311 idr_init(&smmu->asid_idr);
3312 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003313
Robin Murphy09360402014-08-28 17:51:59 +01003314 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01003315 data = of_id->data;
3316 smmu->version = data->version;
3317 smmu->model = data->model;
Robin Murphy09360402014-08-28 17:51:59 +01003318
Will Deacon45ae7cf2013-06-24 18:31:25 +01003319 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003320 smmu->base = devm_ioremap_resource(dev, res);
3321 if (IS_ERR(smmu->base))
3322 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003323 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003324
3325 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3326 &smmu->num_global_irqs)) {
3327 dev_err(dev, "missing #global-interrupts property\n");
3328 return -ENODEV;
3329 }
3330
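	/*
	 * Count every interrupt resource on the node: the first
	 * #global-interrupts of them are global fault interrupts, and any
	 * remaining entries are treated as per-context-bank interrupts.
	 */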
3331 num_irqs = 0;
3332 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3333 num_irqs++;
3334 if (num_irqs > smmu->num_global_irqs)
3335 smmu->num_context_irqs++;
3336 }
3337
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003338 if (!smmu->num_context_irqs) {
3339 dev_err(dev, "found %d interrupts but expected at least %d\n",
3340 num_irqs, smmu->num_global_irqs + 1);
3341 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003342 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003343
3344 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3345 GFP_KERNEL);
3346 if (!smmu->irqs) {
3347 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3348 return -ENOMEM;
3349 }
3350
3351 for (i = 0; i < num_irqs; ++i) {
3352 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003353
Will Deacon45ae7cf2013-06-24 18:31:25 +01003354 if (irq < 0) {
3355 dev_err(dev, "failed to get irq index %d\n", i);
3356 return -ENODEV;
3357 }
3358 smmu->irqs[i] = irq;
3359 }
3360
Dhaval Patel031d7462015-05-09 14:47:29 -07003361 parse_driver_options(smmu);
3362
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003363 err = arm_smmu_init_clocks(smmu);
Olav Haugan3c8766d2014-08-22 17:12:32 -07003364 if (err)
3365 return err;
3366
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003367 err = arm_smmu_init_regulators(smmu);
3368 if (err)
3369 return err;
3370
Patrick Daly2764f952016-09-06 19:22:44 -07003371 err = arm_smmu_init_bus_scaling(pdev, smmu);
3372 if (err)
3373 return err;
3374
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003375 err = arm_smmu_power_on(smmu);
3376 if (err)
3377 return err;
3378
3379 err = arm_smmu_device_cfg_probe(smmu);
3380 if (err)
3381 goto out_power_off;
3382
Will Deacon45ae7cf2013-06-24 18:31:25 +01003383 i = 0;
3384 smmu->masters = RB_ROOT;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003385
3386 err = -ENOMEM;
3387 /* No need to zero the memory for masterspec */
3388 masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
3389 if (!masterspec)
3390 goto out_put_masters;
3391
3392 of_for_each_phandle(&it, err, dev->of_node,
3393 "mmu-masters", "#stream-id-cells", 0) {
3394 int count = of_phandle_iterator_args(&it, masterspec->args,
3395 MAX_MASTER_STREAMIDS);
3396 masterspec->np = of_node_get(it.node);
3397 masterspec->args_count = count;
3398
3399 err = register_smmu_master(smmu, dev, masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003400 if (err) {
3401 dev_err(dev, "failed to add master %s\n",
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003402 masterspec->np->name);
3403 kfree(masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003404 goto out_put_masters;
3405 }
3406
3407 i++;
3408 }
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003409
Will Deacon45ae7cf2013-06-24 18:31:25 +01003410 dev_notice(dev, "registered %d master devices\n", i);
3411
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003412 kfree(masterspec);
3413
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003414 err = arm_smmu_parse_impl_def_registers(smmu);
3415 if (err)
3416 goto out_put_masters;
3417
Robin Murphyb7862e32016-04-13 18:13:03 +01003418 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003419 smmu->num_context_banks != smmu->num_context_irqs) {
3420 dev_err(dev,
3421 "found only %d context interrupt(s) but %d required\n",
3422 smmu->num_context_irqs, smmu->num_context_banks);
Wei Yongjun89a23cd2013-11-15 09:42:30 +00003423 err = -ENODEV;
Will Deacon44680ee2014-06-25 11:29:12 +01003424 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003425 }
3426
Will Deacon45ae7cf2013-06-24 18:31:25 +01003427 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003428 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3429 NULL, arm_smmu_global_fault,
3430 IRQF_ONESHOT | IRQF_SHARED,
3431 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003432 if (err) {
3433 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3434 i, smmu->irqs[i]);
Peng Fanbee14002016-07-04 17:38:22 +08003435 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003436 }
3437 }
3438
3439 INIT_LIST_HEAD(&smmu->list);
3440 spin_lock(&arm_smmu_devices_lock);
3441 list_add(&smmu->list, &arm_smmu_devices);
3442 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01003443
3444 arm_smmu_device_reset(smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003445 arm_smmu_power_off(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003446 return 0;
3447
Will Deacon45ae7cf2013-06-24 18:31:25 +01003448out_put_masters:
3449 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003450 struct arm_smmu_master *master
3451 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003452 of_node_put(master->of_node);
3453 }
3454
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003455out_power_off:
3456 arm_smmu_power_off(smmu);
3457
Will Deacon45ae7cf2013-06-24 18:31:25 +01003458 return err;
3459}
3460
3461static int arm_smmu_device_remove(struct platform_device *pdev)
3462{
3463 int i;
3464 struct device *dev = &pdev->dev;
3465 struct arm_smmu_device *curr, *smmu = NULL;
3466 struct rb_node *node;
3467
3468 spin_lock(&arm_smmu_devices_lock);
3469 list_for_each_entry(curr, &arm_smmu_devices, list) {
3470 if (curr->dev == dev) {
3471 smmu = curr;
3472 list_del(&smmu->list);
3473 break;
3474 }
3475 }
3476 spin_unlock(&arm_smmu_devices_lock);
3477
3478 if (!smmu)
3479 return -ENODEV;
3480
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003481 if (arm_smmu_power_on(smmu))
3482 return -EINVAL;
3483
Will Deacon45ae7cf2013-06-24 18:31:25 +01003484 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003485 struct arm_smmu_master *master
3486 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003487 of_node_put(master->of_node);
3488 }
3489
Will Deaconecfadb62013-07-31 19:21:28 +01003490 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003491 dev_err(dev, "removing device with active domains!\n");
3492
3493 for (i = 0; i < smmu->num_global_irqs; ++i)
Peng Fanbee14002016-07-04 17:38:22 +08003494 devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003495
Patrick Dalyc190d932016-08-30 17:23:28 -07003496 idr_destroy(&smmu->asid_idr);
3497
Will Deacon45ae7cf2013-06-24 18:31:25 +01003498 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003499 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003500 arm_smmu_power_off(smmu);
3501
Patrick Daly2764f952016-09-06 19:22:44 -07003502 msm_bus_scale_unregister(smmu->bus_client);
3503
Will Deacon45ae7cf2013-06-24 18:31:25 +01003504 return 0;
3505}
3506
Will Deacon45ae7cf2013-06-24 18:31:25 +01003507static struct platform_driver arm_smmu_driver = {
3508 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003509 .name = "arm-smmu",
3510 .of_match_table = of_match_ptr(arm_smmu_of_match),
3511 },
3512 .probe = arm_smmu_device_dt_probe,
3513 .remove = arm_smmu_device_remove,
3514};
3515
3516static int __init arm_smmu_init(void)
3517{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003518 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003519 int ret;
3520
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003521 /*
3522 * Play nice with systems that don't have an ARM SMMU by checking that
3523 * an ARM SMMU exists in the system before proceeding with the driver
3524 * and IOMMU bus operation registration.
3525 */
3526 np = of_find_matching_node(NULL, arm_smmu_of_match);
3527 if (!np)
3528 return 0;
3529
3530 of_node_put(np);
3531
Will Deacon45ae7cf2013-06-24 18:31:25 +01003532 ret = platform_driver_register(&arm_smmu_driver);
3533 if (ret)
3534 return ret;
3535
3536 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01003537 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003538 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
3539
Will Deacond123cf82014-02-04 22:17:53 +00003540#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01003541 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003542 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00003543#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003544
Will Deacona9a1b0b2014-05-01 18:05:08 +01003545#ifdef CONFIG_PCI
Wei Chen112c8982016-06-13 17:20:17 +08003546 if (!iommu_present(&pci_bus_type)) {
3547 pci_request_acs();
Will Deacona9a1b0b2014-05-01 18:05:08 +01003548 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
Wei Chen112c8982016-06-13 17:20:17 +08003549 }
Will Deacona9a1b0b2014-05-01 18:05:08 +01003550#endif
3551
Will Deacon45ae7cf2013-06-24 18:31:25 +01003552 return 0;
3553}
3554
3555static void __exit arm_smmu_exit(void)
3556{
3557 return platform_driver_unregister(&arm_smmu_driver);
3558}
3559
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003560subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003561module_exit(arm_smmu_exit);
3562
3563MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
3564MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
3565MODULE_LICENSE("GPL v2");