blob: b261f73290a75ed3756d353e6f1e1089157dba51 [file] [log] [blame]
Will Deacon45ae7cf2013-06-24 18:31:25 +01001/*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
Will Deacon45ae7cf2013-06-24 18:31:25 +010026 * - Context fault reporting
27 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
31#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000032#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010033#include <linux/dma-mapping.h>
34#include <linux/err.h>
35#include <linux/interrupt.h>
36#include <linux/io.h>
Robin Murphyf9a05f02016-04-13 18:13:01 +010037#include <linux/io-64-nonatomic-hi-lo.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010038#include <linux/iommu.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000039#include <linux/iopoll.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010040#include <linux/module.h>
41#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010042#include <linux/of_address.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010043#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010044#include <linux/platform_device.h>
45#include <linux/slab.h>
46#include <linux/spinlock.h>
47
48#include <linux/amba/bus.h>
49
Will Deacon518f7132014-11-14 17:17:54 +000050#include "io-pgtable.h"
Will Deacon45ae7cf2013-06-24 18:31:25 +010051
52/* Maximum number of stream IDs assigned to a single device */
Joerg Roedelcb6c27b2016-04-04 17:49:22 +020053#define MAX_MASTER_STREAMIDS 128
Will Deacon45ae7cf2013-06-24 18:31:25 +010054
55/* Maximum number of context banks per SMMU */
56#define ARM_SMMU_MAX_CBS 128
57
58/* Maximum number of mapping groups per SMMU */
59#define ARM_SMMU_MAX_SMRS 128
60
Will Deacon45ae7cf2013-06-24 18:31:25 +010061/* SMMU global address space */
62#define ARM_SMMU_GR0(smmu) ((smmu)->base)
Will Deaconc757e852014-07-30 11:33:25 +010063#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +010064
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +000065/*
66 * SMMU global address space with conditional offset to access secure
67 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
68 * nsGFSYNR0: 0x450)
69 */
70#define ARM_SMMU_GR0_NS(smmu) \
71 ((smmu)->base + \
72 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
73 ? 0x400 : 0))
74
Robin Murphyf9a05f02016-04-13 18:13:01 +010075/*
76 * Some 64-bit registers only make sense to write atomically, but in such
77 * cases all the data relevant to AArch32 formats lies within the lower word,
78 * therefore this actually makes more sense than it might first appear.
79 */
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010080#ifdef CONFIG_64BIT
Robin Murphyf9a05f02016-04-13 18:13:01 +010081#define smmu_write_atomic_lq writeq_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010082#else
Robin Murphyf9a05f02016-04-13 18:13:01 +010083#define smmu_write_atomic_lq writel_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010084#endif
85
Will Deacon45ae7cf2013-06-24 18:31:25 +010086/* Configuration registers */
87#define ARM_SMMU_GR0_sCR0 0x0
88#define sCR0_CLIENTPD (1 << 0)
89#define sCR0_GFRE (1 << 1)
90#define sCR0_GFIE (1 << 2)
91#define sCR0_GCFGFRE (1 << 4)
92#define sCR0_GCFGFIE (1 << 5)
93#define sCR0_USFCFG (1 << 10)
94#define sCR0_VMIDPNE (1 << 11)
95#define sCR0_PTM (1 << 12)
96#define sCR0_FB (1 << 13)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -080097#define sCR0_VMID16EN (1 << 31)
Will Deacon45ae7cf2013-06-24 18:31:25 +010098#define sCR0_BSU_SHIFT 14
99#define sCR0_BSU_MASK 0x3
100
Peng Fan3ca37122016-05-03 21:50:30 +0800101/* Auxiliary Configuration register */
102#define ARM_SMMU_GR0_sACR 0x10
103
Will Deacon45ae7cf2013-06-24 18:31:25 +0100104/* Identification registers */
105#define ARM_SMMU_GR0_ID0 0x20
106#define ARM_SMMU_GR0_ID1 0x24
107#define ARM_SMMU_GR0_ID2 0x28
108#define ARM_SMMU_GR0_ID3 0x2c
109#define ARM_SMMU_GR0_ID4 0x30
110#define ARM_SMMU_GR0_ID5 0x34
111#define ARM_SMMU_GR0_ID6 0x38
112#define ARM_SMMU_GR0_ID7 0x3c
113#define ARM_SMMU_GR0_sGFSR 0x48
114#define ARM_SMMU_GR0_sGFSYNR0 0x50
115#define ARM_SMMU_GR0_sGFSYNR1 0x54
116#define ARM_SMMU_GR0_sGFSYNR2 0x58
Will Deacon45ae7cf2013-06-24 18:31:25 +0100117
118#define ID0_S1TS (1 << 30)
119#define ID0_S2TS (1 << 29)
120#define ID0_NTS (1 << 28)
121#define ID0_SMS (1 << 27)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000122#define ID0_ATOSNS (1 << 26)
Robin Murphy7602b872016-04-28 17:12:09 +0100123#define ID0_PTFS_NO_AARCH32 (1 << 25)
124#define ID0_PTFS_NO_AARCH32S (1 << 24)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100125#define ID0_CTTW (1 << 14)
126#define ID0_NUMIRPT_SHIFT 16
127#define ID0_NUMIRPT_MASK 0xff
Olav Haugan3c8766d2014-08-22 17:12:32 -0700128#define ID0_NUMSIDB_SHIFT 9
129#define ID0_NUMSIDB_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100130#define ID0_NUMSMRG_SHIFT 0
131#define ID0_NUMSMRG_MASK 0xff
132
133#define ID1_PAGESIZE (1 << 31)
134#define ID1_NUMPAGENDXB_SHIFT 28
135#define ID1_NUMPAGENDXB_MASK 7
136#define ID1_NUMS2CB_SHIFT 16
137#define ID1_NUMS2CB_MASK 0xff
138#define ID1_NUMCB_SHIFT 0
139#define ID1_NUMCB_MASK 0xff
140
141#define ID2_OAS_SHIFT 4
142#define ID2_OAS_MASK 0xf
143#define ID2_IAS_SHIFT 0
144#define ID2_IAS_MASK 0xf
145#define ID2_UBS_SHIFT 8
146#define ID2_UBS_MASK 0xf
147#define ID2_PTFS_4K (1 << 12)
148#define ID2_PTFS_16K (1 << 13)
149#define ID2_PTFS_64K (1 << 14)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800150#define ID2_VMID16 (1 << 15)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100151
Peng Fan3ca37122016-05-03 21:50:30 +0800152#define ID7_MAJOR_SHIFT 4
153#define ID7_MAJOR_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100154
Will Deacon45ae7cf2013-06-24 18:31:25 +0100155/* Global TLB invalidation */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100156#define ARM_SMMU_GR0_TLBIVMID 0x64
157#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
158#define ARM_SMMU_GR0_TLBIALLH 0x6c
159#define ARM_SMMU_GR0_sTLBGSYNC 0x70
160#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
161#define sTLBGSTATUS_GSACTIVE (1 << 0)
162#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
163
164/* Stream mapping registers */
165#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
166#define SMR_VALID (1 << 31)
167#define SMR_MASK_SHIFT 16
168#define SMR_MASK_MASK 0x7fff
169#define SMR_ID_SHIFT 0
170#define SMR_ID_MASK 0x7fff
171
172#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
173#define S2CR_CBNDX_SHIFT 0
174#define S2CR_CBNDX_MASK 0xff
175#define S2CR_TYPE_SHIFT 16
176#define S2CR_TYPE_MASK 0x3
177#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
178#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
179#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
180
Robin Murphyd3461802016-01-26 18:06:34 +0000181#define S2CR_PRIVCFG_SHIFT 24
182#define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT)
183
Will Deacon45ae7cf2013-06-24 18:31:25 +0100184/* Context bank attribute registers */
185#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
186#define CBAR_VMID_SHIFT 0
187#define CBAR_VMID_MASK 0xff
Will Deacon57ca90f2014-02-06 14:59:05 +0000188#define CBAR_S1_BPSHCFG_SHIFT 8
189#define CBAR_S1_BPSHCFG_MASK 3
190#define CBAR_S1_BPSHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100191#define CBAR_S1_MEMATTR_SHIFT 12
192#define CBAR_S1_MEMATTR_MASK 0xf
193#define CBAR_S1_MEMATTR_WB 0xf
194#define CBAR_TYPE_SHIFT 16
195#define CBAR_TYPE_MASK 0x3
196#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
197#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
198#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
199#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
200#define CBAR_IRPTNDX_SHIFT 24
201#define CBAR_IRPTNDX_MASK 0xff
202
203#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
204#define CBA2R_RW64_32BIT (0 << 0)
205#define CBA2R_RW64_64BIT (1 << 0)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800206#define CBA2R_VMID_SHIFT 16
207#define CBA2R_VMID_MASK 0xffff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100208
209/* Translation context bank */
210#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
Will Deaconc757e852014-07-30 11:33:25 +0100211#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100212
213#define ARM_SMMU_CB_SCTLR 0x0
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100214#define ARM_SMMU_CB_ACTLR 0x4
Will Deacon45ae7cf2013-06-24 18:31:25 +0100215#define ARM_SMMU_CB_RESUME 0x8
216#define ARM_SMMU_CB_TTBCR2 0x10
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100217#define ARM_SMMU_CB_TTBR0 0x20
218#define ARM_SMMU_CB_TTBR1 0x28
Will Deacon45ae7cf2013-06-24 18:31:25 +0100219#define ARM_SMMU_CB_TTBCR 0x30
220#define ARM_SMMU_CB_S1_MAIR0 0x38
Will Deacon518f7132014-11-14 17:17:54 +0000221#define ARM_SMMU_CB_S1_MAIR1 0x3c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100222#define ARM_SMMU_CB_PAR 0x50
Will Deacon45ae7cf2013-06-24 18:31:25 +0100223#define ARM_SMMU_CB_FSR 0x58
Robin Murphyf9a05f02016-04-13 18:13:01 +0100224#define ARM_SMMU_CB_FAR 0x60
Will Deacon45ae7cf2013-06-24 18:31:25 +0100225#define ARM_SMMU_CB_FSYNR0 0x68
Will Deacon518f7132014-11-14 17:17:54 +0000226#define ARM_SMMU_CB_S1_TLBIVA 0x600
Will Deacon1463fe42013-07-31 19:21:27 +0100227#define ARM_SMMU_CB_S1_TLBIASID 0x610
Will Deacon518f7132014-11-14 17:17:54 +0000228#define ARM_SMMU_CB_S1_TLBIVAL 0x620
229#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
230#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
Robin Murphy661d9622015-05-27 17:09:34 +0100231#define ARM_SMMU_CB_ATS1PR 0x800
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000232#define ARM_SMMU_CB_ATSR 0x8f0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100233
234#define SCTLR_S1_ASIDPNE (1 << 12)
235#define SCTLR_CFCFG (1 << 7)
236#define SCTLR_CFIE (1 << 6)
237#define SCTLR_CFRE (1 << 5)
238#define SCTLR_E (1 << 4)
239#define SCTLR_AFE (1 << 2)
240#define SCTLR_TRE (1 << 1)
241#define SCTLR_M (1 << 0)
242#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
243
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100244#define ARM_MMU500_ACTLR_CPRE (1 << 1)
245
Peng Fan3ca37122016-05-03 21:50:30 +0800246#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
247
Patrick Dalyf0d4e212016-06-20 15:50:14 -0700248/* Definitions for implementation-defined registers */
249#define ACTLR_QCOM_OSH_SHIFT 28
250#define ACTLR_QCOM_OSH 1
251
252#define ACTLR_QCOM_ISH_SHIFT 29
253#define ACTLR_QCOM_ISH 1
254
255#define ACTLR_QCOM_NSH_SHIFT 30
256#define ACTLR_QCOM_NSH 1
257
Mitchel Humpherys952f40a2015-08-19 12:13:28 -0700258#define ARM_SMMU_IMPL_DEF0(smmu) \
259 ((smmu)->base + (2 * (1 << (smmu)->pgshift)))
260#define ARM_SMMU_IMPL_DEF1(smmu) \
261 ((smmu)->base + (6 * (1 << (smmu)->pgshift)))
262#define IMPL_DEF1_MICRO_MMU_CTRL 0
263#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
264#define MICRO_MMU_CTRL_IDLE (1 << 3)
265
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000266#define CB_PAR_F (1 << 0)
267
268#define ATSR_ACTIVE (1 << 0)
269
Will Deacon45ae7cf2013-06-24 18:31:25 +0100270#define RESUME_RETRY (0 << 0)
271#define RESUME_TERMINATE (1 << 0)
272
Will Deacon45ae7cf2013-06-24 18:31:25 +0100273#define TTBCR2_SEP_SHIFT 15
Will Deacon5dc56162015-05-08 17:44:22 +0100274#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100275
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100276#define TTBRn_ASID_SHIFT 48
Will Deacon45ae7cf2013-06-24 18:31:25 +0100277
278#define FSR_MULTI (1 << 31)
279#define FSR_SS (1 << 30)
280#define FSR_UUT (1 << 8)
281#define FSR_ASF (1 << 7)
282#define FSR_TLBLKF (1 << 6)
283#define FSR_TLBMCF (1 << 5)
284#define FSR_EF (1 << 4)
285#define FSR_PF (1 << 3)
286#define FSR_AFF (1 << 2)
287#define FSR_TF (1 << 1)
288
Mitchel Humpherys29073202014-07-08 09:52:18 -0700289#define FSR_IGN (FSR_AFF | FSR_ASF | \
290 FSR_TLBMCF | FSR_TLBLKF)
291#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
Will Deaconadaba322013-07-31 19:21:26 +0100292 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100293
294#define FSYNR0_WNR (1 << 4)
295
Will Deacon4cf740b2014-07-14 19:47:39 +0100296static int force_stage;
Robin Murphy25a1c962016-02-10 14:25:33 +0000297module_param(force_stage, int, S_IRUGO);
Will Deacon4cf740b2014-07-14 19:47:39 +0100298MODULE_PARM_DESC(force_stage,
299 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
Robin Murphy25a1c962016-02-10 14:25:33 +0000300static bool disable_bypass;
301module_param(disable_bypass, bool, S_IRUGO);
302MODULE_PARM_DESC(disable_bypass,
303 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +0100304
/* SMMU architecture versions handled by this driver */
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,	/* v1 variant with 64kB translation granule support */
	ARM_SMMU_V2,
};
310
/* Known implementations, used to select vendor-specific quirks/workarounds */
enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};
317
/* One implementation-defined register write: value to program at offset */
struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};
322
/* Software shadow of an allocated Stream Match Register (SMR) entry */
struct arm_smmu_smr {
	u8				idx;	/* index of the SMR within the SMMU */
	u16				mask;	/* stream ID match mask */
	u16				id;	/* stream ID to match */
};
328
/* Stream ID configuration shared by all devices in an iommu group */
struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	/* stream-match entries allocated for this master, NULL if none */
	struct arm_smmu_smr		*smrs;
};
334
/* A master device from the DT, kept in the per-SMMU rb-tree keyed by of_node */
struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;	/* linkage in smmu->masters */
	struct arm_smmu_master_cfg	cfg;
};
340
/* Per-instance state for one SMMU */
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;		/* mapped register space */
	unsigned long			size;		/* size of register space */
	unsigned long			pgshift;	/* log2 of the SMMU page size */

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;	/* ARM_SMMU_FEAT_* probed from ID regs */

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
	u32				options;	/* ARM_SMMU_OPT_* from DT properties */
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);	/* allocated context banks */
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);	/* allocated SMR entries */

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;		/* entry in arm_smmu_devices */
	struct rb_root			masters;	/* rb-tree of arm_smmu_master */

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	spinlock_t			atos_lock;	/* serialises ATOS translations */
};
395
/* Translation table format selected for a context bank */
enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,	/* AArch32 long-descriptor */
	ARM_SMMU_CTX_FMT_AARCH32_S,	/* AArch32 short-descriptor */
};
402
/* Hardware configuration of the context bank backing a domain */
struct arm_smmu_cfg {
	u8				cbndx;		/* context bank index */
	u8				irptndx;	/* context interrupt index */
	u32				cbar;		/* CBAR_TYPE_* for this context */
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100410
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800411#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
412#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
Will Deaconecfadb62013-07-31 19:21:28 +0100413
/* Which translation stage(s) a domain is configured for */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};
419
/* An IOMMU domain backed by a single SMMU context bank */
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;		/* SMMU we are attached to, NULL before attach */
	struct io_pgtable_ops		*pgtbl_ops;	/* io-pgtable callbacks for map/unmap */
	spinlock_t			pgtbl_lock;	/* serialises page-table updates */
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;		/* embedded generic domain */
};
429
/* Like of_phandle_args, but with room for MAX_MASTER_STREAMIDS arguments */
struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};
435
Will Deacon45ae7cf2013-06-24 18:31:25 +0100436static DEFINE_SPINLOCK(arm_smmu_devices_lock);
437static LIST_HEAD(arm_smmu_devices);
438
/* Maps a device-tree property name to an ARM_SMMU_OPT_* flag */
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};
443
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800444static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
445
/* DT-selectable driver options; the zero entry terminates the table */
static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ 0, NULL},
};
451
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -0800452static int arm_smmu_halt(struct arm_smmu_device *smmu);
453static void arm_smmu_resume(struct arm_smmu_device *smmu);
454
Joerg Roedel1d672632015-03-26 13:43:10 +0100455static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
456{
457 return container_of(dom, struct arm_smmu_domain, domain);
458}
459
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000460static void parse_driver_options(struct arm_smmu_device *smmu)
461{
462 int i = 0;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700463
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000464 do {
465 if (of_property_read_bool(smmu->dev->of_node,
466 arm_smmu_options[i].prop)) {
467 smmu->options |= arm_smmu_options[i].opt;
468 dev_notice(smmu->dev, "option %s\n",
469 arm_smmu_options[i].prop);
470 }
471 } while (arm_smmu_options[++i].opt);
472}
473
Will Deacon8f68f8e2014-07-15 11:27:08 +0100474static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100475{
476 if (dev_is_pci(dev)) {
477 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700478
Will Deacona9a1b0b2014-05-01 18:05:08 +0100479 while (!pci_is_root_bus(bus))
480 bus = bus->parent;
Will Deacon8f68f8e2014-07-15 11:27:08 +0100481 return bus->bridge->parent->of_node;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100482 }
483
Will Deacon8f68f8e2014-07-15 11:27:08 +0100484 return dev->of_node;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100485}
486
Will Deacon45ae7cf2013-06-24 18:31:25 +0100487static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
488 struct device_node *dev_node)
489{
490 struct rb_node *node = smmu->masters.rb_node;
491
492 while (node) {
493 struct arm_smmu_master *master;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700494
Will Deacon45ae7cf2013-06-24 18:31:25 +0100495 master = container_of(node, struct arm_smmu_master, node);
496
497 if (dev_node < master->of_node)
498 node = node->rb_left;
499 else if (dev_node > master->of_node)
500 node = node->rb_right;
501 else
502 return master;
503 }
504
505 return NULL;
506}
507
Will Deacona9a1b0b2014-05-01 18:05:08 +0100508static struct arm_smmu_master_cfg *
Will Deacon8f68f8e2014-07-15 11:27:08 +0100509find_smmu_master_cfg(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100510{
Will Deacon8f68f8e2014-07-15 11:27:08 +0100511 struct arm_smmu_master_cfg *cfg = NULL;
512 struct iommu_group *group = iommu_group_get(dev);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100513
Will Deacon8f68f8e2014-07-15 11:27:08 +0100514 if (group) {
515 cfg = iommu_group_get_iommudata(group);
516 iommu_group_put(group);
517 }
Will Deacona9a1b0b2014-05-01 18:05:08 +0100518
Will Deacon8f68f8e2014-07-15 11:27:08 +0100519 return cfg;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100520}
521
Will Deacon45ae7cf2013-06-24 18:31:25 +0100522static int insert_smmu_master(struct arm_smmu_device *smmu,
523 struct arm_smmu_master *master)
524{
525 struct rb_node **new, *parent;
526
527 new = &smmu->masters.rb_node;
528 parent = NULL;
529 while (*new) {
Mitchel Humpherys29073202014-07-08 09:52:18 -0700530 struct arm_smmu_master *this
531 = container_of(*new, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100532
533 parent = *new;
534 if (master->of_node < this->of_node)
535 new = &((*new)->rb_left);
536 else if (master->of_node > this->of_node)
537 new = &((*new)->rb_right);
538 else
539 return -EEXIST;
540 }
541
542 rb_link_node(&master->node, parent, new);
543 rb_insert_color(&master->node, &smmu->masters);
544 return 0;
545}
546
/*
 * Record a DT master described by @masterspec against @smmu, validating
 * its stream IDs and inserting it into the per-SMMU rb-tree.
 *
 * Returns 0 on success; -EBUSY if the node is already registered, -ENOSPC
 * if it carries too many stream IDs, -ENOMEM on allocation failure, or
 * -ERANGE if a stream ID exceeds the number of mapping groups (when the
 * SMMU lacks stream matching). The master is devm-allocated, so it is
 * freed automatically with @dev.
 */
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		/*
		 * Without stream matching, stream IDs index the mapping
		 * groups directly, so they must be in range.
		 */
		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}
590
Will Deacon44680ee2014-06-25 11:29:12 +0100591static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100592{
Will Deacon44680ee2014-06-25 11:29:12 +0100593 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100594 struct arm_smmu_master *master = NULL;
Will Deacon8f68f8e2014-07-15 11:27:08 +0100595 struct device_node *dev_node = dev_get_dev_node(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100596
597 spin_lock(&arm_smmu_devices_lock);
Will Deacon44680ee2014-06-25 11:29:12 +0100598 list_for_each_entry(smmu, &arm_smmu_devices, list) {
Will Deacona9a1b0b2014-05-01 18:05:08 +0100599 master = find_smmu_master(smmu, dev_node);
600 if (master)
601 break;
602 }
Will Deacon45ae7cf2013-06-24 18:31:25 +0100603 spin_unlock(&arm_smmu_devices_lock);
Will Deacon44680ee2014-06-25 11:29:12 +0100604
Will Deacona9a1b0b2014-05-01 18:05:08 +0100605 return master ? smmu : NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100606}
607
608static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
609{
610 int idx;
611
612 do {
613 idx = find_next_zero_bit(map, end, start);
614 if (idx == end)
615 return -ENOSPC;
616 } while (test_and_set_bit(idx, map));
617
618 return idx;
619}
620
/* Release a bit previously claimed with __arm_smmu_alloc_bitmap(). */
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
625
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Kick off a global TLB sync, then poll until it drains */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		/* Give up after TLB_LOOP_TIMEOUT iterations (~1s of udelay) */
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
644
/* io-pgtable tlb_sync callback: @cookie is the owning arm_smmu_domain */
static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
650
/*
 * io-pgtable tlb_flush_all callback: invalidate the whole TLB for the
 * domain given by @cookie, then wait for completion.
 */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		/* Stage 1: invalidate by ASID via the context bank */
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		/* Stage 2: invalidate by VMID via the global register */
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}
671
Will Deacon518f7132014-11-14 17:17:54 +0000672static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +0000673 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +0000674{
675 struct arm_smmu_domain *smmu_domain = cookie;
676 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
677 struct arm_smmu_device *smmu = smmu_domain->smmu;
678 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
679 void __iomem *reg;
680
681 if (stage1) {
682 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
683 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
684
Robin Murphy7602b872016-04-28 17:12:09 +0100685 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +0000686 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800687 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +0000688 do {
689 writel_relaxed(iova, reg);
690 iova += granule;
691 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +0000692 } else {
693 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800694 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +0000695 do {
696 writeq_relaxed(iova, reg);
697 iova += granule >> 12;
698 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +0000699 }
Will Deacon518f7132014-11-14 17:17:54 +0000700 } else if (smmu->version == ARM_SMMU_V2) {
701 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
702 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
703 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +0000704 iova >>= 12;
705 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +0100706 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +0000707 iova += granule >> 12;
708 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +0000709 } else {
710 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800711 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +0000712 }
713}
714
/* TLB maintenance callbacks handed to the io-pgtable layer */
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
720
/*
 * Context-bank fault handler: decode the fault syndrome for the faulting
 * context bank, report it to any registered fault handler, and then either
 * retry or terminate a stalled transaction based on the handler's verdict.
 *
 * @irq: interrupt number (unused).
 * @dev: opaque cookie — actually the struct iommu_domain this context
 *       bank was registered against (see devm_request_threaded_irq caller).
 *
 * Returns IRQ_HANDLED if the fault was consumed by a fault handler,
 * IRQ_NONE otherwise.
 */
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	/* Vendor option: treat address size faults as unrecoverable */
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	/* Spurious interrupt (no fault bits set) — not ours */
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	/* Faults we expect to have masked should never be reported */
	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	/* Zero return means a registered handler dealt with the fault */
	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}
774
775static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
776{
777 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
778 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000779 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100780
781 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
782 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
783 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
784 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
785
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000786 if (!gfsr)
787 return IRQ_NONE;
788
Will Deacon45ae7cf2013-06-24 18:31:25 +0100789 dev_err_ratelimited(smmu->dev,
790 "Unexpected global fault, this could be serious\n");
791 dev_err_ratelimited(smmu->dev,
792 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
793 gfsr, gfsynr0, gfsynr1, gfsynr2);
794
795 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Will Deaconadaba322013-07-31 19:21:26 +0100796 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100797}
798
/*
 * Program the hardware context bank selected in @smmu_domain->cfg from
 * the page-table configuration produced by the io-pgtable allocator:
 * CBA2R/CBAR (format, VMID, IRQ routing), TTBRs, TTBCR(2), MAIRs for
 * stage 1, and finally SCTLR to enable translation.
 *
 * @smmu_domain: domain whose cfg identifies the context bank and stage.
 * @pgtbl_cfg:   io-pgtable output carrying ttbr/tcr/mair values.
 */
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	/* Anything that isn't pure stage-2 programs the stage-1 registers */
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs (ASID is carried in the upper bits of the TTBR for stage 1) */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR (upper 32 bits of the stage-1 TCR go in TTBCR2 on SMMUv2) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR: enable the MMU, fault reporting and config-fault stalling */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
889
/*
 * Finalise a domain on first attach: pick a translation stage and context
 * format, allocate a context bank, build the io-pgtable, program the
 * hardware context bank and request its fault IRQ.
 *
 * @domain: the IOMMU domain being attached.
 * @smmu:   the SMMU instance the attaching master sits behind.
 *
 * Returns 0 on success (idempotent if the domain is already initialised
 * on this SMMU), negative errno on failure.  Serialised against
 * concurrent attaches by smmu_domain->init_mutex.
 */
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	/* Already initialised by an earlier attach */
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Derive address sizes and io-pgtable format from stage + format */
	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		/* Stage-1 context banks sit after the stage-2 ones */
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		/* v1: context IRQs are a shared pool, assign round-robin */
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		/* v2: one IRQ per context bank */
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
			arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
			"arm-smmu-context-fault", domain);
	if (ret < 0) {
		/* Non-fatal: run without fault reporting on this context */
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
1052
/*
 * Tear down the hardware context bank backing @domain: disable
 * translation, release the fault IRQ, free the page tables and return
 * the context bank to the allocator.  No-op for uninitialised or
 * DMA-bypass domains, which never owned a context bank.
 */
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
1079
Joerg Roedel1d672632015-03-26 13:43:10 +01001080static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001081{
1082 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001083
Robin Murphy9adb9592016-01-26 18:06:36 +00001084 if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
Joerg Roedel1d672632015-03-26 13:43:10 +01001085 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001086 /*
1087 * Allocate the domain and initialise some of its data structures.
1088 * We can't really do anything meaningful until we've added a
1089 * master.
1090 */
1091 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1092 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001093 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001094
Robin Murphy9adb9592016-01-26 18:06:36 +00001095 if (type == IOMMU_DOMAIN_DMA &&
1096 iommu_get_dma_cookie(&smmu_domain->domain)) {
1097 kfree(smmu_domain);
1098 return NULL;
1099 }
1100
Will Deacon518f7132014-11-14 17:17:54 +00001101 mutex_init(&smmu_domain->init_mutex);
1102 spin_lock_init(&smmu_domain->pgtbl_lock);
Joerg Roedel1d672632015-03-26 13:43:10 +01001103
1104 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001105}
1106
/*
 * Release a domain allocated by arm_smmu_domain_alloc().  All devices
 * are assumed to have been detached already: drop the DMA cookie (if
 * any), destroy the hardware context, then free the container.
 */
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *sd = to_smmu_domain(domain);

	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(sd);
}
1119
/*
 * Allocate and program Stream Match Registers for all of a master's
 * stream IDs.  No-op (returns 0) on stream-indexed SMMUs, -EEXIST if the
 * master is already configured (callers treat that as success for group
 * members), -ENOMEM/-ENOSPC on allocation failure.  On any failure no
 * SMRs remain allocated.
 */
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Stream-indexed SMMUs have no SMRs to program */
	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	/* Roll back any bitmap slots claimed before the failure */
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}
1172
1173static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001174 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001175{
1176 int i;
1177 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001178 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001179
Will Deacon43b412b2014-07-15 11:22:24 +01001180 if (!smrs)
1181 return;
1182
Will Deacon45ae7cf2013-06-24 18:31:25 +01001183 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001184 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001185 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001186
Will Deacon45ae7cf2013-06-24 18:31:25 +01001187 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1188 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1189 }
1190
Will Deacona9a1b0b2014-05-01 18:05:08 +01001191 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001192 kfree(smrs);
1193}
1194
/*
 * Route a master's streams into @smmu_domain's context bank: configure
 * SMRs (where applicable) and point each stream's S2CR at the domain's
 * context bank for translation.  Returns 0 on success or if the SMRs
 * were already set up by another device in the same group.
 */
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices both a platform_device and
	 * and a PCI device (i.e. a PCI host controller)
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		/* Stream-matching uses the SMR index; otherwise the SID */
		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}
1227
/*
 * Reverse arm_smmu_domain_add_master(): return the master's streams to
 * bypass (or fault, depending on the disable_bypass policy) and release
 * its SMRs.  Ordering matters — see the comment below.
 */
static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}
1252
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001253static void arm_smmu_detach_dev(struct device *dev,
1254 struct arm_smmu_master_cfg *cfg)
1255{
1256 struct iommu_domain *domain = dev->archdata.iommu;
1257 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1258
1259 dev->archdata.iommu = NULL;
1260 arm_smmu_domain_remove_master(smmu_domain, cfg);
1261}
1262
/*
 * Attach @dev to @domain: locate the SMMU the device sits behind,
 * finalise the domain on that SMMU if necessary, detach the device from
 * any previous domain, and route its streams into the new one.
 *
 * Returns 0 on success, -ENXIO if the device is not behind a known SMMU,
 * -EINVAL if the domain already lives on a different SMMU, -ENODEV if
 * the device has no master configuration.
 */
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}
1306
Will Deacon45ae7cf2013-06-24 18:31:25 +01001307static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001308 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001309{
Will Deacon518f7132014-11-14 17:17:54 +00001310 int ret;
1311 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001312 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001313 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001314
Will Deacon518f7132014-11-14 17:17:54 +00001315 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001316 return -ENODEV;
1317
Will Deacon518f7132014-11-14 17:17:54 +00001318 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1319 ret = ops->map(ops, iova, paddr, size, prot);
1320 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1321 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001322}
1323
1324static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1325 size_t size)
1326{
Will Deacon518f7132014-11-14 17:17:54 +00001327 size_t ret;
1328 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001329 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001330 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001331
Will Deacon518f7132014-11-14 17:17:54 +00001332 if (!ops)
1333 return 0;
1334
1335 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1336 ret = ops->unmap(ops, iova, size);
1337 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1338 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001339}
1340
/*
 * Resolve an IOVA to a physical address using the hardware's ATS1
 * translation machinery in the domain's context bank, with the SMMU
 * halted (under atos_lock) so the lookup is not perturbed by other
 * traffic. Returns 0 on any failure (halt timeout, translation
 * timeout, or a translation fault reported in PAR).
 */
static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	unsigned long flags;
	u32 tmp;
	u64 phys;
	unsigned long va;

	/* Serialise ATOS users and quiesce the SMMU before translating. */
	spin_lock_irqsave(&smmu->atos_lock, flags);
	if (arm_smmu_halt(smmu)) {
		phys = 0;
		goto out_unlock;
	}

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;	/* page-align; offset is re-applied below */
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	/* Poll ATSR for completion: 5us steps, 50us budget. */
	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		/* Hardware timed out: report the software walk result too. */
		phys = ops->iova_to_phys(ops, iova);
		dev_err(dev,
			"iova to phys timed out on %pad. software table walk result=%pa.\n",
			&iova, &phys);
		phys = 0;
		goto out_resume;
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		/* Fault bit set: PAR holds fault syndrome, not an address. */
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		phys = 0;
	} else {
		/* Combine the PA from PAR with the intra-page offset. */
		phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
	}
out_resume:
	arm_smmu_resume(smmu);
out_unlock:
	spin_unlock_irqrestore(&smmu->atos_lock, flags);
	return phys;
}
1394
Will Deacon45ae7cf2013-06-24 18:31:25 +01001395static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001396 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001397{
Will Deacon518f7132014-11-14 17:17:54 +00001398 phys_addr_t ret;
1399 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001400 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001401 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001402
Will Deacon518f7132014-11-14 17:17:54 +00001403 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00001404 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001405
Will Deacon518f7132014-11-14 17:17:54 +00001406 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07001407 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00001408 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001409
Will Deacon518f7132014-11-14 17:17:54 +00001410 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001411}
1412
/*
 * This function can sleep, and cannot be called from atomic context. Will
 * power on register block if required. This restriction does not apply to the
 * original iova_to_phys() op.
 *
 * NOTE(review): the comment above claims this may sleep, yet the body runs
 * entirely under spin_lock_irqsave() and __arm_smmu_iova_to_phys_hard()
 * busy-polls with the SMMU halted (up to tens of ms in arm_smmu_halt()).
 * One of the two is stale — confirm the intended calling context.
 */
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					dma_addr_t iova)
{
	phys_addr_t ret = 0;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	/* Hardware ATOS only works for stage-1 domains on SMMUs with it. */
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		ret = __arm_smmu_iova_to_phys_hard(domain, iova);

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
1434
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001435static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001436{
Will Deacond0948942014-06-24 17:30:10 +01001437 switch (cap) {
1438 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001439 /*
1440 * Return true here as the SMMU can always send out coherent
1441 * requests.
1442 */
1443 return true;
Will Deacond0948942014-06-24 17:30:10 +01001444 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001445 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001446 case IOMMU_CAP_NOEXEC:
1447 return true;
Will Deacond0948942014-06-24 17:30:10 +01001448 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001449 return false;
Will Deacond0948942014-06-24 17:30:10 +01001450 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001451}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001452
/*
 * pci_for_each_dma_alias() callback: store the alias RID into the u16
 * pointed to by @data. Repeated calls overwrite, so the caller ends up
 * with the last alias in the chain.
 */
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}
1458
/*
 * iommu_group data destructor: frees the arm_smmu_master_cfg allocated
 * in arm_smmu_init_pci_device() when the group is released.
 */
static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}
1463
/*
 * Attach SMMU master configuration to a PCI device's IOMMU group,
 * allocating the per-group cfg on first use (freed by the group's
 * iommudata destructor) and recording the device's stream ID,
 * deduplicated against IDs already present in the group.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENOSPC if
 * the group already holds MAX_MASTER_STREAMIDS stream IDs.
 */
static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		/* First device in this group: allocate the shared cfg. */
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		/* Group owns cfg from here; destructor kfree()s it. */
		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}
1499
Joerg Roedelaf659932015-10-21 23:51:41 +02001500static int arm_smmu_init_platform_device(struct device *dev,
1501 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00001502{
Will Deacon03edb222015-01-19 14:27:33 +00001503 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001504 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00001505
1506 if (!smmu)
1507 return -ENODEV;
1508
1509 master = find_smmu_master(smmu, dev->of_node);
1510 if (!master)
1511 return -ENODEV;
1512
Will Deacon03edb222015-01-19 14:27:33 +00001513 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001514
1515 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001516}
1517
/*
 * iommu_ops->add_device: ensure the device has an IOMMU group. Group
 * creation and SMMU wiring happen in arm_smmu_device_group(); we only
 * need the group to exist, so the reference is dropped immediately.
 */
static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}
1529
/* iommu_ops->remove_device: detach the device from its IOMMU group. */
static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
1534
Joerg Roedelaf659932015-10-21 23:51:41 +02001535static struct iommu_group *arm_smmu_device_group(struct device *dev)
1536{
1537 struct iommu_group *group;
1538 int ret;
1539
1540 if (dev_is_pci(dev))
1541 group = pci_device_group(dev);
1542 else
1543 group = generic_device_group(dev);
1544
1545 if (IS_ERR(group))
1546 return group;
1547
1548 if (dev_is_pci(dev))
1549 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1550 else
1551 ret = arm_smmu_init_platform_device(dev, group);
1552
1553 if (ret) {
1554 iommu_group_put(group);
1555 group = ERR_PTR(ret);
1556 }
1557
1558 return group;
1559}
1560
Will Deaconc752ce42014-06-25 22:46:31 +01001561static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1562 enum iommu_attr attr, void *data)
1563{
Joerg Roedel1d672632015-03-26 13:43:10 +01001564 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001565
1566 switch (attr) {
1567 case DOMAIN_ATTR_NESTING:
1568 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1569 return 0;
1570 default:
1571 return -ENODEV;
1572 }
1573}
1574
1575static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1576 enum iommu_attr attr, void *data)
1577{
Will Deacon518f7132014-11-14 17:17:54 +00001578 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001579 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001580
Will Deacon518f7132014-11-14 17:17:54 +00001581 mutex_lock(&smmu_domain->init_mutex);
1582
Will Deaconc752ce42014-06-25 22:46:31 +01001583 switch (attr) {
1584 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001585 if (smmu_domain->smmu) {
1586 ret = -EPERM;
1587 goto out_unlock;
1588 }
1589
Will Deaconc752ce42014-06-25 22:46:31 +01001590 if (*(int *)data)
1591 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1592 else
1593 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1594
Will Deacon518f7132014-11-14 17:17:54 +00001595 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001596 default:
Will Deacon518f7132014-11-14 17:17:54 +00001597 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001598 }
Will Deacon518f7132014-11-14 17:17:54 +00001599
1600out_unlock:
1601 mutex_unlock(&smmu_domain->init_mutex);
1602 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001603}
1604
/*
 * IOMMU API callbacks implemented by this driver. pgsize_bitmap starts
 * as all-ones and is narrowed to the hardware-supported page sizes in
 * arm_smmu_device_cfg_probe().
 */
static struct iommu_ops arm_smmu_ops = {
	.capable = arm_smmu_capable,
	.domain_alloc = arm_smmu_domain_alloc,
	.domain_free = arm_smmu_domain_free,
	.attach_dev = arm_smmu_attach_dev,
	.map = arm_smmu_map,
	.unmap = arm_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = arm_smmu_iova_to_phys,
	.iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
	.add_device = arm_smmu_add_device,
	.remove_device = arm_smmu_remove_device,
	.device_group = arm_smmu_device_group,
	.domain_get_attr = arm_smmu_domain_get_attr,
	.domain_set_attr = arm_smmu_domain_set_attr,
	.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
1622
/*
 * Request a local halt of the SMMU via the implementation-defined
 * MICRO_MMU_CTRL register and poll (up to 30ms) until the IDLE bit is
 * observed. Callable from atomic context (uses the atomic poll helper).
 * Returns 0 on success, -EBUSY if the SMMU never reported idle; the
 * halt request bit is left set either way and must be cleared with
 * arm_smmu_resume().
 */
static int arm_smmu_halt(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg, tmp;

	/* Read-modify-write: set the local halt request bit. */
	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);

	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
				      tmp, (tmp & MICRO_MMU_CTRL_IDLE),
				      0, 30000)) {
		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
		return -EBUSY;
	}

	return 0;
}
1641
1642static void arm_smmu_resume(struct arm_smmu_device *smmu)
1643{
1644 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
1645 u32 reg;
1646
1647 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1648 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
1649 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1650}
1651
/*
 * Write the implementation-defined register values parsed from the
 * "attach-impl-defs" DT property, with the SMMU halted around the
 * writes.
 *
 * NOTE(review): the return value of arm_smmu_halt() is ignored here —
 * on a halt timeout we still program the registers and resume. Confirm
 * whether that is intentional best-effort behaviour.
 */
static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
{
	int i;
	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;

	arm_smmu_halt(smmu);
	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
		writel_relaxed(regs[i].value,
			ARM_SMMU_GR0(smmu) + regs[i].offset);
	arm_smmu_resume(smmu);
}
1663
/*
 * Bring the SMMU to a known-good state: clear fault status, invalidate
 * SMRs, set every S2CR to bypass (or fault, per the disable_bypass
 * parameter), apply model-specific ACTLR/ACR workarounds, program the
 * implementation-defined registers, flush the TLBs, and finally write
 * the global configuration register sCR0.
 */
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
	 * bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}

		/* QCOM implementation: program shareability attributes. */
		if (smmu->model == QCOM_SMMUV2) {
			reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
			ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
			ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Program implementation defined registers */
	arm_smmu_impl_def_programming(smmu);

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
1753
/*
 * Decode an address-size field from the SMMU ID registers into an
 * address width in bits. Encodings 0-4 map to 32/36/40/42/44 bits;
 * encoding 5 and anything out of range are treated as 48 bits.
 */
static int arm_smmu_id_size_to_bits(int size)
{
	static const int width[] = { 32, 36, 40, 42, 44 };

	if (size >= 0 && size < 5)
		return width[size];

	return 48;
}
1772
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001773static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
1774{
1775 struct device *dev = smmu->dev;
1776 int i, ntuples, ret;
1777 u32 *tuples;
1778 struct arm_smmu_impl_def_reg *regs, *regit;
1779
1780 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
1781 return 0;
1782
1783 ntuples /= sizeof(u32);
1784 if (ntuples % 2) {
1785 dev_err(dev,
1786 "Invalid number of attach-impl-defs registers: %d\n",
1787 ntuples);
1788 return -EINVAL;
1789 }
1790
1791 regs = devm_kmalloc(
1792 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
1793 GFP_KERNEL);
1794 if (!regs)
1795 return -ENOMEM;
1796
1797 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
1798 if (!tuples)
1799 return -ENOMEM;
1800
1801 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
1802 tuples, ntuples);
1803 if (ret)
1804 return ret;
1805
1806 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
1807 regit->offset = tuples[i];
1808 regit->value = tuples[i + 1];
1809 }
1810
1811 devm_kfree(dev, tuples);
1812
1813 smmu->impl_def_attach_registers = regs;
1814 smmu->num_impl_def_attach_registers = ntuples / 2;
1815
1816 return 0;
1817}
1818
/*
 * Probe the SMMU's hardware configuration from its ID registers:
 * supported translation stages, coherency, stream matching vs.
 * indexing, page/address sizes, descriptor formats and the resulting
 * page-size bitmap. Populates the corresponding fields of @smmu and
 * narrows the global arm_smmu_ops.pgsize_bitmap. Returns 0 on success
 * or -ENODEV if the hardware configuration is unusable.
 */
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	/* At least one translation stage must be available. */
	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * Probe the usable SMR mask/ID width by writing all-ones and
		 * reading back which bits stuck.
		 */
		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);


	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
2020
/*
 * Per-compatible configuration: pairs an architecture version with an
 * implementation (for model-specific errata/quirks).
 */
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

/* Define a named match-data instance for the of_device_id table below. */
#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002035
/* Devicetree compatible strings handled by this driver, each carrying
 * the match data (version + implementation) defined above. */
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2047
/*
 * Probe an SMMU instance described in the device tree.
 *
 * Sequence: allocate and populate the arm_smmu_device, map the register
 * space, count and record global/context IRQs, probe hardware config,
 * register each "mmu-masters" entry into the smmu->masters rb-tree, parse
 * implementation-defined registers and driver options, request the global
 * fault IRQs, publish the device on the arm_smmu_devices list and finally
 * reset the hardware.
 *
 * Returns 0 on success or a negative errno.  On failure after masters have
 * been registered, out_put_masters drops the of_node references taken by
 * register_smmu_master(); memory allocated with devm_* is released by the
 * driver core automatically.
 */
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;
	spin_lock_init(&smmu->atos_lock);

	/*
	 * Pick up version/model from the matched compatible entry.
	 * NOTE(review): of_match_node() can return NULL if no entry matches;
	 * presumably this probe only runs via DT match so of_id is non-NULL —
	 * confirm, otherwise of_id->data would dereference NULL.
	 */
	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	/* Map the SMMU register space (first MEM resource) */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	/*
	 * Count IRQ resources: the first num_global_irqs are global fault
	 * interrupts, everything beyond that is a context interrupt.
	 */
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	/* Resolve every IRQ number up front so later failures are clean */
	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	/* Read hardware ID/config registers into the smmu structure */
	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	/*
	 * Walk the legacy "mmu-masters" phandle list; the single masterspec
	 * buffer is reused for each entry and freed once registration is done.
	 */
	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np = of_node_get(it.node);
		masterspec->args_count = count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	err = arm_smmu_parse_impl_def_registers(smmu);
	if (err)
		goto out_put_masters;

	parse_driver_options(smmu);

	/* SMMUv2 requires one context interrupt per context bank */
	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	/* Global fault IRQs are devm-managed, so no explicit free on error */
	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
					NULL, arm_smmu_global_fault,
					IRQF_ONESHOT | IRQF_SHARED,
					"arm-smmu global fault", smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	/* Publish the instance for lookup (e.g. by find_smmu_for_device) */
	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	/* Balance the of_node_get() taken for each registered master */
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}
2194
/*
 * Platform removal: unlink this SMMU from the global device list, drop the
 * of_node references held for its masters, release the global fault IRQs
 * and disable the hardware.
 *
 * Returns 0 on success, or -ENODEV if the platform device is not on the
 * arm_smmu_devices list.
 */
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	/* Find and unlink the arm_smmu_device backing this platform device */
	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	/* Balance the of_node_get() taken when each master was registered */
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	/* A non-empty context map means domains still own context banks */
	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	/* Release the devm-managed global fault IRQs before disabling */
	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	/* Turn the thing off: write sCR0 with only CLIENTPD set */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
2231
/* Platform driver glue; binding is driven by the OF match table above */
static struct platform_driver arm_smmu_driver = {
	.driver = {
		.name = "arm-smmu",
		.of_match_table = of_match_ptr(arm_smmu_of_match),
	},
	.probe = arm_smmu_device_dt_probe,
	.remove = arm_smmu_device_remove,
};
2240
/*
 * Module init: register the platform driver and install arm_smmu_ops on
 * each bus type that does not already have an IOMMU, but only on systems
 * whose device tree actually contains an ARM SMMU node.
 *
 * Returns 0 on success (including the no-SMMU case), or the error from
 * platform_driver_register().
 */
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		/* Request ACS so PCIe peer-to-peer can't bypass the SMMU */
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}
2279
2280static void __exit arm_smmu_exit(void)
2281{
2282 return platform_driver_unregister(&arm_smmu_driver);
2283}
2284
Andreas Herrmannb1950b22013-10-01 13:39:05 +01002285subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002286module_exit(arm_smmu_exit);
2287
2288MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2289MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2290MODULE_LICENSE("GPL v2");