/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8 idx;
	u16 mask;
	u16 id;
};

struct arm_smmu_master_cfg {
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr *smrs;
};

struct arm_smmu_master {
	struct device_node *of_node;
	struct rb_node node;
	struct arm_smmu_master_cfg cfg;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;
	struct rb_root masters;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	spinlock_t atos_lock;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX			0xff

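/*
 * ASIDs and VMIDs are derived from the context bank index, offset per SMMU
 * instance by cavium_id_base. This is presumably needed because some Cavium
 * implementations share TLB tag space across SMMUs, so the IDs must stay
 * globally unique (see cavium_smmu_context_count below).
 */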
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	struct iommu_domain domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

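/*
 * For PCI masters, walk up to the root bus and use the host bridge's
 * of_node: the stream IDs are described at the host-controller level
 * rather than per endpoint.
 */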
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

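/* Masters are kept in an rb-tree keyed by the of_node pointer value. */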
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

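/*
 * Lock-free index allocator: find_next_zero_bit() proposes a candidate
 * and test_and_set_bit() claims it atomically, retrying if another CPU
 * won the race in between.
 */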
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

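/*
 * Range invalidation: stage 1 invalidates by VA (with the ASID folded into
 * the register value), stage 2 on SMMUv2 by IPA; pre-v2 stage 2 can only
 * nuke the whole VMID. No sync is issued here -- callers are expected to
 * follow up with a TLB sync (hence "_nosync").
 */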
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~0xfffUL; /* clear the page offset; the ASID goes in the low bits */
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

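/*
 * Program a context bank from the io-pgtable configuration: CBA2R (register
 * width/VMID) and CBAR first, then the TTBRs, TTBCR(2) and MAIRs, and
 * finally SCTLR to enable translation with fault reporting.
 */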
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 *   Requested	Supported	Actual
	 *	S1	   N		  S1
	 *	S1	  S1+S2		  S1
	 *	S1	   S2		  S2
	 *	S1	   S1		  S1
	 *	N	   N		  N
	 *	N	  S1+S2		  S2
	 *	N	   S2		  S2
	 *	N	   S1		  S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

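/*
 * Tear-down mirrors init: clear SCTLR to disable the context bank before
 * the page tables are freed, so the hardware can no longer walk them.
 */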
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * the domain.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

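/*
 * On stream-matching SMMUs, claim one SMR per stream ID (no mask-based
 * sharing yet) and only poke the hardware once the whole allocation has
 * succeeded, so a failure leaves the SMMU untouched.
 */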
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

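/*
 * Point every stream's S2CR at the domain's context bank as a translating,
 * unprivileged entry; removal flips the S2CRs back to bypass (or fault,
 * with disable_bypass) before the SMRs are released.
 */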
Will Deacon45ae7cf2013-06-24 18:31:25 +01001165static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001166 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001167{
1168 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001169 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001170 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1171
Will Deacon5f634952016-04-20 14:53:32 +01001172 /*
1173 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1174 * for all devices behind the SMMU. Note that we need to take
1175 * care configuring SMRs for devices both a platform_device and
1176 * and a PCI device (i.e. a PCI host controller)
1177 */
1178 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1179 return 0;
1180
Will Deacon8f68f8e2014-07-15 11:27:08 +01001181 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001182 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001183 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001184 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001185
Will Deacona9a1b0b2014-05-01 18:05:08 +01001186 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001187 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001188
Will Deacona9a1b0b2014-05-01 18:05:08 +01001189 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphyd3461802016-01-26 18:06:34 +00001190 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
Will Deacon44680ee2014-06-25 11:29:12 +01001191 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001192 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1193 }
1194
1195 return 0;
1196}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}
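
/*
 * Illustrative usage from a hypothetical client, sketching the generic
 * IOMMU API path that lands in arm_smmu_attach_dev() above:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (domain && iommu_attach_device(domain, dev))
 *		iommu_domain_free(domain);
 *
 * The first attach finalises the domain (allocating a context bank) and
 * then points the device's Stream IDs at that bank via the S2CRs.
 */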

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
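
/*
 * Illustrative note (addresses are hypothetical): callers reach the two
 * functions above through the generic API, e.g.:
 *
 *	iommu_map(domain, 0x10000000, page_to_phys(page), SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, 0x10000000, SZ_4K);
 *
 * The page-table manipulation itself is delegated to the io-pgtable ops;
 * this driver only contributes the locking and TLB maintenance callbacks.
 */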

static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
						dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	unsigned long flags;
	u32 tmp;
	u64 phys;
	unsigned long va;

	spin_lock_irqsave(&smmu->atos_lock, flags);
	if (arm_smmu_halt(smmu)) {
		phys = 0;
		goto out_unlock;
	}

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		phys = ops->iova_to_phys(ops, iova);
		dev_err(dev,
			"iova to phys timed out on %pad. software table walk result=%pa.\n",
			&iova, &phys);
		phys = 0;
		goto out_resume;
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		phys = 0;
	} else {
		phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
	}
out_resume:
	arm_smmu_resume(smmu);
out_unlock:
	spin_unlock_irqrestore(&smmu->atos_lock, flags);
	return phys;
}
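
/*
 * Summary of the hardware walk above (descriptive only): the page-aligned VA
 * is written to ATS1PR to start a stage-1 translation, ATSR.ACTIVE is polled
 * until the walk completes, and the result is read back from PAR. PAR.F set
 * means the walk faulted; otherwise the physical frame from PAR is combined
 * with the low 12 bits of the original iova.
 */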

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

/*
 * This function can sleep and must not be called from atomic context; it
 * will power on the register block if required. This restriction does not
 * apply to the original iova_to_phys() op above, which only walks the
 * software page tables.
 */
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	phys_addr_t ret = 0;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		ret = __arm_smmu_iova_to_phys_hard(domain, iova);

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}
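
/*
 * Illustrative note: under the Stream ID == Requester ID assumption above,
 * the SID is the bus/devfn pair packed into 16 bits. For a hypothetical
 * endpoint at 0000:01:00.0, pci_for_each_dma_alias() would report
 * (0x01 << 8) | 0x00 = 0x0100, or the alias of a bridge it sits behind.
 */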

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
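
/*
 * Illustrative usage (sketched, e.g. from a VFIO-like caller): nesting must
 * be requested before the domain is finalised by its first attach, which is
 * why the code above returns -EPERM once smmu_domain->smmu is set.
 *
 *	int nesting = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
 *	iommu_attach_device(domain, dev);
 */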

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
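
/*
 * Descriptive note: the all-ones pgsize_bitmap above is a placeholder; it is
 * replaced in arm_smmu_device_cfg_probe() with the page sizes the hardware
 * actually supports (and OR-ed with them if several SMMUs probe), so the
 * core IOMMU layer only ever sees mappable sizes.
 */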

static int arm_smmu_halt(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg, tmp;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);

	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
				      tmp, (tmp & MICRO_MMU_CTRL_IDLE),
				      0, 30000)) {
		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
		return -EBUSY;
	}

	return 0;
}

static void arm_smmu_resume(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
}

static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
{
	int i;
	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;

	arm_smmu_halt(smmu);
	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
		writel_relaxed(regs[i].value,
			       ARM_SMMU_GR0(smmu) + regs[i].offset);
	arm_smmu_resume(smmu);
}

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
	 * CACHE_LOCK bit of ACR first. That bit is only present in
	 * MMU-500 r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}

		if (smmu->model == QCOM_SMMUV2) {
			reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
			      ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
			      ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Program implementation defined registers */
	arm_smmu_impl_def_programming(smmu);

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
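
/*
 * Descriptive note: the sCR0 write above is the single "go" switch. All of
 * the SMR/S2CR/context-bank configuration beforehand uses writel_relaxed();
 * the final tlb_sync plus non-relaxed writel() then ensure the whole lot is
 * visible to the SMMU before client transactions are let through.
 */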

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	int i, ntuples, ret;
	u32 *tuples;
	struct arm_smmu_impl_def_reg *regs, *regit;

	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
		return 0;

	ntuples /= sizeof(u32);
	if (ntuples % 2) {
		dev_err(dev,
			"Invalid number of attach-impl-defs registers: %d\n",
			ntuples);
		return -EINVAL;
	}

	regs = devm_kmalloc(
		dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
		GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
	if (!tuples)
		return -ENOMEM;

	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
					 tuples, ntuples);
	if (ret)
		return ret;

	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
		regit->offset = tuples[i];
		regit->value = tuples[i + 1];
	}

	devm_kfree(dev, tuples);

	smmu->impl_def_attach_registers = regs;
	smmu->num_impl_def_attach_registers = ntuples / 2;

	return 0;
}
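
/*
 * Illustrative device-tree fragment for the property parsed above; the
 * offset/value pairs are made up and implementation specific:
 *
 *	smmu@d00000 {
 *		...
 *		attach-impl-defs = <0x6000 0x270>,
 *				   <0x6060 0x1055>;
 *	};
 *
 * Each <offset value> pair is later written to GR0 + offset, with the SMMU
 * halted, by arm_smmu_impl_def_programming().
 */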

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
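
/*
 * Illustrative probe output assembled from the dev_notice() strings above
 * (values are hypothetical and vary by implementation; device-name prefixes
 * omitted):
 *
 *	probing hardware configuration...
 *	SMMUv2 with:
 *		stage 1 translation
 *		address translation ops
 *		coherent table walk
 *		stream matching with 32 register groups, mask 0x7fff
 *		8 context banks (0 stage-2 only)
 *		Supported page sizes: 0x60211000
 *		Stage-1: 48-bit VA -> 48-bit IPA
 */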

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
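
/*
 * For clarity, the macro above expands, e.g. for arm_mmu500, to:
 *
 *	static struct arm_smmu_match_data arm_mmu500 = {
 *		.version = ARM_SMMU_V2, .model = ARM_MMU500
 *	};
 *
 * so each compatible string below selects both an architecture version and
 * an implementation whose quirks need handling.
 */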

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;
	spin_lock_init(&smmu->atos_lock);

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

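	/*
	 * Illustrative binding for the loop below (node names and Stream IDs
	 * are hypothetical):
	 *
	 *	smmu: iommu@2b500000 {
	 *		compatible = "arm,smmu-v2";
	 *		...
	 *		#global-interrupts = <1>;
	 *		mmu-masters = <&gpu 0x300 0x301>,
	 *			      <&dma0 0xd01d>;
	 *	};
	 *
	 * The cells following each phandle are that master's Stream IDs; the
	 * count comes from the master node's #stream-id-cells property.
	 */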
	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	err = arm_smmu_parse_impl_def_registers(smmu);
	if (err)
		goto out_put_masters;

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");