/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

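/*
 * Illustrative sketch (not part of the programming model): in
 * arm_smmu_tlb_inv_range_nosync() below, a stage-2 TLBI address is
 * emitted with
 *
 *	smmu_write_atomic_lq(iova, reg);
 *
 * which becomes a single 64-bit store on 64-bit kernels and a 32-bit
 * store, covering all the AArch32-relevant bits, otherwise.
 */
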
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

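/*
 * Illustrative encoding (sketch only): a valid stream-match entry for
 * stream ID 0x42 with no mask bits set would be composed as
 *
 *	u32 smr = SMR_VALID | (0 << SMR_MASK_SHIFT) | (0x42 << SMR_ID_SHIFT);
 *	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(idx));
 *
 * mirroring what arm_smmu_master_configure_smrs() does later in this
 * file.
 */
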
#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

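/*
 * Worked example (assuming a 4K translation unit, i.e. pgshift == 12):
 * context bank n is then addressed at
 *
 *	ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, n)
 *		== smmu->base + (smmu->size >> 1) + n * 0x1000
 *
 * i.e. the context banks occupy the upper half of the SMMU's address
 * space, one translation unit per bank.
 */
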
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

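/*
 * Both parameters can also be given on the kernel command line when the
 * driver is built in; an illustrative (hypothetical) invocation is
 *
 *	arm-smmu.force_stage=2 arm-smmu.disable_bypass=1
 *
 * ('-' and '_' are interchangeable in kernel parameter names).
 */
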
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8 idx;
	u16 mask;
	u16 id;
};

struct arm_smmu_master_cfg {
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr *smrs;
};

struct arm_smmu_master {
	struct device_node *of_node;
	struct rb_node node;
	struct arm_smmu_master_cfg cfg;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;
	struct rb_root masters;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	spinlock_t atos_lock;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	struct iommu_domain domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

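/*
 * An arm_smmu_phandle_args is parsed from the legacy "mmu-masters"
 * property; an illustrative device-tree fragment (sketch only, the node
 * names are made up) would be
 *
 *	mmu-masters = <&dma0 0xd01d 0xd01e>,
 *		      <&dma1 0xd11d>;
 *
 * giving one entry per master with its list of stream IDs.
 */
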
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

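/*
 * Illustrative device-tree fragment (sketch only, address made up) that
 * would set ARM_SMMU_OPT_SECURE_CFG_ACCESS via the table above:
 *
 *	smmu@e0200000 {
 *		compatible = "arm,smmu-v1";
 *		...
 *		calxeda,smmu-secure-config-access;
 *	};
 */
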
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

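/*
 * Note: the masters rb-tree is keyed by the raw device_node pointer
 * value rather than any device property; insert_smmu_master() below
 * uses the same ordering, so lookups stay consistent for the lifetime
 * of the tree.
 */
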
static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

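/*
 * Usage sketch (illustrative): context banks and stream-mapping groups
 * are both handed out from bitmaps via these helpers:
 *
 *	int idx = __arm_smmu_alloc_bitmap(smmu->context_map, start,
 *					  smmu->num_context_banks);
 *	if (idx < 0)
 *		return idx;		(-ENOSPC when the map is full)
 *	...
 *	__arm_smmu_free_bitmap(smmu->context_map, idx);
 */
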
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/* Clear VA[11:0] so the ASID can be OR'd in below */
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

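/*
 * These callbacks are driven by the io-pgtable code rather than called
 * directly; a sketch of the flow for an unmap on an LPAE table is
 *
 *	ops->unmap()
 *	  -> arm_smmu_tlb_inv_range_nosync()	(queue per-range TLBI)
 *	  -> arm_smmu_tlb_sync()		(wait for completion)
 */
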
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

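/*
 * Note: the FSR bits are write-one-to-clear, so writing back the value
 * we read acknowledges exactly the faults reported above without
 * discarding a fault that arrives in between.
 */
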
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 *	Requested	Supported	Actual
	 *	S1		N		S1
	 *	S1		S1+S2		S1
	 *	S1		S2		S2
	 *	S1		S1		S1
	 *	N		N		N
	 *	N		S1+S2		S2
	 *	N		S2		S2
	 *	N		S1		S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

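/*
 * Sketch of how the pieces above fit together on the attach path
 * (illustrative, see arm_smmu_attach_dev() below):
 *
 *	arm_smmu_attach_dev()
 *	  -> arm_smmu_init_domain_context()	(allocate CB, page tables, IRQ)
 *	  -> arm_smmu_domain_add_master()	(point the masters' S2CRs at the CB)
 */
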
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a platform_device
	 * and a PCI device (i.e. a PCI host controller).
	 */
1175 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1176 return 0;
1177
Will Deacon8f68f8e2014-07-15 11:27:08 +01001178 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001179 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001180 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001181 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001182
Will Deacona9a1b0b2014-05-01 18:05:08 +01001183 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001184 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001185
Will Deacona9a1b0b2014-05-01 18:05:08 +01001186 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphyd3461802016-01-26 18:06:34 +00001187 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
Will Deacon44680ee2014-06-25 11:29:12 +01001188 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001189 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1190 }
1191
1192 return 0;
1193}
1194
static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

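/* Unhook a device from its current domain before a new attach */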
static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

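/*
 * Attach a device to a domain: finalise the domain on first use, check
 * that the device and domain live on the same SMMU, then install the
 * device's stream mappings.
 */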
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

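/*
 * Map and unmap simply defer to the io-pgtable code; the page-table
 * spinlock serialises all updates to a given domain's tables.
 */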
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

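/*
 * Translate an IOVA using the SMMU's own address translation operations:
 * write the VA into the context bank's ATS1PR register, poll ATSR until
 * the walk completes, then read the result from PAR. Falls back to a
 * software table walk on timeout.
 */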
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	unsigned long flags;
	u32 tmp;
	u64 phys;
	unsigned long va;

	spin_lock_irqsave(&smmu->atos_lock, flags);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		spin_unlock_irqrestore(&smmu->atos_lock, flags);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		spin_unlock_irqrestore(&smmu->atos_lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&smmu->atos_lock, flags);
	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

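/*
 * Prefer the hardware translation above when the SMMU implements it for
 * stage 1 domains; otherwise walk the page tables in software.
 */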
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

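/*
 * Derive a PCI device's stream ID from its DMA alias (Requester ID) and
 * record it in the group's master configuration, allocating that
 * configuration on first use.
 */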
static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

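/*
 * Pick (or create) an IOMMU group for the device and perform the
 * bus-specific master initialisation on it.
 */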
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable = arm_smmu_capable,
	.domain_alloc = arm_smmu_domain_alloc,
	.domain_free = arm_smmu_domain_free,
	.attach_dev = arm_smmu_attach_dev,
	.map = arm_smmu_map,
	.unmap = arm_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = arm_smmu_iova_to_phys,
	.add_device = arm_smmu_add_device,
	.remove_device = arm_smmu_remove_device,
	.device_group = arm_smmu_device_group,
	.domain_get_attr = arm_smmu_domain_get_attr,
	.domain_set_attr = arm_smmu_domain_set_attr,
	.pgsize_bitmap = -1UL, /* Restricted during device attach */
};

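/*
 * Halt/resume the SMMU's translation engine via the implementation-
 * defined MICRO_MMU_CTRL register, so that implementation-defined
 * registers can be updated safely.
 */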
static int arm_smmu_halt(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg, tmp;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);

	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
				      tmp, (tmp & MICRO_MMU_CTRL_IDLE),
				      0, 30000)) {
		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
		return -EBUSY;
	}

	return 0;
}

static void arm_smmu_resume(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
}

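/*
 * Write out the implementation-defined register values parsed from the
 * DT, with the SMMU halted around the update.
 */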
static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
{
	int i;
	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;

	arm_smmu_halt(smmu);
	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
		writel_relaxed(regs[i].value,
			       ARM_SMMU_GR0(smmu) + regs[i].offset);
	arm_smmu_resume(smmu);
}

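/*
 * Bring the SMMU to a known state: clear fault status, reset all stream
 * mappings and context banks, apply errata workarounds, invalidate the
 * TLBs, then configure and enable the device via sCR0.
 */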
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
	 * CACHE_LOCK bit of ACR first. Note that the CACHE_LOCK bit is
	 * only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}

		if (smmu->model == QCOM_SMMUV2) {
			reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
			      ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
			      ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Program implementation defined registers */
	arm_smmu_impl_def_programming(smmu);

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

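/*
 * Parse the optional "attach-impl-defs" DT property: a flat list of
 * (offset, value) pairs to be written to implementation-defined
 * registers at reset time. A hypothetical node might carry, e.g.:
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 *
 * (the offsets and values above are made-up placeholders).
 */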
static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	int i, ntuples, ret;
	u32 *tuples;
	struct arm_smmu_impl_def_reg *regs, *regit;

	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
		return 0;

	ntuples /= sizeof(u32);
	if (ntuples % 2) {
		dev_err(dev,
			"Invalid number of attach-impl-defs registers: %d\n",
			ntuples);
		return -EINVAL;
	}

	regs = devm_kmalloc(
		dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
		GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
	if (!tuples)
		return -ENOMEM;

	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
					 tuples, ntuples);
	if (ret)
		return ret;

	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
		regit->offset = tuples[i];
		regit->value = tuples[i + 1];
	}

	devm_kfree(dev, tuples);

	smmu->impl_def_attach_registers = regs;
	smmu->num_impl_def_attach_registers = ntuples / 2;

	return 0;
}

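/*
 * Probe the SMMU's ID registers to discover its capabilities: supported
 * translation stages, stream matching resources, context banks, address
 * sizes and page-table formats.
 */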
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x\n",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

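/*
 * Main platform driver probe: map the SMMU's registers, wire up its
 * interrupts, probe the hardware configuration, register the masters
 * described by the "mmu-masters" DT property, and finally reset the
 * device.
 */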
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;
	spin_lock_init(&smmu->atos_lock);

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np = of_node_get(it.node);
		masterspec->args_count = count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	err = arm_smmu_parse_impl_def_registers(smmu);
	if (err)
		goto out_put_masters;

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver = {
		.name = "arm-smmu",
		.of_match_table = of_match_ptr(arm_smmu_of_match),
	},
	.probe = arm_smmu_device_dt_probe,
	.remove = arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");