/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

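/*
 * Like any module parameter, both of these can also be set on the kernel
 * command line when the driver is built in, e.g.
 * "arm-smmu.force_stage=2 arm-smmu.disable_bypass=1".
 */
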
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	spinlock_t			atos_lock;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

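/*
 * The ASID/VMID space is offset by cavium_id_base, which is non-zero only
 * on Cavium implementations (see cavium_smmu_context_count below), so that
 * contexts on different SMMU instances never share a TLB tag.
 */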
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

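/*
 * PCI devices have no OF node of their own here: walk up to the root bus
 * and use the host bridge's parent node, which is what the SMMU master
 * description is attached to.
 */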
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

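/*
 * Lock-free allocator for context bank and SMR indices: retry the
 * find_next_zero_bit()/test_and_set_bit() pair until we win the race
 * for a free bit, or the range is exhausted.
 */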
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

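/*
 * Invalidate TLB entries by VA (stage 1) or IPA (stage 2) in granule-sized
 * steps, using the leaf-only variants when only last-level entries have
 * changed. The sync is left to the caller, hence _nosync.
 */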
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

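/*
 * Context fault handler: decode FSR/FSYNR0, give the client a chance to
 * handle the fault via report_iommu_fault(), then retry or terminate any
 * stalled transaction. If the client returns -EBUSY the fault status is
 * deliberately left outstanding (see the comment before the final block).
 */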
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err(smmu->dev,
			"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_err(smmu->dev, "FAR    = %016lx\n", (unsigned long)iova);
		dev_err(smmu->dev, "FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "PF " : "",
			(fsr & 0x10) ? "EF " : "",
			(fsr & 0x20) ? "TLBMCF " : "",
			(fsr & 0x40) ? "TLBLKF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");
		dev_err(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 *    not before so that the fault remains outstanding. This ensures
	 *    SCTLR.HUPCF has the desired effect if subsequent transactions also
	 *    need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

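/*
 * Program a context bank from the io-pgtable configuration, in order:
 * CBA2R (register width and 16-bit VMID), CBAR, the TTBRs, TTBCR(2),
 * MAIRs for stage 1, and finally SCTLR to enable translation and
 * context fault reporting.
 */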
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
			arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
			"arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

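/*
 * On stream-matching SMMUs, claim a free SMR for each of the master's
 * StreamIDs and program the match entries (exact ID match, zero mask,
 * since SMRs aren't currently shared). Stream-indexing SMMUs need
 * nothing here.
 */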
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

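/*
 * Route a master's streams to the domain's context bank: each matching
 * S2CR is pointed at the context bank index (cbndx) with a translation-type,
 * unprivileged entry. In stream-indexing mode there are no SMRs, and the
 * StreamID itself indexes the S2CR array.
 */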
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices which are both a
	 * platform_device and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

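/*
 * Attaching a device finalises the domain on first use (allocating a
 * context bank and page tables), rejects domains spanning multiple SMMUs,
 * then programs the master's SMRs/S2CRs to route its streams at the new
 * context bank, detaching from any previous domain first.
 */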
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

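/*
 * Mapping and unmapping are delegated to the io-pgtable library; the
 * per-domain pgtbl_lock serialises all page-table updates and walks, and
 * since it is taken with IRQs disabled these paths are callable from
 * atomic context.
 */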
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

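/*
 * Translate an IOVA by asking the hardware itself: write the page-aligned
 * address to the context bank's ATS1PR register, poll ATSR until the
 * operation completes, then read the result (or fault indication) back
 * from PAR. The SMMU is halted around the operation so the lookup cannot
 * race with translations in flight; on timeout we fall back to a software
 * table walk purely for diagnostic purposes.
 */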
static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
						dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	unsigned long flags;
	u32 tmp;
	u64 phys;
	unsigned long va;

	spin_lock_irqsave(&smmu->atos_lock, flags);
	if (arm_smmu_halt(smmu)) {
		phys = 0;
		goto out_unlock;
	}

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		phys = ops->iova_to_phys(ops, iova);
		dev_err(dev,
			"iova to phys timed out on %pad. software table walk result=%pa.\n",
			&iova, &phys);
		phys = 0;
		goto out_resume;
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		phys = 0;
	} else {
		phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
	}
out_resume:
	arm_smmu_resume(smmu);
out_unlock:
	spin_unlock_irqrestore(&smmu->atos_lock, flags);
	return phys;
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

/*
 * This function can sleep, and cannot be called from atomic context. Will
 * power on register block if required. This restriction does not apply to the
 * original iova_to_phys() op.
 */
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	phys_addr_t ret = 0;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		ret = __arm_smmu_iova_to_phys_hard(domain, iova);

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

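/*
 * For PCI masters we assume StreamID == Requester ID, i.e. the bus/devfn
 * tuple after any DMA aliasing has been applied. A worked (hypothetical)
 * example: a function at 01:00.0 yields RID (1 << 8) | (0 << 3) | 0 = 0x100,
 * so StreamID 0x100 is recorded for the group.
 */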
static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

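/*
 * Example usage (a sketch, not taken from this file): a caller such as
 * VFIO requests nested translation before the domain is first attached:
 *
 *	int nesting = 1;
 *
 *	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting))
 *		return -EINVAL;
 *	iommu_attach_device(domain, dev);
 *
 * Once a domain has been initialised on an SMMU, the attribute is fixed
 * and arm_smmu_domain_set_attr() returns -EPERM.
 */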
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

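/*
 * Halt/resume operate on an implementation-defined register block
 * (IMPL_DEF1) controlling the SMMU's internal translation pipeline. These
 * registers are not part of the ARM SMMU architecture; they are assumed to
 * exist only on the implementations this driver targets with them.
 */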
static int arm_smmu_halt(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg, tmp;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);

	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
				      tmp, (tmp & MICRO_MMU_CTRL_IDLE),
				      0, 30000)) {
		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
		return -EBUSY;
	}

	return 0;
}

static void arm_smmu_resume(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
}

static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
{
	int i;
	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;

	arm_smmu_halt(smmu);
	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
		writel_relaxed(regs[i].value,
			       ARM_SMMU_GR0(smmu) + regs[i].offset);
	arm_smmu_resume(smmu);
}

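/*
 * Bring the SMMU to a known state: clear recorded faults, point every
 * stream at bypass (or fault, if bypass is disabled), quiesce the context
 * banks, apply implementation-defined fixups, invalidate the TLBs and
 * finally switch the global client port on via sCR0.
 */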
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg, major;

	/* Clear the global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
	 * CACHE_LOCK bit of ACR first; the CACHE_LOCK bit is only present
	 * in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}

		if (smmu->model == QCOM_SMMUV2) {
			reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
			      ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
			      ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Program implementation defined registers */
	arm_smmu_impl_def_programming(smmu);

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

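/*
 * "attach-impl-defs" carries a flat list of <offset value> pairs that are
 * written to the global register space (GR0 base + offset) when the SMMU
 * is reset. An illustrative, made-up example (offsets and values are
 * purely hypothetical):
 *
 *	smmu@d00000 {
 *		...
 *		attach-impl-defs = <0x6000 0x270>,
 *				   <0x6060 0x1055>;
 *	};
 */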
static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	int i, ntuples, ret;
	u32 *tuples;
	struct arm_smmu_impl_def_reg *regs, *regit;

	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
		return 0;

	ntuples /= sizeof(u32);
	if (ntuples % 2) {
		dev_err(dev,
			"Invalid number of attach-impl-defs registers: %d\n",
			ntuples);
		return -EINVAL;
	}

	regs = devm_kmalloc(
		dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
		GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
	if (!tuples)
		return -ENOMEM;

	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
					 tuples, ntuples);
	if (ret)
		return ret;

	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
		regit->offset = tuples[i];
		regit->value = tuples[i + 1];
	}

	devm_kfree(dev, tuples);

	smmu->impl_def_attach_registers = regs;
	smmu->num_impl_def_attach_registers = ntuples / 2;

	return 0;
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

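/*
 * Masters are described via the legacy "mmu-masters" binding: the SMMU
 * node lists <phandle StreamID...> tuples, and each master node sets
 * #stream-id-cells to say how many StreamIDs follow its phandle. An
 * illustrative, made-up example (node names and IDs are hypothetical):
 *
 *	gpu: gpu@a00000 {
 *		...
 *		#stream-id-cells = <2>;
 *	};
 *
 *	smmu: iommu@d00000 {
 *		compatible = "arm,mmu-500";
 *		reg = <0xd00000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 73 4>, <0 74 4>;
 *		mmu-masters = <&gpu 0x100 0x101>;
 *	};
 */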
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;
	spin_lock_init(&smmu->atos_lock);

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	err = arm_smmu_parse_impl_def_registers(smmu);
	if (err)
		goto out_put_masters;

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
						NULL, arm_smmu_global_fault,
						IRQF_ONESHOT | IRQF_SHARED,
						"arm-smmu global fault", smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");