/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

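/*
 * Editor's note (illustrative, not part of the original source): with
 * ARM_SMMU_OPT_SECURE_CFG_ACCESS set, a read of ARM_SMMU_GR0_sGFSR
 * (offset 0x48) issued through ARM_SMMU_GR0_NS() lands at
 * base + 0x400 + 0x48 = base + 0x448, i.e. the nsGFSR alias listed in
 * the comment above.
 */
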
/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

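/*
 * Editor's note (illustrative, not part of the original source): on a
 * 64-bit kernel smmu_write_atomic_lq() is a single writeq_relaxed();
 * on 32-bit it degrades to a writel_relaxed() of the low word only,
 * which the comment above argues is sufficient for AArch32 formats.
 */
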
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

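/*
 * Editor's note (not part of the original source): __arm_smmu_tlb_sync()
 * below udelay()s for 1us per polling iteration, so TLB_LOOP_TIMEOUT
 * (1000000) bounds the sync wait at roughly one second, matching the
 * "1s!" comment above.
 */
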
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

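/*
 * Editor's note (illustrative, not part of the original source): an
 * S2CR that routes a stream to context bank 3 would be composed as
 *
 *	u32 s2cr = S2CR_TYPE_TRANS | (3 << S2CR_CBNDX_SHIFT);
 *
 * whereas S2CR_TYPE_BYPASS and S2CR_TYPE_FAULT ignore the CBNDX field.
 */
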
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

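/*
 * Editor's note (illustrative, not part of the original source): the
 * context banks occupy the upper half of the SMMU's address space, one
 * page each. With 64KB pages (pgshift == 16), context bank n lives at
 *
 *	base + (size >> 1) + (n << 16)
 */
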
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

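/*
 * Editor's note (illustrative, not part of the original source): for a
 * stage-1 context bank the ASID is packed into the upper TTBR bits, so
 * arm_smmu_init_context_bank() below writes
 *
 *	ttbr | ((u64)ARM_SMMU_CB_ASID(smmu, cfg) << TTBRn_ASID_SHIFT)
 */
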
#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

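/*
 * Editor's note (usage sketch, not part of the original source): when
 * the driver is built in, these parameters are typically given on the
 * kernel command line, e.g. "arm-smmu.force_stage=1" or
 * "arm-smmu.disable_bypass=y".
 */
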
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8 idx;
	u16 mask;
	u16 id;
};

struct arm_smmu_master_cfg {
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr *smrs;
};

struct arm_smmu_master {
	struct device_node *of_node;
	struct rb_node node;
	struct arm_smmu_master_cfg cfg;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;
	struct rb_root masters;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	spinlock_t atos_lock;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

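/*
 * Editor's note (not part of the original source): cavium_id_base is 0
 * on most implementations, so context bank n simply uses ASID n and
 * VMID n + 1. Cavium SMMUs share one ASID/VMID namespace across all
 * instances, so each device offsets its base using
 * cavium_smmu_context_count (declared below) to keep the IDs unique.
 */
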
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	struct iommu_domain domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

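/*
 * Editor's note (not part of the original source): struct iommu_domain
 * is embedded in struct arm_smmu_domain, so container_of() above
 * recovers the driver-private domain from the core IOMMU pointer that
 * every callback in this file receives.
 */
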
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

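/*
 * Editor's note (not part of the original source): the masters rb-tree
 * is keyed by raw device_node pointer comparison; the lookup above and
 * the insertion below only require a stable total order, not a
 * semantically meaningful one.
 */
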
static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node = masterspec->np;
	master->cfg.num_streamids = masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

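/*
 * Editor's note (not part of the original source): the allocator above
 * is lock-free. find_next_zero_bit() may race with a concurrent caller,
 * so test_and_set_bit() re-checks the bit atomically and the loop
 * simply retries on collision.
 */
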
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

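/*
 * Editor's note (not part of the original source): io-pgtable invokes
 * tlb_add_flush() for each region it unmaps and tlb_sync() once
 * afterwards, so the _nosync invalidations above are only guaranteed
 * to have completed once arm_smmu_tlb_sync() has polled sTLBGSTATUS
 * via __arm_smmu_tlb_sync().
 */
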
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			dev_err(smmu->dev,
				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 * not before so that the fault remains outstanding. This ensures
	 * SCTLR.HUPCF has the desired effect if subsequent transactions also
	 * need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 * S1               N                S1
	 * S1               S1+S2            S1
	 * S1               S2               S2
	 * S1               S1               S1
	 * N                N                N
	 * N                S1+S2            S2
	 * N                S2               S2
	 * N                S1               S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
			arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
			"arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

Joerg Roedel1d672632015-03-26 13:43:10 +01001237static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001238{
Joerg Roedel1d672632015-03-26 13:43:10 +01001239 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001240
1241 /*
1242 * Free the domain resources. We assume that all devices have
1243 * already been detached.
1244 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001245 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001246 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001247 kfree(smmu_domain);
1248}
1249
1250static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001251 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001252{
1253 int i;
1254 struct arm_smmu_smr *smrs;
1255 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1256
1257 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1258 return 0;
1259
Will Deacona9a1b0b2014-05-01 18:05:08 +01001260 if (cfg->smrs)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001261 return -EEXIST;
1262
Mitchel Humpherys29073202014-07-08 09:52:18 -07001263 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001264 if (!smrs) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001265 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1266 cfg->num_streamids);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001267 return -ENOMEM;
1268 }
1269
Will Deacon44680ee2014-06-25 11:29:12 +01001270 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001271 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001272 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1273 smmu->num_mapping_groups);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001274 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001275 dev_err(smmu->dev, "failed to allocate free SMR\n");
1276 goto err_free_smrs;
1277 }
1278
1279 smrs[i] = (struct arm_smmu_smr) {
1280 .idx = idx,
1281 .mask = 0, /* We don't currently share SMRs */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001282 .id = cfg->streamids[i],
Will Deacon45ae7cf2013-06-24 18:31:25 +01001283 };
1284 }
1285
1286 /* It worked! Now, poke the actual hardware */
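	/*
	 * Each SMR is programmed as SMR_VALID | id << SMR_ID_SHIFT |
	 * mask << SMR_MASK_SHIFT; masked ID bits are ignored on match,
	 * so with mask == 0 each SMR matches exactly one stream ID.
	 */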
Will Deacona9a1b0b2014-05-01 18:05:08 +01001287 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001288 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1289 smrs[i].mask << SMR_MASK_SHIFT;
1290 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1291 }
1292
Will Deacona9a1b0b2014-05-01 18:05:08 +01001293 cfg->smrs = smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001294 return 0;
1295
1296err_free_smrs:
1297 while (--i >= 0)
1298 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1299 kfree(smrs);
1300 return -ENOSPC;
1301}
1302
1303static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001304 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001305{
1306 int i;
1307 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001308 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001309
Will Deacon43b412b2014-07-15 11:22:24 +01001310 if (!smrs)
1311 return;
1312
Will Deacon45ae7cf2013-06-24 18:31:25 +01001313 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001314 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001315 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001316
Will Deacon45ae7cf2013-06-24 18:31:25 +01001317 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1318 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1319 }
1320
Will Deacona9a1b0b2014-05-01 18:05:08 +01001321 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001322 kfree(smrs);
1323}
1324
Will Deacon45ae7cf2013-06-24 18:31:25 +01001325static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001326 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001327{
1328 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001329 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001330 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1331
Will Deacon5f634952016-04-20 14:53:32 +01001332 /*
1333 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1334 * for all devices behind the SMMU. Note that we need to take
	1335	 * care when configuring SMRs for devices that are both a
	1336	 * platform_device and a PCI device (i.e. a PCI host controller).
1337 */
1338 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1339 return 0;
1340
Will Deacon8f68f8e2014-07-15 11:27:08 +01001341 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001342 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001343 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001344 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001345
Will Deacona9a1b0b2014-05-01 18:05:08 +01001346 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001347 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001348
Will Deacona9a1b0b2014-05-01 18:05:08 +01001349 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
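		/*
		 * Route the matching stream to our context bank for
		 * translation, with transactions marked unprivileged.
		 */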
Robin Murphyd3461802016-01-26 18:06:34 +00001350 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
Will Deacon44680ee2014-06-25 11:29:12 +01001351 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001352 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1353 }
1354
1355 return 0;
1356}
1357
1358static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001359 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001360{
Will Deacon43b412b2014-07-15 11:22:24 +01001361 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001362 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001363 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001364
Will Deacon8f68f8e2014-07-15 11:27:08 +01001365 /* An IOMMU group is torn down by the first device to be removed */
1366 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1367 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001368
1369 /*
1370 * We *must* clear the S2CR first, because freeing the SMR means
1371 * that it can be re-allocated immediately.
1372 */
Will Deacon43b412b2014-07-15 11:22:24 +01001373 for (i = 0; i < cfg->num_streamids; ++i) {
1374 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001375 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001376
Robin Murphy25a1c962016-02-10 14:25:33 +00001377 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001378 }
1379
Will Deacona9a1b0b2014-05-01 18:05:08 +01001380 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001381}
1382
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001383static void arm_smmu_detach_dev(struct device *dev,
1384 struct arm_smmu_master_cfg *cfg)
1385{
1386 struct iommu_domain *domain = dev->archdata.iommu;
1387 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1388
1389 dev->archdata.iommu = NULL;
1390 arm_smmu_domain_remove_master(smmu_domain, cfg);
1391}
1392
Will Deacon45ae7cf2013-06-24 18:31:25 +01001393static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1394{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001395 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001396 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001397 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001398 struct arm_smmu_master_cfg *cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001399
Will Deacon8f68f8e2014-07-15 11:27:08 +01001400 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01001401 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001402 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1403 return -ENXIO;
1404 }
1405
Will Deacon518f7132014-11-14 17:17:54 +00001406 /* Ensure that the domain is finalised */
1407 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001408 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001409 return ret;
1410
Will Deacon45ae7cf2013-06-24 18:31:25 +01001411 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001412 * Sanity check the domain. We don't support domains across
1413 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001414 */
Will Deacon518f7132014-11-14 17:17:54 +00001415 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416 dev_err(dev,
1417 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001418 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1419 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001420 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001421
1422 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01001423 cfg = find_smmu_master_cfg(dev);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001424 if (!cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001425 return -ENODEV;
1426
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001427 /* Detach the dev from its current domain */
1428 if (dev->archdata.iommu)
1429 arm_smmu_detach_dev(dev, cfg);
1430
Will Deacon844e35b2014-07-17 11:23:51 +01001431 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1432 if (!ret)
1433 dev->archdata.iommu = domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001434 return ret;
1435}
1436
Will Deacon45ae7cf2013-06-24 18:31:25 +01001437static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001438 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001439{
Will Deacon518f7132014-11-14 17:17:54 +00001440 int ret;
1441 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001442 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001443	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001444
Will Deacon518f7132014-11-14 17:17:54 +00001445 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001446 return -ENODEV;
1447
Will Deacon518f7132014-11-14 17:17:54 +00001448 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1449 ret = ops->map(ops, iova, paddr, size, prot);
1450 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1451 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001452}
1453
1454static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1455 size_t size)
1456{
Will Deacon518f7132014-11-14 17:17:54 +00001457 size_t ret;
1458 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001459 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001460	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001461
Will Deacon518f7132014-11-14 17:17:54 +00001462 if (!ops)
1463 return 0;
1464
1465 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1466 ret = ops->unmap(ops, iova, size);
1467 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1468 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001469}
1470
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001471static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001472 dma_addr_t iova, bool do_halt)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001473{
Joerg Roedel1d672632015-03-26 13:43:10 +01001474 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001475 struct arm_smmu_device *smmu = smmu_domain->smmu;
1476 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	1477	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1478 struct device *dev = smmu->dev;
1479 void __iomem *cb_base;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001480 unsigned long flags;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001481 u32 tmp;
1482 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01001483 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001484
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001485 spin_lock_irqsave(&smmu->atos_lock, flags);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001486 if (do_halt && arm_smmu_halt(smmu)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001487 phys = 0;
1488 goto out_unlock;
1489 }
1490
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001491 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1492
Robin Murphy661d9622015-05-27 17:09:34 +01001493 /* ATS1 registers can only be written atomically */
1494 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001495 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001496 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1497 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001498 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001499
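	/*
	 * Poll ATSR until the hardware walk completes; on timeout we
	 * report the software table walk result purely for diagnosis.
	 */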
1500 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1501 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001502 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08001503 dev_err(dev,
1504 "iova to phys timed out on %pad. software table walk result=%pa.\n",
1505 &iova, &phys);
1506 phys = 0;
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001507 goto out_resume;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001508 }
1509
Robin Murphyf9a05f02016-04-13 18:13:01 +01001510 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001511 if (phys & CB_PAR_F) {
1512 dev_err(dev, "translation fault!\n");
1513 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001514 phys = 0;
1515 } else {
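		/* Combine the page-frame address from PAR with the page offset */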
1516 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001517 }
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001518out_resume:
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001519 if (do_halt)
1520 arm_smmu_resume(smmu);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001521out_unlock:
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001522 spin_unlock_irqrestore(&smmu->atos_lock, flags);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001523 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001524}
1525
Will Deacon45ae7cf2013-06-24 18:31:25 +01001526static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001527 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001528{
Will Deacon518f7132014-11-14 17:17:54 +00001529 phys_addr_t ret;
1530 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001531 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001532	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001533
Will Deacon518f7132014-11-14 17:17:54 +00001534 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00001535 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001536
Will Deacon518f7132014-11-14 17:17:54 +00001537 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07001538 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00001539 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001540
Will Deacon518f7132014-11-14 17:17:54 +00001541 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001542}
1543
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001544/*
	1545 * This function can sleep, so it must not be called from atomic context. It
	1546 * will power on the register block if required. This restriction does not
	1547 * apply to the original iova_to_phys() op.
1548 */
1549static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1550 dma_addr_t iova)
1551{
1552 phys_addr_t ret = 0;
1553 unsigned long flags;
1554 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1555
1556 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1557 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1558 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001559 ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001560
1561 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1562
1563 return ret;
1564}
1565
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001566static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
1567 struct iommu_domain *domain, dma_addr_t iova)
1568{
1569 return __arm_smmu_iova_to_phys_hard(domain, iova, false);
1570}
1571
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001572static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001573{
Will Deacond0948942014-06-24 17:30:10 +01001574 switch (cap) {
1575 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001576 /*
1577 * Return true here as the SMMU can always send out coherent
1578 * requests.
1579 */
1580 return true;
Will Deacond0948942014-06-24 17:30:10 +01001581 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001582 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001583 case IOMMU_CAP_NOEXEC:
1584 return true;
Will Deacond0948942014-06-24 17:30:10 +01001585 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001586 return false;
Will Deacond0948942014-06-24 17:30:10 +01001587 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001588}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001589
Will Deacona9a1b0b2014-05-01 18:05:08 +01001590static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1591{
1592 *((u16 *)data) = alias;
1593 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001594}
1595
Will Deacon8f68f8e2014-07-15 11:27:08 +01001596static void __arm_smmu_release_pci_iommudata(void *data)
1597{
1598 kfree(data);
1599}
1600
Joerg Roedelaf659932015-10-21 23:51:41 +02001601static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1602 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001603{
Will Deacon03edb222015-01-19 14:27:33 +00001604 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02001605 u16 sid;
1606 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001607
Will Deacon03edb222015-01-19 14:27:33 +00001608 cfg = iommu_group_get_iommudata(group);
1609 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001610 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001611 if (!cfg)
1612 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001613
Will Deacon03edb222015-01-19 14:27:33 +00001614 iommu_group_set_iommudata(group, cfg,
1615 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001616 }
1617
Joerg Roedelaf659932015-10-21 23:51:41 +02001618 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1619 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001620
Will Deacon03edb222015-01-19 14:27:33 +00001621 /*
1622 * Assume Stream ID == Requester ID for now.
1623 * We need a way to describe the ID mappings in FDT.
1624 */
1625 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1626 for (i = 0; i < cfg->num_streamids; ++i)
1627 if (cfg->streamids[i] == sid)
1628 break;
1629
1630 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1631 if (i == cfg->num_streamids)
1632 cfg->streamids[cfg->num_streamids++] = sid;
1633
1634 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001635}
1636
Joerg Roedelaf659932015-10-21 23:51:41 +02001637static int arm_smmu_init_platform_device(struct device *dev,
1638 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00001639{
Will Deacon03edb222015-01-19 14:27:33 +00001640 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001641 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00001642
1643 if (!smmu)
1644 return -ENODEV;
1645
1646 master = find_smmu_master(smmu, dev->of_node);
1647 if (!master)
1648 return -ENODEV;
1649
Will Deacon03edb222015-01-19 14:27:33 +00001650 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001651
1652 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001653}
1654
1655static int arm_smmu_add_device(struct device *dev)
1656{
Joerg Roedelaf659932015-10-21 23:51:41 +02001657 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00001658
Joerg Roedelaf659932015-10-21 23:51:41 +02001659 group = iommu_group_get_for_dev(dev);
1660 if (IS_ERR(group))
1661 return PTR_ERR(group);
1662
Peng Fan9a4a9d82015-11-20 16:56:18 +08001663 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001664 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001665}
1666
Will Deacon45ae7cf2013-06-24 18:31:25 +01001667static void arm_smmu_remove_device(struct device *dev)
1668{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001669 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001670}
1671
Joerg Roedelaf659932015-10-21 23:51:41 +02001672static struct iommu_group *arm_smmu_device_group(struct device *dev)
1673{
1674 struct iommu_group *group;
1675 int ret;
1676
1677 if (dev_is_pci(dev))
1678 group = pci_device_group(dev);
1679 else
1680 group = generic_device_group(dev);
1681
1682 if (IS_ERR(group))
1683 return group;
1684
1685 if (dev_is_pci(dev))
1686 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1687 else
1688 ret = arm_smmu_init_platform_device(dev, group);
1689
1690 if (ret) {
1691 iommu_group_put(group);
1692 group = ERR_PTR(ret);
1693 }
1694
1695 return group;
1696}
1697
Will Deaconc752ce42014-06-25 22:46:31 +01001698static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1699 enum iommu_attr attr, void *data)
1700{
Joerg Roedel1d672632015-03-26 13:43:10 +01001701 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001702
1703 switch (attr) {
1704 case DOMAIN_ATTR_NESTING:
1705 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1706 return 0;
1707 default:
1708 return -ENODEV;
1709 }
1710}
1711
1712static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1713 enum iommu_attr attr, void *data)
1714{
Will Deacon518f7132014-11-14 17:17:54 +00001715 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001716 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001717
Will Deacon518f7132014-11-14 17:17:54 +00001718 mutex_lock(&smmu_domain->init_mutex);
1719
Will Deaconc752ce42014-06-25 22:46:31 +01001720 switch (attr) {
1721 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001722 if (smmu_domain->smmu) {
1723 ret = -EPERM;
1724 goto out_unlock;
1725 }
1726
Will Deaconc752ce42014-06-25 22:46:31 +01001727 if (*(int *)data)
1728 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1729 else
1730 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1731
Will Deacon518f7132014-11-14 17:17:54 +00001732 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001733 default:
Will Deacon518f7132014-11-14 17:17:54 +00001734 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001735 }
Will Deacon518f7132014-11-14 17:17:54 +00001736
1737out_unlock:
1738 mutex_unlock(&smmu_domain->init_mutex);
1739 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001740}
1741
Will Deacon518f7132014-11-14 17:17:54 +00001742static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001743 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001744 .domain_alloc = arm_smmu_domain_alloc,
1745 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001746 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001747 .map = arm_smmu_map,
1748 .unmap = arm_smmu_unmap,
Joerg Roedel76771c92014-12-02 13:07:13 +01001749 .map_sg = default_iommu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01001750 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001751 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01001752 .add_device = arm_smmu_add_device,
1753 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001754 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001755 .domain_get_attr = arm_smmu_domain_get_attr,
1756 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00001757 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001758};
1759
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001760static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001761{
1762 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001763 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001764
1765 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
1766 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
1767 0, 30000)) {
1768 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
1769 return -EBUSY;
1770 }
1771
1772 return 0;
1773}
1774
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001775static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
1776{
1777 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
1778 u32 reg;
1779
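	/* Request a local halt via the implementation-defined MICRO_MMU_CTRL */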
1780 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1781 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
1782 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1783
1784 return wait ? arm_smmu_wait_for_halt(smmu) : 0;
1785}
1786
1787static int arm_smmu_halt(struct arm_smmu_device *smmu)
1788{
1789 return __arm_smmu_halt(smmu, true);
1790}
1791
1792static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
1793{
1794 return __arm_smmu_halt(smmu, false);
1795}
1796
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001797static void arm_smmu_resume(struct arm_smmu_device *smmu)
1798{
1799 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
1800 u32 reg;
1801
1802 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1803 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
1804 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1805}
1806
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001807static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
1808{
1809 int i;
1810 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
1811
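	/* Keep the SMMU halted while the implementation-defined registers are written */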
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001812 arm_smmu_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001813 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
1814 writel_relaxed(regs[i].value,
1815 ARM_SMMU_GR0(smmu) + regs[i].offset);
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001816 arm_smmu_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001817}
1818
Will Deacon45ae7cf2013-06-24 18:31:25 +01001819static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1820{
1821 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001822 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001823 int i = 0;
Peng Fan3ca37122016-05-03 21:50:30 +08001824 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001825
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001826	/* Clear the global FSR */
1827 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1828 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001829
Robin Murphy25a1c962016-02-10 14:25:33 +00001830 /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
1831 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001832 for (i = 0; i < smmu->num_mapping_groups; ++i) {
Olav Haugan3c8766d2014-08-22 17:12:32 -07001833 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy25a1c962016-02-10 14:25:33 +00001834 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001835 }
1836
Peng Fan3ca37122016-05-03 21:50:30 +08001837 /*
	1838	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
	1839	 * CACHE_LOCK bit of ACR first. Note that the CACHE_LOCK bit is
	1840	 * only present in MMU-500 r2 onwards.
1841 */
1842 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1843 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
1844 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
1845 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
1846 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1847 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1848 }
1849
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001850 /* Make sure all context banks are disabled and clear CB_FSR */
1851 for (i = 0; i < smmu->num_context_banks; ++i) {
1852 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1853 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1854 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001855 /*
1856 * Disable MMU-500's not-particularly-beneficial next-page
1857 * prefetcher for the sake of errata #841119 and #826419.
1858 */
1859 if (smmu->model == ARM_MMU500) {
1860 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1861 reg &= ~ARM_MMU500_ACTLR_CPRE;
1862 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1863 }
Patrick Dalyf0d4e212016-06-20 15:50:14 -07001864
1865 if (smmu->model == QCOM_SMMUV2) {
1866 reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
1867 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
1868 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
1869 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1870 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001871 }
Will Deacon1463fe42013-07-31 19:21:27 +01001872
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001873 /* Program implementation defined registers */
1874 arm_smmu_impl_def_programming(smmu);
1875
Will Deacon45ae7cf2013-06-24 18:31:25 +01001876 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001877 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1878 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1879
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001880 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001881
Will Deacon45ae7cf2013-06-24 18:31:25 +01001882 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001883 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001884
1885 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001886 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001887
Robin Murphy25a1c962016-02-10 14:25:33 +00001888 /* Enable client access, handling unmatched streams as appropriate */
1889 reg &= ~sCR0_CLIENTPD;
1890 if (disable_bypass)
1891 reg |= sCR0_USFCFG;
1892 else
1893 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001894
1895 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001896 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001897
1898 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001899 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001901 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1902 reg |= sCR0_VMID16EN;
1903
Will Deacon45ae7cf2013-06-24 18:31:25 +01001904 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00001905 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001906 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001907}
1908
1909static int arm_smmu_id_size_to_bits(int size)
1910{
1911 switch (size) {
1912 case 0:
1913 return 32;
1914 case 1:
1915 return 36;
1916 case 2:
1917 return 40;
1918 case 3:
1919 return 42;
1920 case 4:
1921 return 44;
1922 case 5:
1923 default:
1924 return 48;
1925 }
1926}
1927
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001928static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
1929{
1930 struct device *dev = smmu->dev;
1931 int i, ntuples, ret;
1932 u32 *tuples;
1933 struct arm_smmu_impl_def_reg *regs, *regit;
1934
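	/*
	 * "attach-impl-defs" is a flat list of u32s interpreted as
	 * (offset, value) pairs; for example (illustrative values only),
	 * <0x6000 0x270 0x6060 0x1055> would write 0x270 to GR0 + 0x6000
	 * and 0x1055 to GR0 + 0x6060 at reset time.
	 */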
1935 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
1936 return 0;
1937
1938 ntuples /= sizeof(u32);
1939 if (ntuples % 2) {
1940 dev_err(dev,
1941 "Invalid number of attach-impl-defs registers: %d\n",
1942 ntuples);
1943 return -EINVAL;
1944 }
1945
1946 regs = devm_kmalloc(
1947 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
1948 GFP_KERNEL);
1949 if (!regs)
1950 return -ENOMEM;
1951
1952 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
1953 if (!tuples)
1954 return -ENOMEM;
1955
1956 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
1957 tuples, ntuples);
1958 if (ret)
1959 return ret;
1960
1961 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
1962 regit->offset = tuples[i];
1963 regit->value = tuples[i + 1];
1964 }
1965
1966 devm_kfree(dev, tuples);
1967
1968 smmu->impl_def_attach_registers = regs;
1969 smmu->num_impl_def_attach_registers = ntuples / 2;
1970
1971 return 0;
1972}
1973
Will Deacon45ae7cf2013-06-24 18:31:25 +01001974static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1975{
1976 unsigned long size;
1977 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1978 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001979 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001980
1981 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001982 dev_notice(smmu->dev, "SMMUv%d with:\n",
1983 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001984
1985 /* ID0 */
1986 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001987
1988 /* Restrict available stages based on module parameter */
1989 if (force_stage == 1)
1990 id &= ~(ID0_S2TS | ID0_NTS);
1991 else if (force_stage == 2)
1992 id &= ~(ID0_S1TS | ID0_NTS);
1993
Will Deacon45ae7cf2013-06-24 18:31:25 +01001994 if (id & ID0_S1TS) {
1995 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1996 dev_notice(smmu->dev, "\tstage 1 translation\n");
1997 }
1998
1999 if (id & ID0_S2TS) {
2000 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
2001 dev_notice(smmu->dev, "\tstage 2 translation\n");
2002 }
2003
2004 if (id & ID0_NTS) {
2005 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
2006 dev_notice(smmu->dev, "\tnested translation\n");
2007 }
2008
2009 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01002010 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002011 dev_err(smmu->dev, "\tno translation support!\n");
2012 return -ENODEV;
2013 }
2014
Robin Murphyb7862e32016-04-13 18:13:03 +01002015 if ((id & ID0_S1TS) &&
2016 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002017 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
2018 dev_notice(smmu->dev, "\taddress translation ops\n");
2019 }
2020
Robin Murphybae2c2d2015-07-29 19:46:05 +01002021 /*
2022 * In order for DMA API calls to work properly, we must defer to what
2023 * the DT says about coherency, regardless of what the hardware claims.
2024 * Fortunately, this also opens up a workaround for systems where the
2025 * ID register value has ended up configured incorrectly.
2026 */
2027 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
2028 cttw_reg = !!(id & ID0_CTTW);
2029 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002030 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01002031 if (cttw_dt || cttw_reg)
2032 dev_notice(smmu->dev, "\t%scoherent table walk\n",
2033 cttw_dt ? "" : "non-");
2034 if (cttw_dt != cttw_reg)
2035 dev_notice(smmu->dev,
2036 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002037
2038 if (id & ID0_SMS) {
2039 u32 smr, sid, mask;
2040
2041 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
2042 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
2043 ID0_NUMSMRG_MASK;
2044 if (smmu->num_mapping_groups == 0) {
2045 dev_err(smmu->dev,
2046 "stream-matching supported, but no SMRs present!\n");
2047 return -ENODEV;
2048 }
2049
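		/*
		 * Discover how many SMR mask/ID bits are implemented by
		 * writing all-ones to SMR0 and reading back what sticks.
		 */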
2050 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
2051 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
2052 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
2053 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
2054
2055 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
2056 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
2057 if ((mask & sid) != sid) {
2058 dev_err(smmu->dev,
2059 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
2060 mask, sid);
2061 return -ENODEV;
2062 }
2063
2064 dev_notice(smmu->dev,
	2065		"\tstream matching with %u register groups, mask 0x%x\n",
2066 smmu->num_mapping_groups, mask);
Olav Haugan3c8766d2014-08-22 17:12:32 -07002067 } else {
2068 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
2069 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002070 }
2071
Robin Murphy7602b872016-04-28 17:12:09 +01002072 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
2073 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
2074 if (!(id & ID0_PTFS_NO_AARCH32S))
2075 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
2076 }
2077
Will Deacon45ae7cf2013-06-24 18:31:25 +01002078 /* ID1 */
2079 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01002080 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002081
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01002082 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00002083 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01002084 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01002085 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07002086 dev_warn(smmu->dev,
2087 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
2088 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002089
Will Deacon518f7132014-11-14 17:17:54 +00002090 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002091 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
2092 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
2093 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
2094 return -ENODEV;
2095 }
2096 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
2097 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01002098 /*
2099 * Cavium CN88xx erratum #27704.
2100 * Ensure ASID and VMID allocation is unique across all SMMUs in
2101 * the system.
2102 */
2103 if (smmu->model == CAVIUM_SMMUV2) {
2104 smmu->cavium_id_base =
2105 atomic_add_return(smmu->num_context_banks,
2106 &cavium_smmu_context_count);
2107 smmu->cavium_id_base -= smmu->num_context_banks;
2108 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002109
2110 /* ID2 */
2111 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
2112 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002113 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002114
Will Deacon518f7132014-11-14 17:17:54 +00002115 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002116 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002117 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002118
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08002119 if (id & ID2_VMID16)
2120 smmu->features |= ARM_SMMU_FEAT_VMID16;
2121
Robin Murphyf1d84542015-03-04 16:41:05 +00002122 /*
2123 * What the page table walker can address actually depends on which
2124 * descriptor format is in use, but since a) we don't know that yet,
2125 * and b) it can vary per context bank, this will have to do...
2126 */
2127 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
2128 dev_warn(smmu->dev,
2129 "failed to set DMA mask for table walker\n");
2130
Robin Murphyb7862e32016-04-13 18:13:03 +01002131 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00002132 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01002133 if (smmu->version == ARM_SMMU_V1_64K)
2134 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002135 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002136 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00002137 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00002138 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01002139 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00002140 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01002141 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00002142 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01002143 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002144 }
2145
Robin Murphy7602b872016-04-28 17:12:09 +01002146 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01002147 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01002148 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01002149 if (smmu->features &
2150 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002151 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002152 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002153 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002154 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002155 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002156
Robin Murphyd5466352016-05-09 17:20:09 +01002157 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2158 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2159 else
2160 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2161 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2162 smmu->pgsize_bitmap);
2163
Will Deacon518f7132014-11-14 17:17:54 +00002164
Will Deacon28d60072014-09-01 16:24:48 +01002165 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2166 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002167 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002168
2169 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2170 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002171 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002172
Will Deacon45ae7cf2013-06-24 18:31:25 +01002173 return 0;
2174}
2175
Robin Murphy67b65a32016-04-13 18:12:57 +01002176struct arm_smmu_match_data {
2177 enum arm_smmu_arch_version version;
2178 enum arm_smmu_implementation model;
2179};
2180
2181#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
2182static struct arm_smmu_match_data name = { .version = ver, .model = imp }
2183
2184ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2185ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002186ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002187ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002188ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002189ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002190
Joerg Roedel09b52692014-10-02 12:24:45 +02002191static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002192 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2193 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2194 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002195 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002196 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002197 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002198 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002199 { },
2200};
2201MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2202
Will Deacon45ae7cf2013-06-24 18:31:25 +01002203static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2204{
Robin Murphy09360402014-08-28 17:51:59 +01002205 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01002206 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002207 struct resource *res;
2208 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002209 struct device *dev = &pdev->dev;
2210 struct rb_node *node;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002211 struct of_phandle_iterator it;
2212 struct arm_smmu_phandle_args *masterspec;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002213 int num_irqs, i, err;
2214
2215 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2216 if (!smmu) {
2217 dev_err(dev, "failed to allocate arm_smmu_device\n");
2218 return -ENOMEM;
2219 }
2220 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002221 spin_lock_init(&smmu->atos_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002222
Robin Murphy09360402014-08-28 17:51:59 +01002223 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01002224 data = of_id->data;
2225 smmu->version = data->version;
2226 smmu->model = data->model;
Robin Murphy09360402014-08-28 17:51:59 +01002227
Will Deacon45ae7cf2013-06-24 18:31:25 +01002228 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01002229 smmu->base = devm_ioremap_resource(dev, res);
2230 if (IS_ERR(smmu->base))
2231 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002232 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002233
2234 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2235 &smmu->num_global_irqs)) {
2236 dev_err(dev, "missing #global-interrupts property\n");
2237 return -ENODEV;
2238 }
2239
2240 num_irqs = 0;
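	/* IRQs beyond the first #global-interrupts are context interrupts */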
2241 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2242 num_irqs++;
2243 if (num_irqs > smmu->num_global_irqs)
2244 smmu->num_context_irqs++;
2245 }
2246
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002247 if (!smmu->num_context_irqs) {
2248 dev_err(dev, "found %d interrupts but expected at least %d\n",
2249 num_irqs, smmu->num_global_irqs + 1);
2250 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002251 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002252
2253 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
2254 GFP_KERNEL);
2255 if (!smmu->irqs) {
2256 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2257 return -ENOMEM;
2258 }
2259
2260 for (i = 0; i < num_irqs; ++i) {
2261 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002262
Will Deacon45ae7cf2013-06-24 18:31:25 +01002263 if (irq < 0) {
2264 dev_err(dev, "failed to get irq index %d\n", i);
2265 return -ENODEV;
2266 }
2267 smmu->irqs[i] = irq;
2268 }
2269
Olav Haugan3c8766d2014-08-22 17:12:32 -07002270 err = arm_smmu_device_cfg_probe(smmu);
2271 if (err)
2272 return err;
2273
Will Deacon45ae7cf2013-06-24 18:31:25 +01002274 i = 0;
2275 smmu->masters = RB_ROOT;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002276
2277 err = -ENOMEM;
2278 /* No need to zero the memory for masterspec */
2279 masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
2280 if (!masterspec)
2281 goto out_put_masters;
2282
2283 of_for_each_phandle(&it, err, dev->of_node,
2284 "mmu-masters", "#stream-id-cells", 0) {
2285 int count = of_phandle_iterator_args(&it, masterspec->args,
2286 MAX_MASTER_STREAMIDS);
2287 masterspec->np = of_node_get(it.node);
2288 masterspec->args_count = count;
2289
2290 err = register_smmu_master(smmu, dev, masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002291 if (err) {
2292 dev_err(dev, "failed to add master %s\n",
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002293 masterspec->np->name);
2294 kfree(masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002295 goto out_put_masters;
2296 }
2297
2298 i++;
2299 }
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002300
Will Deacon45ae7cf2013-06-24 18:31:25 +01002301 dev_notice(dev, "registered %d master devices\n", i);
2302
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002303 kfree(masterspec);
2304
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002305 err = arm_smmu_parse_impl_def_registers(smmu);
2306 if (err)
2307 goto out_put_masters;
2308
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002309 parse_driver_options(smmu);
2310
Robin Murphyb7862e32016-04-13 18:13:03 +01002311 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01002312 smmu->num_context_banks != smmu->num_context_irqs) {
2313 dev_err(dev,
2314 "found only %d context interrupt(s) but %d required\n",
2315 smmu->num_context_irqs, smmu->num_context_banks);
Wei Yongjun89a23cd2013-11-15 09:42:30 +00002316 err = -ENODEV;
Will Deacon44680ee2014-06-25 11:29:12 +01002317 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002318 }
2319
Will Deacon45ae7cf2013-06-24 18:31:25 +01002320 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08002321 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
2322 NULL, arm_smmu_global_fault,
2323 IRQF_ONESHOT | IRQF_SHARED,
2324 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002325 if (err) {
2326 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2327 i, smmu->irqs[i]);
Peng Fanbee14002016-07-04 17:38:22 +08002328 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002329 }
2330 }
2331
2332 INIT_LIST_HEAD(&smmu->list);
2333 spin_lock(&arm_smmu_devices_lock);
2334 list_add(&smmu->list, &arm_smmu_devices);
2335 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01002336
2337 arm_smmu_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002338 return 0;
2339
Will Deacon45ae7cf2013-06-24 18:31:25 +01002340out_put_masters:
2341 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07002342 struct arm_smmu_master *master
2343 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002344 of_node_put(master->of_node);
2345 }
2346
2347 return err;
2348}
2349
2350static int arm_smmu_device_remove(struct platform_device *pdev)
2351{
2352 int i;
2353 struct device *dev = &pdev->dev;
2354 struct arm_smmu_device *curr, *smmu = NULL;
2355 struct rb_node *node;
2356
2357 spin_lock(&arm_smmu_devices_lock);
2358 list_for_each_entry(curr, &arm_smmu_devices, list) {
2359 if (curr->dev == dev) {
2360 smmu = curr;
2361 list_del(&smmu->list);
2362 break;
2363 }
2364 }
2365 spin_unlock(&arm_smmu_devices_lock);
2366
2367 if (!smmu)
2368 return -ENODEV;
2369
Will Deacon45ae7cf2013-06-24 18:31:25 +01002370 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07002371 struct arm_smmu_master *master
2372 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002373 of_node_put(master->of_node);
2374 }
2375
Will Deaconecfadb62013-07-31 19:21:28 +01002376 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002377 dev_err(dev, "removing device with active domains!\n");
2378
2379 for (i = 0; i < smmu->num_global_irqs; ++i)
Peng Fanbee14002016-07-04 17:38:22 +08002380 devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002381
2382 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002383 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002384 return 0;
2385}
2386
Will Deacon45ae7cf2013-06-24 18:31:25 +01002387static struct platform_driver arm_smmu_driver = {
2388 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002389 .name = "arm-smmu",
2390 .of_match_table = of_match_ptr(arm_smmu_of_match),
2391 },
2392 .probe = arm_smmu_device_dt_probe,
2393 .remove = arm_smmu_device_remove,
2394};
2395
2396static int __init arm_smmu_init(void)
2397{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00002398 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002399 int ret;
2400
Thierry Reding0e7d37a2014-11-07 15:26:18 +00002401 /*
2402 * Play nice with systems that don't have an ARM SMMU by checking that
2403 * an ARM SMMU exists in the system before proceeding with the driver
2404 * and IOMMU bus operation registration.
2405 */
2406 np = of_find_matching_node(NULL, arm_smmu_of_match);
2407 if (!np)
2408 return 0;
2409
2410 of_node_put(np);
2411
Will Deacon45ae7cf2013-06-24 18:31:25 +01002412 ret = platform_driver_register(&arm_smmu_driver);
2413 if (ret)
2414 return ret;
2415
2416 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01002417 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002418 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2419
Will Deacond123cf82014-02-04 22:17:53 +00002420#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01002421 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002422 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00002423#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01002424
Will Deacona9a1b0b2014-05-01 18:05:08 +01002425#ifdef CONFIG_PCI
Wei Chen112c8982016-06-13 17:20:17 +08002426 if (!iommu_present(&pci_bus_type)) {
2427 pci_request_acs();
Will Deacona9a1b0b2014-05-01 18:05:08 +01002428 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
Wei Chen112c8982016-06-13 17:20:17 +08002429 }
Will Deacona9a1b0b2014-05-01 18:05:08 +01002430#endif
2431
Will Deacon45ae7cf2013-06-24 18:31:25 +01002432 return 0;
2433}
2434
2435static void __exit arm_smmu_exit(void)
2436{
2437 return platform_driver_unregister(&arm_smmu_driver);
2438}
2439
Andreas Herrmannb1950b22013-10-01 13:39:05 +01002440subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002441module_exit(arm_smmu_exit);
2442
2443MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2444MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2445MODULE_LICENSE("GPL v2");