/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
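/*
 * Illustrative (hypothetical) usage, assuming the usual parameter naming for
 * this file when built in: booting with "arm-smmu.force_stage=2" on the
 * kernel command line forces all mappings to stage 2, while
 * "arm-smmu.disable_bypass=1" makes transactions from unattached devices
 * abort instead of bypassing translation.
 */
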
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8 idx;
	u16 mask;
	u16 id;
};

struct arm_smmu_master_cfg {
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr *smrs;
};

struct arm_smmu_master {
	struct device_node *of_node;
	struct rb_node node;
	struct arm_smmu_master_cfg cfg;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;
	struct rb_root masters;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	spinlock_t atos_lock;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	struct iommu_domain domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

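/*
 * Walk the arm_smmu_options[] table and set the corresponding option bit in
 * smmu->options for each DT property that is present on the SMMU node.
 */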
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

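/*
 * For PCI devices the SMMU master description lives on the host bridge, so
 * walk up to the root bus and use the bridge's parent node; platform devices
 * are described directly by their own DT node.
 */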
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

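/*
 * Masters are kept in an rb-tree keyed by the raw device_node pointer value,
 * which gives a cheap total order for lookup and insertion.
 */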
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

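/*
 * Insert a new master into the rb-tree, using the same of_node pointer
 * ordering as find_smmu_master(); fails with -EEXIST for duplicates.
 */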
static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

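/*
 * Allocate a free index from @map without holding a lock: find a candidate
 * zero bit, then try to claim it with test_and_set_bit(), retrying if
 * another CPU won the race for that bit.
 */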
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

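/*
 * Invalidate the whole TLB footprint of a domain: by ASID from within the
 * context bank for stage 1, or by VMID via the global TLBIVMID register for
 * stage 2, then wait for the invalidation to complete.
 */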
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

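/*
 * Invalidate a range of addresses one page-table granule at a time, without
 * waiting for completion (the caller issues the TLB sync). Stage-1 banks take
 * a VA tagged with the ASID; SMMUv2 stage-2 banks take an IPA; older stage-2
 * implementations can only invalidate by VMID.
 */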
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

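/*
 * Cross-check an unhandled fault by replaying the faulting address through
 * the hardware address translation operation (ATOS) with the SMMU halted and
 * stall mode momentarily disabled, to help distinguish stale TLB entries and
 * bad page tables from misbehaving hardware.
 */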
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}

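/*
 * Threaded context-bank fault handler: decode the FSR/FSYNR0 syndrome into
 * IOMMU_FAULT_* flags, give the client's fault handler first refusal via
 * report_iommu_fault(), then either retry or terminate any stalled
 * transaction (see the stall-on-fault sequence documented below).
 */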
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR    = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			dev_err(smmu->dev,
				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 *    not before so that the fault remains outstanding. This ensures
	 *    SCTLR.HUPCF has the desired effect if subsequent transactions also
	 *    need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

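/*
 * Program a context bank from the io-pgtable configuration: CBA2R (register
 * width and, where supported, 16-bit VMID), CBAR, the TTBRs tagged with the
 * context ASID, TTBCR/TTBCR2, the MAIRs for stage 1, and finally SCTLR to
 * enable translation with fault reporting and stalling.
 */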
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

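/*
 * Late domain initialisation, performed once the domain is attached and its
 * SMMU is known: pick a translation stage and context format, claim a
 * context bank, allocate the page tables and hook up the context fault IRQ.
 */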
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N               S1
	 *     S1             S1+S2             S1
	 *     S1               S2              S2
	 *     S1               S1              S1
	 *     N                N               N
	 *     N              S1+S2             S2
	 *     N                S2              S2
	 *     N                S1              S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
			arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
			"arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

1246static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001247 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001248{
1249 int i;
1250 struct arm_smmu_smr *smrs;
1251 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1252
1253 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1254 return 0;
1255
Will Deacona9a1b0b2014-05-01 18:05:08 +01001256 if (cfg->smrs)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001257 return -EEXIST;
1258
Mitchel Humpherys29073202014-07-08 09:52:18 -07001259 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001260 if (!smrs) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001261 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1262 cfg->num_streamids);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001263 return -ENOMEM;
1264 }
1265
Will Deacon44680ee2014-06-25 11:29:12 +01001266 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001267 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001268 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1269 smmu->num_mapping_groups);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001270 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001271 dev_err(smmu->dev, "failed to allocate free SMR\n");
1272 goto err_free_smrs;
1273 }
1274
1275 smrs[i] = (struct arm_smmu_smr) {
1276 .idx = idx,
1277 .mask = 0, /* We don't currently share SMRs */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001278 .id = cfg->streamids[i],
Will Deacon45ae7cf2013-06-24 18:31:25 +01001279 };
1280 }
1281
1282 /* It worked! Now, poke the actual hardware */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001283 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001284 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1285 smrs[i].mask << SMR_MASK_SHIFT;
1286 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1287 }
1288
Will Deacona9a1b0b2014-05-01 18:05:08 +01001289 cfg->smrs = smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001290 return 0;
1291
1292err_free_smrs:
1293 while (--i >= 0)
1294 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1295 kfree(smrs);
1296 return -ENOSPC;
1297}
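
/*
 * Illustrative sketch, not part of the driver: the stream-match test the
 * hardware applies against each valid SMR. An incoming StreamID matches
 * when it agrees with SMR.ID on every bit that is clear in SMR.MASK, so
 * with .mask = 0 (as programmed above) the match is exact.
 * arm_smmu_example_smr_matches() is a hypothetical helper for exposition.
 */
static inline bool arm_smmu_example_smr_matches(struct arm_smmu_smr *smr,
						u16 sid)
{
	return (sid & ~smr->mask) == (smr->id & ~smr->mask);
}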
1298
1299static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001300 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001301{
1302 int i;
1303 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001304 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001305
Will Deacon43b412b2014-07-15 11:22:24 +01001306 if (!smrs)
1307 return;
1308
Will Deacon45ae7cf2013-06-24 18:31:25 +01001309 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001310 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001311 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001312
Will Deacon45ae7cf2013-06-24 18:31:25 +01001313 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1314 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1315 }
1316
Will Deacona9a1b0b2014-05-01 18:05:08 +01001317 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001318 kfree(smrs);
1319}
1320
Will Deacon45ae7cf2013-06-24 18:31:25 +01001321static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001322 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001323{
1324 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001325 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001326 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1327
Will Deacon5f634952016-04-20 14:53:32 +01001328 /*
1329 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1330 * for all devices behind the SMMU. Note that we need to take
1331	 * care configuring SMRs for devices that are both a platform_device
1332	 * and a PCI device (i.e. a PCI host controller).
1333 */
1334 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1335 return 0;
1336
Will Deacon8f68f8e2014-07-15 11:27:08 +01001337 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001338 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001339 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001340 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001341
Will Deacona9a1b0b2014-05-01 18:05:08 +01001342 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001343 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001344
Will Deacona9a1b0b2014-05-01 18:05:08 +01001345 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphyd3461802016-01-26 18:06:34 +00001346 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
Will Deacon44680ee2014-06-25 11:29:12 +01001347 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001348 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1349 }
1350
1351 return 0;
1352}
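
/*
 * Illustrative sketch, not part of the driver: the S2CR value written in
 * arm_smmu_domain_add_master(), broken out field by field.
 * arm_smmu_example_s2cr_trans() is a hypothetical helper for exposition.
 */
static inline u32 arm_smmu_example_s2cr_trans(u8 cbndx)
{
	return S2CR_TYPE_TRANS |		/* translate via a context bank */
	       S2CR_PRIVCFG_UNPRIV |		/* treat accesses as unprivileged */
	       (cbndx << S2CR_CBNDX_SHIFT);	/* bank allocated for the domain */
}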
1353
1354static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001355 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001356{
Will Deacon43b412b2014-07-15 11:22:24 +01001357 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001358 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001359 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001360
Will Deacon8f68f8e2014-07-15 11:27:08 +01001361 /* An IOMMU group is torn down by the first device to be removed */
1362 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1363 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001364
1365 /*
1366 * We *must* clear the S2CR first, because freeing the SMR means
1367 * that it can be re-allocated immediately.
1368 */
Will Deacon43b412b2014-07-15 11:22:24 +01001369 for (i = 0; i < cfg->num_streamids; ++i) {
1370 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001371 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001372
Robin Murphy25a1c962016-02-10 14:25:33 +00001373 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001374 }
1375
Will Deacona9a1b0b2014-05-01 18:05:08 +01001376 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001377}
1378
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001379static void arm_smmu_detach_dev(struct device *dev,
1380 struct arm_smmu_master_cfg *cfg)
1381{
1382 struct iommu_domain *domain = dev->archdata.iommu;
1383 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1384
1385 dev->archdata.iommu = NULL;
1386 arm_smmu_domain_remove_master(smmu_domain, cfg);
1387}
1388
Will Deacon45ae7cf2013-06-24 18:31:25 +01001389static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1390{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001391 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001392 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001393 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001394 struct arm_smmu_master_cfg *cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001395
Will Deacon8f68f8e2014-07-15 11:27:08 +01001396 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01001397 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001398 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1399 return -ENXIO;
1400 }
1401
Will Deacon518f7132014-11-14 17:17:54 +00001402 /* Ensure that the domain is finalised */
1403 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001404 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001405 return ret;
1406
Will Deacon45ae7cf2013-06-24 18:31:25 +01001407 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001408 * Sanity check the domain. We don't support domains across
1409 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001410 */
Will Deacon518f7132014-11-14 17:17:54 +00001411 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001412 dev_err(dev,
1413 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001414 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1415 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001417
1418 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01001419 cfg = find_smmu_master_cfg(dev);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001420 if (!cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001421 return -ENODEV;
1422
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001423 /* Detach the dev from its current domain */
1424 if (dev->archdata.iommu)
1425 arm_smmu_detach_dev(dev, cfg);
1426
Will Deacon844e35b2014-07-17 11:23:51 +01001427 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1428 if (!ret)
1429 dev->archdata.iommu = domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001430 return ret;
1431}
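
/*
 * Minimal usage sketch, assuming the generic IOMMU API of this kernel:
 * this is roughly how a consumer reaches arm_smmu_attach_dev() above.
 * arm_smmu_example_attach() is hypothetical and elides error reporting.
 */
static inline struct iommu_domain *arm_smmu_example_attach(struct device *dev)
{
	struct iommu_domain *domain;

	/* Ends up in arm_smmu_domain_alloc() for devices on this bus */
	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return NULL;

	/* Ends up in arm_smmu_attach_dev(), finalising the domain */
	if (iommu_attach_device(domain, dev)) {
		iommu_domain_free(domain);
		return NULL;
	}

	return domain;
}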
1432
Will Deacon45ae7cf2013-06-24 18:31:25 +01001433static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001434 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001435{
Will Deacon518f7132014-11-14 17:17:54 +00001436 int ret;
1437 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001438 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001439	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001440
Will Deacon518f7132014-11-14 17:17:54 +00001441 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001442 return -ENODEV;
1443
Will Deacon518f7132014-11-14 17:17:54 +00001444 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1445 ret = ops->map(ops, iova, paddr, size, prot);
1446 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1447 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001448}
1449
1450static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1451 size_t size)
1452{
Will Deacon518f7132014-11-14 17:17:54 +00001453 size_t ret;
1454 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001455 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001456	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001457
Will Deacon518f7132014-11-14 17:17:54 +00001458 if (!ops)
1459 return 0;
1460
1461 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1462 ret = ops->unmap(ops, iova, size);
1463 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1464 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001465}
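
/*
 * Minimal usage sketch, assuming the generic IOMMU API of this kernel:
 * callers exercise the map/unmap ops above via iommu_map()/iommu_unmap()
 * rather than through the ops directly. The IOVA/PA values are whatever
 * the caller has reserved; arm_smmu_example_map_one() is hypothetical.
 */
static inline int arm_smmu_example_map_one(struct iommu_domain *domain,
					   unsigned long iova,
					   phys_addr_t paddr)
{
	int ret;

	ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... device DMA through [iova, iova + SZ_4K) happens here ... */

	return iommu_unmap(domain, iova, SZ_4K) == SZ_4K ? 0 : -EINVAL;
}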
1466
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001467static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001468 dma_addr_t iova, bool do_halt)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001469{
Joerg Roedel1d672632015-03-26 13:43:10 +01001470 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001471 struct arm_smmu_device *smmu = smmu_domain->smmu;
1472 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1473	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1474 struct device *dev = smmu->dev;
1475 void __iomem *cb_base;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001476 unsigned long flags;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001477 u32 tmp;
1478 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01001479 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001480
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001481 spin_lock_irqsave(&smmu->atos_lock, flags);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001482 if (do_halt && arm_smmu_halt(smmu)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001483 phys = 0;
1484 goto out_unlock;
1485 }
1486
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001487 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1488
Robin Murphy661d9622015-05-27 17:09:34 +01001489 /* ATS1 registers can only be written atomically */
1490 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001491 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001492 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1493 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001494 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001495
1496 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1497 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001498 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08001499 dev_err(dev,
1500 "iova to phys timed out on %pad. software table walk result=%pa.\n",
1501 &iova, &phys);
1502 phys = 0;
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001503 goto out_resume;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001504 }
1505
Robin Murphyf9a05f02016-04-13 18:13:01 +01001506 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001507 if (phys & CB_PAR_F) {
1508 dev_err(dev, "translation fault!\n");
1509 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001510 phys = 0;
1511 } else {
1512 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001513 }
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001514out_resume:
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001515 if (do_halt)
1516 arm_smmu_resume(smmu);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001517out_unlock:
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001518 spin_unlock_irqrestore(&smmu->atos_lock, flags);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001519 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001520}
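
/*
 * Illustrative sketch, not part of the driver: how a successful PAR value
 * read above combines with the original IOVA. The ATS1PR operation
 * resolves a 4K-aligned address, so the low 12 bits are carried over from
 * the IOVA. arm_smmu_example_par_to_phys() is a hypothetical helper.
 */
static inline phys_addr_t arm_smmu_example_par_to_phys(u64 par,
						       dma_addr_t iova)
{
	return (par & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
}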
1521
Will Deacon45ae7cf2013-06-24 18:31:25 +01001522static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001523 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001524{
Will Deacon518f7132014-11-14 17:17:54 +00001525 phys_addr_t ret;
1526 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001527 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001528	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001529
Will Deacon518f7132014-11-14 17:17:54 +00001530 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00001531 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001532
Will Deacon518f7132014-11-14 17:17:54 +00001533 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07001534 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00001535 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001536
Will Deacon518f7132014-11-14 17:17:54 +00001537 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001538}
1539
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001540/*
1541	 * This function can sleep and must not be called from atomic context.
1542	 * It will power on the register block if required. This restriction
1543	 * does not apply to the original iova_to_phys() op.
1544 */
1545static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1546 dma_addr_t iova)
1547{
1548 phys_addr_t ret = 0;
1549 unsigned long flags;
1550 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1551
1552 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1553 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1554 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001555 ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001556
1557 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1558
1559 return ret;
1560}
1561
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001562static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
1563 struct iommu_domain *domain, dma_addr_t iova)
1564{
1565 return __arm_smmu_iova_to_phys_hard(domain, iova, false);
1566}
1567
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001568static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001569{
Will Deacond0948942014-06-24 17:30:10 +01001570 switch (cap) {
1571 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001572 /*
1573 * Return true here as the SMMU can always send out coherent
1574 * requests.
1575 */
1576 return true;
Will Deacond0948942014-06-24 17:30:10 +01001577 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001578 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001579 case IOMMU_CAP_NOEXEC:
1580 return true;
Will Deacond0948942014-06-24 17:30:10 +01001581 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001582 return false;
Will Deacond0948942014-06-24 17:30:10 +01001583 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001584}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001585
Will Deacona9a1b0b2014-05-01 18:05:08 +01001586static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1587{
1588 *((u16 *)data) = alias;
1589 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001590}
1591
Will Deacon8f68f8e2014-07-15 11:27:08 +01001592static void __arm_smmu_release_pci_iommudata(void *data)
1593{
1594 kfree(data);
1595}
1596
Joerg Roedelaf659932015-10-21 23:51:41 +02001597static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1598 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001599{
Will Deacon03edb222015-01-19 14:27:33 +00001600 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02001601 u16 sid;
1602 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001603
Will Deacon03edb222015-01-19 14:27:33 +00001604 cfg = iommu_group_get_iommudata(group);
1605 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001606 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001607 if (!cfg)
1608 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001609
Will Deacon03edb222015-01-19 14:27:33 +00001610 iommu_group_set_iommudata(group, cfg,
1611 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001612 }
1613
Joerg Roedelaf659932015-10-21 23:51:41 +02001614 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1615 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001616
Will Deacon03edb222015-01-19 14:27:33 +00001617 /*
1618 * Assume Stream ID == Requester ID for now.
1619 * We need a way to describe the ID mappings in FDT.
1620 */
1621 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1622 for (i = 0; i < cfg->num_streamids; ++i)
1623 if (cfg->streamids[i] == sid)
1624 break;
1625
1626 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1627 if (i == cfg->num_streamids)
1628 cfg->streamids[cfg->num_streamids++] = sid;
1629
1630 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001631}
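
/*
 * Illustrative only: under the Stream ID == Requester ID assumption
 * above, the u16 produced by the pci_for_each_dma_alias() walk is the
 * standard RID encoding, i.e. bus number in the high byte and devfn in
 * the low byte. arm_smmu_example_pci_rid() is a hypothetical helper.
 */
static inline u16 arm_smmu_example_pci_rid(u8 bus, u8 devfn)
{
	return PCI_DEVID(bus, devfn);
}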
1632
Joerg Roedelaf659932015-10-21 23:51:41 +02001633static int arm_smmu_init_platform_device(struct device *dev,
1634 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00001635{
Will Deacon03edb222015-01-19 14:27:33 +00001636 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001637 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00001638
1639 if (!smmu)
1640 return -ENODEV;
1641
1642 master = find_smmu_master(smmu, dev->of_node);
1643 if (!master)
1644 return -ENODEV;
1645
Will Deacon03edb222015-01-19 14:27:33 +00001646 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001647
1648 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001649}
1650
1651static int arm_smmu_add_device(struct device *dev)
1652{
Joerg Roedelaf659932015-10-21 23:51:41 +02001653 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00001654
Joerg Roedelaf659932015-10-21 23:51:41 +02001655 group = iommu_group_get_for_dev(dev);
1656 if (IS_ERR(group))
1657 return PTR_ERR(group);
1658
Peng Fan9a4a9d82015-11-20 16:56:18 +08001659 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001660 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001661}
1662
Will Deacon45ae7cf2013-06-24 18:31:25 +01001663static void arm_smmu_remove_device(struct device *dev)
1664{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001665 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001666}
1667
Joerg Roedelaf659932015-10-21 23:51:41 +02001668static struct iommu_group *arm_smmu_device_group(struct device *dev)
1669{
1670 struct iommu_group *group;
1671 int ret;
1672
1673 if (dev_is_pci(dev))
1674 group = pci_device_group(dev);
1675 else
1676 group = generic_device_group(dev);
1677
1678 if (IS_ERR(group))
1679 return group;
1680
1681 if (dev_is_pci(dev))
1682 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1683 else
1684 ret = arm_smmu_init_platform_device(dev, group);
1685
1686 if (ret) {
1687 iommu_group_put(group);
1688 group = ERR_PTR(ret);
1689 }
1690
1691 return group;
1692}
1693
Will Deaconc752ce42014-06-25 22:46:31 +01001694static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1695 enum iommu_attr attr, void *data)
1696{
Joerg Roedel1d672632015-03-26 13:43:10 +01001697 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001698
1699 switch (attr) {
1700 case DOMAIN_ATTR_NESTING:
1701 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1702 return 0;
1703 default:
1704 return -ENODEV;
1705 }
1706}
1707
1708static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1709 enum iommu_attr attr, void *data)
1710{
Will Deacon518f7132014-11-14 17:17:54 +00001711 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001712 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001713
Will Deacon518f7132014-11-14 17:17:54 +00001714 mutex_lock(&smmu_domain->init_mutex);
1715
Will Deaconc752ce42014-06-25 22:46:31 +01001716 switch (attr) {
1717 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001718 if (smmu_domain->smmu) {
1719 ret = -EPERM;
1720 goto out_unlock;
1721 }
1722
Will Deaconc752ce42014-06-25 22:46:31 +01001723 if (*(int *)data)
1724 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1725 else
1726 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1727
Will Deacon518f7132014-11-14 17:17:54 +00001728 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001729 default:
Will Deacon518f7132014-11-14 17:17:54 +00001730 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001731 }
Will Deacon518f7132014-11-14 17:17:54 +00001732
1733out_unlock:
1734 mutex_unlock(&smmu_domain->init_mutex);
1735 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001736}
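
/*
 * Usage sketch, assuming the generic IOMMU API of this kernel: nesting
 * must be requested before the domain is finalised by a first attach,
 * otherwise the -EPERM path above is taken.
 * arm_smmu_example_enable_nesting() is a hypothetical helper.
 */
static inline int arm_smmu_example_enable_nesting(struct iommu_domain *domain)
{
	int nesting = 1;

	return iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
}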
1737
Will Deacon518f7132014-11-14 17:17:54 +00001738static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001739 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001740 .domain_alloc = arm_smmu_domain_alloc,
1741 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001742 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001743 .map = arm_smmu_map,
1744 .unmap = arm_smmu_unmap,
Joerg Roedel76771c92014-12-02 13:07:13 +01001745 .map_sg = default_iommu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01001746 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001747 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01001748 .add_device = arm_smmu_add_device,
1749 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001750 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001751 .domain_get_attr = arm_smmu_domain_get_attr,
1752 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00001753 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001754};
1755
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001756static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001757{
1758 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001759 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001760
1761 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
1762 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
1763 0, 30000)) {
1764 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
1765 return -EBUSY;
1766 }
1767
1768 return 0;
1769}
1770
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001771static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
1772{
1773 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
1774 u32 reg;
1775
1776 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1777 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
1778 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1779
1780 return wait ? arm_smmu_wait_for_halt(smmu) : 0;
1781}
1782
1783static int arm_smmu_halt(struct arm_smmu_device *smmu)
1784{
1785 return __arm_smmu_halt(smmu, true);
1786}
1787
1788static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
1789{
1790 return __arm_smmu_halt(smmu, false);
1791}
1792
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001793static void arm_smmu_resume(struct arm_smmu_device *smmu)
1794{
1795 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
1796 u32 reg;
1797
1798 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1799 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
1800 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1801}
1802
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001803static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
1804{
1805 int i;
1806 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
1807
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001808 arm_smmu_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001809 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
1810 writel_relaxed(regs[i].value,
1811 ARM_SMMU_GR0(smmu) + regs[i].offset);
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001812 arm_smmu_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001813}
1814
Will Deacon45ae7cf2013-06-24 18:31:25 +01001815static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1816{
1817 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001818 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001819 int i = 0;
Peng Fan3ca37122016-05-03 21:50:30 +08001820 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001821
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001822 /* clear global FSR */
1823 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1824 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001825
Robin Murphy25a1c962016-02-10 14:25:33 +00001826 /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
1827 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001828 for (i = 0; i < smmu->num_mapping_groups; ++i) {
Olav Haugan3c8766d2014-08-22 17:12:32 -07001829 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy25a1c962016-02-10 14:25:33 +00001830 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001831 }
1832
Peng Fan3ca37122016-05-03 21:50:30 +08001833 /*
1834	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to
1835	 * clear the CACHE_LOCK bit of sACR first; the CACHE_LOCK
1836	 * bit is only present from MMU-500r2 onwards.
1837 */
1838 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1839 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
1840 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
1841 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
1842 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1843 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1844 }
1845
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001846 /* Make sure all context banks are disabled and clear CB_FSR */
1847 for (i = 0; i < smmu->num_context_banks; ++i) {
1848 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1849 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1850 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001851 /*
1852 * Disable MMU-500's not-particularly-beneficial next-page
1853 * prefetcher for the sake of errata #841119 and #826419.
1854 */
1855 if (smmu->model == ARM_MMU500) {
1856 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1857 reg &= ~ARM_MMU500_ACTLR_CPRE;
1858 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1859 }
Patrick Dalyf0d4e212016-06-20 15:50:14 -07001860
1861 if (smmu->model == QCOM_SMMUV2) {
1862 reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
1863 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
1864 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
1865 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1866 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001867 }
Will Deacon1463fe42013-07-31 19:21:27 +01001868
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001869 /* Program implementation defined registers */
1870 arm_smmu_impl_def_programming(smmu);
1871
Will Deacon45ae7cf2013-06-24 18:31:25 +01001872 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001873 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1874 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1875
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001876 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001877
Will Deacon45ae7cf2013-06-24 18:31:25 +01001878 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001879 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001880
1881 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001882 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001883
Robin Murphy25a1c962016-02-10 14:25:33 +00001884 /* Enable client access, handling unmatched streams as appropriate */
1885 reg &= ~sCR0_CLIENTPD;
1886 if (disable_bypass)
1887 reg |= sCR0_USFCFG;
1888 else
1889 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001890
1891 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001892 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001893
1894 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001895 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001896
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001897 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1898 reg |= sCR0_VMID16EN;
1899
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00001901 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001902 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001903}
1904
1905static int arm_smmu_id_size_to_bits(int size)
1906{
1907 switch (size) {
1908 case 0:
1909 return 32;
1910 case 1:
1911 return 36;
1912 case 2:
1913 return 40;
1914 case 3:
1915 return 42;
1916 case 4:
1917 return 44;
1918 case 5:
1919 default:
1920 return 48;
1921 }
1922}
1923
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001924static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
1925{
1926 struct device *dev = smmu->dev;
1927 int i, ntuples, ret;
1928 u32 *tuples;
1929 struct arm_smmu_impl_def_reg *regs, *regit;
1930
1931 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
1932 return 0;
1933
1934 ntuples /= sizeof(u32);
1935 if (ntuples % 2) {
1936 dev_err(dev,
1937 "Invalid number of attach-impl-defs registers: %d\n",
1938 ntuples);
1939 return -EINVAL;
1940 }
1941
1942 regs = devm_kmalloc(
1943 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
1944 GFP_KERNEL);
1945 if (!regs)
1946 return -ENOMEM;
1947
1948 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
1949 if (!tuples)
1950 return -ENOMEM;
1951
1952 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
1953 tuples, ntuples);
1954 if (ret)
1955 return ret;
1956
1957 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
1958 regit->offset = tuples[i];
1959 regit->value = tuples[i + 1];
1960 }
1961
1962 devm_kfree(dev, tuples);
1963
1964 smmu->impl_def_attach_registers = regs;
1965 smmu->num_impl_def_attach_registers = ntuples / 2;
1966
1967 return 0;
1968}
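
/*
 * Example of the property parsed above (register offsets and values are
 * purely hypothetical):
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 *
 * i.e. a flat list of <offset value> pairs, each later written relative
 * to the SMMU's global register space by arm_smmu_impl_def_programming().
 */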
1969
Will Deacon45ae7cf2013-06-24 18:31:25 +01001970static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1971{
1972 unsigned long size;
1973 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1974 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001975 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001976
1977 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001978 dev_notice(smmu->dev, "SMMUv%d with:\n",
1979 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001980
1981 /* ID0 */
1982 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001983
1984 /* Restrict available stages based on module parameter */
1985 if (force_stage == 1)
1986 id &= ~(ID0_S2TS | ID0_NTS);
1987 else if (force_stage == 2)
1988 id &= ~(ID0_S1TS | ID0_NTS);
1989
Will Deacon45ae7cf2013-06-24 18:31:25 +01001990 if (id & ID0_S1TS) {
1991 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1992 dev_notice(smmu->dev, "\tstage 1 translation\n");
1993 }
1994
1995 if (id & ID0_S2TS) {
1996 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1997 dev_notice(smmu->dev, "\tstage 2 translation\n");
1998 }
1999
2000 if (id & ID0_NTS) {
2001 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
2002 dev_notice(smmu->dev, "\tnested translation\n");
2003 }
2004
2005 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01002006 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002007 dev_err(smmu->dev, "\tno translation support!\n");
2008 return -ENODEV;
2009 }
2010
Robin Murphyb7862e32016-04-13 18:13:03 +01002011 if ((id & ID0_S1TS) &&
2012 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002013 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
2014 dev_notice(smmu->dev, "\taddress translation ops\n");
2015 }
2016
Robin Murphybae2c2d2015-07-29 19:46:05 +01002017 /*
2018 * In order for DMA API calls to work properly, we must defer to what
2019 * the DT says about coherency, regardless of what the hardware claims.
2020 * Fortunately, this also opens up a workaround for systems where the
2021 * ID register value has ended up configured incorrectly.
2022 */
2023 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
2024 cttw_reg = !!(id & ID0_CTTW);
2025 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002026 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01002027 if (cttw_dt || cttw_reg)
2028 dev_notice(smmu->dev, "\t%scoherent table walk\n",
2029 cttw_dt ? "" : "non-");
2030 if (cttw_dt != cttw_reg)
2031 dev_notice(smmu->dev,
2032 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002033
2034 if (id & ID0_SMS) {
2035 u32 smr, sid, mask;
2036
2037 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
2038 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
2039 ID0_NUMSMRG_MASK;
2040 if (smmu->num_mapping_groups == 0) {
2041 dev_err(smmu->dev,
2042 "stream-matching supported, but no SMRs present!\n");
2043 return -ENODEV;
2044 }
2045
2046 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
2047 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
2048 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
2049 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
2050
2051 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
2052 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
2053 if ((mask & sid) != sid) {
2054 dev_err(smmu->dev,
2055 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
2056 mask, sid);
2057 return -ENODEV;
2058 }
2059
2060 dev_notice(smmu->dev,
2061		   "\tstream matching with %u register groups, mask 0x%x\n",
2062 smmu->num_mapping_groups, mask);
Olav Haugan3c8766d2014-08-22 17:12:32 -07002063 } else {
2064 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
2065 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002066 }
2067
Robin Murphy7602b872016-04-28 17:12:09 +01002068 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
2069 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
2070 if (!(id & ID0_PTFS_NO_AARCH32S))
2071 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
2072 }
2073
Will Deacon45ae7cf2013-06-24 18:31:25 +01002074 /* ID1 */
2075 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01002076 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002077
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01002078 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00002079 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01002080 size *= 2 << smmu->pgshift;
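	/*
	 * Worked example (illustrative): with 4K register pages
	 * (pgshift == 12) and ID1.NUMPAGENDXB == 3, this computes
	 * (1 << 4) * (2 << 12) = 16 * 8KiB = 128KiB, i.e. 64KiB of
	 * global space mirrored by 64KiB of context bank space.
	 */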
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01002081 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07002082 dev_warn(smmu->dev,
2083 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
2084 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002085
Will Deacon518f7132014-11-14 17:17:54 +00002086 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002087 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
2088 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
2089 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
2090 return -ENODEV;
2091 }
2092 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
2093 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01002094 /*
2095 * Cavium CN88xx erratum #27704.
2096 * Ensure ASID and VMID allocation is unique across all SMMUs in
2097 * the system.
2098 */
2099 if (smmu->model == CAVIUM_SMMUV2) {
2100 smmu->cavium_id_base =
2101 atomic_add_return(smmu->num_context_banks,
2102 &cavium_smmu_context_count);
2103 smmu->cavium_id_base -= smmu->num_context_banks;
2104 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002105
2106 /* ID2 */
2107 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
2108 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002109 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002110
Will Deacon518f7132014-11-14 17:17:54 +00002111 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002112 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002113 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002114
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08002115 if (id & ID2_VMID16)
2116 smmu->features |= ARM_SMMU_FEAT_VMID16;
2117
Robin Murphyf1d84542015-03-04 16:41:05 +00002118 /*
2119 * What the page table walker can address actually depends on which
2120 * descriptor format is in use, but since a) we don't know that yet,
2121 * and b) it can vary per context bank, this will have to do...
2122 */
2123 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
2124 dev_warn(smmu->dev,
2125 "failed to set DMA mask for table walker\n");
2126
Robin Murphyb7862e32016-04-13 18:13:03 +01002127 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00002128 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01002129 if (smmu->version == ARM_SMMU_V1_64K)
2130 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002131 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002132 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00002133 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00002134 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01002135 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00002136 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01002137 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00002138 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01002139 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002140 }
2141
Robin Murphy7602b872016-04-28 17:12:09 +01002142 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01002143 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01002144 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01002145 if (smmu->features &
2146 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002147 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002148 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002149 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002150 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002151 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002152
Robin Murphyd5466352016-05-09 17:20:09 +01002153 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2154 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2155 else
2156 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2157 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2158 smmu->pgsize_bitmap);
2159
Will Deacon28d60072014-09-01 16:24:48 +01002161 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2162 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002163 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002164
2165 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2166 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002167 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002168
Will Deacon45ae7cf2013-06-24 18:31:25 +01002169 return 0;
2170}
2171
Robin Murphy67b65a32016-04-13 18:12:57 +01002172struct arm_smmu_match_data {
2173 enum arm_smmu_arch_version version;
2174 enum arm_smmu_implementation model;
2175};
2176
2177#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
2178static struct arm_smmu_match_data name = { .version = ver, .model = imp }
2179
2180ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2181ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002182ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002183ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002184ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002185ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002186
Joerg Roedel09b52692014-10-02 12:24:45 +02002187static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002188 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2189 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2190 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002191 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002192 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002193 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002194 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002195 { },
2196};
2197MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
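
/*
 * Example node these compatibles match (illustrative only; the interrupt
 * specifiers and mmu-masters stream IDs are hypothetical, see the
 * arm,smmu DT binding for the authoritative format):
 *
 *	smmu@d0000000 {
 *		compatible = "arm,mmu-500";
 *		reg = <0xd0000000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 32 4>, <0 33 4>;
 *		mmu-masters = <&dma0 0xd01d 0xd01e>;
 *	};
 */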
2198
Will Deacon45ae7cf2013-06-24 18:31:25 +01002199static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2200{
Robin Murphy09360402014-08-28 17:51:59 +01002201 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01002202 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002203 struct resource *res;
2204 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002205 struct device *dev = &pdev->dev;
2206 struct rb_node *node;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002207 struct of_phandle_iterator it;
2208 struct arm_smmu_phandle_args *masterspec;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002209 int num_irqs, i, err;
2210
2211 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2212 if (!smmu) {
2213 dev_err(dev, "failed to allocate arm_smmu_device\n");
2214 return -ENOMEM;
2215 }
2216 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002217 spin_lock_init(&smmu->atos_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002218
Robin Murphy09360402014-08-28 17:51:59 +01002219 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01002220 data = of_id->data;
2221 smmu->version = data->version;
2222 smmu->model = data->model;
Robin Murphy09360402014-08-28 17:51:59 +01002223
Will Deacon45ae7cf2013-06-24 18:31:25 +01002224 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01002225 smmu->base = devm_ioremap_resource(dev, res);
2226 if (IS_ERR(smmu->base))
2227 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002228 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002229
2230 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2231 &smmu->num_global_irqs)) {
2232 dev_err(dev, "missing #global-interrupts property\n");
2233 return -ENODEV;
2234 }
2235
2236 num_irqs = 0;
2237 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2238 num_irqs++;
2239 if (num_irqs > smmu->num_global_irqs)
2240 smmu->num_context_irqs++;
2241 }
2242
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002243 if (!smmu->num_context_irqs) {
2244 dev_err(dev, "found %d interrupts but expected at least %d\n",
2245 num_irqs, smmu->num_global_irqs + 1);
2246 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002247 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002248
2249 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
2250 GFP_KERNEL);
2251 if (!smmu->irqs) {
2252 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2253 return -ENOMEM;
2254 }
2255
2256 for (i = 0; i < num_irqs; ++i) {
2257 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002258
Will Deacon45ae7cf2013-06-24 18:31:25 +01002259 if (irq < 0) {
2260 dev_err(dev, "failed to get irq index %d\n", i);
2261 return -ENODEV;
2262 }
2263 smmu->irqs[i] = irq;
2264 }
2265
Olav Haugan3c8766d2014-08-22 17:12:32 -07002266 err = arm_smmu_device_cfg_probe(smmu);
2267 if (err)
2268 return err;
2269
Will Deacon45ae7cf2013-06-24 18:31:25 +01002270 i = 0;
2271 smmu->masters = RB_ROOT;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002272
2273 err = -ENOMEM;
2274 /* No need to zero the memory for masterspec */
2275 masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
2276 if (!masterspec)
2277 goto out_put_masters;
2278
2279 of_for_each_phandle(&it, err, dev->of_node,
2280 "mmu-masters", "#stream-id-cells", 0) {
2281 int count = of_phandle_iterator_args(&it, masterspec->args,
2282 MAX_MASTER_STREAMIDS);
2283 masterspec->np = of_node_get(it.node);
2284 masterspec->args_count = count;
2285
2286 err = register_smmu_master(smmu, dev, masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002287 if (err) {
2288 dev_err(dev, "failed to add master %s\n",
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002289 masterspec->np->name);
2290 kfree(masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002291 goto out_put_masters;
2292 }
2293
2294 i++;
2295 }
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002296
Will Deacon45ae7cf2013-06-24 18:31:25 +01002297 dev_notice(dev, "registered %d master devices\n", i);
2298
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002299 kfree(masterspec);
2300
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002301 err = arm_smmu_parse_impl_def_registers(smmu);
2302 if (err)
2303 goto out_put_masters;
2304
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002305 parse_driver_options(smmu);
2306
Robin Murphyb7862e32016-04-13 18:13:03 +01002307 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01002308 smmu->num_context_banks != smmu->num_context_irqs) {
2309 dev_err(dev,
2310 "found only %d context interrupt(s) but %d required\n",
2311 smmu->num_context_irqs, smmu->num_context_banks);
Wei Yongjun89a23cd2013-11-15 09:42:30 +00002312 err = -ENODEV;
Will Deacon44680ee2014-06-25 11:29:12 +01002313 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002314 }
2315
Will Deacon45ae7cf2013-06-24 18:31:25 +01002316 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08002317 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
2318 NULL, arm_smmu_global_fault,
2319 IRQF_ONESHOT | IRQF_SHARED,
2320 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002321 if (err) {
2322 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2323 i, smmu->irqs[i]);
Peng Fanbee14002016-07-04 17:38:22 +08002324 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002325 }
2326 }
2327
2328 INIT_LIST_HEAD(&smmu->list);
2329 spin_lock(&arm_smmu_devices_lock);
2330 list_add(&smmu->list, &arm_smmu_devices);
2331 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01002332
2333 arm_smmu_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002334 return 0;
2335
Will Deacon45ae7cf2013-06-24 18:31:25 +01002336out_put_masters:
2337 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07002338 struct arm_smmu_master *master
2339 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002340 of_node_put(master->of_node);
2341 }
2342
2343 return err;
2344}
2345
2346static int arm_smmu_device_remove(struct platform_device *pdev)
2347{
2348 int i;
2349 struct device *dev = &pdev->dev;
2350 struct arm_smmu_device *curr, *smmu = NULL;
2351 struct rb_node *node;
2352
2353 spin_lock(&arm_smmu_devices_lock);
2354 list_for_each_entry(curr, &arm_smmu_devices, list) {
2355 if (curr->dev == dev) {
2356 smmu = curr;
2357 list_del(&smmu->list);
2358 break;
2359 }
2360 }
2361 spin_unlock(&arm_smmu_devices_lock);
2362
2363 if (!smmu)
2364 return -ENODEV;
2365
Will Deacon45ae7cf2013-06-24 18:31:25 +01002366 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07002367 struct arm_smmu_master *master
2368 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002369 of_node_put(master->of_node);
2370 }
2371
Will Deaconecfadb62013-07-31 19:21:28 +01002372 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002373 dev_err(dev, "removing device with active domains!\n");
2374
2375 for (i = 0; i < smmu->num_global_irqs; ++i)
Peng Fanbee14002016-07-04 17:38:22 +08002376 devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002377
2378 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002379 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002380 return 0;
2381}
2382
Will Deacon45ae7cf2013-06-24 18:31:25 +01002383static struct platform_driver arm_smmu_driver = {
2384 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002385 .name = "arm-smmu",
2386 .of_match_table = of_match_ptr(arm_smmu_of_match),
2387 },
2388 .probe = arm_smmu_device_dt_probe,
2389 .remove = arm_smmu_device_remove,
2390};
2391
2392static int __init arm_smmu_init(void)
2393{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00002394 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002395 int ret;
2396
Thierry Reding0e7d37a2014-11-07 15:26:18 +00002397 /*
2398 * Play nice with systems that don't have an ARM SMMU by checking that
2399 * an ARM SMMU exists in the system before proceeding with the driver
2400 * and IOMMU bus operation registration.
2401 */
2402 np = of_find_matching_node(NULL, arm_smmu_of_match);
2403 if (!np)
2404 return 0;
2405
2406 of_node_put(np);
2407
Will Deacon45ae7cf2013-06-24 18:31:25 +01002408 ret = platform_driver_register(&arm_smmu_driver);
2409 if (ret)
2410 return ret;
2411
2412 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01002413 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002414 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2415
Will Deacond123cf82014-02-04 22:17:53 +00002416#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01002417 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002418 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00002419#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01002420
Will Deacona9a1b0b2014-05-01 18:05:08 +01002421#ifdef CONFIG_PCI
Wei Chen112c8982016-06-13 17:20:17 +08002422 if (!iommu_present(&pci_bus_type)) {
2423 pci_request_acs();
Will Deacona9a1b0b2014-05-01 18:05:08 +01002424 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
Wei Chen112c8982016-06-13 17:20:17 +08002425 }
Will Deacona9a1b0b2014-05-01 18:05:08 +01002426#endif
2427
Will Deacon45ae7cf2013-06-24 18:31:25 +01002428 return 0;
2429}
2430
2431static void __exit arm_smmu_exit(void)
2432{
2433 return platform_driver_unregister(&arm_smmu_driver);
2434}
2435
Andreas Herrmannb1950b22013-10-01 13:39:05 +01002436subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002437module_exit(arm_smmu_exit);
2438
2439MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2440MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2441MODULE_LICENSE("GPL v2");