/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

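/*
 * Usage note (illustrative comment, not part of the original source): as
 * ordinary module parameters these would typically be set on the kernel
 * command line when the driver is built in, e.g. "arm-smmu.force_stage=2"
 * or "arm-smmu.disable_bypass=1", or passed as module options when the
 * driver is built as a module.
 */
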
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	spinlock_t			atos_lock;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

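/*
 * Worked example (illustrative comment, not part of the original source):
 * assuming cavium_id_base is 0 on non-Cavium implementations, context bank 5
 * yields ASID 5 and VMID 6 from the macros above. A non-zero cavium_id_base
 * shifts both values by the same amount, keeping them unique across SMMU
 * instances that share an ID space.
 */
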
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

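/*
 * Note (illustrative comment, not part of the original source): these
 * callbacks are handed to the io-pgtable library via io_pgtable_cfg.tlb, so
 * page-table updates performed by the library are expected to funnel TLB
 * maintenance through the invalidation routines above, with
 * arm_smmu_tlb_sync() waiting for the hardware to drain.
 */
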
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR    = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			dev_err(smmu->dev,
				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 *    not before so that the fault remains outstanding. This ensures
	 *    SCTLR.HUPCF has the desired effect if subsequent transactions
	 *    also need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1              S1+S2           S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N               S1+S2           S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
			arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
			"arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

1252static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001253 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001254{
1255 int i;
1256 struct arm_smmu_smr *smrs;
1257 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1258
1259 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1260 return 0;
1261
Will Deacona9a1b0b2014-05-01 18:05:08 +01001262 if (cfg->smrs)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001263 return -EEXIST;
1264
Mitchel Humpherys29073202014-07-08 09:52:18 -07001265 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001266 if (!smrs) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001267 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1268 cfg->num_streamids);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001269 return -ENOMEM;
1270 }
1271
Will Deacon44680ee2014-06-25 11:29:12 +01001272 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001273 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001274 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1275 smmu->num_mapping_groups);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001276 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001277 dev_err(smmu->dev, "failed to allocate free SMR\n");
1278 goto err_free_smrs;
1279 }
1280
1281 smrs[i] = (struct arm_smmu_smr) {
1282 .idx = idx,
1283 .mask = 0, /* We don't currently share SMRs */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001284 .id = cfg->streamids[i],
Will Deacon45ae7cf2013-06-24 18:31:25 +01001285 };
1286 }
1287
1288 /* It worked! Now, poke the actual hardware */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001289 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001290 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1291 smrs[i].mask << SMR_MASK_SHIFT;
1292 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1293 }
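	/*
	 * An SMR matches an incoming transaction when the StreamID bits not
	 * covered by the mask are equal, so with mask == 0 (as above) each
	 * SMR matches exactly one StreamID.
	 */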
1294
Will Deacona9a1b0b2014-05-01 18:05:08 +01001295 cfg->smrs = smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001296 return 0;
1297
1298err_free_smrs:
1299 while (--i >= 0)
1300 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1301 kfree(smrs);
1302 return -ENOSPC;
1303}
1304
1305static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001306 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001307{
1308 int i;
1309 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001310 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001311
Will Deacon43b412b2014-07-15 11:22:24 +01001312 if (!smrs)
1313 return;
1314
Will Deacon45ae7cf2013-06-24 18:31:25 +01001315 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001316 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001317 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001318
Will Deacon45ae7cf2013-06-24 18:31:25 +01001319 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1320 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1321 }
1322
Will Deacona9a1b0b2014-05-01 18:05:08 +01001323 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001324 kfree(smrs);
1325}
1326
Will Deacon45ae7cf2013-06-24 18:31:25 +01001327static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001328 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001329{
1330 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001331 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001332 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1333
Will Deacon5f634952016-04-20 14:53:32 +01001334 /*
1335 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1336 * for all devices behind the SMMU. Note that we need to take
1337 * care configuring SMRs for devices that are both a platform_device
1338 * and a PCI device (i.e. a PCI host controller).
1339 */
1340 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1341 return 0;
1342
Will Deacon8f68f8e2014-07-15 11:27:08 +01001343 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001344 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001345 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001346 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001347
Will Deacona9a1b0b2014-05-01 18:05:08 +01001348 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001349 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001350
Will Deacona9a1b0b2014-05-01 18:05:08 +01001351 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphyd3461802016-01-26 18:06:34 +00001352 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
Will Deacon44680ee2014-06-25 11:29:12 +01001353 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001354 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1355 }
1356
1357 return 0;
1358}
1359
1360static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001361 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001362{
Will Deacon43b412b2014-07-15 11:22:24 +01001363 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001364 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001365 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001366
Will Deacon8f68f8e2014-07-15 11:27:08 +01001367 /* An IOMMU group is torn down by the first device to be removed */
1368 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1369 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001370
1371 /*
1372 * We *must* clear the S2CR first, because freeing the SMR means
1373 * that it can be re-allocated immediately.
1374 */
Will Deacon43b412b2014-07-15 11:22:24 +01001375 for (i = 0; i < cfg->num_streamids; ++i) {
1376 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001377 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001378
Robin Murphy25a1c962016-02-10 14:25:33 +00001379 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001380 }
1381
Will Deacona9a1b0b2014-05-01 18:05:08 +01001382 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001383}
1384
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001385static void arm_smmu_detach_dev(struct device *dev,
1386 struct arm_smmu_master_cfg *cfg)
1387{
1388 struct iommu_domain *domain = dev->archdata.iommu;
1389 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1390
1391 dev->archdata.iommu = NULL;
1392 arm_smmu_domain_remove_master(smmu_domain, cfg);
1393}
1394
Will Deacon45ae7cf2013-06-24 18:31:25 +01001395static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1396{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001397 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001398 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001399 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001400 struct arm_smmu_master_cfg *cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001401
Will Deacon8f68f8e2014-07-15 11:27:08 +01001402 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01001403 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1405 return -ENXIO;
1406 }
1407
Will Deacon518f7132014-11-14 17:17:54 +00001408 /* Ensure that the domain is finalised */
1409 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001410 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001411 return ret;
1412
Will Deacon45ae7cf2013-06-24 18:31:25 +01001413 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001414 * Sanity check the domain. We don't support domains across
1415 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416 */
Will Deacon518f7132014-11-14 17:17:54 +00001417 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001418 dev_err(dev,
1419 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001420 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1421 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001422 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001423
1424 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01001425 cfg = find_smmu_master_cfg(dev);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001426 if (!cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001427 return -ENODEV;
1428
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001429 /* Detach the dev from its current domain */
1430 if (dev->archdata.iommu)
1431 arm_smmu_detach_dev(dev, cfg);
1432
Will Deacon844e35b2014-07-17 11:23:51 +01001433 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1434 if (!ret)
1435 dev->archdata.iommu = domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001436 return ret;
1437}
1438
Will Deacon45ae7cf2013-06-24 18:31:25 +01001439static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001440 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001441{
Will Deacon518f7132014-11-14 17:17:54 +00001442 int ret;
1443 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001444 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001445 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001446
Will Deacon518f7132014-11-14 17:17:54 +00001447 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001448 return -ENODEV;
1449
Will Deacon518f7132014-11-14 17:17:54 +00001450 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1451 ret = ops->map(ops, iova, paddr, size, prot);
1452 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1453 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001454}
1455
1456static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1457 size_t size)
1458{
Will Deacon518f7132014-11-14 17:17:54 +00001459 size_t ret;
1460 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001461 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001462 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001463
Will Deacon518f7132014-11-14 17:17:54 +00001464 if (!ops)
1465 return 0;
1466
1467 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1468 ret = ops->unmap(ops, iova, size);
1469 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1470 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001471}
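/*
 * Illustrative only: callers normally reach the map/unmap callbacks above
 * through the generic IOMMU API rather than invoking them directly, e.g.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, dev))
 *		iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 * The per-domain pgtbl_lock serialises concurrent map/unmap calls against
 * the io-pgtable code.
 */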
1472
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001473static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001474 dma_addr_t iova, bool do_halt)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001475{
Joerg Roedel1d672632015-03-26 13:43:10 +01001476 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001477 struct arm_smmu_device *smmu = smmu_domain->smmu;
1478 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1479 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1480 struct device *dev = smmu->dev;
1481 void __iomem *cb_base;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001482 unsigned long flags;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001483 u32 tmp;
1484 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01001485 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001486
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001487 spin_lock_irqsave(&smmu->atos_lock, flags);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001488 if (do_halt && arm_smmu_halt(smmu)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001489 phys = 0;
1490 goto out_unlock;
1491 }
1492
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001493 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1494
Robin Murphy661d9622015-05-27 17:09:34 +01001495 /* ATS1 registers can only be written atomically */
1496 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001497 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001498 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1499 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001500 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001501
1502 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1503 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001504 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08001505 dev_err(dev,
1506 "iova to phys timed out on %pad. software table walk result=%pa.\n",
1507 &iova, &phys);
1508 phys = 0;
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001509 goto out_resume;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001510 }
1511
Robin Murphyf9a05f02016-04-13 18:13:01 +01001512 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001513 if (phys & CB_PAR_F) {
1514 dev_err(dev, "translation fault!\n");
1515 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001516 phys = 0;
1517 } else {
1518 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001519 }
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001520out_resume:
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001521 if (do_halt)
1522 arm_smmu_resume(smmu);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001523out_unlock:
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001524 spin_unlock_irqrestore(&smmu->atos_lock, flags);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001525 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001526}
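/*
 * Summary of the hardware walk above: the SMMU is (optionally) halted, the
 * page-aligned VA is written to ATS1PR, ATSR is polled until the translation
 * completes, and the PA (or a fault indication) is read back from PAR. On
 * timeout the software table-walk result is only reported in the error
 * message and 0 is returned.
 */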
1527
Will Deacon45ae7cf2013-06-24 18:31:25 +01001528static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001529 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001530{
Will Deacon518f7132014-11-14 17:17:54 +00001531 phys_addr_t ret;
1532 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001533 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001534 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001535
Will Deacon518f7132014-11-14 17:17:54 +00001536 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00001537 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001538
Will Deacon518f7132014-11-14 17:17:54 +00001539 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07001540 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00001541 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001542
Will Deacon518f7132014-11-14 17:17:54 +00001543 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001544}
1545
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001546/*
1547 * This function can sleep, so it must not be called from atomic context. It
1548 * will power on the register block if required. This restriction does not
1549 * apply to the original iova_to_phys() op.
1550 */
1551static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1552 dma_addr_t iova)
1553{
1554 phys_addr_t ret = 0;
1555 unsigned long flags;
1556 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1557
1558 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1559 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1560 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001561 ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001562
1563 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1564
1565 return ret;
1566}
1567
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001568static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
1569 struct iommu_domain *domain, dma_addr_t iova)
1570{
1571 return __arm_smmu_iova_to_phys_hard(domain, iova, false);
1572}
1573
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001574static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001575{
Will Deacond0948942014-06-24 17:30:10 +01001576 switch (cap) {
1577 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001578 /*
1579 * Return true here as the SMMU can always send out coherent
1580 * requests.
1581 */
1582 return true;
Will Deacond0948942014-06-24 17:30:10 +01001583 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001584 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001585 case IOMMU_CAP_NOEXEC:
1586 return true;
Will Deacond0948942014-06-24 17:30:10 +01001587 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001588 return false;
Will Deacond0948942014-06-24 17:30:10 +01001589 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001590}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001591
Will Deacona9a1b0b2014-05-01 18:05:08 +01001592static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1593{
1594 *((u16 *)data) = alias;
1595 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001596}
1597
Will Deacon8f68f8e2014-07-15 11:27:08 +01001598static void __arm_smmu_release_pci_iommudata(void *data)
1599{
1600 kfree(data);
1601}
1602
Joerg Roedelaf659932015-10-21 23:51:41 +02001603static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1604 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001605{
Will Deacon03edb222015-01-19 14:27:33 +00001606 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02001607 u16 sid;
1608 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001609
Will Deacon03edb222015-01-19 14:27:33 +00001610 cfg = iommu_group_get_iommudata(group);
1611 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001612 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001613 if (!cfg)
1614 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001615
Will Deacon03edb222015-01-19 14:27:33 +00001616 iommu_group_set_iommudata(group, cfg,
1617 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001618 }
1619
Joerg Roedelaf659932015-10-21 23:51:41 +02001620 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1621 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001622
Will Deacon03edb222015-01-19 14:27:33 +00001623 /*
1624 * Assume Stream ID == Requester ID for now.
1625 * We need a way to describe the ID mappings in FDT.
1626 */
1627 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1628 for (i = 0; i < cfg->num_streamids; ++i)
1629 if (cfg->streamids[i] == sid)
1630 break;
1631
1632 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1633 if (i == cfg->num_streamids)
1634 cfg->streamids[cfg->num_streamids++] = sid;
1635
1636 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001637}
1638
Joerg Roedelaf659932015-10-21 23:51:41 +02001639static int arm_smmu_init_platform_device(struct device *dev,
1640 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00001641{
Will Deacon03edb222015-01-19 14:27:33 +00001642 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001643 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00001644
1645 if (!smmu)
1646 return -ENODEV;
1647
1648 master = find_smmu_master(smmu, dev->of_node);
1649 if (!master)
1650 return -ENODEV;
1651
Will Deacon03edb222015-01-19 14:27:33 +00001652 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001653
1654 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001655}
1656
1657static int arm_smmu_add_device(struct device *dev)
1658{
Joerg Roedelaf659932015-10-21 23:51:41 +02001659 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00001660
Joerg Roedelaf659932015-10-21 23:51:41 +02001661 group = iommu_group_get_for_dev(dev);
1662 if (IS_ERR(group))
1663 return PTR_ERR(group);
1664
Peng Fan9a4a9d82015-11-20 16:56:18 +08001665 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001666 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001667}
1668
Will Deacon45ae7cf2013-06-24 18:31:25 +01001669static void arm_smmu_remove_device(struct device *dev)
1670{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001671 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001672}
1673
Joerg Roedelaf659932015-10-21 23:51:41 +02001674static struct iommu_group *arm_smmu_device_group(struct device *dev)
1675{
1676 struct iommu_group *group;
1677 int ret;
1678
1679 if (dev_is_pci(dev))
1680 group = pci_device_group(dev);
1681 else
1682 group = generic_device_group(dev);
1683
1684 if (IS_ERR(group))
1685 return group;
1686
1687 if (dev_is_pci(dev))
1688 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1689 else
1690 ret = arm_smmu_init_platform_device(dev, group);
1691
1692 if (ret) {
1693 iommu_group_put(group);
1694 group = ERR_PTR(ret);
1695 }
1696
1697 return group;
1698}
1699
Will Deaconc752ce42014-06-25 22:46:31 +01001700static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1701 enum iommu_attr attr, void *data)
1702{
Joerg Roedel1d672632015-03-26 13:43:10 +01001703 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001704
1705 switch (attr) {
1706 case DOMAIN_ATTR_NESTING:
1707 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1708 return 0;
1709 default:
1710 return -ENODEV;
1711 }
1712}
1713
1714static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1715 enum iommu_attr attr, void *data)
1716{
Will Deacon518f7132014-11-14 17:17:54 +00001717 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001718 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001719
Will Deacon518f7132014-11-14 17:17:54 +00001720 mutex_lock(&smmu_domain->init_mutex);
1721
Will Deaconc752ce42014-06-25 22:46:31 +01001722 switch (attr) {
1723 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001724 if (smmu_domain->smmu) {
1725 ret = -EPERM;
1726 goto out_unlock;
1727 }
1728
Will Deaconc752ce42014-06-25 22:46:31 +01001729 if (*(int *)data)
1730 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1731 else
1732 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1733
Will Deacon518f7132014-11-14 17:17:54 +00001734 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001735 default:
Will Deacon518f7132014-11-14 17:17:54 +00001736 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001737 }
Will Deacon518f7132014-11-14 17:17:54 +00001738
1739out_unlock:
1740 mutex_unlock(&smmu_domain->init_mutex);
1741 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001742}
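/*
 * Illustrative only: a caller such as VFIO selects nested translation with
 *
 *	int nesting = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
 *
 * and must do so before attaching any device, since the attribute is
 * rejected with -EPERM once the domain has been bound to an SMMU.
 */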
1743
Will Deacon518f7132014-11-14 17:17:54 +00001744static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001745 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001746 .domain_alloc = arm_smmu_domain_alloc,
1747 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001748 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001749 .map = arm_smmu_map,
1750 .unmap = arm_smmu_unmap,
Joerg Roedel76771c92014-12-02 13:07:13 +01001751 .map_sg = default_iommu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01001752 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001753 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01001754 .add_device = arm_smmu_add_device,
1755 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001756 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001757 .domain_get_attr = arm_smmu_domain_get_attr,
1758 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00001759 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001760};
1761
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001762static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001763{
1764 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001765 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001766
1767 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
1768 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
1769 0, 30000)) {
1770 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
1771 return -EBUSY;
1772 }
1773
1774 return 0;
1775}
1776
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001777static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
1778{
1779 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
1780 u32 reg;
1781
1782 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1783 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
1784 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1785
1786 return wait ? arm_smmu_wait_for_halt(smmu) : 0;
1787}
1788
1789static int arm_smmu_halt(struct arm_smmu_device *smmu)
1790{
1791 return __arm_smmu_halt(smmu, true);
1792}
1793
1794static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
1795{
1796 return __arm_smmu_halt(smmu, false);
1797}
1798
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001799static void arm_smmu_resume(struct arm_smmu_device *smmu)
1800{
1801 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
1802 u32 reg;
1803
1804 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1805 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
1806 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1807}
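/*
 * The halt/resume handshake above uses the implementation-defined
 * MICRO_MMU_CTRL register (a vendor extension in this tree): setting
 * LOCAL_HALT_REQ asks the SMMU to quiesce, and polling IDLE confirms no
 * further transactions are in flight before registers are reprogrammed or
 * ATOS is used.
 */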
1808
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001809static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
1810{
1811 int i;
1812 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
1813
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001814 arm_smmu_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001815 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
1816 writel_relaxed(regs[i].value,
1817 ARM_SMMU_GR0(smmu) + regs[i].offset);
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001818 arm_smmu_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001819}
1820
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08001821static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001822{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08001823 int i;
1824 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001825 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001826 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001827
Peng Fan3ca37122016-05-03 21:50:30 +08001828 /*
1829 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of
1830 * ACR must be cleared first; note that the CACHE_LOCK bit is
1831 * only present in MMU-500 r2 onwards.
1832 */
1833 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1834 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
1835 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
1836 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
1837 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1838 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1839 }
1840
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001841 /* Make sure all context banks are disabled and clear CB_FSR */
1842 for (i = 0; i < smmu->num_context_banks; ++i) {
1843 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1844 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1845 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001846 /*
1847 * Disable MMU-500's not-particularly-beneficial next-page
1848 * prefetcher for the sake of errata #841119 and #826419.
1849 */
1850 if (smmu->model == ARM_MMU500) {
1851 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1852 reg &= ~ARM_MMU500_ACTLR_CPRE;
1853 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1854 }
Patrick Dalyf0d4e212016-06-20 15:50:14 -07001855
1856 if (smmu->model == QCOM_SMMUV2) {
1857 reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
1858 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
1859 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
1860 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1861 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001862 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08001863}
1864
1865static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1866{
1867 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1868 int i = 0;
1869 u32 reg;
1870
1871 /* clear global FSR */
1872 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1873 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1874
1875 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
1876 /*
1877 * Mark all SMRn as invalid and all S2CRn as bypass unless
1878 * overridden
1879 */
1880 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
1881 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1882 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
1883 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
1884 }
1885
1886 arm_smmu_context_bank_reset(smmu);
1887 }
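	/*
	 * With ARM_SMMU_OPT_SKIP_INIT the block above is skipped, leaving
	 * any SMR/S2CR and context-bank state programmed by firmware or an
	 * earlier boot stage intact (presumably because some platforms rely
	 * on streams that are already live, e.g. a splash-screen display).
	 */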
Will Deacon1463fe42013-07-31 19:21:27 +01001888
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001889 /* Program implementation defined registers */
1890 arm_smmu_impl_def_programming(smmu);
1891
Will Deacon45ae7cf2013-06-24 18:31:25 +01001892 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001893 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1894 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1895
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001896 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001897
Will Deacon45ae7cf2013-06-24 18:31:25 +01001898 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001899 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900
1901 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001902 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001903
Robin Murphy25a1c962016-02-10 14:25:33 +00001904 /* Enable client access, handling unmatched streams as appropriate */
1905 reg &= ~sCR0_CLIENTPD;
1906 if (disable_bypass)
1907 reg |= sCR0_USFCFG;
1908 else
1909 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001910
1911 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001912 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001913
1914 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001915 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001916
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001917 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1918 reg |= sCR0_VMID16EN;
1919
Will Deacon45ae7cf2013-06-24 18:31:25 +01001920 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00001921 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001922 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001923}
1924
1925static int arm_smmu_id_size_to_bits(int size)
1926{
1927 switch (size) {
1928 case 0:
1929 return 32;
1930 case 1:
1931 return 36;
1932 case 2:
1933 return 40;
1934 case 3:
1935 return 42;
1936 case 4:
1937 return 44;
1938 case 5:
1939 default:
1940 return 48;
1941 }
1942}
1943
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001944static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
1945{
1946 struct device *dev = smmu->dev;
1947 int i, ntuples, ret;
1948 u32 *tuples;
1949 struct arm_smmu_impl_def_reg *regs, *regit;
1950
1951 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
1952 return 0;
1953
1954 ntuples /= sizeof(u32);
1955 if (ntuples % 2) {
1956 dev_err(dev,
1957 "Invalid number of attach-impl-defs registers: %d\n",
1958 ntuples);
1959 return -EINVAL;
1960 }
1961
1962 regs = devm_kmalloc(
1963 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
1964 GFP_KERNEL);
1965 if (!regs)
1966 return -ENOMEM;
1967
1968 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
1969 if (!tuples)
1970 return -ENOMEM;
1971
1972 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
1973 tuples, ntuples);
1974 if (ret)
1975 return ret;
1976
1977 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
1978 regit->offset = tuples[i];
1979 regit->value = tuples[i + 1];
1980 }
1981
1982 devm_kfree(dev, tuples);
1983
1984 smmu->impl_def_attach_registers = regs;
1985 smmu->num_impl_def_attach_registers = ntuples / 2;
1986
1987 return 0;
1988}
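/*
 * Illustrative DT fragment for the property parsed above (offsets and
 * values are hypothetical):
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 *
 * Each <offset value> pair is written verbatim, relative to the SMMU
 * register base, by arm_smmu_impl_def_programming().
 */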
1989
Will Deacon45ae7cf2013-06-24 18:31:25 +01001990static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1991{
1992 unsigned long size;
1993 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1994 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001995 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001996
1997 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001998 dev_notice(smmu->dev, "SMMUv%d with:\n",
1999 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002000
2001 /* ID0 */
2002 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01002003
2004 /* Restrict available stages based on module parameter */
2005 if (force_stage == 1)
2006 id &= ~(ID0_S2TS | ID0_NTS);
2007 else if (force_stage == 2)
2008 id &= ~(ID0_S1TS | ID0_NTS);
2009
Will Deacon45ae7cf2013-06-24 18:31:25 +01002010 if (id & ID0_S1TS) {
2011 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
2012 dev_notice(smmu->dev, "\tstage 1 translation\n");
2013 }
2014
2015 if (id & ID0_S2TS) {
2016 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
2017 dev_notice(smmu->dev, "\tstage 2 translation\n");
2018 }
2019
2020 if (id & ID0_NTS) {
2021 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
2022 dev_notice(smmu->dev, "\tnested translation\n");
2023 }
2024
2025 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01002026 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002027 dev_err(smmu->dev, "\tno translation support!\n");
2028 return -ENODEV;
2029 }
2030
Robin Murphyb7862e32016-04-13 18:13:03 +01002031 if ((id & ID0_S1TS) &&
2032 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002033 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
2034 dev_notice(smmu->dev, "\taddress translation ops\n");
2035 }
2036
Robin Murphybae2c2d2015-07-29 19:46:05 +01002037 /*
2038 * In order for DMA API calls to work properly, we must defer to what
2039 * the DT says about coherency, regardless of what the hardware claims.
2040 * Fortunately, this also opens up a workaround for systems where the
2041 * ID register value has ended up configured incorrectly.
2042 */
2043 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
2044 cttw_reg = !!(id & ID0_CTTW);
2045 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002046 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01002047 if (cttw_dt || cttw_reg)
2048 dev_notice(smmu->dev, "\t%scoherent table walk\n",
2049 cttw_dt ? "" : "non-");
2050 if (cttw_dt != cttw_reg)
2051 dev_notice(smmu->dev,
2052 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002053
2054 if (id & ID0_SMS) {
2055 u32 smr, sid, mask;
2056
2057 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
2058 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
2059 ID0_NUMSMRG_MASK;
2060 if (smmu->num_mapping_groups == 0) {
2061 dev_err(smmu->dev,
2062 "stream-matching supported, but no SMRs present!\n");
2063 return -ENODEV;
2064 }
2065
2066 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
2067 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
2068 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
2069 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
2070
2071 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
2072 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
2073 if ((mask & sid) != sid) {
2074 dev_err(smmu->dev,
2075 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
2076 mask, sid);
2077 return -ENODEV;
2078 }
2079
2080 dev_notice(smmu->dev,
2081 "\tstream matching with %u register groups, mask 0x%x",
2082 smmu->num_mapping_groups, mask);
Olav Haugan3c8766d2014-08-22 17:12:32 -07002083 } else {
2084 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
2085 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002086 }
2087
Robin Murphy7602b872016-04-28 17:12:09 +01002088 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
2089 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
2090 if (!(id & ID0_PTFS_NO_AARCH32S))
2091 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
2092 }
2093
Will Deacon45ae7cf2013-06-24 18:31:25 +01002094 /* ID1 */
2095 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01002096 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002097
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01002098 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00002099 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01002100 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01002101 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07002102 dev_warn(smmu->dev,
2103 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
2104 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002105
Will Deacon518f7132014-11-14 17:17:54 +00002106 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002107 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
2108 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
2109 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
2110 return -ENODEV;
2111 }
2112 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
2113 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01002114 /*
2115 * Cavium CN88xx erratum #27704.
2116 * Ensure ASID and VMID allocation is unique across all SMMUs in
2117 * the system.
2118 */
2119 if (smmu->model == CAVIUM_SMMUV2) {
2120 smmu->cavium_id_base =
2121 atomic_add_return(smmu->num_context_banks,
2122 &cavium_smmu_context_count);
2123 smmu->cavium_id_base -= smmu->num_context_banks;
2124 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002125
2126 /* ID2 */
2127 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
2128 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002129 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002130
Will Deacon518f7132014-11-14 17:17:54 +00002131 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002132 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002133 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002134
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08002135 if (id & ID2_VMID16)
2136 smmu->features |= ARM_SMMU_FEAT_VMID16;
2137
Robin Murphyf1d84542015-03-04 16:41:05 +00002138 /*
2139 * What the page table walker can address actually depends on which
2140 * descriptor format is in use, but since a) we don't know that yet,
2141 * and b) it can vary per context bank, this will have to do...
2142 */
2143 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
2144 dev_warn(smmu->dev,
2145 "failed to set DMA mask for table walker\n");
2146
Robin Murphyb7862e32016-04-13 18:13:03 +01002147 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00002148 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01002149 if (smmu->version == ARM_SMMU_V1_64K)
2150 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002151 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002152 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00002153 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00002154 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01002155 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00002156 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01002157 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00002158 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01002159 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002160 }
2161
Robin Murphy7602b872016-04-28 17:12:09 +01002162 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01002163 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01002164 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01002165 if (smmu->features &
2166 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002167 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002168 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002169 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002170 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002171 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002172
Robin Murphyd5466352016-05-09 17:20:09 +01002173 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2174 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2175 else
2176 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2177 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2178 smmu->pgsize_bitmap);
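	/*
	 * Worked example: a v2 SMMU offering only the AArch64 4K and 64K
	 * granules ends up with SZ_4K | SZ_2M | SZ_1G | SZ_64K | SZ_512M,
	 * i.e. a pgsize_bitmap of 0x60211000.
	 */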
2179
Will Deacon518f7132014-11-14 17:17:54 +00002180
Will Deacon28d60072014-09-01 16:24:48 +01002181 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2182 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002183 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002184
2185 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2186 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002187 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002188
Will Deacon45ae7cf2013-06-24 18:31:25 +01002189 return 0;
2190}
2191
Robin Murphy67b65a32016-04-13 18:12:57 +01002192struct arm_smmu_match_data {
2193 enum arm_smmu_arch_version version;
2194 enum arm_smmu_implementation model;
2195};
2196
2197#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
2198static struct arm_smmu_match_data name = { .version = ver, .model = imp }
2199
2200ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2201ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002202ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002203ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002204ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002205ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002206
Joerg Roedel09b52692014-10-02 12:24:45 +02002207static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002208 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2209 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2210 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002211 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002212 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002213 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002214 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002215 { },
2216};
2217MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2218
Will Deacon45ae7cf2013-06-24 18:31:25 +01002219static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2220{
Robin Murphy09360402014-08-28 17:51:59 +01002221 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01002222 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002223 struct resource *res;
2224 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002225 struct device *dev = &pdev->dev;
2226 struct rb_node *node;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002227 struct of_phandle_iterator it;
2228 struct arm_smmu_phandle_args *masterspec;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002229 int num_irqs, i, err;
2230
2231 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2232 if (!smmu) {
2233 dev_err(dev, "failed to allocate arm_smmu_device\n");
2234 return -ENOMEM;
2235 }
2236 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002237 spin_lock_init(&smmu->atos_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002238
Robin Murphy09360402014-08-28 17:51:59 +01002239 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01002240 data = of_id->data;
2241 smmu->version = data->version;
2242 smmu->model = data->model;
Robin Murphy09360402014-08-28 17:51:59 +01002243
Will Deacon45ae7cf2013-06-24 18:31:25 +01002244 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01002245 smmu->base = devm_ioremap_resource(dev, res);
2246 if (IS_ERR(smmu->base))
2247 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002248 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002249
2250 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2251 &smmu->num_global_irqs)) {
2252 dev_err(dev, "missing #global-interrupts property\n");
2253 return -ENODEV;
2254 }
2255
2256 num_irqs = 0;
2257 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2258 num_irqs++;
2259 if (num_irqs > smmu->num_global_irqs)
2260 smmu->num_context_irqs++;
2261 }
2262
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002263 if (!smmu->num_context_irqs) {
2264 dev_err(dev, "found %d interrupts but expected at least %d\n",
2265 num_irqs, smmu->num_global_irqs + 1);
2266 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002267 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002268
2269 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
2270 GFP_KERNEL);
2271 if (!smmu->irqs) {
2272 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2273 return -ENOMEM;
2274 }
2275
2276 for (i = 0; i < num_irqs; ++i) {
2277 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002278
Will Deacon45ae7cf2013-06-24 18:31:25 +01002279 if (irq < 0) {
2280 dev_err(dev, "failed to get irq index %d\n", i);
2281 return -ENODEV;
2282 }
2283 smmu->irqs[i] = irq;
2284 }
2285
Olav Haugan3c8766d2014-08-22 17:12:32 -07002286 err = arm_smmu_device_cfg_probe(smmu);
2287 if (err)
2288 return err;
2289
Will Deacon45ae7cf2013-06-24 18:31:25 +01002290 i = 0;
2291 smmu->masters = RB_ROOT;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002292
2293 err = -ENOMEM;
2294 /* No need to zero the memory for masterspec */
2295 masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
2296 if (!masterspec)
2297 goto out_put_masters;
2298
2299 of_for_each_phandle(&it, err, dev->of_node,
2300 "mmu-masters", "#stream-id-cells", 0) {
2301 int count = of_phandle_iterator_args(&it, masterspec->args,
2302 MAX_MASTER_STREAMIDS);
2303 masterspec->np = of_node_get(it.node);
2304 masterspec->args_count = count;
2305
2306 err = register_smmu_master(smmu, dev, masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002307 if (err) {
2308 dev_err(dev, "failed to add master %s\n",
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002309 masterspec->np->name);
2310 kfree(masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002311 goto out_put_masters;
2312 }
2313
2314 i++;
2315 }
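	/*
	 * The loop above walks the legacy "mmu-masters" binding; an
	 * illustrative fragment (phandles and StreamIDs hypothetical):
	 *
	 *	mmu-masters = <&gpu 0xd00>, <&dma0 0xd01 0xd02>;
	 *
	 * where each referenced master node sets #stream-id-cells to the
	 * number of StreamID cells that follow its phandle.
	 */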
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002316
Will Deacon45ae7cf2013-06-24 18:31:25 +01002317 dev_notice(dev, "registered %d master devices\n", i);
2318
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002319 kfree(masterspec);
2320
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002321 err = arm_smmu_parse_impl_def_registers(smmu);
2322 if (err)
2323 goto out_put_masters;
2324
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002325 parse_driver_options(smmu);
2326
Robin Murphyb7862e32016-04-13 18:13:03 +01002327 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01002328 smmu->num_context_banks != smmu->num_context_irqs) {
2329 dev_err(dev,
2330 "found only %d context interrupt(s) but %d required\n",
2331 smmu->num_context_irqs, smmu->num_context_banks);
Wei Yongjun89a23cd2013-11-15 09:42:30 +00002332 err = -ENODEV;
Will Deacon44680ee2014-06-25 11:29:12 +01002333 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002334 }
2335
Will Deacon45ae7cf2013-06-24 18:31:25 +01002336 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08002337 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
2338 NULL, arm_smmu_global_fault,
2339 IRQF_ONESHOT | IRQF_SHARED,
2340 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002341 if (err) {
2342 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2343 i, smmu->irqs[i]);
Peng Fanbee14002016-07-04 17:38:22 +08002344 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002345 }
2346 }
2347
2348 INIT_LIST_HEAD(&smmu->list);
2349 spin_lock(&arm_smmu_devices_lock);
2350 list_add(&smmu->list, &arm_smmu_devices);
2351 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01002352
2353 arm_smmu_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002354 return 0;
2355
Will Deacon45ae7cf2013-06-24 18:31:25 +01002356out_put_masters:
2357 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07002358 struct arm_smmu_master *master
2359 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002360 of_node_put(master->of_node);
2361 }
2362
2363 return err;
2364}
2365
2366static int arm_smmu_device_remove(struct platform_device *pdev)
2367{
2368 int i;
2369 struct device *dev = &pdev->dev;
2370 struct arm_smmu_device *curr, *smmu = NULL;
2371 struct rb_node *node;
2372
2373 spin_lock(&arm_smmu_devices_lock);
2374 list_for_each_entry(curr, &arm_smmu_devices, list) {
2375 if (curr->dev == dev) {
2376 smmu = curr;
2377 list_del(&smmu->list);
2378 break;
2379 }
2380 }
2381 spin_unlock(&arm_smmu_devices_lock);
2382
2383 if (!smmu)
2384 return -ENODEV;
2385
Will Deacon45ae7cf2013-06-24 18:31:25 +01002386 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07002387 struct arm_smmu_master *master
2388 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002389 of_node_put(master->of_node);
2390 }
2391
Will Deaconecfadb62013-07-31 19:21:28 +01002392 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002393 dev_err(dev, "removing device with active domains!\n");
2394
2395 for (i = 0; i < smmu->num_global_irqs; ++i)
Peng Fanbee14002016-07-04 17:38:22 +08002396 devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002397
2398 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002399 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002400 return 0;
2401}
2402
Will Deacon45ae7cf2013-06-24 18:31:25 +01002403static struct platform_driver arm_smmu_driver = {
2404 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002405 .name = "arm-smmu",
2406 .of_match_table = of_match_ptr(arm_smmu_of_match),
2407 },
2408 .probe = arm_smmu_device_dt_probe,
2409 .remove = arm_smmu_device_remove,
2410};
2411
2412static int __init arm_smmu_init(void)
2413{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00002414 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002415 int ret;
2416
Thierry Reding0e7d37a2014-11-07 15:26:18 +00002417 /*
2418 * Play nice with systems that don't have an ARM SMMU by checking that
2419 * an ARM SMMU exists in the system before proceeding with the driver
2420 * and IOMMU bus operation registration.
2421 */
2422 np = of_find_matching_node(NULL, arm_smmu_of_match);
2423 if (!np)
2424 return 0;
2425
2426 of_node_put(np);
2427
Will Deacon45ae7cf2013-06-24 18:31:25 +01002428 ret = platform_driver_register(&arm_smmu_driver);
2429 if (ret)
2430 return ret;
2431
2432 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01002433 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002434 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2435
Will Deacond123cf82014-02-04 22:17:53 +00002436#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01002437 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002438 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00002439#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01002440
Will Deacona9a1b0b2014-05-01 18:05:08 +01002441#ifdef CONFIG_PCI
Wei Chen112c8982016-06-13 17:20:17 +08002442 if (!iommu_present(&pci_bus_type)) {
2443 pci_request_acs();
Will Deacona9a1b0b2014-05-01 18:05:08 +01002444 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
Wei Chen112c8982016-06-13 17:20:17 +08002445 }
Will Deacona9a1b0b2014-05-01 18:05:08 +01002446#endif
2447
Will Deacon45ae7cf2013-06-24 18:31:25 +01002448 return 0;
2449}
2450
2451static void __exit arm_smmu_exit(void)
2452{
2453 return platform_driver_unregister(&arm_smmu_driver);
2454}
2455
Andreas Herrmannb1950b22013-10-01 13:39:05 +01002456subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002457module_exit(arm_smmu_exit);
2458
2459MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2460MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2461MODULE_LICENSE("GPL v2");