/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	spinlock_t			atos_lock;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

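/*
 * Resolve the OF node used to describe a master: PCI endpoints aren't
 * listed individually in the binding, so walk up to the root bus and use
 * the host controller's node; everything else matches on its own node.
 */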
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

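/*
 * Each SMMU keeps its registered masters in an rb-tree, keyed (and
 * ordered) simply by the pointer value of their device_node.
 */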
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

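/*
 * Validate one mmu-masters entry (no duplicate registrations, stream ID
 * count and, for stream-indexing SMMUs, stream ID range) before adding
 * the master to the SMMU's tree.
 */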
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

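/*
 * Lock-free allocator for context bank and SMR indices: rescan for a
 * zero bit whenever the atomic test_and_set_bit() loses a race with a
 * concurrent allocation.
 */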
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

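/*
 * Invalidate all TLB entries for one context: by ASID via the context
 * bank's TLBIASID register for stage 1, or by VMID via the global
 * TLBIVMID register for stage 2, then wait for the invalidation to
 * complete.
 */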
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

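/*
 * Invalidate a range one granule at a time without waiting for
 * completion; the io-pgtable framework follows up with a tlb_sync.
 * Stage 1 targets TLBIVA/TLBIVAL with the ASID folded into the written
 * value; stage 2 targets TLBIIPAS2/TLBIIPAS2L on SMMUv2 and falls back
 * to invalidating the whole VMID on older implementations.
 */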
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

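/*
 * Debug helper for unhandled faults: halt the SMMU, terminate the
 * stalled transaction, then momentarily disable stall-on-fault and
 * replay the faulting address through the hardware ATOS operation,
 * retrying once after a full TLB invalidation. Comparing the result
 * with the software table walk helps distinguish page-table bugs from
 * stale TLB entries or misbehaving hardware.
 */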
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}

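/*
 * Context (translation) fault handler. Decodes FSR/FSYNR0 into
 * IOMMU_FAULT_* flags and offers the fault to the client via
 * report_iommu_fault(); unhandled faults are dumped along with both the
 * software and hardware (ATOS) translations of the faulting address.
 * A client returning -EBUSY keeps the transaction stalled and becomes
 * responsible for clearing FSR and resuming the SMMU itself.
 */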
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);

		dev_err(smmu->dev,
			"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_err(smmu->dev, "FAR    = %016lx\n", (unsigned long)iova);
		dev_err(smmu->dev, "FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "PF " : "",
			(fsr & 0x10) ? "EF " : "",
			(fsr & 0x20) ? "TLBMCF " : "",
			(fsr & 0x40) ? "TLBLKF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");
		dev_err(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		dev_err(smmu->dev,
			"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
		dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 *    not before so that the fault remains outstanding. This ensures
	 *    SCTLR.HUPCF has the desired effect if subsequent transactions
	 *    also need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

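/*
 * Program one context bank from the io-pgtable configuration: CBA2R
 * (register format and 16-bit VMID), CBAR (type, interrupt index, and
 * VMID or bypass attributes), the TTBRs with the ASID in the upper
 * bits, TTBCR/TTBCR2, the MAIRs for stage 1, and finally SCTLR to
 * enable translation with fault reporting and stalling.
 */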
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

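/*
 * Bind a domain to an SMMU instance: choose a translation stage and
 * context format the hardware supports, claim a context bank and
 * interrupt index, allocate the io-pgtable, program the bank, and
 * finally register the context fault handler.
 */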
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
			arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
			"arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

1238static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001239 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001240{
1241 int i;
1242 struct arm_smmu_smr *smrs;
1243 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1244
1245 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1246 return 0;
1247
Will Deacona9a1b0b2014-05-01 18:05:08 +01001248 if (cfg->smrs)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001249 return -EEXIST;
1250
Mitchel Humpherys29073202014-07-08 09:52:18 -07001251 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001252 if (!smrs) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001253 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1254 cfg->num_streamids);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001255 return -ENOMEM;
1256 }
1257
Will Deacon44680ee2014-06-25 11:29:12 +01001258 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001259 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001260 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1261 smmu->num_mapping_groups);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001262 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001263 dev_err(smmu->dev, "failed to allocate free SMR\n");
1264 goto err_free_smrs;
1265 }
1266
1267 smrs[i] = (struct arm_smmu_smr) {
1268 .idx = idx,
1269 .mask = 0, /* We don't currently share SMRs */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001270 .id = cfg->streamids[i],
Will Deacon45ae7cf2013-06-24 18:31:25 +01001271 };
1272 }
1273
1274 /* It worked! Now, poke the actual hardware */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001275 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001276 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1277 smrs[i].mask << SMR_MASK_SHIFT;
1278 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1279 }
1280
Will Deacona9a1b0b2014-05-01 18:05:08 +01001281 cfg->smrs = smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001282 return 0;
1283
1284err_free_smrs:
1285 while (--i >= 0)
1286 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1287 kfree(smrs);
1288 return -ENOSPC;
1289}
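
/*
 * Worked example of the SMR encoding programmed above (illustrative
 * values only): stream ID 0x42 with a zero mask yields
 *
 *	SMR_VALID | (0 << SMR_MASK_SHIFT) | (0x42 << SMR_ID_SHIFT)
 *
 * i.e. 0x80000042, matching exactly one incoming stream. A non-zero
 * mask marks ID bits as don't-care, so a single SMR could match a
 * whole range of IDs, which is why the mask stays 0 while SMRs are
 * not shared.
 */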
1290
1291static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001292 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001293{
1294 int i;
1295 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001296 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001297
Will Deacon43b412b2014-07-15 11:22:24 +01001298 if (!smrs)
1299 return;
1300
Will Deacon45ae7cf2013-06-24 18:31:25 +01001301 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001302 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001303 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001304
Will Deacon45ae7cf2013-06-24 18:31:25 +01001305 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1306 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1307 }
1308
Will Deacona9a1b0b2014-05-01 18:05:08 +01001309 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001310 kfree(smrs);
1311}
1312
Will Deacon45ae7cf2013-06-24 18:31:25 +01001313static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001314 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001315{
1316 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001317 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001318 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1319
Will Deacon5f634952016-04-20 14:53:32 +01001320 /*
1321 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1322 * for all devices behind the SMMU. Note that we need to take
1323	 * care configuring SMRs for devices that are both a platform_device
1324	 * and a PCI device (i.e. a PCI host controller).
1325 */
1326 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1327 return 0;
1328
Will Deacon8f68f8e2014-07-15 11:27:08 +01001329 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001330 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001331 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001332 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001333
Will Deacona9a1b0b2014-05-01 18:05:08 +01001334 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001335 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001336
Will Deacona9a1b0b2014-05-01 18:05:08 +01001337 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphyd3461802016-01-26 18:06:34 +00001338 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
Will Deacon44680ee2014-06-25 11:29:12 +01001339 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001340 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1341 }
1342
1343 return 0;
1344}
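
/*
 * For reference (sketch): if the domain owns context bank 3, the loop
 * above writes S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
 * (3 << S2CR_CBNDX_SHIFT), i.e. a "translate" stream mapping that
 * routes matching transactions into context bank 3.
 */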
1345
1346static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001347 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001348{
Will Deacon43b412b2014-07-15 11:22:24 +01001349 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001350 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001351 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001352
Will Deacon8f68f8e2014-07-15 11:27:08 +01001353 /* An IOMMU group is torn down by the first device to be removed */
1354 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1355 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001356
1357 /*
1358 * We *must* clear the S2CR first, because freeing the SMR means
1359 * that it can be re-allocated immediately.
1360 */
Will Deacon43b412b2014-07-15 11:22:24 +01001361 for (i = 0; i < cfg->num_streamids; ++i) {
1362 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001363 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001364
Robin Murphy25a1c962016-02-10 14:25:33 +00001365 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001366 }
1367
Will Deacona9a1b0b2014-05-01 18:05:08 +01001368 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001369}
1370
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001371static void arm_smmu_detach_dev(struct device *dev,
1372 struct arm_smmu_master_cfg *cfg)
1373{
1374 struct iommu_domain *domain = dev->archdata.iommu;
1375 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1376
1377 dev->archdata.iommu = NULL;
1378 arm_smmu_domain_remove_master(smmu_domain, cfg);
1379}
1380
Will Deacon45ae7cf2013-06-24 18:31:25 +01001381static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1382{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001383 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001384 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001385 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001386 struct arm_smmu_master_cfg *cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001387
Will Deacon8f68f8e2014-07-15 11:27:08 +01001388 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01001389 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001390 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1391 return -ENXIO;
1392 }
1393
Will Deacon518f7132014-11-14 17:17:54 +00001394 /* Ensure that the domain is finalised */
1395 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001396 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001397 return ret;
1398
Will Deacon45ae7cf2013-06-24 18:31:25 +01001399 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001400 * Sanity check the domain. We don't support domains across
1401 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001402 */
Will Deacon518f7132014-11-14 17:17:54 +00001403 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404 dev_err(dev,
1405 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001406 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1407 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001408 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001409
1410 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01001411 cfg = find_smmu_master_cfg(dev);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001412 if (!cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001413 return -ENODEV;
1414
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001415 /* Detach the dev from its current domain */
1416 if (dev->archdata.iommu)
1417 arm_smmu_detach_dev(dev, cfg);
1418
Will Deacon844e35b2014-07-17 11:23:51 +01001419 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1420 if (!ret)
1421 dev->archdata.iommu = domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001422 return ret;
1423}
1424
Will Deacon45ae7cf2013-06-24 18:31:25 +01001425static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001426 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001427{
Will Deacon518f7132014-11-14 17:17:54 +00001428 int ret;
1429 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001430 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001431	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001432
Will Deacon518f7132014-11-14 17:17:54 +00001433 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001434 return -ENODEV;
1435
Will Deacon518f7132014-11-14 17:17:54 +00001436 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1437 ret = ops->map(ops, iova, paddr, size, prot);
1438 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1439 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001440}
1441
1442static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1443 size_t size)
1444{
Will Deacon518f7132014-11-14 17:17:54 +00001445 size_t ret;
1446 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001447 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001448	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001449
Will Deacon518f7132014-11-14 17:17:54 +00001450 if (!ops)
1451 return 0;
1452
1453 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1454 ret = ops->unmap(ops, iova, size);
1455 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1456 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001457}
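
/*
 * For reference, these two callbacks are what a caller reaches via the
 * core IOMMU API (sketch, illustrative values only):
 *
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	iommu_unmap(domain, iova, SZ_4K);
 *
 * Both simply proxy to the io-pgtable ops under pgtbl_lock.
 */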
1458
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001459static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001460 dma_addr_t iova, bool do_halt)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001461{
Joerg Roedel1d672632015-03-26 13:43:10 +01001462 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001463 struct arm_smmu_device *smmu = smmu_domain->smmu;
1464 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1465	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1466 struct device *dev = smmu->dev;
1467 void __iomem *cb_base;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001468 unsigned long flags;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001469 u32 tmp;
1470 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01001471 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001472
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001473 spin_lock_irqsave(&smmu->atos_lock, flags);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001474 if (do_halt && arm_smmu_halt(smmu)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001475 phys = 0;
1476 goto out_unlock;
1477 }
1478
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001479 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1480
Robin Murphy661d9622015-05-27 17:09:34 +01001481 /* ATS1 registers can only be written atomically */
1482 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001483 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001484 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1485 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001486 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001487
1488 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1489 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001490 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08001491 dev_err(dev,
1492 "iova to phys timed out on %pad. software table walk result=%pa.\n",
1493 &iova, &phys);
1494 phys = 0;
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001495 goto out_resume;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001496 }
1497
Robin Murphyf9a05f02016-04-13 18:13:01 +01001498 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001499 if (phys & CB_PAR_F) {
1500 dev_err(dev, "translation fault!\n");
1501 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001502 phys = 0;
1503 } else {
1504 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001505 }
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001506out_resume:
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001507 if (do_halt)
1508 arm_smmu_resume(smmu);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001509out_unlock:
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001510 spin_unlock_irqrestore(&smmu->atos_lock, flags);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001511 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001512}
1513
Will Deacon45ae7cf2013-06-24 18:31:25 +01001514static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001515 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001516{
Will Deacon518f7132014-11-14 17:17:54 +00001517 phys_addr_t ret;
1518 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001519 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001520	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001521
Will Deacon518f7132014-11-14 17:17:54 +00001522 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00001523 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001524
Will Deacon518f7132014-11-14 17:17:54 +00001525 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07001526 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00001527 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001528
Will Deacon518f7132014-11-14 17:17:54 +00001529 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001530}
1531
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001532/*
1533 * This function can sleep, so it must not be called from atomic context:
1534 * it may need to power on the register block first. This restriction does
1535 * not apply to the original iova_to_phys() op.
1536 */
1537static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1538 dma_addr_t iova)
1539{
1540 phys_addr_t ret = 0;
1541 unsigned long flags;
1542 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1543
1544 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1545 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1546 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001547 ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001548
1549 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1550
1551 return ret;
1552}
1553
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001554static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
1555 struct iommu_domain *domain, dma_addr_t iova)
1556{
1557 return __arm_smmu_iova_to_phys_hard(domain, iova, false);
1558}
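
/*
 * To summarise the lookup paths above (sketch): arm_smmu_iova_to_phys()
 * performs a software table walk and is safe in atomic context;
 * arm_smmu_iova_to_phys_hard() uses the hardware translation port and
 * halts the SMMU around the access; the _no_halt() variant presumably
 * serves callers (e.g. fault handlers) for which halting is unsafe or
 * has already been done.
 */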
1559
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001560static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001561{
Will Deacond0948942014-06-24 17:30:10 +01001562 switch (cap) {
1563 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001564 /*
1565 * Return true here as the SMMU can always send out coherent
1566 * requests.
1567 */
1568 return true;
Will Deacond0948942014-06-24 17:30:10 +01001569 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001570 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001571 case IOMMU_CAP_NOEXEC:
1572 return true;
Will Deacond0948942014-06-24 17:30:10 +01001573 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001574 return false;
Will Deacond0948942014-06-24 17:30:10 +01001575 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001576}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001577
Will Deacona9a1b0b2014-05-01 18:05:08 +01001578static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1579{
1580 *((u16 *)data) = alias;
1581 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001582}
1583
Will Deacon8f68f8e2014-07-15 11:27:08 +01001584static void __arm_smmu_release_pci_iommudata(void *data)
1585{
1586 kfree(data);
1587}
1588
Joerg Roedelaf659932015-10-21 23:51:41 +02001589static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1590 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001591{
Will Deacon03edb222015-01-19 14:27:33 +00001592 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02001593 u16 sid;
1594 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001595
Will Deacon03edb222015-01-19 14:27:33 +00001596 cfg = iommu_group_get_iommudata(group);
1597 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001598 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001599 if (!cfg)
1600 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001601
Will Deacon03edb222015-01-19 14:27:33 +00001602 iommu_group_set_iommudata(group, cfg,
1603 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001604 }
1605
Joerg Roedelaf659932015-10-21 23:51:41 +02001606 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1607 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001608
Will Deacon03edb222015-01-19 14:27:33 +00001609 /*
1610 * Assume Stream ID == Requester ID for now.
1611 * We need a way to describe the ID mappings in FDT.
1612 */
1613 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1614 for (i = 0; i < cfg->num_streamids; ++i)
1615 if (cfg->streamids[i] == sid)
1616 break;
1617
1618 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1619 if (i == cfg->num_streamids)
1620 cfg->streamids[cfg->num_streamids++] = sid;
1621
1622 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001623}
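
/*
 * Note on the SID derivation above: pci_for_each_dma_alias() visits
 * every requester ID the device's DMA may carry (bridges, aliasing
 * quirks), and our callback simply overwrites the result each time, so
 * the last alias visited becomes the SID used for stream matching.
 */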
1624
Joerg Roedelaf659932015-10-21 23:51:41 +02001625static int arm_smmu_init_platform_device(struct device *dev,
1626 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00001627{
Will Deacon03edb222015-01-19 14:27:33 +00001628 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001629 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00001630
1631 if (!smmu)
1632 return -ENODEV;
1633
1634 master = find_smmu_master(smmu, dev->of_node);
1635 if (!master)
1636 return -ENODEV;
1637
Will Deacon03edb222015-01-19 14:27:33 +00001638 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001639
1640 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001641}
1642
1643static int arm_smmu_add_device(struct device *dev)
1644{
Joerg Roedelaf659932015-10-21 23:51:41 +02001645 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00001646
Joerg Roedelaf659932015-10-21 23:51:41 +02001647 group = iommu_group_get_for_dev(dev);
1648 if (IS_ERR(group))
1649 return PTR_ERR(group);
1650
Peng Fan9a4a9d82015-11-20 16:56:18 +08001651 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001652 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001653}
1654
Will Deacon45ae7cf2013-06-24 18:31:25 +01001655static void arm_smmu_remove_device(struct device *dev)
1656{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001657 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001658}
1659
Joerg Roedelaf659932015-10-21 23:51:41 +02001660static struct iommu_group *arm_smmu_device_group(struct device *dev)
1661{
1662 struct iommu_group *group;
1663 int ret;
1664
1665 if (dev_is_pci(dev))
1666 group = pci_device_group(dev);
1667 else
1668 group = generic_device_group(dev);
1669
1670 if (IS_ERR(group))
1671 return group;
1672
1673 if (dev_is_pci(dev))
1674 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1675 else
1676 ret = arm_smmu_init_platform_device(dev, group);
1677
1678 if (ret) {
1679 iommu_group_put(group);
1680 group = ERR_PTR(ret);
1681 }
1682
1683 return group;
1684}
1685
Will Deaconc752ce42014-06-25 22:46:31 +01001686static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1687 enum iommu_attr attr, void *data)
1688{
Joerg Roedel1d672632015-03-26 13:43:10 +01001689 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001690
1691 switch (attr) {
1692 case DOMAIN_ATTR_NESTING:
1693 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1694 return 0;
1695 default:
1696 return -ENODEV;
1697 }
1698}
1699
1700static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1701 enum iommu_attr attr, void *data)
1702{
Will Deacon518f7132014-11-14 17:17:54 +00001703 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001704 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001705
Will Deacon518f7132014-11-14 17:17:54 +00001706 mutex_lock(&smmu_domain->init_mutex);
1707
Will Deaconc752ce42014-06-25 22:46:31 +01001708 switch (attr) {
1709 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001710 if (smmu_domain->smmu) {
1711 ret = -EPERM;
1712 goto out_unlock;
1713 }
1714
Will Deaconc752ce42014-06-25 22:46:31 +01001715 if (*(int *)data)
1716 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1717 else
1718 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1719
Will Deacon518f7132014-11-14 17:17:54 +00001720 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001721 default:
Will Deacon518f7132014-11-14 17:17:54 +00001722 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001723 }
Will Deacon518f7132014-11-14 17:17:54 +00001724
1725out_unlock:
1726 mutex_unlock(&smmu_domain->init_mutex);
1727 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001728}
1729
Will Deacon518f7132014-11-14 17:17:54 +00001730static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001731 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001732 .domain_alloc = arm_smmu_domain_alloc,
1733 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001734 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001735 .map = arm_smmu_map,
1736 .unmap = arm_smmu_unmap,
Joerg Roedel76771c92014-12-02 13:07:13 +01001737 .map_sg = default_iommu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01001738 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001739 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01001740 .add_device = arm_smmu_add_device,
1741 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001742 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001743 .domain_get_attr = arm_smmu_domain_get_attr,
1744 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00001745 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001746};
1747
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001748static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001749{
1750 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001751 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001752
1753 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
1754 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
1755 0, 30000)) {
1756 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
1757 return -EBUSY;
1758 }
1759
1760 return 0;
1761}
1762
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001763static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
1764{
1765 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
1766 u32 reg;
1767
1768 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1769 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
1770 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1771
1772 return wait ? arm_smmu_wait_for_halt(smmu) : 0;
1773}
1774
1775static int arm_smmu_halt(struct arm_smmu_device *smmu)
1776{
1777 return __arm_smmu_halt(smmu, true);
1778}
1779
1780static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
1781{
1782 return __arm_smmu_halt(smmu, false);
1783}
1784
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001785static void arm_smmu_resume(struct arm_smmu_device *smmu)
1786{
1787 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
1788 u32 reg;
1789
1790 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1791 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
1792 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
1793}
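
/*
 * A minimal usage sketch for the halt/resume helpers above:
 *
 *	if (!arm_smmu_halt(smmu)) {
 *		/* safely poke ATOS/impl-def registers here */
 *		arm_smmu_resume(smmu);
 *	}
 *
 * MICRO_MMU_CTRL appears to be an implementation-defined register
 * rather than part of the architected SMMU programming interface.
 */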
1794
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001795static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
1796{
1797 int i;
1798 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
1799
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001800 arm_smmu_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001801 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
1802 writel_relaxed(regs[i].value,
1803 ARM_SMMU_GR0(smmu) + regs[i].offset);
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07001804 arm_smmu_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001805}
1806
Will Deacon45ae7cf2013-06-24 18:31:25 +01001807static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1808{
1809 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001810 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001811 int i = 0;
Peng Fan3ca37122016-05-03 21:50:30 +08001812 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001813
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001814 /* clear global FSR */
1815 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1816 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001817
Robin Murphy25a1c962016-02-10 14:25:33 +00001818 /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
1819 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001820 for (i = 0; i < smmu->num_mapping_groups; ++i) {
Olav Haugan3c8766d2014-08-22 17:12:32 -07001821 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy25a1c962016-02-10 14:25:33 +00001822 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001823 }
1824
Peng Fan3ca37122016-05-03 21:50:30 +08001825 /*
1826	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear
1827	 * the CACHE_LOCK bit of the ACR first; note that CACHE_LOCK
1828	 * is only present in MMU-500 r2 onwards.
1829 */
1830 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1831 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
1832 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
1833 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
1834 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1835 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1836 }
1837
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001838 /* Make sure all context banks are disabled and clear CB_FSR */
1839 for (i = 0; i < smmu->num_context_banks; ++i) {
1840 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1841 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1842 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001843 /*
1844 * Disable MMU-500's not-particularly-beneficial next-page
1845 * prefetcher for the sake of errata #841119 and #826419.
1846 */
1847 if (smmu->model == ARM_MMU500) {
1848 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1849 reg &= ~ARM_MMU500_ACTLR_CPRE;
1850 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1851 }
Patrick Dalyf0d4e212016-06-20 15:50:14 -07001852
1853 if (smmu->model == QCOM_SMMUV2) {
1854 reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
1855 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
1856 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
1857 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1858 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001859 }
Will Deacon1463fe42013-07-31 19:21:27 +01001860
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001861 /* Program implementation defined registers */
1862 arm_smmu_impl_def_programming(smmu);
1863
Will Deacon45ae7cf2013-06-24 18:31:25 +01001864 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001865 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1866 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1867
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001868 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001869
Will Deacon45ae7cf2013-06-24 18:31:25 +01001870 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001871 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001872
1873 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001874 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001875
Robin Murphy25a1c962016-02-10 14:25:33 +00001876 /* Enable client access, handling unmatched streams as appropriate */
1877 reg &= ~sCR0_CLIENTPD;
1878 if (disable_bypass)
1879 reg |= sCR0_USFCFG;
1880 else
1881 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001882
1883 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001884 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001885
1886 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001887 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001888
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001889 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1890 reg |= sCR0_VMID16EN;
1891
Will Deacon45ae7cf2013-06-24 18:31:25 +01001892 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00001893 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001894 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001895}
1896
1897static int arm_smmu_id_size_to_bits(int size)
1898{
1899 switch (size) {
1900 case 0:
1901 return 32;
1902 case 1:
1903 return 36;
1904 case 2:
1905 return 40;
1906 case 3:
1907 return 42;
1908 case 4:
1909 return 44;
1910 case 5:
1911 default:
1912 return 48;
1913 }
1914}
1915
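/*
 * The "attach-impl-defs" property parsed below is a flat list of
 * <offset value> pairs (hypothetical values):
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 *
 * Each pair is later written verbatim to ARM_SMMU_GR0(smmu) + offset
 * by arm_smmu_impl_def_programming().
 */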
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07001916static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
1917{
1918 struct device *dev = smmu->dev;
1919 int i, ntuples, ret;
1920 u32 *tuples;
1921 struct arm_smmu_impl_def_reg *regs, *regit;
1922
1923 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
1924 return 0;
1925
1926 ntuples /= sizeof(u32);
1927 if (ntuples % 2) {
1928 dev_err(dev,
1929 "Invalid number of attach-impl-defs registers: %d\n",
1930 ntuples);
1931 return -EINVAL;
1932 }
1933
1934 regs = devm_kmalloc(
1935 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
1936 GFP_KERNEL);
1937 if (!regs)
1938 return -ENOMEM;
1939
1940 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
1941 if (!tuples)
1942 return -ENOMEM;
1943
1944 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
1945 tuples, ntuples);
1946 if (ret)
1947 return ret;
1948
1949 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
1950 regit->offset = tuples[i];
1951 regit->value = tuples[i + 1];
1952 }
1953
1954 devm_kfree(dev, tuples);
1955
1956 smmu->impl_def_attach_registers = regs;
1957 smmu->num_impl_def_attach_registers = ntuples / 2;
1958
1959 return 0;
1960}
1961
Will Deacon45ae7cf2013-06-24 18:31:25 +01001962static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1963{
1964 unsigned long size;
1965 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1966 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001967 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001968
1969 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001970 dev_notice(smmu->dev, "SMMUv%d with:\n",
1971 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001972
1973 /* ID0 */
1974 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001975
1976 /* Restrict available stages based on module parameter */
1977 if (force_stage == 1)
1978 id &= ~(ID0_S2TS | ID0_NTS);
1979 else if (force_stage == 2)
1980 id &= ~(ID0_S1TS | ID0_NTS);
1981
Will Deacon45ae7cf2013-06-24 18:31:25 +01001982 if (id & ID0_S1TS) {
1983 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1984 dev_notice(smmu->dev, "\tstage 1 translation\n");
1985 }
1986
1987 if (id & ID0_S2TS) {
1988 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1989 dev_notice(smmu->dev, "\tstage 2 translation\n");
1990 }
1991
1992 if (id & ID0_NTS) {
1993 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1994 dev_notice(smmu->dev, "\tnested translation\n");
1995 }
1996
1997 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001998 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001999 dev_err(smmu->dev, "\tno translation support!\n");
2000 return -ENODEV;
2001 }
2002
Robin Murphyb7862e32016-04-13 18:13:03 +01002003 if ((id & ID0_S1TS) &&
2004 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002005 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
2006 dev_notice(smmu->dev, "\taddress translation ops\n");
2007 }
2008
Robin Murphybae2c2d2015-07-29 19:46:05 +01002009 /*
2010 * In order for DMA API calls to work properly, we must defer to what
2011 * the DT says about coherency, regardless of what the hardware claims.
2012 * Fortunately, this also opens up a workaround for systems where the
2013 * ID register value has ended up configured incorrectly.
2014 */
2015 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
2016 cttw_reg = !!(id & ID0_CTTW);
2017 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002018 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01002019 if (cttw_dt || cttw_reg)
2020 dev_notice(smmu->dev, "\t%scoherent table walk\n",
2021 cttw_dt ? "" : "non-");
2022 if (cttw_dt != cttw_reg)
2023 dev_notice(smmu->dev,
2024 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002025
2026 if (id & ID0_SMS) {
2027 u32 smr, sid, mask;
2028
2029 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
2030 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
2031 ID0_NUMSMRG_MASK;
2032 if (smmu->num_mapping_groups == 0) {
2033 dev_err(smmu->dev,
2034 "stream-matching supported, but no SMRs present!\n");
2035 return -ENODEV;
2036 }
2037
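		/*
		 * Probe the implemented SMR field widths by writing an
		 * all-ones ID and mask pattern and reading back which
		 * bits stick (unimplemented bits read back as zero).
		 */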
2038 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
2039 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
2040 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
2041 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
2042
2043 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
2044 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
2045 if ((mask & sid) != sid) {
2046 dev_err(smmu->dev,
2047 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
2048 mask, sid);
2049 return -ENODEV;
2050 }
2051
2052 dev_notice(smmu->dev,
2053 "\tstream matching with %u register groups, mask 0x%x",
2054 smmu->num_mapping_groups, mask);
Olav Haugan3c8766d2014-08-22 17:12:32 -07002055 } else {
2056 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
2057 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002058 }
2059
Robin Murphy7602b872016-04-28 17:12:09 +01002060 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
2061 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
2062 if (!(id & ID0_PTFS_NO_AARCH32S))
2063 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
2064 }
2065
Will Deacon45ae7cf2013-06-24 18:31:25 +01002066 /* ID1 */
2067 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01002068 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002069
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01002070 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00002071 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01002072 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01002073 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07002074 dev_warn(smmu->dev,
2075 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
2076 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002077
Will Deacon518f7132014-11-14 17:17:54 +00002078 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002079 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
2080 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
2081 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
2082 return -ENODEV;
2083 }
2084 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
2085 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01002086 /*
2087 * Cavium CN88xx erratum #27704.
2088 * Ensure ASID and VMID allocation is unique across all SMMUs in
2089 * the system.
2090 */
2091 if (smmu->model == CAVIUM_SMMUV2) {
2092 smmu->cavium_id_base =
2093 atomic_add_return(smmu->num_context_banks,
2094 &cavium_smmu_context_count);
2095 smmu->cavium_id_base -= smmu->num_context_banks;
2096 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002097
2098 /* ID2 */
2099 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
2100 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002101 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002102
Will Deacon518f7132014-11-14 17:17:54 +00002103 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002104 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002105 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002106
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08002107 if (id & ID2_VMID16)
2108 smmu->features |= ARM_SMMU_FEAT_VMID16;
2109
Robin Murphyf1d84542015-03-04 16:41:05 +00002110 /*
2111 * What the page table walker can address actually depends on which
2112 * descriptor format is in use, but since a) we don't know that yet,
2113 * and b) it can vary per context bank, this will have to do...
2114 */
2115 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
2116 dev_warn(smmu->dev,
2117 "failed to set DMA mask for table walker\n");
2118
Robin Murphyb7862e32016-04-13 18:13:03 +01002119 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00002120 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01002121 if (smmu->version == ARM_SMMU_V1_64K)
2122 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002123 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002124 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00002125 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00002126 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01002127 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00002128 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01002129 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00002130 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01002131 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002132 }
2133
Robin Murphy7602b872016-04-28 17:12:09 +01002134 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01002135 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01002136 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01002137 if (smmu->features &
2138 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002139 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002140 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002141 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002142 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002143 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002144
Robin Murphyd5466352016-05-09 17:20:09 +01002145 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2146 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2147 else
2148 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2149 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2150 smmu->pgsize_bitmap);
2151
Will Deacon518f7132014-11-14 17:17:54 +00002152
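	/*
	 * Worked example (sketch): a v2 SMMU advertising ID2_PTFS_4K and
	 * ID2_PTFS_64K picks up AARCH64_4K | AARCH64_64K above, giving
	 * pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G | SZ_64K | SZ_512M
	 *		 = 0x60211000.
	 */
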
Will Deacon28d60072014-09-01 16:24:48 +01002153 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2154 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002155 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002156
2157 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2158 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002159 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002160
Will Deacon45ae7cf2013-06-24 18:31:25 +01002161 return 0;
2162}
2163
Robin Murphy67b65a32016-04-13 18:12:57 +01002164struct arm_smmu_match_data {
2165 enum arm_smmu_arch_version version;
2166 enum arm_smmu_implementation model;
2167};
2168
2169#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
2170static struct arm_smmu_match_data name = { .version = ver, .model = imp }
2171
2172ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2173ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002174ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002175ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002176ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002177ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002178
Joerg Roedel09b52692014-10-02 12:24:45 +02002179static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002180 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2181 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2182 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002183 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002184 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002185 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002186 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002187 { },
2188};
2189MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
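
/*
 * A (hypothetical) device-tree node matched by the table above, showing
 * the properties consumed by the probe routine below:
 *
 *	smmu: iommu@d00000 {
 *		compatible = "qcom,smmu-v2";
 *		reg = <0xd00000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 73 0>, <0 74 0>;
 *		mmu-masters = <&gfx3d 0x420>;
 *	};
 */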
2190
Will Deacon45ae7cf2013-06-24 18:31:25 +01002191static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2192{
Robin Murphy09360402014-08-28 17:51:59 +01002193 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01002194 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002195 struct resource *res;
2196 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002197 struct device *dev = &pdev->dev;
2198 struct rb_node *node;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002199 struct of_phandle_iterator it;
2200 struct arm_smmu_phandle_args *masterspec;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002201 int num_irqs, i, err;
2202
2203 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2204 if (!smmu) {
2205 dev_err(dev, "failed to allocate arm_smmu_device\n");
2206 return -ENOMEM;
2207 }
2208 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002209 spin_lock_init(&smmu->atos_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002210
Robin Murphy09360402014-08-28 17:51:59 +01002211 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01002212 data = of_id->data;
2213 smmu->version = data->version;
2214 smmu->model = data->model;
Robin Murphy09360402014-08-28 17:51:59 +01002215
Will Deacon45ae7cf2013-06-24 18:31:25 +01002216 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01002217 smmu->base = devm_ioremap_resource(dev, res);
2218 if (IS_ERR(smmu->base))
2219 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002220 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002221
2222 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2223 &smmu->num_global_irqs)) {
2224 dev_err(dev, "missing #global-interrupts property\n");
2225 return -ENODEV;
2226 }
2227
2228 num_irqs = 0;
2229 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2230 num_irqs++;
2231 if (num_irqs > smmu->num_global_irqs)
2232 smmu->num_context_irqs++;
2233 }
2234
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002235 if (!smmu->num_context_irqs) {
2236 dev_err(dev, "found %d interrupts but expected at least %d\n",
2237 num_irqs, smmu->num_global_irqs + 1);
2238 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002239 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002240
2241 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
2242 GFP_KERNEL);
2243 if (!smmu->irqs) {
2244 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2245 return -ENOMEM;
2246 }
2247
2248 for (i = 0; i < num_irqs; ++i) {
2249 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002250
Will Deacon45ae7cf2013-06-24 18:31:25 +01002251 if (irq < 0) {
2252 dev_err(dev, "failed to get irq index %d\n", i);
2253 return -ENODEV;
2254 }
2255 smmu->irqs[i] = irq;
2256 }
2257
Olav Haugan3c8766d2014-08-22 17:12:32 -07002258 err = arm_smmu_device_cfg_probe(smmu);
2259 if (err)
2260 return err;
2261
Will Deacon45ae7cf2013-06-24 18:31:25 +01002262 i = 0;
2263 smmu->masters = RB_ROOT;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002264
2265 err = -ENOMEM;
2266 /* No need to zero the memory for masterspec */
2267 masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
2268 if (!masterspec)
2269 goto out_put_masters;
2270
2271 of_for_each_phandle(&it, err, dev->of_node,
2272 "mmu-masters", "#stream-id-cells", 0) {
2273 int count = of_phandle_iterator_args(&it, masterspec->args,
2274 MAX_MASTER_STREAMIDS);
2275 masterspec->np = of_node_get(it.node);
2276 masterspec->args_count = count;
2277
2278 err = register_smmu_master(smmu, dev, masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002279 if (err) {
2280 dev_err(dev, "failed to add master %s\n",
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002281 masterspec->np->name);
2282 kfree(masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002283 goto out_put_masters;
2284 }
2285
2286 i++;
2287 }
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002288
Will Deacon45ae7cf2013-06-24 18:31:25 +01002289 dev_notice(dev, "registered %d master devices\n", i);
2290
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02002291 kfree(masterspec);
2292
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002293 err = arm_smmu_parse_impl_def_registers(smmu);
2294 if (err)
2295 goto out_put_masters;
2296
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002297 parse_driver_options(smmu);
2298
Robin Murphyb7862e32016-04-13 18:13:03 +01002299 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01002300 smmu->num_context_banks != smmu->num_context_irqs) {
2301 dev_err(dev,
2302 "found only %d context interrupt(s) but %d required\n",
2303 smmu->num_context_irqs, smmu->num_context_banks);
Wei Yongjun89a23cd2013-11-15 09:42:30 +00002304 err = -ENODEV;
Will Deacon44680ee2014-06-25 11:29:12 +01002305 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002306 }
2307
Will Deacon45ae7cf2013-06-24 18:31:25 +01002308 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08002309 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
2310 NULL, arm_smmu_global_fault,
2311 IRQF_ONESHOT | IRQF_SHARED,
2312 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002313 if (err) {
2314 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2315 i, smmu->irqs[i]);
Peng Fanbee14002016-07-04 17:38:22 +08002316 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002317 }
2318 }
2319
2320 INIT_LIST_HEAD(&smmu->list);
2321 spin_lock(&arm_smmu_devices_lock);
2322 list_add(&smmu->list, &arm_smmu_devices);
2323 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01002324
2325 arm_smmu_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002326 return 0;
2327
Will Deacon45ae7cf2013-06-24 18:31:25 +01002328out_put_masters:
2329 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07002330 struct arm_smmu_master *master
2331 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002332 of_node_put(master->of_node);
2333 }
2334
2335 return err;
2336}
2337
2338static int arm_smmu_device_remove(struct platform_device *pdev)
2339{
2340 int i;
2341 struct device *dev = &pdev->dev;
2342 struct arm_smmu_device *curr, *smmu = NULL;
2343 struct rb_node *node;
2344
2345 spin_lock(&arm_smmu_devices_lock);
2346 list_for_each_entry(curr, &arm_smmu_devices, list) {
2347 if (curr->dev == dev) {
2348 smmu = curr;
2349 list_del(&smmu->list);
2350 break;
2351 }
2352 }
2353 spin_unlock(&arm_smmu_devices_lock);
2354
2355 if (!smmu)
2356 return -ENODEV;
2357
Will Deacon45ae7cf2013-06-24 18:31:25 +01002358 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07002359 struct arm_smmu_master *master
2360 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002361 of_node_put(master->of_node);
2362 }
2363
Will Deaconecfadb62013-07-31 19:21:28 +01002364 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002365 dev_err(dev, "removing device with active domains!\n");
2366
2367 for (i = 0; i < smmu->num_global_irqs; ++i)
Peng Fanbee14002016-07-04 17:38:22 +08002368 devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002369
2370 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002371 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002372 return 0;
2373}
2374
Will Deacon45ae7cf2013-06-24 18:31:25 +01002375static struct platform_driver arm_smmu_driver = {
2376 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002377 .name = "arm-smmu",
2378 .of_match_table = of_match_ptr(arm_smmu_of_match),
2379 },
2380 .probe = arm_smmu_device_dt_probe,
2381 .remove = arm_smmu_device_remove,
2382};
2383
2384static int __init arm_smmu_init(void)
2385{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00002386 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002387 int ret;
2388
Thierry Reding0e7d37a2014-11-07 15:26:18 +00002389 /*
2390 * Play nice with systems that don't have an ARM SMMU by checking that
2391 * an ARM SMMU exists in the system before proceeding with the driver
2392 * and IOMMU bus operation registration.
2393 */
2394 np = of_find_matching_node(NULL, arm_smmu_of_match);
2395 if (!np)
2396 return 0;
2397
2398 of_node_put(np);
2399
Will Deacon45ae7cf2013-06-24 18:31:25 +01002400 ret = platform_driver_register(&arm_smmu_driver);
2401 if (ret)
2402 return ret;
2403
2404 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01002405 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002406 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2407
Will Deacond123cf82014-02-04 22:17:53 +00002408#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01002409 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002410 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00002411#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01002412
Will Deacona9a1b0b2014-05-01 18:05:08 +01002413#ifdef CONFIG_PCI
Wei Chen112c8982016-06-13 17:20:17 +08002414 if (!iommu_present(&pci_bus_type)) {
2415 pci_request_acs();
Will Deacona9a1b0b2014-05-01 18:05:08 +01002416 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
Wei Chen112c8982016-06-13 17:20:17 +08002417 }
Will Deacona9a1b0b2014-05-01 18:05:08 +01002418#endif
2419
Will Deacon45ae7cf2013-06-24 18:31:25 +01002420 return 0;
2421}
2422
2423static void __exit arm_smmu_exit(void)
2424{
2425 return platform_driver_unregister(&arm_smmu_driver);
2426}
2427
Andreas Herrmannb1950b22013-10-01 13:39:05 +01002428subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002429module_exit(arm_smmu_exit);
2430
2431MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2432MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2433MODULE_LICENSE("GPL v2");